text
stringlengths 6
947k
| repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
|
---|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
'''
Copyright (C) 2012-2015 Diego Torres Milano
Created on Feb 2, 2012
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: Diego Torres Milano
'''
__version__ = '10.6.1'
import sys
import warnings
if sys.executable:
if 'monkeyrunner' in sys.executable:
warnings.warn(
'''
You should use a 'python' interpreter, not 'monkeyrunner' for this module
''', RuntimeWarning)
import subprocess
import re
import socket
import os
import types
import time
import signal
import copy
import pickle
import platform
import xml.parsers.expat
import unittest
from com.dtmilano.android.common import _nd, _nh, _ns, obtainPxPy, obtainVxVy,\
obtainVwVh, obtainAdbPath
from com.dtmilano.android.window import Window
from com.dtmilano.android.adb import adbclient
from com.dtmilano.android.uiautomator.uiautomatorhelper import UiAutomatorHelper
DEBUG = False
DEBUG_DEVICE = DEBUG and False
DEBUG_RECEIVED = DEBUG and False
DEBUG_TREE = DEBUG and False
DEBUG_GETATTR = DEBUG and False
DEBUG_CALL = DEBUG and False
DEBUG_COORDS = DEBUG and False
DEBUG_TOUCH = DEBUG and False
DEBUG_STATUSBAR = DEBUG and False
DEBUG_WINDOWS = DEBUG and False
DEBUG_BOUNDS = DEBUG and False
DEBUG_DISTANCE = DEBUG and False
DEBUG_MULTI = DEBUG and False
DEBUG_VIEW = DEBUG and False
DEBUG_VIEW_FACTORY = DEBUG and False
DEBUG_CHANGE_LANGUAGE = DEBUG and False
WARNINGS = False
VIEW_SERVER_HOST = 'localhost'
VIEW_SERVER_PORT = 4939
ADB_DEFAULT_PORT = 5555
OFFSET = 25
''' This assumes the smallest touchable view on the screen is approximately 50px x 50px
and touches it at M{(x+OFFSET, y+OFFSET)} '''
USE_ADB_CLIENT_TO_GET_BUILD_PROPERTIES = True
''' Use C{AdbClient} to obtain the needed properties. If this is
C{False} then C{adb shell getprop} is used '''
USE_PHYSICAL_DISPLAY_INFO = True
''' Use C{dumpsys display} to obtain display properties. If this is
C{False} then C{USE_ADB_CLIENT_TO_GET_BUILD_PROPERTIES} is used '''
SKIP_CERTAIN_CLASSES_IN_GET_XY_ENABLED = False
''' Skips some classes related with the Action Bar and the PhoneWindow$DecorView in the
coordinates calculation
@see: L{View.getXY()} '''
VIEW_CLIENT_TOUCH_WORKAROUND_ENABLED = False
''' Under some conditions the touch event should be longer [t(DOWN) << t(UP)]. C{True} enables a
workaround to delay the events.'''
# some device properties
VERSION_SDK_PROPERTY = 'ro.build.version.sdk'
VERSION_RELEASE_PROPERTY = 'ro.build.version.release'
# some constants for the attributes
ID_PROPERTY = 'mID'
ID_PROPERTY_UI_AUTOMATOR = 'uniqueId'
TEXT_PROPERTY = 'text:mText'
TEXT_PROPERTY_API_10 = 'mText'
TEXT_PROPERTY_UI_AUTOMATOR = 'text'
WS = u"\xfe" # the whitespace replacement char for TEXT_PROPERTY
TAG_PROPERTY = 'getTag()'
LEFT_PROPERTY = 'layout:mLeft'
LEFT_PROPERTY_API_8 = 'mLeft'
TOP_PROPERTY = 'layout:mTop'
TOP_PROPERTY_API_8 = 'mTop'
WIDTH_PROPERTY = 'layout:getWidth()'
WIDTH_PROPERTY_API_8 = 'getWidth()'
HEIGHT_PROPERTY = 'layout:getHeight()'
HEIGHT_PROPERTY_API_8 = 'getHeight()'
GET_VISIBILITY_PROPERTY = 'getVisibility()'
LAYOUT_TOP_MARGIN_PROPERTY = 'layout:layout_topMargin'
IS_FOCUSED_PROPERTY_UI_AUTOMATOR = 'focused'
IS_FOCUSED_PROPERTY = 'focus:isFocused()'
# visibility
VISIBLE = 0x0
INVISIBLE = 0x4
GONE = 0x8
RegexType = type(re.compile(''))
IP_RE = re.compile('^(\d{1,3}\.){3}\d{1,3}$')
ID_RE = re.compile('id/([^/]*)(/(\d+))?')
class ViewNotFoundException(Exception):
'''
ViewNotFoundException is raised when a View is not found.
'''
def __init__(self, attr, value, root):
if isinstance(value, RegexType):
msg = "Couldn't find View with %s that matches '%s' in tree with root=%s" % (attr, value.pattern, root)
else:
msg = "Couldn't find View with %s='%s' in tree with root=%s" % (attr, value, root)
super(Exception, self).__init__(msg)
class View:
'''
View class
'''
@staticmethod
def factory(arg1, arg2, version=-1, forceviewserveruse=False, windowId=None):
'''
View factory
@type arg1: ClassType or dict
@type arg2: View instance or AdbClient
'''
if DEBUG_VIEW_FACTORY:
print >> sys.stderr, "View.factory(%s, %s, %s, %s)" % (arg1, arg2, version, forceviewserveruse)
if type(arg1) == types.ClassType:
cls = arg1
attrs = None
else:
cls = None
attrs = arg1
if isinstance(arg2, View):
view = arg2
device = None
else:
device = arg2
view = None
if attrs and attrs.has_key('class'):
clazz = attrs['class']
if DEBUG_VIEW_FACTORY:
print >> sys.stderr, " View.factory: creating View with specific class: %s" % clazz
if clazz == 'android.widget.TextView':
return TextView(attrs, device, version, forceviewserveruse, windowId)
elif clazz == 'android.widget.EditText':
return EditText(attrs, device, version, forceviewserveruse, windowId)
elif clazz == 'android.widget.ListView':
return ListView(attrs, device, version, forceviewserveruse, windowId)
else:
return View(attrs, device, version, forceviewserveruse, windowId)
elif cls:
if view:
return cls.__copy(view)
else:
return cls(attrs, device, version, forceviewserveruse, windowId)
elif view:
return copy.copy(view)
else:
if DEBUG_VIEW_FACTORY:
print >> sys.stderr, " View.factory: creating generic View"
return View(attrs, device, version, forceviewserveruse, windowId)
@classmethod
def __copy(cls, view):
'''
Copy constructor
'''
return cls(view.map, view.device, view.version, view.forceviewserveruse, view.windowId)
def __init__(self, _map, device, version=-1, forceviewserveruse=False, windowId=None):
'''
Constructor
@type _map: map
@param _map: the map containing the (attribute, value) pairs
@type device: AdbClient
@param device: the device containing this View
@type version: int
@param version: the Android SDK version number of the platform where this View belongs. If
this is C{-1} then the Android SDK version will be obtained in this
constructor.
@type forceviewserveruse: boolean
@param forceviewserveruse: Force the use of C{ViewServer} even if the conditions were given
to use C{UiAutomator}.
'''
if DEBUG_VIEW:
print >> sys.stderr, "View.__init__(%s, %s, %s, %s)" % ("map" if _map is not None else None, device, version, forceviewserveruse)
if _map:
print >> sys.stderr, " map:", type(_map)
for attr, val in _map.iteritems():
if len(val) > 50:
val = val[:50] + "..."
print >> sys.stderr, " %s=%s" % (attr, val)
self.map = _map
''' The map that contains the C{attr},C{value} pairs '''
self.device = device
''' The AdbClient '''
self.children = []
''' The children of this View '''
self.parent = None
''' The parent of this View '''
self.windows = {}
self.currentFocus = None
''' The current focus '''
self.windowId = windowId
''' The window this view resides '''
self.build = {}
''' Build properties '''
self.version = version
''' API version number '''
self.forceviewserveruse = forceviewserveruse
''' Force ViewServer use '''
self.uiScrollable = None
''' If this is a scrollable View this keeps the L{UiScrollable} object '''
self.target = False
''' Is this a touch target zone '''
if version != -1:
self.build[VERSION_SDK_PROPERTY] = version
else:
try:
if USE_ADB_CLIENT_TO_GET_BUILD_PROPERTIES:
self.build[VERSION_SDK_PROPERTY] = int(device.getProperty(VERSION_SDK_PROPERTY))
else:
self.build[VERSION_SDK_PROPERTY] = int(device.shell('getprop ' + VERSION_SDK_PROPERTY)[:-2])
except:
self.build[VERSION_SDK_PROPERTY] = -1
version = self.build[VERSION_SDK_PROPERTY]
self.useUiAutomator = (version >= 16) and not forceviewserveruse
''' Whether to use UIAutomator or ViewServer '''
self.idProperty = None
''' The id property depending on the View attribute format '''
self.textProperty = None
''' The text property depending on the View attribute format '''
self.tagProperty = None
''' The tag property depending on the View attribute format '''
self.leftProperty = None
''' The left property depending on the View attribute format '''
self.topProperty = None
''' The top property depending on the View attribute format '''
self.widthProperty = None
''' The width property depending on the View attribute format '''
self.heightProperty = None
''' The height property depending on the View attribute format '''
self.isFocusedProperty = None
''' The focused property depending on the View attribute format '''
if version >= 16 and self.useUiAutomator:
self.idProperty = ID_PROPERTY_UI_AUTOMATOR
self.textProperty = TEXT_PROPERTY_UI_AUTOMATOR
self.leftProperty = LEFT_PROPERTY
self.topProperty = TOP_PROPERTY
self.widthProperty = WIDTH_PROPERTY
self.heightProperty = HEIGHT_PROPERTY
self.isFocusedProperty = IS_FOCUSED_PROPERTY_UI_AUTOMATOR
elif version > 10 and (version < 16 or self.useUiAutomator):
self.idProperty = ID_PROPERTY
self.textProperty = TEXT_PROPERTY
self.tagProperty = TAG_PROPERTY
self.leftProperty = LEFT_PROPERTY
self.topProperty = TOP_PROPERTY
self.widthProperty = WIDTH_PROPERTY
self.heightProperty = HEIGHT_PROPERTY
self.isFocusedProperty = IS_FOCUSED_PROPERTY
elif version == 10:
self.idProperty = ID_PROPERTY
self.textProperty = TEXT_PROPERTY_API_10
self.tagProperty = TAG_PROPERTY
self.leftProperty = LEFT_PROPERTY
self.topProperty = TOP_PROPERTY
self.widthProperty = WIDTH_PROPERTY
self.heightProperty = HEIGHT_PROPERTY
self.isFocusedProperty = IS_FOCUSED_PROPERTY
elif version >= 7 and version < 10:
self.idProperty = ID_PROPERTY
self.textProperty = TEXT_PROPERTY_API_10
self.tagProperty = TAG_PROPERTY
self.leftProperty = LEFT_PROPERTY_API_8
self.topProperty = TOP_PROPERTY_API_8
self.widthProperty = WIDTH_PROPERTY_API_8
self.heightProperty = HEIGHT_PROPERTY_API_8
self.isFocusedProperty = IS_FOCUSED_PROPERTY
elif version > 0 and version < 7:
self.idProperty = ID_PROPERTY
self.textProperty = TEXT_PROPERTY_API_10
self.tagProperty = TAG_PROPERTY
self.leftProperty = LEFT_PROPERTY
self.topProperty = TOP_PROPERTY
self.widthProperty = WIDTH_PROPERTY
self.heightProperty = HEIGHT_PROPERTY
self.isFocusedProperty = IS_FOCUSED_PROPERTY
elif version == -1:
self.idProperty = ID_PROPERTY
self.textProperty = TEXT_PROPERTY
self.tagProperty = TAG_PROPERTY
self.leftProperty = LEFT_PROPERTY
self.topProperty = TOP_PROPERTY
self.widthProperty = WIDTH_PROPERTY
self.heightProperty = HEIGHT_PROPERTY
self.isFocusedProperty = IS_FOCUSED_PROPERTY
else:
self.idProperty = ID_PROPERTY
self.textProperty = TEXT_PROPERTY
self.tagProperty = TAG_PROPERTY
self.leftProperty = LEFT_PROPERTY
self.topProperty = TOP_PROPERTY
self.widthProperty = WIDTH_PROPERTY
self.heightProperty = HEIGHT_PROPERTY
self.isFocusedProperty = IS_FOCUSED_PROPERTY
try:
if self.isScrollable():
self.uiScrollable = UiScrollable(self)
except AttributeError:
pass
def __getitem__(self, key):
return self.map[key]
def __getattr__(self, name):
if DEBUG_GETATTR:
print >>sys.stderr, "__getattr__(%s) version: %d" % (name, self.build[VERSION_SDK_PROPERTY])
# NOTE:
# I should try to see if 'name' is a defined method
# but it seems that if I call locals() here an infinite loop is entered
if self.map.has_key(name):
r = self.map[name]
elif self.map.has_key(name + '()'):
# the method names are stored in the map with their trailing '()'
r = self.map[name + '()']
elif name.count("_") > 0:
mangledList = self.allPossibleNamesWithColon(name)
mangledName = self.intersection(mangledList, self.map.keys())
if len(mangledName) > 0 and self.map.has_key(mangledName[0]):
r = self.map[mangledName[0]]
else:
# Default behavior
raise AttributeError, name
elif name.startswith('is'):
# try removing 'is' prefix
if DEBUG_GETATTR:
print >> sys.stderr, " __getattr__: trying without 'is' prefix"
suffix = name[2:].lower()
if self.map.has_key(suffix):
r = self.map[suffix]
else:
# Default behavior
raise AttributeError, name
elif name.startswith('get'):
# try removing 'get' prefix
if DEBUG_GETATTR:
print >> sys.stderr, " __getattr__: trying without 'get' prefix"
suffix = name[3:].lower()
if self.map.has_key(suffix):
r = self.map[suffix]
else:
# Default behavior
raise AttributeError, name
elif name == 'getResourceId':
if DEBUG_GETATTR:
print >> sys.stderr, " __getattr__: getResourceId"
if self.map.has_key('resource-id'):
r = self.map['resource-id']
else:
# Default behavior
raise AttributeError, name
else:
# Default behavior
raise AttributeError, name
# if the method name starts with 'is' let's assume its return value is boolean
# if name[:2] == 'is':
# r = True if r == 'true' else False
if r == 'true':
r = True
elif r == 'false':
r = False
# this should not cached in some way
def innerMethod():
if DEBUG_GETATTR:
print >>sys.stderr, "innerMethod: %s returning %s" % (innerMethod.__name__, r)
return r
innerMethod.__name__ = name
# this should work, but then there's problems with the arguments of innerMethod
# even if innerMethod(self) is added
#setattr(View, innerMethod.__name__, innerMethod)
#setattr(self, innerMethod.__name__, innerMethod)
return innerMethod
def __call__(self, *args, **kwargs):
if DEBUG_CALL:
print >>sys.stderr, "__call__(%s)" % (args if args else None)
def getClass(self):
'''
Gets the L{View} class
@return: the L{View} class or C{None} if not defined
'''
try:
return self.map['class']
except:
return None
def getId(self):
'''
Gets the L{View} Id
@return: the L{View} C{Id} or C{None} if not defined
@see: L{getUniqueId()}
'''
try:
return self.map['resource-id']
except:
pass
try:
return self.map[self.idProperty]
except:
return None
def getContentDescription(self):
'''
Gets the content description.
'''
try:
return self.map['content-desc']
except:
return None
def getTag(self):
'''
Gets the tag.
'''
try:
return self.map[self.tagProperty]
except:
return None
def getParent(self):
'''
Gets the parent.
'''
return self.parent
def getChildren(self):
'''
Gets the children of this L{View}.
'''
return self.children
def getText(self):
'''
Gets the text attribute.
@return: the text attribute or C{None} if not defined
'''
try:
return self.map[self.textProperty]
except Exception:
return None
def getHeight(self):
'''
Gets the height.
'''
if self.useUiAutomator:
return self.map['bounds'][1][1] - self.map['bounds'][0][1]
else:
try:
return int(self.map[self.heightProperty])
except:
return 0
def getWidth(self):
'''
Gets the width.
'''
if self.useUiAutomator:
return self.map['bounds'][1][0] - self.map['bounds'][0][0]
else:
try:
return int(self.map[self.widthProperty])
except:
return 0
def getUniqueId(self):
'''
Gets the unique Id of this View.
@see: L{ViewClient.__splitAttrs()} for a discussion on B{Unique Ids}
'''
try:
return self.map['uniqueId']
except:
return None
def getVisibility(self):
'''
Gets the View visibility
'''
try:
if self.map[GET_VISIBILITY_PROPERTY] == 'VISIBLE':
return VISIBLE
elif self.map[GET_VISIBILITY_PROPERTY] == 'INVISIBLE':
return INVISIBLE
elif self.map[GET_VISIBILITY_PROPERTY] == 'GONE':
return GONE
else:
return -2
except:
return -1
def getX(self):
'''
Gets the View X coordinate
'''
return self.getXY()[0]
def __getX(self):
'''
Gets the View X coordinate
'''
if DEBUG_COORDS:
print >>sys.stderr, "getX(%s %s ## %s)" % (self.getClass(), self.getId(), self.getUniqueId())
x = 0
if self.useUiAutomator:
x = self.map['bounds'][0][0]
else:
try:
if GET_VISIBILITY_PROPERTY in self.map and self.map[GET_VISIBILITY_PROPERTY] == 'VISIBLE':
_x = int(self.map[self.leftProperty])
if DEBUG_COORDS: print >>sys.stderr, " getX: VISIBLE adding %d" % _x
x += _x
except:
warnings.warn("View %s has no '%s' property" % (self.getId(), self.leftProperty))
if DEBUG_COORDS: print >>sys.stderr, " getX: returning %d" % (x)
return x
def getY(self):
'''
Gets the View Y coordinate
'''
return self.getXY()[1]
def __getY(self):
'''
Gets the View Y coordinate
'''
if DEBUG_COORDS:
print >>sys.stderr, "getY(%s %s ## %s)" % (self.getClass(), self.getId(), self.getUniqueId())
y = 0
if self.useUiAutomator:
y = self.map['bounds'][0][1]
else:
try:
if GET_VISIBILITY_PROPERTY in self.map and self.map[GET_VISIBILITY_PROPERTY] == 'VISIBLE':
_y = int(self.map[self.topProperty])
if DEBUG_COORDS: print >>sys.stderr, " getY: VISIBLE adding %d" % _y
y += _y
except:
warnings.warn("View %s has no '%s' property" % (self.getId(), self.topProperty))
if DEBUG_COORDS: print >>sys.stderr, " getY: returning %d" % (y)
return y
def getXY(self, debug=False):
'''
Returns the I{screen} coordinates of this C{View}.
WARNING: Don't call self.getX() or self.getY() inside this method
or it will enter an infinite loop
@return: The I{screen} coordinates of this C{View}
'''
if DEBUG_COORDS or debug:
try:
_id = self.getId()
except:
_id = "NO_ID"
print >> sys.stderr, "getXY(%s %s ## %s)" % (self.getClass(), _id, self.getUniqueId())
x = self.__getX()
y = self.__getY()
if self.useUiAutomator:
return (x, y)
parent = self.parent
if DEBUG_COORDS: print >> sys.stderr, " getXY: x=%s y=%s parent=%s" % (x, y, parent.getUniqueId() if parent else "None")
hx = 0
''' Hierarchy accumulated X '''
hy = 0
''' Hierarchy accumulated Y '''
if DEBUG_COORDS: print >> sys.stderr, " getXY: not using UiAutomator, calculating parent coordinates"
while parent != None:
if DEBUG_COORDS: print >> sys.stderr, " getXY: parent: %s %s <<<<" % (parent.getClass(), parent.getId())
if SKIP_CERTAIN_CLASSES_IN_GET_XY_ENABLED:
if parent.getClass() in [ 'com.android.internal.widget.ActionBarView',
'com.android.internal.widget.ActionBarContextView',
'com.android.internal.view.menu.ActionMenuView',
'com.android.internal.policy.impl.PhoneWindow$DecorView' ]:
if DEBUG_COORDS: print >> sys.stderr, " getXY: skipping %s %s (%d,%d)" % (parent.getClass(), parent.getId(), parent.__getX(), parent.__getY())
parent = parent.parent
continue
if DEBUG_COORDS: print >> sys.stderr, " getXY: parent=%s x=%d hx=%d y=%d hy=%d" % (parent.getId(), x, hx, y, hy)
hx += parent.__getX()
hy += parent.__getY()
parent = parent.parent
(wvx, wvy) = self.__dumpWindowsInformation(debug=debug)
if DEBUG_COORDS or debug:
print >>sys.stderr, " getXY: wv=(%d, %d) (windows information)" % (wvx, wvy)
try:
if self.windowId:
fw = self.windows[self.windowId]
else:
fw = self.windows[self.currentFocus]
if DEBUG_STATUSBAR:
print >> sys.stderr, " getXY: focused window=", fw
print >> sys.stderr, " getXY: deciding whether to consider statusbar offset because current focused windows is at", (fw.wvx, fw.wvy), "parent", (fw.px, fw.py)
except KeyError:
fw = None
(sbw, sbh) = self.__obtainStatusBarDimensionsIfVisible()
if DEBUG_COORDS or debug:
print >>sys.stderr, " getXY: sb=(%d, %d) (statusbar dimensions)" % (sbw, sbh)
statusBarOffset = 0
pwx = 0
pwy = 0
if fw:
if DEBUG_COORDS:
print >>sys.stderr, " getXY: focused window=", fw, "sb=", (sbw, sbh)
if fw.wvy <= sbh: # it's very unlikely that fw.wvy < sbh, that is a window over the statusbar
if DEBUG_STATUSBAR: print >>sys.stderr, " getXY: yes, considering offset=", sbh
statusBarOffset = sbh
else:
if DEBUG_STATUSBAR: print >>sys.stderr, " getXY: no, ignoring statusbar offset fw.wvy=", fw.wvy, ">", sbh
if fw.py == fw.wvy:
if DEBUG_STATUSBAR: print >>sys.stderr, " getXY: but wait, fw.py == fw.wvy so we are adjusting by ", (fw.px, fw.py)
pwx = fw.px
pwy = fw.py
else:
if DEBUG_STATUSBAR: print >>sys.stderr, " getXY: fw.py=%d <= fw.wvy=%d, no adjustment" % (fw.py, fw.wvy)
if DEBUG_COORDS or DEBUG_STATUSBAR or debug:
print >>sys.stderr, " getXY: returning (%d, %d) ***" % (x+hx+wvx+pwx, y+hy+wvy-statusBarOffset+pwy)
print >>sys.stderr, " x=%d+%d+%d+%d" % (x,hx,wvx,pwx)
print >>sys.stderr, " y=%d+%d+%d-%d+%d" % (y,hy,wvy,statusBarOffset,pwy)
return (x+hx+wvx+pwx, y+hy+wvy-statusBarOffset+pwy)
def getCoords(self):
'''
Gets the coords of the View
@return: A tuple containing the View's coordinates ((L, T), (R, B))
'''
if DEBUG_COORDS:
print >>sys.stderr, "getCoords(%s %s ## %s)" % (self.getClass(), self.getId(), self.getUniqueId())
(x, y) = self.getXY();
w = self.getWidth()
h = self.getHeight()
return ((x, y), (x+w, y+h))
def getPositionAndSize(self):
'''
Gets the position and size (X,Y, W, H)
@return: A tuple containing the View's coordinates (X, Y, W, H)
'''
(x, y) = self.getXY();
w = self.getWidth()
h = self.getHeight()
return (x, y, w, h)
def getBounds(self):
'''
Gets the View bounds
'''
if 'bounds' in self.map:
return self.map['bounds']
else:
return self.getCoords()
def getCenter(self):
'''
Gets the center coords of the View
@author: U{Dean Morin <https://github.com/deanmorin>}
'''
(left, top), (right, bottom) = self.getCoords()
x = left + (right - left) / 2
y = top + (bottom - top) / 2
return (x, y)
def __obtainStatusBarDimensionsIfVisible(self):
sbw = 0
sbh = 0
for winId in self.windows:
w = self.windows[winId]
if DEBUG_COORDS: print >> sys.stderr, " __obtainStatusBarDimensionsIfVisible: w=", w, " w.activity=", w.activity, "%%%"
if w.activity == 'StatusBar':
if w.wvy == 0 and w.visibility == 0:
if DEBUG_COORDS: print >> sys.stderr, " __obtainStatusBarDimensionsIfVisible: statusBar=", (w.wvw, w.wvh)
sbw = w.wvw
sbh = w.wvh
break
return (sbw, sbh)
def __obtainVxVy(self, m):
return obtainVxVy(m)
def __obtainVwVh(self, m):
return obtainVwVh(m)
def __obtainPxPy(self, m):
return obtainPxPy(m)
def __dumpWindowsInformation(self, debug=False):
self.windows = {}
self.currentFocus = None
dww = self.device.shell('dumpsys window windows')
if DEBUG_WINDOWS or debug: print >> sys.stderr, dww
lines = dww.splitlines()
widRE = re.compile('^ *Window #%s Window{%s (u\d+ )?%s?.*}:' %
(_nd('num'), _nh('winId'), _ns('activity', greedy=True)))
currentFocusRE = re.compile('^ mCurrentFocus=Window{%s .*' % _nh('winId'))
viewVisibilityRE = re.compile(' mViewVisibility=0x%s ' % _nh('visibility'))
# This is for 4.0.4 API-15
containingFrameRE = re.compile('^ *mContainingFrame=\[%s,%s\]\[%s,%s\] mParentFrame=\[%s,%s\]\[%s,%s\]' %
(_nd('cx'), _nd('cy'), _nd('cw'), _nd('ch'), _nd('px'), _nd('py'), _nd('pw'), _nd('ph')))
contentFrameRE = re.compile('^ *mContentFrame=\[%s,%s\]\[%s,%s\] mVisibleFrame=\[%s,%s\]\[%s,%s\]' %
(_nd('x'), _nd('y'), _nd('w'), _nd('h'), _nd('vx'), _nd('vy'), _nd('vx1'), _nd('vy1')))
# This is for 4.1 API-16
framesRE = re.compile('^ *Frames: containing=\[%s,%s\]\[%s,%s\] parent=\[%s,%s\]\[%s,%s\]' %
(_nd('cx'), _nd('cy'), _nd('cw'), _nd('ch'), _nd('px'), _nd('py'), _nd('pw'), _nd('ph')))
contentRE = re.compile('^ *content=\[%s,%s\]\[%s,%s\] visible=\[%s,%s\]\[%s,%s\]' %
(_nd('x'), _nd('y'), _nd('w'), _nd('h'), _nd('vx'), _nd('vy'), _nd('vx1'), _nd('vy1')))
policyVisibilityRE = re.compile('mPolicyVisibility=%s ' % _ns('policyVisibility', greedy=True))
for l in range(len(lines)):
m = widRE.search(lines[l])
if m:
num = int(m.group('num'))
winId = m.group('winId')
activity = m.group('activity')
wvx = 0
wvy = 0
wvw = 0
wvh = 0
px = 0
py = 0
visibility = -1
policyVisibility = 0x0
for l2 in range(l+1, len(lines)):
m = widRE.search(lines[l2])
if m:
l += (l2-1)
break
m = viewVisibilityRE.search(lines[l2])
if m:
visibility = int(m.group('visibility'))
if DEBUG_COORDS: print >> sys.stderr, "__dumpWindowsInformation: visibility=", visibility
if self.build[VERSION_SDK_PROPERTY] >= 17:
m = framesRE.search(lines[l2])
if m:
px, py = obtainPxPy(m)
m = contentRE.search(lines[l2+2])
if m:
wvx, wvy = obtainVxVy(m)
wvw, wvh = obtainVwVh(m)
elif self.build[VERSION_SDK_PROPERTY] >= 16:
m = framesRE.search(lines[l2])
if m:
px, py = self.__obtainPxPy(m)
m = contentRE.search(lines[l2+1])
if m:
# FIXME: the information provided by 'dumpsys window windows' in 4.2.1 (API 16)
# when there's a system dialog may not be correct and causes the View coordinates
# be offset by this amount, see
# https://github.com/dtmilano/AndroidViewClient/issues/29
wvx, wvy = self.__obtainVxVy(m)
wvw, wvh = self.__obtainVwVh(m)
elif self.build[VERSION_SDK_PROPERTY] == 15:
m = containingFrameRE.search(lines[l2])
if m:
px, py = self.__obtainPxPy(m)
m = contentFrameRE.search(lines[l2+1])
if m:
wvx, wvy = self.__obtainVxVy(m)
wvw, wvh = self.__obtainVwVh(m)
elif self.build[VERSION_SDK_PROPERTY] == 10:
m = containingFrameRE.search(lines[l2])
if m:
px, py = self.__obtainPxPy(m)
m = contentFrameRE.search(lines[l2+1])
if m:
wvx, wvy = self.__obtainVxVy(m)
wvw, wvh = self.__obtainVwVh(m)
else:
warnings.warn("Unsupported Android version %d" % self.build[VERSION_SDK_PROPERTY])
#print >> sys.stderr, "Searching policyVisibility in", lines[l2]
m = policyVisibilityRE.search(lines[l2])
if m:
policyVisibility = 0x0 if m.group('policyVisibility') == 'true' else 0x8
self.windows[winId] = Window(num, winId, activity, wvx, wvy, wvw, wvh, px, py, visibility + policyVisibility)
else:
m = currentFocusRE.search(lines[l])
if m:
self.currentFocus = m.group('winId')
if self.windowId and self.windowId in self.windows and self.windows[self.windowId].visibility == 0:
w = self.windows[self.windowId]
return (w.wvx, w.wvy)
elif self.currentFocus in self.windows and self.windows[self.currentFocus].visibility == 0:
if DEBUG_COORDS or debug:
print >> sys.stderr, "__dumpWindowsInformation: focus=", self.currentFocus
print >> sys.stderr, "__dumpWindowsInformation:", self.windows[self.currentFocus]
w = self.windows[self.currentFocus]
return (w.wvx, w.wvy)
else:
if DEBUG_COORDS: print >> sys.stderr, "__dumpWindowsInformation: (0,0)"
return (0,0)
def touch(self, eventType=adbclient.DOWN_AND_UP, deltaX=0, deltaY=0):
'''
Touches the center of this C{View}. The touch can be displaced from the center by
using C{deltaX} and C{deltaY} values.
@param eventType: The event type
@type eventType: L{adbclient.DOWN}, L{adbclient.UP} or L{adbclient.DOWN_AND_UP}
@param deltaX: Displacement from center (X axis)
@type deltaX: int
@param deltaY: Displacement from center (Y axis)
@type deltaY: int
'''
(x, y) = self.getCenter()
if deltaX:
x += deltaX
if deltaY:
y += deltaY
if DEBUG_TOUCH:
print >>sys.stderr, "should touch @ (%d, %d)" % (x, y)
if VIEW_CLIENT_TOUCH_WORKAROUND_ENABLED and eventType == adbclient.DOWN_AND_UP:
if WARNINGS:
print >> sys.stderr, "ViewClient: touch workaround enabled"
self.device.touch(x, y, eventType=adbclient.DOWN)
time.sleep(50/1000.0)
self.device.touch(x+10, y+10, eventType=adbclient.UP)
else:
self.device.touch(x, y, eventType=eventType)
def longTouch(self, duration=2000):
'''
Long touches this C{View}
@param duration: duration in ms
'''
(x, y) = self.getCenter()
# FIXME: get orientation
self.device.longTouch(x, y, duration, orientation=-1)
def allPossibleNamesWithColon(self, name):
l = []
for _ in range(name.count("_")):
name = name.replace("_", ":", 1)
l.append(name)
return l
def intersection(self, l1, l2):
return list(set(l1) & set(l2))
def containsPoint(self, (x, y)):
(X, Y, W, H) = self.getPositionAndSize()
return (((x >= X) and (x <= (X+W)) and ((y >= Y) and (y <= (Y+H)))))
def add(self, child):
'''
Adds a child
@type child: View
@param child: The child to add
'''
child.parent = self
self.children.append(child)
def isClickable(self):
return self.__getattr__('isClickable')()
def isFocused(self):
'''
Gets the focused value
@return: the focused value. If the property cannot be found returns C{False}
'''
try:
return True if self.map[self.isFocusedProperty].lower() == 'true' else False
except Exception:
return False
def variableNameFromId(self):
_id = self.getId()
if _id:
var = _id.replace('.', '_').replace(':', '___').replace('/', '_')
else:
_id = self.getUniqueId()
m = ID_RE.match(_id)
if m:
var = m.group(1)
if m.group(3):
var += m.group(3)
if re.match('^\d', var):
var = 'id_' + var
return var
def setTarget(self, target):
self.target = target
def isTarget(self):
return self.target
def writeImageToFile(self, filename, _format="PNG"):
'''
Write the View image to the specified filename in the specified format.
@type filename: str
@param filename: Absolute path and optional filename receiving the image. If this points to
a directory, then the filename is determined by this View unique ID and
format extension.
@type _format: str
@param _format: Image format (default format is PNG)
'''
if not os.path.isabs(filename):
raise ValueError("writeImageToFile expects an absolute path (fielname='%s')" % filename)
if os.path.isdir(filename):
filename = os.path.join(filename, self.variableNameFromId() + '.' + _format.lower())
if DEBUG:
print >> sys.stderr, "writeImageToFile: saving image to '%s' in %s format" % (filename, _format)
#self.device.takeSnapshot().getSubImage(self.getPositionAndSize()).writeToFile(filename, _format)
# crop:
# im.crop(box) ⇒ image
# Returns a copy of a rectangular region from the current image.
# The box is a 4-tuple defining the left, upper, right, and lower pixel coordinate.
((l, t), (r, b)) = self.getCoords()
box = (l, t, r, b)
if DEBUG:
print >> sys.stderr, "writeImageToFile: cropping", box, " reconnect=", self.device.reconnect
self.device.takeSnapshot(reconnect=self.device.reconnect).crop(box).save(filename, _format)
def __smallStr__(self):
__str = unicode("View[", 'utf-8', 'replace')
if "class" in self.map:
__str += " class=" + self.map['class']
__str += " id=%s" % self.getId()
__str += " ] parent="
if self.parent and "class" in self.parent.map:
__str += "%s" % self.parent.map["class"]
else:
__str += "None"
return __str
def __tinyStr__(self):
__str = unicode("View[", 'utf-8', 'replace')
if "class" in self.map:
__str += " class=" + re.sub('.*\.', '', self.map['class'])
__str += " id=%s" % self.getId()
__str += " ]"
return __str
def __microStr__(self):
__str = unicode('', 'utf-8', 'replace')
if "class" in self.map:
__str += re.sub('.*\.', '', self.map['class'])
_id = self.getId().replace('id/no_id/', '-')
__str += _id
((L, T), (R, B)) = self.getCoords()
__str += '@%04d%04d%04d%04d' % (L, T, R, B)
__str += ''
return __str
def __str__(self):
__str = unicode("View[", 'utf-8', 'replace')
if "class" in self.map:
__str += " class=" + self.map["class"].__str__() + " "
for a in self.map:
__str += a + "="
# decode() works only on python's 8-bit strings
if isinstance(self.map[a], unicode):
__str += self.map[a]
else:
__str += unicode(str(self.map[a]), 'utf-8', errors='replace')
__str += " "
__str += "] parent="
if self.parent:
if "class" in self.parent.map:
__str += "%s" % self.parent.map["class"]
else:
__str += self.parent.getId().__str__()
else:
__str += "None"
return __str
class TextView(View):
'''
TextView class.
'''
pass
class EditText(TextView):
'''
EditText class.
'''
def type(self, text, alreadyTouched=False):
if not text:
return
if not alreadyTouched:
self.touch()
time.sleep(0.5)
self.device.type(text)
time.sleep(0.5)
def setText(self, text):
"""
This function makes sure that any previously entered text is deleted before
setting the value of the field.
"""
if self.text() == text:
return
self.touch()
guardrail = 0
maxSize = len(self.text()) + 1
while maxSize > guardrail:
guardrail += 1
self.device.press('KEYCODE_DEL', adbclient.DOWN_AND_UP)
self.device.press('KEYCODE_FORWARD_DEL', adbclient.DOWN_AND_UP)
self.type(text, alreadyTouched=True)
def backspace(self):
self.touch()
time.sleep(1)
self.device.press('KEYCODE_DEL', adbclient.DOWN_AND_UP)
class UiDevice():
'''
Provides access to state information about the device. You can also use this class to simulate
user actions on the device, such as pressing the d-pad or pressing the Home and Menu buttons.
'''
def __init__(self, vc):
self.vc = vc
self.device = self.vc.device
def openNotification(self):
'''
Opens the notification shade.
'''
# the tablet has a different Notification/Quick Settings bar depending on x
w13 = self.device.display['width'] / 3
s = (w13, 0)
e = (w13, self.device.display['height']/2)
self.device.drag(s, e, 500, 20, -1)
self.vc.sleep(1)
self.vc.dump(-1)
def openQuickSettings(self):
'''
Opens the Quick Settings shade.
'''
# the tablet has a different Notification/Quick Settings bar depending on x
w23 = 2 * self.device.display['width'] / 3
s = (w23, 0)
e = (w23, self.device.display['height']/2)
self.device.drag(s, e, 500, 20, -1)
self.vc.sleep(1)
if self.vc.getSdkVersion() >= 20:
self.device.drag(s, e, 500, 20, -1)
self.vc.sleep(1)
self.vc.dump(-1)
def openQuickSettingsSettings(self):
'''
Opens the Quick Settings shade and then tries to open Settings from there.
'''
STATUS_BAR_SETTINGS_SETTINGS_BUTTON = [
u"Settings", u"Cài đặt", u"Instellingen", u"Կարգավորումներ", u"设置", u"Nastavitve", u"සැකසීම්", u"Ayarlar",
u"Setelan", u"Настройки", u"تنظیمات", u"Mga Setting", u"Тохиргоо", u"Configuració", u"Setări", u"Налады",
u"Einstellungen", u"პარამეტრები", u"सेटिङहरू", u"Կարգավորումներ", u"Nustatymai", u"Beállítások", u"設定",
u"सेटिंग", u"Настройки", u"Inställningar", u"設定", u"ການຕັ້ງຄ່າ", u"Configurações", u"Tetapan", u"설정",
u"ការកំណត់", u"Ajustes", u"הגדרות", u"Ustawienia", u"Nastavení", u"Ρυθμίσεις", u"Тохиргоо", u"Ayarlar",
u"Indstillinger", u"Налаштування", u"Mipangilio", u"Izilungiselelo", u"設定", u"Nastavenia", u"Paramètres",
u"ቅንብሮች", u"การตั้งค่า", u"Seaded", u"Iestatījumi", u"Innstillinger", u"Подешавања", u"الإعدادات", u"සැකසීම්",
u"Definições", u"Configuración", u"პარამეტრები", u"Postavke", u"Ayarlar", u"Impostazioni", u"Asetukset",
u"Instellings", u"Seaded", u"ការកំណត់", u"सेटिङहरू", u"Tetapan"
]
self.openQuickSettings()
# this works on API >= 20
found = False
for s in STATUS_BAR_SETTINGS_SETTINGS_BUTTON:
if DEBUG:
print >> sys.stderr, u"finding view with cd=", type(s)
view = self.vc.findViewWithContentDescription(u'''{0}'''.format(s))
if view:
found = True
view.touch()
break
if not found:
# for previous APIs, let's find the text
for s in STATUS_BAR_SETTINGS_SETTINGS_BUTTON:
if DEBUG:
print >> sys.stderr, "s=", type(s)
try:
print >> sys.stderr, "finding view with text=", u'''{0}'''.format(s)
except:
pass
view = self.vc.findViewWithText(s)
if view:
found = True
view.touch()
break
if not found:
raise ViewNotFoundException("content-description", "'Settings' or text 'Settings'", "ROOT")
self.vc.sleep(1)
self.vc.dump(window=-1)
def changeLanguage(self, languageTo):
LANGUAGE_SETTINGS = {
"en": u"Language & input",
"af": u"Taal en invoer",
"am": u"ቋንቋ እና ግቤት",
"ar": u"اللغة والإدخال",
"az": u"Dil və daxiletmə",
"az-rAZ": u"Dil və daxiletmə",
"be": u"Мова і ўвод",
"bg": u"Език и въвеждане",
"ca": u"Idioma i introducció de text",
"cs": u"Jazyk a zadávání",
"da": u"Sprog og input",
"de": u"Sprache & Eingabe",
"el": u"Γλώσσα και εισαγωγή",
"en-rGB": u"Language & input",
"en-rIN": u"Language & input",
"es": u"Idioma e introducción de texto",
"es-rUS": u"Teclado e idioma",
"et": u"Keeled ja sisestamine",
"et-rEE": u"Keeled ja sisestamine",
"fa": u"زبان و ورود اطلاعات",
"fi": u"Kieli ja syöttötapa",
"fr": u"Langue et saisie",
"fr-rCA": u"Langue et saisie",
"hi": u"भाषा और अक्षर",
"hr": u"Jezik i ulaz",
"hu": u"Nyelv és bevitel",
"hy": u"Լեզվի & ներմուծում",
"hy-rAM": u"Լեզու և ներմուծում",
"in": u"Bahasa & masukan",
"it": u"Lingua e immissione",
"iw": u"שפה וקלט",
"ja": u"言語と入力",
"ka": u"ენისა და შეყვანის პარამეტრები",
"ka-rGE": u"ენისა და შეყვანის პარამეტრები",
"km": u"ភាសា & ការបញ្ចូល",
"km-rKH": u"ភាសា & ការបញ្ចូល",
"ko": u"언어 및 키보드",
"lo": u"ພາສາ & ການປ້ອນຂໍ້ມູນ",
"lo-rLA": u"ພາສາ & ການປ້ອນຂໍ້ມູນ",
"lt": u"Kalba ir įvestis",
"lv": u"Valodas ievade",
"mn": u"Хэл & оруулах",
"mn-rMN": u"Хэл & оруулах",
"ms": u"Bahasa & input",
"ms-rMY": u"Bahasa & input",
"nb": u"Språk og inndata",
"ne": u"भाषा र इनपुट",
"ne-rNP": u"भाषा र इनपुट",
"nl": u"Taal en invoer",
"pl": u"Język, klawiatura, głos",
"pt": u"Idioma e entrada",
"pt-rPT": u"Idioma e entrada",
"ro": u"Limbă și introducere de text",
"ru": u"Язык и ввод",
"si": u"භාෂාව සහ ආදානය",
"si-rLK": u"භාෂාව සහ ආදානය",
"sk": u"Jazyk & vstup",
"sl": u"Jezik in vnos",
"sr": u"Језик и унос",
"sv": u"Språk och inmatning",
"sw": u"Lugha, Kibodi na Sauti",
"th": u"ภาษาและการป้อนข้อมูล",
"tl": u"Wika at input",
"tr": u"Dil ve giriş",
"uk": u"Мова та введення",
"vi": u"Ngôn ngữ & phương thức nhập",
"zh-rCN": u"语言和输入法",
"zh-rHK": u"語言與輸入裝置",
"zh-rTW": u"語言與輸入設定",
"zu": u"Ulimi & ukufakwa",
}
PHONE_LANGUAGE = {
"en": u"Language",
"af": u"Taal",
"am": u"ቋንቋ",
"ar": u"اللغة",
"az": u"Dil",
"az-rAZ": u"Dil",
"be": u"Мова",
"bg": u"Език",
"ca": u"Idioma",
"cs": u"Jazyk",
"da": u"Sprog",
"de": u"Sprache",
"el": u"Γλώσσα",
"en-rGB": u"Language",
"en-rIN": u"Language",
"es": u"Idioma",
"es-rUS": u"Idioma",
"et": u"Keel",
"et-rEE": u"Keel",
"fa": u"زبان",
"fi": u"Kieli",
"fr": u"Langue",
"fr-rCA": u"Langue",
"hi": u"भाषा",
"hr": u"Jezik",
"hu": u"Nyelv",
"hy": u"Lեզուն",
"hy-rAM": u"Lեզուն",
"in": u"Bahasa",
"it": u"Lingua",
"iw": u"שפה",
"ja": u"言語",
"ka": u"ენა",
"ka-rGE": u"ენა",
"km": u"ភាសា",
"km-rKH": u"ភាសា",
"ko": u"언어",
"lo": u"ພາສາ",
"lo-rLA": u"ພາສາ",
"lt": u"Kalba",
"lv": u"Valoda",
"mn": u"Хэл",
"mn-rMN": u"Хэл",
"ms": u"Bahasa",
"ms-rMY": u"Bahasa",
"nb": u"Språk",
"ne": u"भाषा",
"nl": u"Taal",
"pl": u"Język",
"pt": u"Idioma",
"pt-rPT": u"Idioma",
"ro": u"Limba",
"ru": u"Язык",
"si": u"භාෂාව",
"si-rLK": u"භාෂාව",
"sk": u"Jazyk",
"sl": u"Jezik",
"sr": u"Језик",
"sv": u"Språk",
"sw": u"Lugha",
"th": u"ภาษา",
"tl": u"Wika",
"tr": u"Dil",
"uk": u"Мова",
"vi": u"Ngôn ngữ",
"zh-rCN": u"语言",
"zh-rHK": u"語言",
"zh-rTW": u"語言",
"zu": u"Ulimi",
}
LANGUAGES = {
"en": u"English (United States)",
"es-rUS": u"Español (Estados Unidos)",
"af": u"Afrikaans", # Afrikaans
"af-rNA": u"Afrikaans (Namibië)", # Afrikaans (Namibia)
"af-rZA": u"Afrikaans (Suid-Afrika)", # Afrikaans (South Africa)
"agq": u"Aghem", # Aghem
"agq-rCM": u"Aghem (Kàmàlûŋ)", # Aghem (Cameroon)
"ak": u"Akan", # Akan
"ak-rGH": u"Akan (Gaana)", # Akan (Ghana)
"am": u"አማርኛ", # Amharic
"am-rET": u"አማርኛ (ኢትዮጵያ)", # Amharic (Ethiopia)
"ar": u"العربية", # Arabic
"ar_001": u"العربية (العالم)", # Arabic (World)
"ar-rAE": u"العربية (الإمارات العربية المتحدة)", # Arabic (United Arab Emirates)
"ar-rBH": u"العربية (البحرين)", # Arabic (Bahrain)
"ar-rDJ": u"العربية (جيبوتي)", # Arabic (Djibouti)
"ar-rDZ": u"العربية (الجزائر)", # Arabic (Algeria)
"ar-rEG": u"العربية (مصر)", # Arabic (Egypt)
"ar-rEH": u"العربية (الصحراء الغربية)", # Arabic (Western Sahara)
"ar-rER": u"العربية (أريتريا)", # Arabic (Eritrea)
"ar-rIL": u"العربية (إسرائيل)", # Arabic (Israel)
"ar-rIQ": u"العربية (العراق)", # Arabic (Iraq)
"ar-rJO": u"العربية (الأردن)", # Arabic (Jordan)
"ar-rKM": u"العربية (جزر القمر)", # Arabic (Comoros)
"ar-rKW": u"العربية (الكويت)", # Arabic (Kuwait)
"ar-rLB": u"العربية (لبنان)", # Arabic (Lebanon)
"ar-rLY": u"العربية (ليبيا)", # Arabic (Libya)
"ar-rMA": u"العربية (المغرب)", # Arabic (Morocco)
"ar-rMR": u"العربية (موريتانيا)", # Arabic (Mauritania)
"ar-rOM": u"العربية (عُمان)", # Arabic (Oman)
"ar-rPS": u"العربية (فلسطين)", # Arabic (Palestine)
"ar-rQA": u"العربية (قطر)", # Arabic (Qatar)
"ar-rSA": u"العربية (المملكة العربية السعودية)", # Arabic (Saudi Arabia)
"ar-rSD": u"العربية (السودان)", # Arabic (Sudan)
"ar-rSO": u"العربية (الصومال)", # Arabic (Somalia)
"ar-rSY": u"العربية (سوريا)", # Arabic (Syria)
"ar-rTD": u"العربية (تشاد)", # Arabic (Chad)
"ar-rTN": u"العربية (تونس)", # Arabic (Tunisia)
"ar-rYE": u"العربية (اليمن)", # Arabic (Yemen)
"as": u"অসমীয়া", # Assamese
"as-rIN": u"অসমীয়া (ভাৰত)", # Assamese (India)
"asa": u"Kipare", # Asu
"asa-rTZ": u"Kipare (Tadhania)", # Asu (Tanzania)
"az": u"Azərbaycanca", # Azerbaijani
"az-rCYRL": u"Азәрбајҹан (CYRL)", # Azerbaijani (CYRL)
"az-rCYRL_AZ": u"Азәрбајҹан (Азәрбајҹан,AZ)", # Azerbaijani (Azerbaijan,AZ)
"az-rLATN": u"Azərbaycanca (LATN)", # Azerbaijani (LATN)
"az-rLATN_AZ": u"Azərbaycanca (Azərbaycan,AZ)", # Azerbaijani (Azerbaijan,AZ)
"bas": u"Ɓàsàa", # Basaa
"bas-rCM": u"Ɓàsàa (Kàmɛ̀rûn)", # Basaa (Cameroon)
"be": u"беларуская", # Belarusian
"be-rBY": u"беларуская (Беларусь)", # Belarusian (Belarus)
"bem": u"Ichibemba", # Bemba
"bem-rZM": u"Ichibemba (Zambia)", # Bemba (Zambia)
"bez": u"Hibena", # Bena
"bez-rTZ": u"Hibena (Hutanzania)", # Bena (Tanzania)
"bg": u"български", # Bulgarian
"bg-rBG": u"български (България)", # Bulgarian (Bulgaria)
"bm": u"Bamanakan", # Bambara
"bm-rML": u"Bamanakan (Mali)", # Bambara (Mali)
"bn": u"বাংলা", # Bengali
"bn-rBD": u"বাংলা (বাংলাদেশ)", # Bengali (Bangladesh)
"bn-rIN": u"বাংলা (ভারত)", # Bengali (India)
"bo": u"པོད་སྐད་", # Tibetan
"bo-rCN": u"པོད་སྐད་ (རྒྱ་ནག)", # Tibetan (China)
"bo-rIN": u"པོད་སྐད་ (རྒྱ་གར་)", # Tibetan (India)
"br": u"Brezhoneg", # Breton
"br-rFR": u"Brezhoneg (Frañs)", # Breton (France)
"brx": u"बड़ो", # Bodo
"brx-rIN": u"बड़ो (भारत)", # Bodo (India)
"bs": u"Bosanski", # Bosnian
"bs-rCYRL": u"босански (CYRL)", # Bosnian (CYRL)
"bs-rCYRL_BA": u"босански (Босна и Херцеговина,BA)", # Bosnian (Bosnia and Herzegovina,BA)
"bs-rLATN": u"Bosanski (LATN)", # Bosnian (LATN)
"bs-rLATN_BA": u"Bosanski (Bosna i Hercegovina,BA)", # Bosnian (Bosnia and Herzegovina,BA)
"ca": u"Català", # Catalan
"ca-rAD": u"Català (Andorra)", # Catalan (Andorra)
"ca-rES": u"Català (Espanya)", # Catalan (Spain)
"cgg": u"Rukiga", # Chiga
"cgg-rUG": u"Rukiga (Uganda)", # Chiga (Uganda)
"chr": u"ᏣᎳᎩ", # Cherokee
"chr-rUS": u"ᏣᎳᎩ (ᎠᎹᏰᏟ)", # Cherokee (United States)
"cs": u"čeština", # Czech
"cs-rCZ": u"čeština (Česká republika)", # Czech (Czech Republic)
"cy": u"Cymraeg", # Welsh
"cy-rGB": u"Cymraeg (y Deyrnas Unedig)", # Welsh (United Kingdom)
"da": u"Dansk", # Danish
"da-rDK": u"Dansk (Danmark)", # Danish (Denmark)
"dav": u"Kitaita", # Taita
"dav-rKE": u"Kitaita (Kenya)", # Taita (Kenya)
"de": u"Deutsch", # German
"de-rAT": u"Deutsch (Österreich)", # German (Austria)
"de-rBE": u"Deutsch (Belgien)", # German (Belgium)
"de-rCH": u"Deutsch (Schweiz)", # German (Switzerland)
"de-rDE": u"Deutsch (Deutschland)", # German (Germany)
"de-rLI": u"Deutsch (Liechtenstein)", # German (Liechtenstein)
"de-rLU": u"Deutsch (Luxemburg)", # German (Luxembourg)
"dje": u"Zarmaciine", # Zarma
"dje-rNE": u"Zarmaciine (Nižer)", # Zarma (Niger)
"dua": u"Duálá", # Duala
"dua-rCM": u"Duálá (Cameroun)", # Duala (Cameroon)
"dyo": u"Joola", # Jola-Fonyi
"dyo-rSN": u"Joola (Senegal)", # Jola-Fonyi (Senegal)
"dz": u"རྫོང་ཁ", # Dzongkha
"dz-rBT": u"རྫོང་ཁ (འབྲུག)", # Dzongkha (Bhutan)
"ebu": u"Kĩembu", # Embu
"ebu-rKE": u"Kĩembu (Kenya)", # Embu (Kenya)
"ee": u"Eʋegbe", # Ewe
"ee-rGH": u"Eʋegbe (Ghana nutome)", # Ewe (Ghana)
"ee-rTG": u"Eʋegbe (Togo nutome)", # Ewe (Togo)
"el": u"Ελληνικά", # Greek
"el-rCY": u"Ελληνικά (Κύπρος)", # Greek (Cyprus)
"el-rGR": u"Ελληνικά (Ελλάδα)", # Greek (Greece)
"en": u"English", # English
"en_150": u"English (Europe)", # English (Europe)
"en-rAG": u"English (Antigua and Barbuda)", # English (Antigua and Barbuda)
"en-rAS": u"English (American Samoa)", # English (American Samoa)
"en-rAU": u"English (Australia)", # English (Australia)
"en-rBB": u"English (Barbados)", # English (Barbados)
"en-rBE": u"English (Belgium)", # English (Belgium)
"en-rBM": u"English (Bermuda)", # English (Bermuda)
"en-rBS": u"English (Bahamas)", # English (Bahamas)
"en-rBW": u"English (Botswana)", # English (Botswana)
"en-rBZ": u"English (Belize)", # English (Belize)
"en-rCA": u"English (Canada)", # English (Canada)
"en-rCM": u"English (Cameroon)", # English (Cameroon)
"en-rDM": u"English (Dominica)", # English (Dominica)
"en-rFJ": u"English (Fiji)", # English (Fiji)
"en-rFM": u"English (Micronesia)", # English (Micronesia)
"en-rGB": u"English (United Kingdom)", # English (United Kingdom)
"en-rGD": u"English (Grenada)", # English (Grenada)
"en-rGG": u"English (Guernsey)", # English (Guernsey)
"en-rGH": u"English (Ghana)", # English (Ghana)
"en-rGI": u"English (Gibraltar)", # English (Gibraltar)
"en-rGM": u"English (Gambia)", # English (Gambia)
"en-rGU": u"English (Guam)", # English (Guam)
"en-rGY": u"English (Guyana)", # English (Guyana)
"en-rHK": u"English (Hong Kong)", # English (Hong Kong)
"en-rIE": u"English (Ireland)", # English (Ireland)
"en-rIM": u"English (Isle of Man)", # English (Isle of Man)
"en-rIN": u"English (India)", # English (India)
"en-rJE": u"English (Jersey)", # English (Jersey)
"en-rJM": u"English (Jamaica)", # English (Jamaica)
"en-rKE": u"English (Kenya)", # English (Kenya)
"en-rKI": u"English (Kiribati)", # English (Kiribati)
"en-rKN": u"English (Saint Kitts and Nevis)", # English (Saint Kitts and Nevis)
"en-rKY": u"English (Cayman Islands)", # English (Cayman Islands)
"en-rLC": u"English (Saint Lucia)", # English (Saint Lucia)
"en-rLR": u"English (Liberia)", # English (Liberia)
"en-rLS": u"English (Lesotho)", # English (Lesotho)
"en-rMG": u"English (Madagascar)", # English (Madagascar)
"en-rMH": u"English (Marshall Islands)", # English (Marshall Islands)
"en-rMP": u"English (Northern Mariana Islands)", # English (Northern Mariana Islands)
"en-rMT": u"English (Malta)", # English (Malta)
"en-rMU": u"English (Mauritius)", # English (Mauritius)
"en-rMW": u"English (Malawi)", # English (Malawi)
"en-rNA": u"English (Namibia)", # English (Namibia)
"en-rNG": u"English (Nigeria)", # English (Nigeria)
"en-rNZ": u"English (New Zealand)", # English (New Zealand)
"en-rPG": u"English (Papua New Guinea)", # English (Papua New Guinea)
"en-rPH": u"English (Philippines)", # English (Philippines)
"en-rPK": u"English (Pakistan)", # English (Pakistan)
"en-rPR": u"English (Puerto Rico)", # English (Puerto Rico)
"en-rPW": u"English (Palau)", # English (Palau)
"en-rSB": u"English (Solomon Islands)", # English (Solomon Islands)
"en-rSC": u"English (Seychelles)", # English (Seychelles)
"en-rSG": u"English (Singapore)", # English (Singapore)
"en-rSL": u"English (Sierra Leone)", # English (Sierra Leone)
"en-rSS": u"English (South Sudan)", # English (South Sudan)
"en-rSZ": u"English (Swaziland)", # English (Swaziland)
"en-rTC": u"English (Turks and Caicos Islands)", # English (Turks and Caicos Islands)
"en-rTO": u"English (Tonga)", # English (Tonga)
"en-rTT": u"English (Trinidad and Tobago)", # English (Trinidad and Tobago)
"en-rTZ": u"English (Tanzania)", # English (Tanzania)
"en-rUG": u"English (Uganda)", # English (Uganda)
"en-rUM": u"English (U.S. Outlying Islands)", # English (U.S. Outlying Islands)
"en-rUS": u"English (United States)", # English (United States)
"en-rUS_POSIX": u"English (United States,Computer)", # English (United States,Computer)
"en-rVC": u"English (Saint Vincent and the Grenadines)", # English (Saint Vincent and the Grenadines)
"en-rVG": u"English (British Virgin Islands)", # English (British Virgin Islands)
"en-rVI": u"English (U.S. Virgin Islands)", # English (U.S. Virgin Islands)
"en-rVU": u"English (Vanuatu)", # English (Vanuatu)
"en-rWS": u"English (Samoa)", # English (Samoa)
"en-rZA": u"English (South Africa)", # English (South Africa)
"en-rZM": u"English (Zambia)", # English (Zambia)
"en-rZW": u"English (Zimbabwe)", # English (Zimbabwe)
"eo": u"Esperanto", # Esperanto
"es": u"Español", # Spanish
"es_419": u"Español (Latinoamérica)", # Spanish (Latin America)
"es-rAR": u"Español (Argentina)", # Spanish (Argentina)
"es-rBO": u"Español (Bolivia)", # Spanish (Bolivia)
"es-rCL": u"Español (Chile)", # Spanish (Chile)
"es-rCO": u"Español (Colombia)", # Spanish (Colombia)
"es-rCR": u"Español (Costa Rica)", # Spanish (Costa Rica)
"es-rCU": u"Español (Cuba)", # Spanish (Cuba)
"es-rDO": u"Español (República Dominicana)", # Spanish (Dominican Republic)
"es-rEA": u"Español (Ceuta y Melilla)", # Spanish (Ceuta and Melilla)
"es-rEC": u"Español (Ecuador)", # Spanish (Ecuador)
"es-rES": u"Español (España)", # Spanish (Spain)
"es-rGQ": u"Español (Guinea Ecuatorial)", # Spanish (Equatorial Guinea)
"es-rGT": u"Español (Guatemala)", # Spanish (Guatemala)
"es-rHN": u"Español (Honduras)", # Spanish (Honduras)
"es-rIC": u"Español (Islas Canarias)", # Spanish (Canary Islands)
"es-rMX": u"Español (México)", # Spanish (Mexico)
"es-rNI": u"Español (Nicaragua)", # Spanish (Nicaragua)
"es-rPA": u"Español (Panamá)", # Spanish (Panama)
"es-rPE": u"Español (Perú)", # Spanish (Peru)
"es-rPH": u"Español (Filipinas)", # Spanish (Philippines)
"es-rPR": u"Español (Puerto Rico)", # Spanish (Puerto Rico)
"es-rPY": u"Español (Paraguay)", # Spanish (Paraguay)
"es-rSV": u"Español (El Salvador)", # Spanish (El Salvador)
"es-rUS": u"Español (Estados Unidos)", # Spanish (United States)
"es-rUY": u"Español (Uruguay)", # Spanish (Uruguay)
"es-rVE": u"Español (Venezuela)", # Spanish (Venezuela)
"et": u"Eesti", # Estonian
"et-rEE": u"Eesti (Eesti)", # Estonian (Estonia)
"eu": u"Euskara", # Basque
"eu-rES": u"Euskara (Espainia)", # Basque (Spain)
"ewo": u"Ewondo", # Ewondo
"ewo-rCM": u"Ewondo (Kamərún)", # Ewondo (Cameroon)
"fa": u"فارسی", # Persian
"fa-rAF": u"دری (افغانستان)", # Persian (Afghanistan)
"fa-rIR": u"فارسی (ایران)", # Persian (Iran)
"ff": u"Pulaar", # Fulah
"ff-rSN": u"Pulaar (Senegaal)", # Fulah (Senegal)
"fi": u"Suomi", # Finnish
"fi-rFI": u"Suomi (Suomi)", # Finnish (Finland)
"fil": u"Filipino", # Filipino
"fil-rPH": u"Filipino (Pilipinas)", # Filipino (Philippines)
"fo": u"Føroyskt", # Faroese
"fo-rFO": u"Føroyskt (Føroyar)", # Faroese (Faroe Islands)
"fr": u"Français", # French
"fr-rBE": u"Français (Belgique)", # French (Belgium)
"fr-rBF": u"Français (Burkina Faso)", # French (Burkina Faso)
"fr-rBI": u"Français (Burundi)", # French (Burundi)
"fr-rBJ": u"Français (Bénin)", # French (Benin)
"fr-rBL": u"Français (Saint-Barthélémy)", # French (Saint Barthélemy)
"fr-rCA": u"Français (Canada)", # French (Canada)
"fr-rCD": u"Français (République démocratique du Congo)", # French (Congo [DRC])
"fr-rCF": u"Français (République centrafricaine)", # French (Central African Republic)
"fr-rCG": u"Français (Congo-Brazzaville)", # French (Congo [Republic])
"fr-rCH": u"Français (Suisse)", # French (Switzerland)
"fr-rCI": u"Français (Côte d’Ivoire)", # French (Côte d’Ivoire)
"fr-rCM": u"Français (Cameroun)", # French (Cameroon)
"fr-rDJ": u"Français (Djibouti)", # French (Djibouti)
"fr-rDZ": u"Français (Algérie)", # French (Algeria)
"fr-rFR": u"Français (France)", # French (France)
"fr-rGA": u"Français (Gabon)", # French (Gabon)
"fr-rGF": u"Français (Guyane française)", # French (French Guiana)
"fr-rGN": u"Français (Guinée)", # French (Guinea)
"fr-rGP": u"Français (Guadeloupe)", # French (Guadeloupe)
"fr-rGQ": u"Français (Guinée équatoriale)", # French (Equatorial Guinea)
"fr-rHT": u"Français (Haïti)", # French (Haiti)
"fr-rKM": u"Français (Comores)", # French (Comoros)
"fr-rLU": u"Français (Luxembourg)", # French (Luxembourg)
"fr-rMA": u"Français (Maroc)", # French (Morocco)
"fr-rMC": u"Français (Monaco)", # French (Monaco)
"fr-rMF": u"Français (Saint-Martin [partie française])", # French (Saint Martin)
"fr-rMG": u"Français (Madagascar)", # French (Madagascar)
"fr-rML": u"Français (Mali)", # French (Mali)
"fr-rMQ": u"Français (Martinique)", # French (Martinique)
"fr-rMR": u"Français (Mauritanie)", # French (Mauritania)
"fr-rMU": u"Français (Maurice)", # French (Mauritius)
"fr-rNC": u"Français (Nouvelle-Calédonie)", # French (New Caledonia)
"fr-rNE": u"Français (Niger)", # French (Niger)
"fr-rPF": u"Français (Polynésie française)", # French (French Polynesia)
"fr-rRE": u"Français (Réunion)", # French (Réunion)
"fr-rRW": u"Français (Rwanda)", # French (Rwanda)
"fr-rSC": u"Français (Seychelles)", # French (Seychelles)
"fr-rSN": u"Français (Sénégal)", # French (Senegal)
"fr-rSY": u"Français (Syrie)", # French (Syria)
"fr-rTD": u"Français (Tchad)", # French (Chad)
"fr-rTG": u"Français (Togo)", # French (Togo)
"fr-rTN": u"Français (Tunisie)", # French (Tunisia)
"fr-rVU": u"Français (Vanuatu)", # French (Vanuatu)
"fr-rYT": u"Français (Mayotte)", # French (Mayotte)
"ga": u"Gaeilge", # Irish
"ga-rIE": u"Gaeilge (Éire)", # Irish (Ireland)
"gl": u"Galego", # Galician
"gl-rES": u"Galego (España)", # Galician (Spain)
"gsw": u"Schwiizertüütsch", # Swiss German
"gsw-rCH": u"Schwiizertüütsch (Schwiiz)", # Swiss German (Switzerland)
"gu": u"ગુજરાતી", # Gujarati
"gu-rIN": u"ગુજરાતી (ભારત)", # Gujarati (India)
"guz": u"Ekegusii", # Gusii
"guz-rKE": u"Ekegusii (Kenya)", # Gusii (Kenya)
"gv": u"Gaelg", # Manx
"gv-rGB": u"Gaelg (Rywvaneth Unys)", # Manx (United Kingdom)
"ha": u"Hausa", # Hausa
"ha-rLATN": u"Hausa (LATN)", # Hausa (LATN)
"ha-rLATN_GH": u"Hausa (Gana,GH)", # Hausa (Ghana,GH)
"ha-rLATN_NE": u"Hausa (Nijar,NE)", # Hausa (Niger,NE)
"ha-rLATN_NG": u"Hausa (Najeriya,NG)", # Hausa (Nigeria,NG)
"haw": u"ʻŌlelo Hawaiʻi", # Hawaiian
"haw-rUS": u"ʻŌlelo Hawaiʻi (ʻAmelika Hui Pū ʻIa)", # Hawaiian (United States)
"iw": u"עברית", # Hebrew
"iw-rIL": u"עברית (ישראל)", # Hebrew (Israel)
"hi": u"हिन्दी", # Hindi
"hi-rIN": u"हिन्दी (भारत)", # Hindi (India)
"hr": u"Hrvatski", # Croatian
"hr-rBA": u"Hrvatski (Bosna i Hercegovina)", # Croatian (Bosnia and Herzegovina)
"hr-rHR": u"Hrvatski (Hrvatska)", # Croatian (Croatia)
"hu": u"Magyar", # Hungarian
"hu-rHU": u"Magyar (Magyarország)", # Hungarian (Hungary)
"hy": u"հայերեն", # Armenian
"hy-rAM": u"հայերեն (Հայաստան)", # Armenian (Armenia)
"in": u"Bahasa Indonesia", # Indonesian
"in-rID": u"Bahasa Indonesia (Indonesia)", # Indonesian (Indonesia)
"ig": u"Igbo", # Igbo
"ig-rNG": u"Igbo (Nigeria)", # Igbo (Nigeria)
"ii": u"ꆈꌠꉙ", # Sichuan Yi
"ii-rCN": u"ꆈꌠꉙ (ꍏꇩ)", # Sichuan Yi (China)
"is": u"íslenska", # Icelandic
"is-rIS": u"íslenska (Ísland)", # Icelandic (Iceland)
"it": u"Italiano", # Italian
"it-rCH": u"Italiano (Svizzera)", # Italian (Switzerland)
"it-rIT": u"Italiano (Italia)", # Italian (Italy)
"it-rSM": u"Italiano (San Marino)", # Italian (San Marino)
"ja": u"日本語", # Japanese
"ja-rJP": u"日本語 (日本)", # Japanese (Japan)
"jgo": u"Ndaꞌa", # Ngomba
"jgo-rCM": u"Ndaꞌa (Kamɛlûn)", # Ngomba (Cameroon)
"jmc": u"Kimachame", # Machame
"jmc-rTZ": u"Kimachame (Tanzania)", # Machame (Tanzania)
"ka": u"ქართული", # Georgian
"ka-rGE": u"ქართული (საქართველო)", # Georgian (Georgia)
"kab": u"Taqbaylit", # Kabyle
"kab-rDZ": u"Taqbaylit (Lezzayer)", # Kabyle (Algeria)
"kam": u"Kikamba", # Kamba
"kam-rKE": u"Kikamba (Kenya)", # Kamba (Kenya)
"kde": u"Chimakonde", # Makonde
"kde-rTZ": u"Chimakonde (Tanzania)", # Makonde (Tanzania)
"kea": u"Kabuverdianu", # Kabuverdianu
"kea-rCV": u"Kabuverdianu (Kabu Verdi)", # Kabuverdianu (Cape Verde)
"khq": u"Koyra ciini", # Koyra Chiini
"khq-rML": u"Koyra ciini (Maali)", # Koyra Chiini (Mali)
"ki": u"Gikuyu", # Kikuyu
"ki-rKE": u"Gikuyu (Kenya)", # Kikuyu (Kenya)
"kk": u"қазақ тілі", # Kazakh
"kk-rCYRL": u"қазақ тілі (CYRL)", # Kazakh (CYRL)
"kk-rCYRL_KZ": u"қазақ тілі (Қазақстан,KZ)", # Kazakh (Kazakhstan,KZ)
"kl": u"Kalaallisut", # Kalaallisut
"kl-rGL": u"Kalaallisut (Kalaallit Nunaat)", # Kalaallisut (Greenland)
"kln": u"Kalenjin", # Kalenjin
"kln-rKE": u"Kalenjin (Emetab Kenya)", # Kalenjin (Kenya)
"km": u"ខ្មែរ", # Khmer
"km-rKH": u"ខ្មែរ (កម្ពុជា)", # Khmer (Cambodia)
"kn": u"ಕನ್ನಡ", # Kannada
"kn-rIN": u"ಕನ್ನಡ (ಭಾರತ)", # Kannada (India)
"ko": u"한국어", # Korean
"ko-rKP": u"한국어 (조선 민주주의 인민 공화국)", # Korean (North Korea)
"ko-rKR": u"한국어 (대한민국)", # Korean (South Korea)
"kok": u"कोंकणी", # Konkani
"kok-rIN": u"कोंकणी (भारत)", # Konkani (India)
"ks": u"کٲشُر", # Kashmiri
"ks-rARAB": u"کٲشُر (ARAB)", # Kashmiri (ARAB)
"ks-rARAB_IN": u"کٲشُر (ہِنٛدوستان,IN)", # Kashmiri (India,IN)
"ksb": u"Kishambaa", # Shambala
"ksb-rTZ": u"Kishambaa (Tanzania)", # Shambala (Tanzania)
"ksf": u"Rikpa", # Bafia
"ksf-rCM": u"Rikpa (kamɛrún)", # Bafia (Cameroon)
"kw": u"Kernewek", # Cornish
"kw-rGB": u"Kernewek (Rywvaneth Unys)", # Cornish (United Kingdom)
"lag": u"Kɨlaangi", # Langi
"lag-rTZ": u"Kɨlaangi (Taansanía)", # Langi (Tanzania)
"lg": u"Luganda", # Ganda
"lg-rUG": u"Luganda (Yuganda)", # Ganda (Uganda)
"ln": u"Lingála", # Lingala
"ln-rAO": u"Lingála (Angóla)", # Lingala (Angola)
"ln-rCD": u"Lingála (Repibiki demokratiki ya Kongó)", # Lingala (Congo [DRC])
"ln-rCF": u"Lingála (Repibiki ya Afríka ya Káti)", # Lingala (Central African Republic)
"ln-rCG": u"Lingála (Kongo)", # Lingala (Congo [Republic])
"lo": u"ລາວ", # Lao
"lo-rLA": u"ລາວ (ສ.ປ.ປ ລາວ)", # Lao (Laos)
"lt": u"Lietuvių", # Lithuanian
"lt-rLT": u"Lietuvių (Lietuva)", # Lithuanian (Lithuania)
"lu": u"Tshiluba", # Luba-Katanga
"lu-rCD": u"Tshiluba (Ditunga wa Kongu)", # Luba-Katanga (Congo [DRC])
"luo": u"Dholuo", # Luo
"luo-rKE": u"Dholuo (Kenya)", # Luo (Kenya)
"luy": u"Luluhia", # Luyia
"luy-rKE": u"Luluhia (Kenya)", # Luyia (Kenya)
"lv": u"Latviešu", # Latvian
"lv-rLV": u"Latviešu (Latvija)", # Latvian (Latvia)
"mas": u"Maa", # Masai
"mas-rKE": u"Maa (Kenya)", # Masai (Kenya)
"mas-rTZ": u"Maa (Tansania)", # Masai (Tanzania)
"mer": u"Kĩmĩrũ", # Meru
"mer-rKE": u"Kĩmĩrũ (Kenya)", # Meru (Kenya)
"mfe": u"Kreol morisien", # Morisyen
"mfe-rMU": u"Kreol morisien (Moris)", # Morisyen (Mauritius)
"mg": u"Malagasy", # Malagasy
"mg-rMG": u"Malagasy (Madagasikara)", # Malagasy (Madagascar)
"mgh": u"Makua", # Makhuwa-Meetto
"mgh-rMZ": u"Makua (Umozambiki)", # Makhuwa-Meetto (Mozambique)
"mgo": u"Metaʼ", # Meta'
"mgo-rCM": u"Metaʼ (Kamalun)", # Meta' (Cameroon)
"mk": u"македонски", # Macedonian
"mk-rMK": u"македонски (Македонија)", # Macedonian (Macedonia [FYROM])
"ml": u"മലയാളം", # Malayalam
"ml-rIN": u"മലയാളം (ഇന്ത്യ)", # Malayalam (India)
"mn": u"монгол", # Mongolian
"mn-rCYRL": u"монгол (CYRL)", # Mongolian (CYRL)
"mn-rCYRL_MN": u"монгол (Монгол,MN)", # Mongolian (Mongolia,MN)
"mr": u"मराठी", # Marathi
"mr-rIN": u"मराठी (भारत)", # Marathi (India)
"ms": u"Bahasa Melayu", # Malay
"ms-rLATN": u"Bahasa Melayu (LATN)", # Malay (LATN)
"ms-rLATN_BN": u"Bahasa Melayu (Brunei,BN)", # Malay (Brunei,BN)
"ms-rLATN_MY": u"Bahasa Melayu (Malaysia,MY)", # Malay (Malaysia,MY)
"ms-rLATN_SG": u"Bahasa Melayu (Singapura,SG)", # Malay (Singapore,SG)
"mt": u"Malti", # Maltese
"mt-rMT": u"Malti (Malta)", # Maltese (Malta)
"mua": u"MUNDAŊ", # Mundang
"mua-rCM": u"MUNDAŊ (kameruŋ)", # Mundang (Cameroon)
"my": u"ဗမာ", # Burmese
"my-rMM": u"ဗမာ (မြန်မာ)", # Burmese (Myanmar [Burma])
"naq": u"Khoekhoegowab", # Nama
"naq-rNA": u"Khoekhoegowab (Namibiab)", # Nama (Namibia)
"nb": u"Norsk bokmål", # Norwegian Bokmål
"nb-rNO": u"Norsk bokmål (Norge)", # Norwegian Bokmål (Norway)
"nd": u"IsiNdebele", # North Ndebele
"nd-rZW": u"IsiNdebele (Zimbabwe)", # North Ndebele (Zimbabwe)
"ne": u"नेपाली", # Nepali
"ne-rIN": u"नेपाली (भारत)", # Nepali (India)
"ne-rNP": u"नेपाली (नेपाल)", # Nepali (Nepal)
"nl": u"Nederlands", # Dutch
"nl-rAW": u"Nederlands (Aruba)", # Dutch (Aruba)
"nl-rBE": u"Nederlands (België)", # Dutch (Belgium)
"nl-rCW": u"Nederlands (Curaçao)", # Dutch (Curaçao)
"nl-rNL": u"Nederlands (Nederland)", # Dutch (Netherlands)
"nl-rSR": u"Nederlands (Suriname)", # Dutch (Suriname)
"nl-rSX": u"Nederlands (Sint-Maarten)", # Dutch (Sint Maarten)
"nmg": u"Nmg", # Kwasio
"nmg-rCM": u"Nmg (Kamerun)", # Kwasio (Cameroon)
"nn": u"Nynorsk", # Norwegian Nynorsk
"nn-rNO": u"Nynorsk (Noreg)", # Norwegian Nynorsk (Norway)
"nus": u"Thok Nath", # Nuer
"nus-rSD": u"Thok Nath (Sudan)", # Nuer (Sudan)
"nyn": u"Runyankore", # Nyankole
"nyn-rUG": u"Runyankore (Uganda)", # Nyankole (Uganda)
"om": u"Oromoo", # Oromo
"om-rET": u"Oromoo (Itoophiyaa)", # Oromo (Ethiopia)
"om-rKE": u"Oromoo (Keeniyaa)", # Oromo (Kenya)
"or": u"ଓଡ଼ିଆ", # Oriya
"or-rIN": u"ଓଡ଼ିଆ (ଭାରତ)", # Oriya (India)
"pa": u"ਪੰਜਾਬੀ", # Punjabi
"pa-rARAB": u"پنجاب (ARAB)", # Punjabi (ARAB)
"pa-rARAB_PK": u"پنجاب (پکستان,PK)", # Punjabi (Pakistan,PK)
"pa-rGURU": u"ਪੰਜਾਬੀ (GURU)", # Punjabi (GURU)
"pa-rGURU_IN": u"ਪੰਜਾਬੀ (ਭਾਰਤ,IN)", # Punjabi (India,IN)
"pl": u"Polski", # Polish
"pl-rPL": u"Polski (Polska)", # Polish (Poland)
"ps": u"پښتو", # Pashto
"ps-rAF": u"پښتو (افغانستان)", # Pashto (Afghanistan)
"pt": u"Português", # Portuguese
"pt-rAO": u"Português (Angola)", # Portuguese (Angola)
"pt-rBR": u"Português (Brasil)", # Portuguese (Brazil)
"pt-rCV": u"Português (Cabo Verde)", # Portuguese (Cape Verde)
"pt-rGW": u"Português (Guiné Bissau)", # Portuguese (Guinea-Bissau)
"pt-rMO": u"Português (Macau)", # Portuguese (Macau)
"pt-rMZ": u"Português (Moçambique)", # Portuguese (Mozambique)
"pt-rPT": u"Português (Portugal)", # Portuguese (Portugal)
"pt-rST": u"Português (São Tomé e Príncipe)", # Portuguese (São Tomé and Príncipe)
"pt-rTL": u"Português (Timor-Leste)", # Portuguese (Timor-Leste)
"rm": u"Rumantsch", # Romansh
"rm-rCH": u"Rumantsch (Svizra)", # Romansh (Switzerland)
"rn": u"Ikirundi", # Rundi
"rn-rBI": u"Ikirundi (Uburundi)", # Rundi (Burundi)
"ro": u"Română", # Romanian
"ro-rMD": u"Română (Republica Moldova)", # Romanian (Moldova)
"ro-rRO": u"Română (România)", # Romanian (Romania)
"rof": u"Kihorombo", # Rombo
"rof-rTZ": u"Kihorombo (Tanzania)", # Rombo (Tanzania)
"ru": u"русский", # Russian
"ru-rBY": u"русский (Беларусь)", # Russian (Belarus)
"ru-rKG": u"русский (Киргизия)", # Russian (Kyrgyzstan)
"ru-rKZ": u"русский (Казахстан)", # Russian (Kazakhstan)
"ru-rMD": u"русский (Молдова)", # Russian (Moldova)
"ru-rRU": u"русский (Россия)", # Russian (Russia)
"ru-rUA": u"русский (Украина)", # Russian (Ukraine)
"rw": u"Kinyarwanda", # Kinyarwanda
"rw-rRW": u"Kinyarwanda (Rwanda)", # Kinyarwanda (Rwanda)
"rwk": u"Kiruwa", # Rwa
"rwk-rTZ": u"Kiruwa (Tanzania)", # Rwa (Tanzania)
"saq": u"Kisampur", # Samburu
"saq-rKE": u"Kisampur (Kenya)", # Samburu (Kenya)
"sbp": u"Ishisangu", # Sangu
"sbp-rTZ": u"Ishisangu (Tansaniya)", # Sangu (Tanzania)
"seh": u"Sena", # Sena
"seh-rMZ": u"Sena (Moçambique)", # Sena (Mozambique)
"ses": u"Koyraboro senni", # Koyraboro Senni
"ses-rML": u"Koyraboro senni (Maali)", # Koyraboro Senni (Mali)
"sg": u"Sängö", # Sango
"sg-rCF": u"Sängö (Ködörösêse tî Bêafrîka)", # Sango (Central African Republic)
"shi": u"ⵜⴰⵎⴰⵣⵉⵖⵜ", # Tachelhit
"shi-rLATN": u"Tamazight (LATN)", # Tachelhit (LATN)
"shi-rLATN_MA": u"Tamazight (lmɣrib,MA)", # Tachelhit (Morocco,MA)
"shi-rTFNG": u"ⵜⴰⵎⴰⵣⵉⵖⵜ (TFNG)", # Tachelhit (TFNG)
"shi-rTFNG_MA": u"ⵜⴰⵎⴰⵣⵉⵖⵜ (ⵍⵎⵖⵔⵉⴱ,MA)", # Tachelhit (Morocco,MA)
"si": u"සිංහල", # Sinhala
"si-rLK": u"සිංහල (ශ්රී ලංකාව)", # Sinhala (Sri Lanka)
"sk": u"Slovenčina", # Slovak
"sk-rSK": u"Slovenčina (Slovensko)", # Slovak (Slovakia)
"sl": u"Slovenščina", # Slovenian
"sl-rSI": u"Slovenščina (Slovenija)", # Slovenian (Slovenia)
"sn": u"ChiShona", # Shona
"sn-rZW": u"ChiShona (Zimbabwe)", # Shona (Zimbabwe)
"so": u"Soomaali", # Somali
"so-rDJ": u"Soomaali (Jabuuti)", # Somali (Djibouti)
"so-rET": u"Soomaali (Itoobiya)", # Somali (Ethiopia)
"so-rKE": u"Soomaali (Kiiniya)", # Somali (Kenya)
"so-rSO": u"Soomaali (Soomaaliya)", # Somali (Somalia)
"sq": u"Shqip", # Albanian
"sq-rAL": u"Shqip (Shqipëria)", # Albanian (Albania)
"sq-rMK": u"Shqip (Maqedoni)", # Albanian (Macedonia [FYROM])
"sr": u"Српски", # Serbian
"sr-rCYRL": u"Српски (CYRL)", # Serbian (CYRL)
"sr-rCYRL_BA": u"Српски (Босна и Херцеговина,BA)", # Serbian (Bosnia and Herzegovina,BA)
"sr-rCYRL_ME": u"Српски (Црна Гора,ME)", # Serbian (Montenegro,ME)
"sr-rCYRL_RS": u"Српски (Србија,RS)", # Serbian (Serbia,RS)
"sr-rLATN": u"Srpski (LATN)", # Serbian (LATN)
"sr-rLATN_BA": u"Srpski (Bosna i Hercegovina,BA)", # Serbian (Bosnia and Herzegovina,BA)
"sr-rLATN_ME": u"Srpski (Crna Gora,ME)", # Serbian (Montenegro,ME)
"sr-rLATN_RS": u"Srpski (Srbija,RS)", # Serbian (Serbia,RS)
"sv": u"Svenska", # Swedish
"sv-rAX": u"Svenska (Åland)", # Swedish (Åland Islands)
"sv-rFI": u"Svenska (Finland)", # Swedish (Finland)
"sv-rSE": u"Svenska (Sverige)", # Swedish (Sweden)
"sw": u"Kiswahili", # Swahili
"sw-rKE": u"Kiswahili (Kenya)", # Swahili (Kenya)
"sw-rTZ": u"Kiswahili (Tanzania)", # Swahili (Tanzania)
"sw-rUG": u"Kiswahili (Uganda)", # Swahili (Uganda)
"swc": u"Kiswahili ya Kongo", # Congo Swahili
"swc-rCD": u"Kiswahili ya Kongo (Jamhuri ya Kidemokrasia ya Kongo)", # Congo Swahili (Congo [DRC])
"ta": u"தமிழ்", # Tamil
"ta-rIN": u"தமிழ் (இந்தியா)", # Tamil (India)
"ta-rLK": u"தமிழ் (இலங்கை)", # Tamil (Sri Lanka)
"ta-rMY": u"தமிழ் (மலேஷியா)", # Tamil (Malaysia)
"ta-rSG": u"தமிழ் (சிங்கப்பூர்)", # Tamil (Singapore)
"te": u"తెలుగు", # Telugu
"te-rIN": u"తెలుగు (భారత దేశం)", # Telugu (India)
"teo": u"Kiteso", # Teso
"teo-rKE": u"Kiteso (Kenia)", # Teso (Kenya)
"teo-rUG": u"Kiteso (Uganda)", # Teso (Uganda)
"th": u"ไทย", # Thai
"th-rTH": u"ไทย (ไทย)", # Thai (Thailand)
"ti": u"ትግርኛ", # Tigrinya
"ti-rER": u"ትግርኛ (ER)", # Tigrinya (Eritrea)
"ti-rET": u"ትግርኛ (ET)", # Tigrinya (Ethiopia)
"to": u"Lea fakatonga", # Tongan
"to-rTO": u"Lea fakatonga (Tonga)", # Tongan (Tonga)
"tr": u"Türkçe", # Turkish
"tr-rCY": u"Türkçe (Güney Kıbrıs Rum Kesimi)", # Turkish (Cyprus)
"tr-rTR": u"Türkçe (Türkiye)", # Turkish (Turkey)
"twq": u"Tasawaq senni", # Tasawaq
"twq-rNE": u"Tasawaq senni (Nižer)", # Tasawaq (Niger)
"tzm": u"Tamaziɣt", # Central Atlas Tamazight
"tzm-rLATN": u"Tamaziɣt (LATN)", # Central Atlas Tamazight (LATN)
"tzm-rLATN_MA": u"Tamaziɣt (Meṛṛuk,MA)", # Central Atlas Tamazight (Morocco,MA)
"uk": u"українська", # Ukrainian
"uk-rUA": u"українська (Україна)", # Ukrainian (Ukraine)
"ur": u"اردو", # Urdu
"ur-rIN": u"اردو (بھارت)", # Urdu (India)
"ur-rPK": u"اردو (پاکستان)", # Urdu (Pakistan)
"uz": u"Ўзбек", # Uzbek
"uz-rARAB": u"اوزبیک (ARAB)", # Uzbek (ARAB)
"uz-rARAB_AF": u"اوزبیک (افغانستان,AF)", # Uzbek (Afghanistan,AF)
"uz-rCYRL": u"Ўзбек (CYRL)", # Uzbek (CYRL)
"uz-rCYRL_UZ": u"Ўзбек (Ўзбекистон,UZ)", # Uzbek (Uzbekistan,UZ)
"uz-rLATN": u"Oʻzbekcha (LATN)", # Uzbek (LATN)
"uz-rLATN_UZ": u"Oʻzbekcha (Oʻzbekiston,UZ)", # Uzbek (Uzbekistan,UZ)
"vai": u"ꕙꔤ", # Vai
"vai-rLATN": u"Vai (LATN)", # Vai (LATN)
"vai-rLATN_LR": u"Vai (Laibhiya,LR)", # Vai (Liberia,LR)
"vai-rVAII": u"ꕙꔤ (VAII)", # Vai (VAII)
"vai-rVAII_LR": u"ꕙꔤ (ꕞꔤꔫꕩ,LR)", # Vai (Liberia,LR)
"vi": u"Tiếng Việt", # Vietnamese
"vi-rVN": u"Tiếng Việt (Việt Nam)", # Vietnamese (Vietnam)
"vun": u"Kyivunjo", # Vunjo
"vun-rTZ": u"Kyivunjo (Tanzania)", # Vunjo (Tanzania)
"xog": u"Olusoga", # Soga
"xog-rUG": u"Olusoga (Yuganda)", # Soga (Uganda)
"yav": u"Nuasue", # Yangben
"yav-rCM": u"Nuasue (Kemelún)", # Yangben (Cameroon)
"yo": u"Èdè Yorùbá", # Yoruba
"yo-rNG": u"Èdè Yorùbá (Orílẹ́ède Nàìjíríà)", # Yoruba (Nigeria)
# This was the obtained from Locale, but it seems it's different in Settings
#"zh": u"中文", # Chinese
"zh": u"中文 (简体)", # Chinese
"zh-rHANS": u"中文 (HANS)", # Chinese (HANS)
"zh-rHANS_CN": u"中文 (中国,CN)", # Chinese (China,CN)
"zh-rHANS_HK": u"中文 (香港,HK)", # Chinese (Hong Kong,HK)
"zh-rHANS_MO": u"中文 (澳门,MO)", # Chinese (Macau,MO)
"zh-rHANS_SG": u"中文 (新加坡,SG)", # Chinese (Singapore,SG)
"zh-rHANT": u"中文 (HANT)", # Chinese (HANT)
"zh-rHANT_HK": u"中文 (香港,HK)", # Chinese (Hong Kong,HK)
"zh-rHANT_MO": u"中文 (澳門,MO)", # Chinese (Macau,MO)
"zh-rHANT_TW": u"中文 (台灣,TW)", # Chinese (Taiwan,TW)
"zu": u"IsiZulu", # Zulu
"zu-rZA": u"IsiZulu (iNingizimu Afrika)", # Zulu (South Africa)
}
if not languageTo in LANGUAGES.keys():
raise RuntimeError("%s is not a supported language by AndroidViewClient" % languageTo)
self.openQuickSettingsSettings()
view = None
currentLanguage = None
ATTEMPTS = 10
if self.vc.getSdkVersion() >= 20:
for _ in range(ATTEMPTS):
com_android_settings___id_dashboard = self.vc.findViewByIdOrRaise("com.android.settings:id/dashboard")
for k, v in LANGUAGE_SETTINGS.iteritems():
if DEBUG_CHANGE_LANGUAGE:
print >> sys.stderr, "searching for", v
view = self.vc.findViewWithText(v, root=com_android_settings___id_dashboard)
if view:
currentLanguage = k
if DEBUG_CHANGE_LANGUAGE:
print >> sys.stderr, "found current language:", k
break
if view:
break
com_android_settings___id_dashboard.uiScrollable.flingForward()
self.vc.sleep(1)
self.vc.dump(-1)
if view is None:
raise ViewNotFoundException("text", "'Language & input' (any language)", "ROOT")
view.touch()
self.vc.sleep(1)
self.vc.dump(-1)
self.vc.findViewWithTextOrRaise(PHONE_LANGUAGE[currentLanguage]).touch()
self.vc.sleep(1)
self.vc.dump(-1)
else:
for _ in range(ATTEMPTS):
android___id_list = self.vc.findViewByIdOrRaise("android:id/list")
for k, v in LANGUAGE_SETTINGS.iteritems():
view = self.vc.findViewWithText(v, root=android___id_list)
if view:
currentLanguage = k
break
if view:
break
android___id_list.uiScrollable.flingForward()
self.vc.sleep(1)
self.vc.dump(-1)
if view is None:
raise ViewNotFoundException("text", "'Language & input' (any language)", "ROOT")
view.touch()
self.vc.sleep(1)
self.vc.dump(-1)
self.vc.findViewWithTextOrRaise(PHONE_LANGUAGE[currentLanguage]).touch()
self.vc.sleep(1)
self.vc.dump(-1)
android___id_list = self.vc.findViewByIdOrRaise("android:id/list")
android___id_list.uiScrollable.setViewClient(self.vc)
if DEBUG_CHANGE_LANGUAGE:
print >> sys.stderr, "scrolling to find", LANGUAGES[languageTo]
view = android___id_list.uiScrollable.scrollTextIntoView(LANGUAGES[languageTo])
if view is not None:
view.touch()
else:
#raise RuntimeError(u"Couldn't change language to %s (%s)" % (LANGUAGES[languageTo], languageTo))
raise RuntimeError("Couldn't change language to %s" % languageTo)
self.vc.device.press('BACK')
self.vc.sleep(1)
self.vc.device.press('BACK')
class UiCollection():
'''
Used to enumerate a container's user interface (UI) elements for the purpose of counting, or
targeting a sub elements by a child's text or description.
'''
pass
class UiScrollable(UiCollection):
'''
A L{UiCollection} that supports searching for items in scrollable layout elements.
This class can be used with horizontally or vertically scrollable controls.
'''
def __init__(self, view):
self.vc = None
self.view = view
self.vertical = True
self.bounds = view.getBounds()
(self.x, self.y, self.w, self.h) = view.getPositionAndSize()
self.steps = 10
self.duration = 500
self.swipeDeadZonePercentage = 0.1
self.maxSearchSwipes = 10
def flingBackward(self):
if self.vertical:
s = (self.x + self.w/2, self.y + self.h * self.swipeDeadZonePercentage)
e = (self.x + self.w/2, self.y + self.h - self.h * self.swipeDeadZonePercentage)
else:
raise RuntimeError('Not implemented yet')
if DEBUG:
print >> sys.stderr, "flingBackward: view=", self.view.__smallStr__(), self.view.getPositionAndSize()
print >> sys.stderr, "self.view.device.drag(%s, %s, %s, %s)" % (s, e, self.duration, self.steps)
self.view.device.drag(s, e, self.duration, self.steps, self.view.device.display['orientation'])
def flingForward(self):
if self.vertical:
s = (self.x + self.w/2, (self.y + self.h ) - self.h * self.swipeDeadZonePercentage)
e = (self.x + self.w/2, self.y + self.h * self.swipeDeadZonePercentage)
else:
raise RuntimeError('Not implemented yet')
if DEBUG:
print >> sys.stderr, "flingForward: view=", self.view.__smallStr__(), self.view.getPositionAndSize()
print >> sys.stderr, "self.view.device.drag(%s, %s, %s, %s)" % (s, e, self.duration, self.steps)
self.view.device.drag(s, e, self.duration, self.steps, self.view.device.display['orientation'])
def flingToBeginning(self, maxSwipes=10):
if self.vertical:
for _ in range(maxSwipes):
if DEBUG:
print >> sys.stderr, "flinging to beginning"
self.flingBackward()
def flingToEnd(self, maxSwipes=10):
if self.vertical:
for _ in range(maxSwipes):
if DEBUG:
print >> sys.stderr, "flinging to end"
self.flingForward()
def scrollTextIntoView(self, text):
'''
Performs a forward scroll action on the scrollable layout element until the text you provided is visible,
or until swipe attempts have been exhausted. See setMaxSearchSwipes(int)
'''
if self.vc is None:
raise ValueError('vc must be set in order to use this method')
for n in range(self.maxSearchSwipes):
# FIXME: now I need to figure out the best way of navigating to the ViewClient asossiated
# with this UiScrollable.
# It's using setViewClient() now.
if DEBUG or DEBUG_CHANGE_LANGUAGE:
print >> sys.stderr, u"Searching for text='%s'" % text
for v in self.vc.views:
try:
print >> sys.stderr, " scrollTextIntoView: v=", v.getId(),
print >> sys.stderr, v.getText()
except Exception, e:
print >> sys.stderr, e
pass
#v = self.vc.findViewWithText(text, root=self.view)
v = self.vc.findViewWithText(text)
if v is not None:
return v
self.flingForward()
#self.vc.sleep(1)
self.vc.dump(-1)
# WARNING: after this dump, the value kept in self.view is outdated, it should be refreshed
# in some way
return None
def setAsHorizontalList(self):
self.vertical = False
def setAsVerticalList(self):
self.vertical = True
def setMaxSearchSwipes(self, maxSwipes):
self.maxSearchSwipes = maxSwipes
def setViewClient(self, vc):
self.vc = vc
class ListView(View):
'''
ListView class.
'''
pass
class UiAutomator2AndroidViewClient():
'''
UiAutomator XML to AndroidViewClient
'''
def __init__(self, device, version):
self.device = device
self.version = version
self.root = None
self.nodeStack = []
self.parent = None
self.views = []
self.idCount = 1
def StartElement(self, name, attributes):
'''
Expat start element event handler
'''
if name == 'hierarchy':
pass
elif name == 'node':
# Instantiate an Element object
attributes['uniqueId'] = 'id/no_id/%d' % self.idCount
bounds = re.split('[\][,]', attributes['bounds'])
attributes['bounds'] = ((int(bounds[1]), int(bounds[2])), (int(bounds[4]), int(bounds[5])))
if DEBUG_BOUNDS:
print >> sys.stderr, "bounds=", attributes['bounds']
self.idCount += 1
child = View.factory(attributes, self.device, self.version)
self.views.append(child)
# Push element onto the stack and make it a child of parent
if not self.nodeStack:
self.root = child
else:
self.parent = self.nodeStack[-1]
self.parent.add(child)
self.nodeStack.append(child)
def EndElement(self, name):
'''
Expat end element event handler
'''
if name == 'hierarchy':
pass
elif name == 'node':
self.nodeStack.pop()
def CharacterData(self, data):
'''
Expat character data event handler
'''
if data.strip():
data = data.encode()
element = self.nodeStack[-1]
element.cdata += data
def Parse(self, uiautomatorxml):
# Create an Expat parser
parser = xml.parsers.expat.ParserCreate() # @UndefinedVariable
# Set the Expat event handlers to our methods
parser.StartElementHandler = self.StartElement
parser.EndElementHandler = self.EndElement
parser.CharacterDataHandler = self.CharacterData
# Parse the XML File
try:
encoded = uiautomatorxml.encode(encoding='utf-8', errors='replace')
_ = parser.Parse(encoded, True)
except xml.parsers.expat.ExpatError, ex: # @UndefinedVariable
print >>sys.stderr, "ERROR: Offending XML:\n", repr(uiautomatorxml)
raise RuntimeError(ex)
return self.root
class Excerpt2Code():
''' Excerpt XML to code '''
def __init__(self):
self.data = None
def StartElement(self, name, attributes):
'''
Expat start element event handler
'''
if name == 'excerpt':
pass
else:
warnings.warn("Unexpected element: '%s'" % name)
def EndElement(self, name):
'''
Expat end element event handler
'''
if name == 'excerpt':
pass
def CharacterData(self, data):
'''
Expat character data event handler
'''
if data.strip():
data = data.encode()
if not self.data:
self.data = data
else:
self.data += data
def Parse(self, excerpt):
# Create an Expat parser
parser = xml.parsers.expat.ParserCreate() # @UndefinedVariable
# Set the Expat event handlers to our methods
parser.StartElementHandler = self.StartElement
parser.EndElementHandler = self.EndElement
parser.CharacterDataHandler = self.CharacterData
# Parse the XML
_ = parser.Parse(excerpt, 1)
return self.data
class ViewClientOptions:
'''
ViewClient options helper class
'''
DEVIDE = 'device'
SERIALNO = 'serialno'
AUTO_DUMP = 'autodump'
FORCE_VIEW_SERVER_USE = 'forceviewserveruse'
LOCAL_PORT = 'localport' # ViewServer local port
REMOTE_PORT = 'remoteport' # ViewServer remote port
START_VIEW_SERVER = 'startviewserver'
IGNORE_UIAUTOMATOR_KILLED = 'ignoreuiautomatorkilled'
COMPRESSED_DUMP = 'compresseddump'
USE_UIAUTOMATOR_HELPER = 'useuiautomatorhelper'
class ViewClient:
'''
ViewClient is a I{ViewServer} client.
ViewServer backend
==================
If not running the ViewServer is started on the target device or emulator and then the port
mapping is created.
UiAutomator backend
===================
No service is started.
'''
imageDirectory = None
''' The directory used to store screenshot images '''
def __init__(self, device, serialno, adb=None, autodump=True, forceviewserveruse=False, localport=VIEW_SERVER_PORT, remoteport=VIEW_SERVER_PORT, startviewserver=True, ignoreuiautomatorkilled=False, compresseddump=True, useuiautomatorhelper=False):
'''
Constructor
@type device: AdbClient
@param device: The device running the C{View server} to which this client will connect
@type serialno: str
@param serialno: the serial number of the device or emulator to connect to
@type adb: str
@param adb: the path of the C{adb} executable or None and C{ViewClient} will try to find it
@type autodump: boolean
@param autodump: whether an automatic dump is performed at the end of this constructor
@type forceviewserveruse: boolean
@param forceviewserveruse: Force the use of C{ViewServer} even if the conditions to use
C{UiAutomator} are satisfied
@type localport: int
@param localport: the local port used in the redirection
@type remoteport: int
@param remoteport: the remote port used to start the C{ViewServer} in the device or
emulator
@type startviewserver: boolean
@param startviewserver: Whether to start the B{global} ViewServer
@type ignoreuiautomatorkilled: boolean
@param ignoreuiautomatorkilled: Ignores received B{Killed} message from C{uiautomator}
@type compresseddump: boolean
@param compresseddump: turns --compressed flag for uiautomator dump on/off
@:type useuiautomatorhelper: boolean
@:param useuiautomatorhelper: use UiAutomatorHelper Android app as backend
'''
if not device:
raise Exception('Device is not connected')
self.device = device
''' The C{AdbClient} device instance '''
if not serialno:
raise ValueError("Serialno cannot be None")
self.serialno = self.__mapSerialNo(serialno)
''' The serial number of the device '''
if DEBUG_DEVICE: print >> sys.stderr, "ViewClient: using device with serialno", self.serialno
if adb:
if not os.access(adb, os.X_OK):
raise Exception('adb="%s" is not executable' % adb)
else:
# Using adbclient we don't need adb executable yet (maybe it's needed if we want to
# start adb if not running)
adb = obtainAdbPath()
self.adb = adb
''' The adb command '''
self.root = None
''' The root node '''
self.viewsById = {}
''' The map containing all the L{View}s indexed by their L{View.getUniqueId()} '''
self.display = {}
''' The map containing the device's display properties: width, height and density '''
for prop in [ 'width', 'height', 'density', 'orientation' ]:
self.display[prop] = -1
if USE_ADB_CLIENT_TO_GET_BUILD_PROPERTIES:
try:
self.display[prop] = device.display[prop]
except:
if WARNINGS:
warnings.warn("Couldn't determine display %s" % prop)
else:
# these values are usually not defined as properties, so we stick to the -1 set
# before
pass
self.build = {}
''' The map containing the device's build properties: version.sdk, version.release '''
for prop in [VERSION_SDK_PROPERTY, VERSION_RELEASE_PROPERTY]:
self.build[prop] = -1
try:
if USE_ADB_CLIENT_TO_GET_BUILD_PROPERTIES:
self.build[prop] = device.getProperty(prop)
else:
self.build[prop] = device.shell('getprop ro.build.' + prop)[:-2]
except:
if WARNINGS:
warnings.warn("Couldn't determine build %s" % prop)
if prop == VERSION_SDK_PROPERTY:
# we expect it to be an int
self.build[prop] = int(self.build[prop] if self.build[prop] else -1)
self.ro = {}
''' The map containing the device's ro properties: secure, debuggable '''
for prop in ['secure', 'debuggable']:
try:
self.ro[prop] = device.shell('getprop ro.' + prop)[:-2]
except:
if WARNINGS:
warnings.warn("Couldn't determine ro %s" % prop)
self.ro[prop] = 'UNKNOWN'
self.forceViewServerUse = forceviewserveruse
''' Force the use of ViewServer even if the conditions to use UiAutomator are satisfied '''
self.useUiAutomator = (self.build[VERSION_SDK_PROPERTY] >= 16) and not forceviewserveruse # jelly bean 4.1 & 4.2
if DEBUG:
print >> sys.stderr, " ViewClient.__init__: useUiAutomator=", self.useUiAutomator, "sdk=", self.build[VERSION_SDK_PROPERTY], "forceviewserveruse=", forceviewserveruse
''' If UIAutomator is supported by the device it will be used '''
self.ignoreUiAutomatorKilled = ignoreuiautomatorkilled
''' On some devices (i.e. Nexus 7 running 4.2.2) uiautomator is killed just after generating
the dump file. In many cases the file is already complete so we can ask to ignore the 'Killed'
message by setting L{ignoreuiautomatorkilled} to C{True}.
Changes in v2.3.21 that uses C{/dev/tty} instead of a file may have turned this variable
unnecessary, however it has been kept for backward compatibility.
'''
if self.useUiAutomator:
self.textProperty = TEXT_PROPERTY_UI_AUTOMATOR
else:
if self.build[VERSION_SDK_PROPERTY] <= 10:
self.textProperty = TEXT_PROPERTY_API_10
else:
self.textProperty = TEXT_PROPERTY
if startviewserver:
if not self.serviceResponse(device.shell('service call window 3')):
try:
self.assertServiceResponse(device.shell('service call window 1 i32 %d' %
remoteport))
except:
msg = 'Cannot start View server.\n' \
'This only works on emulator and devices running developer versions.\n' \
'Does hierarchyviewer work on your device?\n' \
'See https://github.com/dtmilano/AndroidViewClient/wiki/Secure-mode\n\n' \
'Device properties:\n' \
' ro.secure=%s\n' \
' ro.debuggable=%s\n' % (self.ro['secure'], self.ro['debuggable'])
raise Exception(msg)
self.localPort = localport
self.remotePort = remoteport
# FIXME: it seems there's no way of obtaining the serialno from the MonkeyDevice
subprocess.check_call([self.adb, '-s', self.serialno, 'forward', 'tcp:%d' % self.localPort,
'tcp:%d' % self.remotePort])
self.windows = None
''' The list of windows as obtained by L{ViewClient.list()} '''
# FIXME: may not be true, one may want UiAutomator but without UiAutomatorHelper
if self.useUiAutomator:
if useuiautomatorhelper:
self.uiAutomatorHelper = UiAutomatorHelper(device)
''' The UiAutomatorHelper '''
else:
self.uiAutomatorHelper = None
self.uiDevice = UiDevice(self)
''' The L{UiDevice} '''
''' The output of compressed dump is different than output of uncompressed one.
If one requires uncompressed output, this option should be set to False
'''
self.compressedDump = compresseddump
if autodump:
self.dump()
def __del__(self):
# should clean up some things
if self.uiAutomatorHelper:
if DEBUG or True:
print >> sys.stderr, "Stopping UiAutomatorHelper..."
self.uiAutomatorHelper.quit()
pass
@staticmethod
def __obtainAdbPath():
return obtainAdbPath()
@staticmethod
def __mapSerialNo(serialno):
serialno = serialno.strip()
#ipRE = re.compile('^\d+\.\d+.\d+.\d+$')
if IP_RE.match(serialno):
if DEBUG_DEVICE: print >>sys.stderr, "ViewClient: adding default port to serialno", serialno, ADB_DEFAULT_PORT
return serialno + ':%d' % ADB_DEFAULT_PORT
ipPortRE = re.compile('^\d+\.\d+.\d+.\d+:\d+$')
if ipPortRE.match(serialno):
# nothing to map
return serialno
if re.search("[.*()+]", serialno):
raise ValueError("Regular expression not supported as serialno in ViewClient. Found '%s'" % serialno)
return serialno
@staticmethod
def __obtainDeviceSerialNumber(device):
if DEBUG_DEVICE: print >>sys.stderr, "ViewClient: obtaining serial number for connected device"
serialno = device.getProperty('ro.serialno')
if not serialno:
serialno = device.shell('getprop ro.serialno')
if serialno:
serialno = serialno[:-2]
if not serialno:
qemu = device.shell('getprop ro.kernel.qemu')
if qemu:
qemu = qemu[:-2]
if qemu and int(qemu) == 1:
# FIXME !!!!!
# this must be calculated from somewhere, though using a fixed serialno for now
warnings.warn("Running on emulator but no serial number was specified then 'emulator-5554' is used")
serialno = 'emulator-5554'
if not serialno:
# If there's only one device connected get its serialno
adb = ViewClient.__obtainAdbPath()
if DEBUG_DEVICE: print >>sys.stderr, " using adb=%s" % adb
s = subprocess.Popen([adb, 'get-serialno'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, env={}).communicate()[0][:-1]
if s != 'unknown':
serialno = s
if DEBUG_DEVICE: print >>sys.stderr, " serialno=%s" % serialno
if not serialno:
warnings.warn("Couldn't obtain the serialno of the connected device")
return serialno
@staticmethod
def setAlarm(timeout):
osName = platform.system()
if osName.startswith('Windows'): # alarm is not implemented in Windows
return
signal.alarm(timeout)
@staticmethod
def connectToDeviceOrExit(timeout=60, verbose=False, ignoresecuredevice=False, ignoreversioncheck=False, serialno=None):
'''
Connects to a device which serial number is obtained from the script arguments if available
or using the default regex C{.*}.
If the connection is not successful the script exits.
History
-------
In MonkeyRunner times, this method was a way of overcoming one of its limitations.
L{MonkeyRunner.waitForConnection()} returns a L{MonkeyDevice} even if the connection failed.
Then, to detect this situation, C{device.wake()} is attempted and if it fails then it is
assumed the previous connection failed.
@type timeout: int
@param timeout: timeout for the connection
@type verbose: bool
@param verbose: Verbose output
@type ignoresecuredevice: bool
@param ignoresecuredevice: Ignores the check for a secure device
@type ignoreversioncheck: bool
@param ignoreversioncheck: Ignores the check for a supported ADB version
@type serialno: str
@param serialno: The device or emulator serial number
@return: the device and serialno used for the connection
'''
progname = os.path.basename(sys.argv[0])
if serialno is None:
# eat all the extra options the invoking script may have added
args = sys.argv
while len(args) > 1 and args[1][0] == '-':
args.pop(1)
serialno = args[1] if len(args) > 1 else \
os.environ['ANDROID_SERIAL'] if os.environ.has_key('ANDROID_SERIAL') \
else '.*'
if IP_RE.match(serialno):
# If matches an IP address format and port was not specified add the default
serialno += ':%d' % ADB_DEFAULT_PORT
if verbose:
print >> sys.stderr, 'Connecting to a device with serialno=%s with a timeout of %d secs...' % \
(serialno, timeout)
ViewClient.setAlarm(timeout+5)
# NOTE: timeout is used for 2 different timeouts, the one to set the alarm to timeout the connection with
# adb and the timeout used by adb (once connected) for the sockets
device = adbclient.AdbClient(serialno, ignoreversioncheck=ignoreversioncheck, timeout=timeout)
ViewClient.setAlarm(0)
if verbose:
print >> sys.stderr, 'Connected to device with serialno=%s' % serialno
secure = device.getSystemProperty('ro.secure')
debuggable = device.getSystemProperty('ro.debuggable')
versionProperty = device.getProperty(VERSION_SDK_PROPERTY)
if versionProperty:
version = int(versionProperty)
else:
if verbose:
print "Couldn't obtain device SDK version"
version = -1
# we are going to use UiAutomator for versions >= 16 that's why we ignore if the device
# is secure if this is true
if secure == '1' and debuggable == '0' and not ignoresecuredevice and version < 16:
print >> sys.stderr, "%s: ERROR: Device is secure, AndroidViewClient won't work." % progname
if verbose:
print >> sys.stderr, " secure=%s debuggable=%s version=%d ignoresecuredevice=%s" % \
(secure, debuggable, version, ignoresecuredevice)
sys.exit(2)
if re.search("[.*()+]", serialno) and not re.search("(\d{1,3}\.){3}\d{1,3}", serialno):
# if a regex was used we have to determine the serialno used
serialno = ViewClient.__obtainDeviceSerialNumber(device)
if verbose:
print >> sys.stderr, 'Actual device serialno=%s' % serialno
return device, serialno
@staticmethod
def traverseShowClassIdAndText(view, extraInfo=None, noExtraInfo=None, extraAction=None):
'''
Shows the View class, id and text if available.
This function can be used as a transform function to L{ViewClient.traverse()}
@type view: I{View}
@param view: the View
@type extraInfo: method
@param extraInfo: the View method to add extra info
@type noExtraInfo: bool
@param noExtraInfo: Don't add extra info
@type extraAction: method
@param extraAction: An extra action to be invoked for every view
@return: the string containing class, id, and text if available
'''
try:
eis = ''
if extraInfo:
eis = extraInfo(view)
if not eis and noExtraInfo:
eis = noExtraInfo
if eis:
eis = ' {0}'.format(eis)
if extraAction:
extraAction(view)
_str = unicode(view.getClass())
_str += ' '
_str += view.getId()
_str += ' '
_str += view.getText() if view.getText() else ''
_str += eis
return _str
except Exception, e:
import traceback
return u'Exception in view=%s: %s:%s\n%s' % (view.__smallStr__(), sys.exc_info()[0].__name__, e, traceback.format_exc())
@staticmethod
def traverseShowClassIdTextAndUniqueId(view):
'''
Shows the View class, id, text if available and unique id.
This function can be used as a transform function to L{ViewClient.traverse()}
@type view: I{View}
@param view: the View
@return: the string containing class, id, and text if available and unique Id
'''
return ViewClient.traverseShowClassIdAndText(view, View.getUniqueId)
@staticmethod
def traverseShowClassIdTextAndContentDescription(view):
'''
Shows the View class, id, text if available and content description.
This function can be used as a transform function to L{ViewClient.traverse()}
@type view: I{View}
@param view: the View
@return: the string containing class, id, and text if available and the content description
'''
return ViewClient.traverseShowClassIdAndText(view, View.getContentDescription, 'NAF')
@staticmethod
def traverseShowClassIdTextAndTag(view):
'''
Shows the View class, id, text if available and tag.
This function can be used as a transform function to L{ViewClient.traverse()}
@type view: I{View}
@param view: the View
@return: the string containing class, id, and text if available and tag
'''
return ViewClient.traverseShowClassIdAndText(view, View.getTag, None)
@staticmethod
def traverseShowClassIdTextContentDescriptionAndScreenshot(view):
'''
Shows the View class, id, text if available and unique id and takes the screenshot.
This function can be used as a transform function to L{ViewClient.traverse()}
@type view: I{View}
@param view: the View
@return: the string containing class, id, and text if available and the content description
'''
return ViewClient.traverseShowClassIdAndText(view, View.getContentDescription, 'NAF', extraAction=ViewClient.writeViewImageToFileInDir)
@staticmethod
def traverseShowClassIdTextAndCenter(view):
'''
Shows the View class, id and text if available and center.
This function can be used as a transform function to L{ViewClient.traverse()}
@type view: I{View}
@param view: the View
@return: the string containing class, id, and text if available
'''
return ViewClient.traverseShowClassIdAndText(view, View.getCenter)
@staticmethod
def traverseShowClassIdTextPositionAndSize(view):
'''
Shows the View class, id and text if available.
This function can be used as a transform function to L{ViewClient.traverse()}
@type view: I{View}
@param view: the View
@return: the string containing class, id, and text if available
'''
return ViewClient.traverseShowClassIdAndText(view, View.getPositionAndSize)
@staticmethod
def traverseShowClassIdTextAndBounds(view):
'''
Shows the View class, id and text if available.
This function can be used as a transform function to L{ViewClient.traverse()}
@type view: I{View}
@param view: the View
@return: the string containing class, id, and text if available plus
View bounds
'''
return ViewClient.traverseShowClassIdAndText(view, View.getBounds)
@staticmethod
def traverseTakeScreenshot(view):
'''
Don't show any any, just takes the screenshot.
This function can be used as a transform function to L{ViewClient.traverse()}
@type view: I{View}
@param view: the View
@return: None
'''
return ViewClient.writeViewImageToFileInDir(view)
# methods that can be used to transform ViewClient.traverse output
TRAVERSE_CIT = traverseShowClassIdAndText
''' An alias for L{traverseShowClassIdAndText(view)} '''
TRAVERSE_CITUI = traverseShowClassIdTextAndUniqueId
''' An alias for L{traverseShowClassIdTextAndUniqueId(view)} '''
TRAVERSE_CITCD = traverseShowClassIdTextAndContentDescription
''' An alias for L{traverseShowClassIdTextAndContentDescription(view)} '''
TRAVERSE_CITG = traverseShowClassIdTextAndTag
''' An alias for L{traverseShowClassIdTextAndTag(view)} '''
TRAVERSE_CITC = traverseShowClassIdTextAndCenter
''' An alias for L{traverseShowClassIdTextAndCenter(view)} '''
TRAVERSE_CITPS = traverseShowClassIdTextPositionAndSize
''' An alias for L{traverseShowClassIdTextPositionAndSize(view)} '''
TRAVERSE_CITB = traverseShowClassIdTextAndBounds
''' An alias for L{traverseShowClassIdTextAndBounds(view)} '''
TRAVERSE_CITCDS = traverseShowClassIdTextContentDescriptionAndScreenshot
''' An alias for L{traverseShowClassIdTextContentDescriptionAndScreenshot(view)} '''
TRAVERSE_S = traverseTakeScreenshot
''' An alias for L{traverseTakeScreenshot(view)} '''
@staticmethod
def sleep(secs=1.0):
'''
Sleeps for the specified number of seconds.
@type secs: float
@param secs: number of seconds
'''
time.sleep(secs)
def assertServiceResponse(self, response):
'''
Checks whether the response received from the server is correct or raises and Exception.
@type response: str
@param response: Response received from the server
@raise Exception: If the response received from the server is invalid
'''
if not self.serviceResponse(response):
raise Exception('Invalid response received from service.')
def serviceResponse(self, response):
'''
Checks the response received from the I{ViewServer}.
@return: C{True} if the response received matches L{PARCEL_TRUE}, C{False} otherwise
'''
PARCEL_TRUE = "Result: Parcel(00000000 00000001 '........')\r\n"
''' The TRUE response parcel '''
if DEBUG:
print >>sys.stderr, "serviceResponse: comparing '%s' vs Parcel(%s)" % (response, PARCEL_TRUE)
return response == PARCEL_TRUE
def setViews(self, received, windowId=None):
'''
Sets L{self.views} to the received value splitting it into lines.
@type received: str
@param received: the string received from the I{View Server}
'''
if not received or received == "":
raise ValueError("received is empty")
self.views = []
''' The list of Views represented as C{str} obtained after splitting it into lines after being received from the server. Done by L{self.setViews()}. '''
self.__parseTree(received.split("\n"), windowId)
if DEBUG:
print >>sys.stderr, "there are %d views in this dump" % len(self.views)
def setViewsFromUiAutomatorDump(self, received):
'''
Sets L{self.views} to the received value parsing the received XML.
@type received: str
@param received: the string received from the I{UI Automator}
'''
if not received or received == "":
raise ValueError("received is empty")
self.views = []
''' The list of Views represented as C{str} obtained after splitting it into lines after being received from the server. Done by L{self.setViews()}. '''
self.__parseTreeFromUiAutomatorDump(received)
if DEBUG:
print >>sys.stderr, "there are %d views in this dump" % len(self.views)
def __splitAttrs(self, strArgs):
'''
Splits the C{View} attributes in C{strArgs} and optionally adds the view id to the C{viewsById} list.
Unique Ids
==========
It is very common to find C{View}s having B{NO_ID} as the Id. This turns very difficult to
use L{self.findViewById()}. To help in this situation this method assigns B{unique Ids}.
The B{unique Ids} are generated using the pattern C{id/no_id/<number>} with C{<number>} starting
at 1.
@type strArgs: str
@param strArgs: the string containing the raw list of attributes and values
@return: Returns the attributes map.
'''
if self.useUiAutomator:
raise RuntimeError("This method is not compatible with UIAutomator")
# replace the spaces in text:mText to preserve them in later split
# they are translated back after the attribute matches
textRE = re.compile('%s=%s,' % (self.textProperty, _nd('len')))
m = textRE.search(strArgs)
if m:
__textStart = m.end()
__textLen = int(m.group('len'))
__textEnd = m.end() + __textLen
s1 = strArgs[__textStart:__textEnd]
s2 = s1.replace(' ', WS)
strArgs = strArgs.replace(s1, s2, 1)
idRE = re.compile("(?P<viewId>id/\S+)")
attrRE = re.compile('%s(?P<parens>\(\))?=%s,(?P<val>[^ ]*)' % (_ns('attr'), _nd('len')), flags=re.DOTALL)
hashRE = re.compile('%s@%s' % (_ns('class'), _nh('oid')))
attrs = {}
viewId = None
m = idRE.search(strArgs)
if m:
viewId = m.group('viewId')
if DEBUG:
print >>sys.stderr, "found view with id=%s" % viewId
for attr in strArgs.split():
m = attrRE.match(attr)
if m:
__attr = m.group('attr')
__parens = '()' if m.group('parens') else ''
__len = int(m.group('len'))
__val = m.group('val')
if WARNINGS and __len != len(__val):
warnings.warn("Invalid len: expected: %d found: %d s=%s e=%s" % (__len, len(__val), __val[:50], __val[-50:]))
if __attr == self.textProperty:
# restore spaces that have been replaced
__val = __val.replace(WS, ' ')
attrs[__attr + __parens] = __val
else:
m = hashRE.match(attr)
if m:
attrs['class'] = m.group('class')
attrs['oid'] = m.group('oid')
else:
if DEBUG:
print >>sys.stderr, attr, "doesn't match"
if True: # was assignViewById
if not viewId:
# If the view has NO_ID we are assigning a default id here (id/no_id) which is
# immediately incremented if another view with no id was found before to generate
# a unique id
viewId = "id/no_id/1"
if viewId in self.viewsById:
# sometimes the view ids are not unique, so let's generate a unique id here
i = 1
while True:
newId = re.sub('/\d+$', '', viewId) + '/%d' % i
if not newId in self.viewsById:
break
i += 1
viewId = newId
if DEBUG:
print >>sys.stderr, "adding viewById %s" % viewId
# We are assigning a new attribute to keep the original id preserved, which could have
# been NO_ID repeated multiple times
attrs['uniqueId'] = viewId
return attrs
def __parseTree(self, receivedLines, windowId=None):
'''
Parses the View tree contained in L{receivedLines}. The tree is created and the root node assigned to L{self.root}.
This method also assigns L{self.viewsById} values using L{View.getUniqueId} as the key.
@type receivedLines: str
@param receivedLines: the string received from B{View Server}
'''
self.root = None
self.viewsById = {}
self.views = []
parent = None
parents = []
treeLevel = -1
newLevel = -1
lastView = None
for v in receivedLines:
if v == '' or v == 'DONE' or v == 'DONE.':
break
attrs = self.__splitAttrs(v)
if not self.root:
if v[0] == ' ':
raise Exception("Unexpected root element starting with ' '.")
self.root = View.factory(attrs, self.device, self.build[VERSION_SDK_PROPERTY], self.forceViewServerUse, windowId)
if DEBUG: self.root.raw = v
treeLevel = 0
newLevel = 0
lastView = self.root
parent = self.root
parents.append(parent)
else:
newLevel = (len(v) - len(v.lstrip()))
if newLevel == 0:
raise Exception("newLevel==0 treeLevel=%d but tree can have only one root, v=%s" % (treeLevel, v))
child = View.factory(attrs, self.device, self.build[VERSION_SDK_PROPERTY], self.forceViewServerUse, windowId)
if DEBUG: child.raw = v
if newLevel == treeLevel:
parent.add(child)
lastView = child
elif newLevel > treeLevel:
if (newLevel - treeLevel) != 1:
raise Exception("newLevel jumps %d levels, v=%s" % ((newLevel-treeLevel), v))
parent = lastView
parents.append(parent)
parent.add(child)
lastView = child
treeLevel = newLevel
else: # newLevel < treeLevel
for _ in range(treeLevel - newLevel):
parents.pop()
parent = parents.pop()
parents.append(parent)
parent.add(child)
treeLevel = newLevel
lastView = child
self.views.append(lastView)
self.viewsById[lastView.getUniqueId()] = lastView
def __parseTreeFromUiAutomatorDump(self, receivedXml):
parser = UiAutomator2AndroidViewClient(self.device, self.build[VERSION_SDK_PROPERTY])
try:
start_xml_index = receivedXml.index("<")
except ValueError:
raise ValueError("received does not contain valid XML data")
self.root = parser.Parse(receivedXml[start_xml_index:])
self.views = parser.views
self.viewsById = {}
for v in self.views:
self.viewsById[v.getUniqueId()] = v
def getRoot(self):
'''
Gets the root node of the C{View} tree
@return: the root node of the C{View} tree
'''
return self.root
def traverse(self, root="ROOT", indent="", transform=None, stream=sys.stdout):
'''
Traverses the C{View} tree and prints its nodes.
The nodes are printed converting them to string but other transformations can be specified
by providing a method name as the C{transform} parameter.
@type root: L{View}
@param root: the root node from where the traverse starts
@type indent: str
@param indent: the indentation string to use to print the nodes
@type transform: method
@param transform: a method to use to transform the node before is printed
'''
if transform is None:
# this cannot be a default value, otherwise
# TypeError: 'staticmethod' object is not callable
# is raised
transform = ViewClient.TRAVERSE_CIT
if type(root) == types.StringType and root == "ROOT":
root = self.root
return ViewClient.__traverse(root, indent, transform, stream)
# if not root:
# return
#
# s = transform(root)
# if s:
# print >>stream, "%s%s" % (indent, s)
#
# for ch in root.children:
# self.traverse(ch, indent=indent+" ", transform=transform, stream=stream)
@staticmethod
def __traverse(root, indent="", transform=View.__str__, stream=sys.stdout):
if not root:
return
s = transform(root)
if stream and s:
ius = "%s%s" % (indent, s if isinstance(s, unicode) else unicode(s, 'utf-8', 'replace'))
print >>stream, ius.encode('utf-8', 'replace')
for ch in root.children:
ViewClient.__traverse(ch, indent=indent+" ", transform=transform, stream=stream)
def dump(self, window=-1, sleep=1):
'''
Dumps the window content.
Sleep is useful to wait some time before obtaining the new content when something in the
window has changed.
@type window: int or str
@param window: the window id or name of the window to dump.
The B{name} is the package name or the window name (i.e. StatusBar) for
system windows.
The window id can be provided as C{int} or C{str}. The C{str} should represent
and C{int} in either base 10 or 16.
Use -1 to dump all windows.
This parameter only is used when the backend is B{ViewServer} and it's
ignored for B{UiAutomator}.
@type sleep: int
@param sleep: sleep in seconds before proceeding to dump the content
@return: the list of Views as C{str} received from the server after being split into lines
'''
if sleep > 0:
time.sleep(sleep)
if self.useUiAutomator:
if self.uiAutomatorHelper:
received = self.uiAutomatorHelper.dumpWindowHierarchy()
else:
# NOTICE:
# Using /dev/tty this works even on devices with no sdcard
received = unicode(self.device.shell('uiautomator dump %s /dev/tty >/dev/null' % ('--compressed' if self.getSdkVersion() >= 18 and self.compressedDump else '')), encoding='utf-8', errors='replace')
if not received:
raise RuntimeError('ERROR: Empty UiAutomator dump was received')
if DEBUG:
self.received = received
if DEBUG_RECEIVED:
print >>sys.stderr, "received %d chars" % len(received)
print >>sys.stderr
print >>sys.stderr, repr(received)
print >>sys.stderr
onlyKilledRE = re.compile('[\n\S]*Killed[\n\r\S]*', re.MULTILINE)
if onlyKilledRE.search(received):
MONKEY = 'com.android.commands.monkey'
extraInfo = ''
if self.device.shell('ps | grep "%s"' % MONKEY):
extraInfo = "\nIt is know that '%s' conflicts with 'uiautomator'. Please kill it and try again." % MONKEY
raise RuntimeError('''ERROR: UiAutomator output contains no valid information. UiAutomator was killed, no reason given.''' + extraInfo)
if self.ignoreUiAutomatorKilled:
if DEBUG_RECEIVED:
print >>sys.stderr, "ignoring UiAutomator Killed"
killedRE = re.compile('</hierarchy>[\n\S]*Killed', re.MULTILINE)
if killedRE.search(received):
received = re.sub(killedRE, '</hierarchy>', received)
elif DEBUG_RECEIVED:
print "UiAutomator Killed: NOT FOUND!"
# It seems that API18 uiautomator spits this message to stdout
dumpedToDevTtyRE = re.compile('</hierarchy>[\n\S]*UI hierchary dumped to: /dev/tty.*', re.MULTILINE)
if dumpedToDevTtyRE.search(received):
received = re.sub(dumpedToDevTtyRE, '</hierarchy>', received)
if DEBUG_RECEIVED:
print >>sys.stderr, "received=", received
# API19 seems to send this warning as part of the XML.
# Let's remove it if present
received = received.replace('WARNING: linker: libdvm.so has text relocations. This is wasting memory and is a security risk. Please fix.\r\n', '')
if re.search('\[: not found', received):
raise RuntimeError('''ERROR: Some emulator images (i.e. android 4.1.2 API 16 generic_x86) does not include the '[' command.
While UiAutomator back-end might be supported 'uiautomator' command fails.
You should force ViewServer back-end.''')
if received.startswith('ERROR: could not get idle state.'):
# See https://android.googlesource.com/platform/frameworks/testing/+/jb-mr2-release/uiautomator/cmds/uiautomator/src/com/android/commands/uiautomator/DumpCommand.java
raise RuntimeError('''The views are being refreshed too frequently to dump.''')
self.setViewsFromUiAutomatorDump(received)
else:
if isinstance(window, str):
if window != '-1':
self.list(sleep=0)
found = False
for wId in self.windows:
try:
if window == self.windows[wId]:
window = wId
found = True
break
except:
pass
try:
if int(window) == wId:
window = wId
found = True
break
except:
pass
try:
if int(window, 16) == wId:
window = wId
found = True
break
except:
pass
if not found:
raise RuntimeError("ERROR: Cannot find window '%s' in %s" % (window, self.windows))
else:
window = -1
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((VIEW_SERVER_HOST, self.localPort))
except socket.error, ex:
raise RuntimeError("ERROR: Connecting to %s:%d: %s" % (VIEW_SERVER_HOST, self.localPort, ex))
cmd = 'dump %x\r\n' % window
if DEBUG:
print >>sys.stderr, "executing: '%s'" % cmd
s.send(cmd)
received = ""
doneRE = re.compile("DONE")
ViewClient.setAlarm(120)
while True:
if DEBUG_RECEIVED:
print >>sys.stderr, " reading from socket..."
received += s.recv(1024)
if doneRE.search(received[-7:]):
break
s.close()
ViewClient.setAlarm(0)
if DEBUG:
self.received = received
if DEBUG_RECEIVED:
print >>sys.stderr, "received %d chars" % len(received)
print >>sys.stderr
print >>sys.stderr, received
print >>sys.stderr
if received:
for c in received:
if ord(c) > 127:
received = unicode(received, encoding='utf-8', errors='replace')
break
self.setViews(received, hex(window)[2:])
if DEBUG_TREE:
self.traverse(self.root)
return self.views
def list(self, sleep=1):
'''
List the windows.
Sleep is useful to wait some time before obtaining the new content when something in the
window has changed.
This also sets L{self.windows} as the list of windows.
@type sleep: int
@param sleep: sleep in seconds before proceeding to dump the content
@return: the list of windows
'''
if sleep > 0:
time.sleep(sleep)
if self.useUiAutomator:
raise Exception("Not implemented yet: listing windows with UiAutomator")
else:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((VIEW_SERVER_HOST, self.localPort))
except socket.error, ex:
raise RuntimeError("ERROR: Connecting to %s:%d: %s" % (VIEW_SERVER_HOST, self.localPort, ex))
s.send('list\r\n')
received = ""
doneRE = re.compile("DONE")
while True:
received += s.recv(1024)
if doneRE.search(received[-7:]):
break
s.close()
if DEBUG:
self.received = received
if DEBUG_RECEIVED:
print >>sys.stderr, "received %d chars" % len(received)
print >>sys.stderr
print >>sys.stderr, received
print >>sys.stderr
self.windows = {}
for line in received.split('\n'):
if not line:
break
if doneRE.search(line):
break
values = line.split()
if len(values) > 1:
package = values[1]
else:
package = "UNKNOWN"
if len(values) > 0:
wid = values[0]
else:
wid = '00000000'
self.windows[int('0x' + wid, 16)] = package
return self.windows
def findViewById(self, viewId, root="ROOT", viewFilter=None):
'''
Finds the View with the specified viewId.
@type viewId: str
@param viewId: the ID of the view to find
@type root: str
@type root: View
@param root: the root node of the tree where the View will be searched
@type: viewFilter: function
@param viewFilter: a function that will be invoked providing the candidate View as a parameter
and depending on the return value (C{True} or C{False}) the View will be
selected and returned as the result of C{findViewById()} or ignored.
This can be C{None} and no extra filtering is applied.
@return: the C{View} found or C{None}
'''
if not root:
return None
if type(root) == types.StringType and root == "ROOT":
return self.findViewById(viewId, self.root, viewFilter)
if root.getId() == viewId:
if viewFilter:
if viewFilter(root):
return root
else:
return root
if re.match('^id/no_id', viewId) or re.match('^id/.+/.+', viewId):
if root.getUniqueId() == viewId:
if viewFilter:
if viewFilter(root):
return root;
else:
return root
for ch in root.children:
foundView = self.findViewById(viewId, ch, viewFilter)
if foundView:
if viewFilter:
if viewFilter(foundView):
return foundView
else:
return foundView
def findViewByIdOrRaise(self, viewId, root="ROOT", viewFilter=None):
'''
Finds the View or raise a ViewNotFoundException.
@type viewId: str
@param viewId: the ID of the view to find
@type root: str
@type root: View
@param root: the root node of the tree where the View will be searched
@type: viewFilter: function
@param viewFilter: a function that will be invoked providing the candidate View as a parameter
and depending on the return value (C{True} or C{False}) the View will be
selected and returned as the result of C{findViewById()} or ignored.
This can be C{None} and no extra filtering is applied.
@return: the View found
@raise ViewNotFoundException: raise the exception if View not found
'''
view = self.findViewById(viewId, root, viewFilter)
if view:
return view
else:
raise ViewNotFoundException("ID", viewId, root)
def findViewByTag(self, tag, root="ROOT"):
'''
Finds the View with the specified tag
'''
return self.findViewWithAttribute('getTag()', tag, root)
def findViewByTagOrRaise(self, tag, root="ROOT"):
'''
Finds the View with the specified tag or raise a ViewNotFoundException
'''
view = self.findViewWithAttribute('getTag()', tag, root)
if view:
return view
else:
raise ViewNotFoundException("tag", tag, root)
def __findViewsWithAttributeInTree(self, attr, val, root):
# Note the plural in this method name
matchingViews = []
if not self.root:
print >>sys.stderr, "ERROR: no root, did you forget to call dump()?"
return matchingViews
if type(root) == types.StringType and root == "ROOT":
root = self.root
if DEBUG: print >>sys.stderr, "__findViewWithAttributeInTree: type val=", type(val)
if DEBUG: print >>sys.stderr, "__findViewWithAttributeInTree: checking if root=%s has attr=%s == %s" % (root.__smallStr__(), attr, val)
if root and attr in root.map and root.map[attr] == val:
if DEBUG: print >>sys.stderr, "__findViewWithAttributeInTree: FOUND: %s" % root.__smallStr__()
matchingViews.append(root)
else:
for ch in root.children:
matchingViews += self.__findViewsWithAttributeInTree(attr, val, ch)
return matchingViews
def __findViewWithAttributeInTree(self, attr, val, root):
if DEBUG:
print >> sys.stderr, " __findViewWithAttributeInTree: type(val)=", type(val)
if type(val) != types.UnicodeType:
u = unicode(val, encoding='utf-8', errors='ignore')
else:
u = val
print >> sys.stderr, u'''__findViewWithAttributeInTree({0}'''.format(attr),
try:
print >> sys.stderr, u''', {0}'''.format(u),
except:
pass
print >> sys.stderr, u'>>>>>>>>>>>>>>>>>>', type(root)
if type(root) == types.StringType:
print >> sys.stderr, u'>>>>>>>>>>>>>>>>>>', root
print >> sys.stderr, u''', {0})'''.format(root)
else:
print >> sys.stderr, u''', {0})'''.format(root.__smallStr__())
if not self.root:
print >>sys.stderr, "ERROR: no root, did you forget to call dump()?"
return None
if type(root) == types.StringType and root == "ROOT":
root = self.root
if DEBUG: print >>sys.stderr, "__findViewWithAttributeInTree: type val=", type(val)
if DEBUG:
#print >> sys.stderr, u'''__findViewWithAttributeInTree: checking if root={0}: '''.format(root),
print >> sys.stderr, u'''has {0} == '''.format(attr),
if type(val) == types.UnicodeType:
u = val
else:
u = unicode(val, encoding='utf-8', errors='replace')
try:
print >> sys.stderr, u'''{0}'''.format(u)
except:
pass
if isinstance(val, RegexType):
return self.__findViewWithAttributeInTreeThatMatches(attr, val, root)
else:
try:
if DEBUG:
print >> sys.stderr, u'''__findViewWithAttributeInTree: comparing {0}: '''.format(attr),
print >> sys.stderr, u'''{0} == '''.format(root.map[attr]),
print >> sys.stderr, u'''{0}'''.format(val)
except:
pass
if root and attr in root.map and root.map[attr] == val:
if DEBUG: print >>sys.stderr, "__findViewWithAttributeInTree: FOUND: %s" % root.__smallStr__()
return root
else:
for ch in root.children:
v = self.__findViewWithAttributeInTree(attr, val, ch)
if v:
return v
return None
def __findViewWithAttributeInTreeOrRaise(self, attr, val, root):
view = self.__findViewWithAttributeInTree(attr, val, root)
if view:
return view
else:
raise ViewNotFoundException(attr, val, root)
def __findViewWithAttributeInTreeThatMatches(self, attr, regex, root, rlist=[]):
if not self.root:
print >>sys.stderr, "ERROR: no root, did you forget to call dump()?"
return None
if type(root) == types.StringType and root == "ROOT":
root = self.root
if DEBUG: print >>sys.stderr, "__findViewWithAttributeInTreeThatMatches: checking if root=%s attr=%s matches %s" % (root.__smallStr__(), attr, regex)
if root and attr in root.map and regex.match(root.map[attr]):
if DEBUG: print >>sys.stderr, "__findViewWithAttributeInTreeThatMatches: FOUND: %s" % root.__smallStr__()
return root
#print >>sys.stderr, "appending root=%s to rlist=%s" % (root.__smallStr__(), rlist)
#return rlist.append(root)
else:
for ch in root.children:
v = self.__findViewWithAttributeInTreeThatMatches(attr, regex, ch, rlist)
if v:
return v
#print >>sys.stderr, "appending v=%s to rlist=%s" % (v.__smallStr__(), rlist)
#return rlist.append(v)
return None
#return rlist
def findViewWithAttribute(self, attr, val, root="ROOT"):
'''
Finds the View with the specified attribute and value
'''
if DEBUG:
try:
print >> sys.stderr, u'findViewWithAttribute({0}, {1}, {2})'.format(attr, unicode(val, encoding='utf-8', errors='replace'), root)
except:
pass
print >> sys.stderr, " findViewWithAttribute: type(val)=", type(val)
return self.__findViewWithAttributeInTree(attr, val, root)
def findViewsWithAttribute(self, attr, val, root="ROOT"):
'''
Finds the Views with the specified attribute and value.
This allows you to see all items that match your criteria in the view hierarchy
Usage:
buttons = v.findViewsWithAttribute("class", "android.widget.Button")
'''
return self.__findViewsWithAttributeInTree(attr, val, root)
def findViewWithAttributeOrRaise(self, attr, val, root="ROOT"):
'''
Finds the View or raise a ViewNotFoundException.
@return: the View found
@raise ViewNotFoundException: raise the exception if View not found
'''
view = self.findViewWithAttribute(attr, val, root)
if view:
return view
else:
raise ViewNotFoundException(attr, val, root)
def findViewWithAttributeThatMatches(self, attr, regex, root="ROOT"):
'''
Finds the list of Views with the specified attribute matching
regex
'''
return self.__findViewWithAttributeInTreeThatMatches(attr, regex, root)
def findViewWithText(self, text, root="ROOT"):
if DEBUG:
try:
print >>sys.stderr, '''findViewWithText({0}, {1})'''.format(text, root)
print >> sys.stderr, " findViewWithText: type(text)=", type(text)
except:
pass
if isinstance(text, RegexType):
return self.findViewWithAttributeThatMatches(self.textProperty, text, root)
#l = self.findViewWithAttributeThatMatches(TEXT_PROPERTY, text)
#ll = len(l)
#if ll == 0:
# return None
#elif ll == 1:
# return l[0]
#else:
# print >>sys.stderr, "WARNING: findViewWithAttributeThatMatches invoked by findViewWithText returns %d items." % ll
# return l
else:
return self.findViewWithAttribute(self.textProperty, text, root)
def findViewWithTextOrRaise(self, text, root="ROOT"):
'''
Finds the View or raise a ViewNotFoundException.
@return: the View found
@raise ViewNotFoundException: raise the exception if View not found
'''
if DEBUG:
print >>sys.stderr, "findViewWithTextOrRaise(%s, %s)" % (text, root)
view = self.findViewWithText(text, root)
if view:
return view
else:
raise ViewNotFoundException("text", text, root)
def findViewWithContentDescription(self, contentdescription, root="ROOT"):
'''
Finds the View with the specified content description
'''
return self.__findViewWithAttributeInTree('content-desc', contentdescription, root)
def findViewWithContentDescriptionOrRaise(self, contentdescription, root="ROOT"):
'''
Finds the View with the specified content description
'''
return self.__findViewWithAttributeInTreeOrRaise('content-desc', contentdescription, root)
def findViewsContainingPoint(self, (x, y), _filter=None):
'''
Finds the list of Views that contain the point (x, y).
'''
if not _filter:
_filter = lambda v: True
return [v for v in self.views if (v.containsPoint((x,y)) and _filter(v))]
def getViewIds(self):
'''
@deprecated: Use L{getViewsById} instead.
Returns the Views map.
'''
return self.viewsById
def getViewsById(self):
'''
Returns the Views map. The keys are C{uniqueIds} and the values are C{View}s.
'''
return self.viewsById
def __getFocusedWindowPosition(self):
return self.__getFocusedWindowId()
def getSdkVersion(self):
'''
Gets the SDK version.
'''
return self.build[VERSION_SDK_PROPERTY]
def isKeyboardShown(self):
'''
Whether the keyboard is displayed.
'''
return self.device.isKeyboardShown()
def writeImageToFile(self, filename, _format="PNG", deviceart=None, dropshadow=True, screenglare=True):
'''
Write the View image to the specified filename in the specified format.
@type filename: str
@param filename: Absolute path and optional filename receiving the image. If this points to
a directory, then the filename is determined by the serialno of the device and
format extension.
@type _format: str
@param _format: Image format (default format is PNG)
'''
filename = self.device.substituteDeviceTemplate(filename)
if not os.path.isabs(filename):
raise ValueError("writeImageToFile expects an absolute path (filename='%s')" % filename)
if os.path.isdir(filename):
filename = os.path.join(filename, self.serialno + '.' + _format.lower())
if DEBUG:
print >> sys.stderr, "writeImageToFile: saving image to '%s' in %s format (reconnect=%s)" % (filename, _format, self.device.reconnect)
image = self.device.takeSnapshot(reconnect=self.device.reconnect)
if deviceart:
if 'STUDIO_DIR' in os.environ:
PLUGIN_DIR = 'plugins/android/lib/device-art-resources'
osName = platform.system()
if osName == 'Darwin':
deviceArtDir = os.environ['STUDIO_DIR'] + '/Contents/' + PLUGIN_DIR
else:
deviceArtDir = os.environ['STUDIO_DIR'] + '/' + PLUGIN_DIR
# FIXME: should parse XML
deviceArtXml = deviceArtDir + '/device-art.xml'
if not os.path.exists(deviceArtXml):
warnings.warn("Cannot find device art definition file")
# <device id="nexus_5" name="Nexus 5">
# <orientation name="port" size="1370,2405" screenPos="144,195" screenSize="1080,1920" shadow="port_shadow.png" back="port_back.png" lights="port_fore.png"/>
# <orientation name="land" size="2497,1235" screenPos="261,65" screenSize="1920,1080" shadow="land_shadow.png" back="land_back.png" lights="land_fore.png"/>
# </device>
orientation = self.display['orientation']
if orientation == 0 or orientation == 2:
orientationName = 'port'
elif orientation == 1 or orientation == 3:
orientationName = 'land'
else:
warnings.warn("Unknown orientation=" + orientation)
orientationName = 'port'
separator = '_'
if deviceart == 'auto':
hardware = self.device.getProperty('ro.hardware')
if hardware == 'hammerhead':
deviceart = 'nexus_5'
elif hardware == 'mako':
deviceart = 'nexus_4'
elif hardware == 'grouper':
deviceart = 'nexus_7' # 2012
elif hardware == 'flo':
deviceart = 'nexus_7_2013'
elif hardware in ['mt5861', 'mt5890']:
deviceart = 'tv_1080p'
elif hardware == 'universal5410':
deviceart = 'samsung_s4'
SUPPORTED_DEVICES = ['nexus_5', 'nexus_4', 'nexus_7', 'nexus_7_2013', 'tv_1080p', 'samsung_s4']
if deviceart not in SUPPORTED_DEVICES:
warnings.warn("Only %s is supported now, more devices coming soon" % SUPPORTED_DEVICES)
if deviceart == 'auto':
# it wasn't detected yet, let's assume generic phone
deviceart = 'phone'
screenSize = None
if deviceart == 'nexus_5':
if orientationName == 'port':
screenPos = (144, 195)
else:
screenPos = (261, 65)
elif deviceart == 'nexus_4':
if orientationName == 'port':
screenPos = (94, 187)
else:
screenPos = (257, 45)
elif deviceart == 'nexus_7': # 2012
if orientationName == 'port':
screenPos = (142, 190)
else:
screenPos = (260, 105)
elif deviceart == 'nexus_7_2013':
if orientationName == 'port':
screenPos = (130, 201)
screenSize = (800, 1280)
else:
screenPos = (282, 80)
screenSize = (1280, 800)
elif deviceart == 'tv_1080p':
screenPos = (85, 59)
orientationName = ''
separator = ''
elif deviceart == 'samsung_s4':
if orientationName == 'port':
screenPos = (76, 220)
screenSize = (1078, 1902) # FIXME: (1080, 1920) is the original size
else:
screenPos = (0, 0)
elif deviceart == 'phone':
if orientationName == 'port':
screenPos = (113, 93)
screenSize = (343, 46) # 46?, this is in device-art.xml
else:
screenPos = (141, 36)
screenSize = (324, 255)
deviceArtModelDir = deviceArtDir + '/' + deviceart
if not os.path.isdir(deviceArtModelDir):
warnings.warn("Cannot find device art for " + deviceart + ' at ' + deviceArtModelDir)
try:
from PIL import Image
if dropshadow:
dropShadowImage = Image.open(deviceArtModelDir + '/%s%sshadow.png' % (orientationName, separator))
deviceBack = Image.open(deviceArtModelDir + '/%s%sback.png' % (orientationName, separator))
if dropshadow:
dropShadowImage.paste(deviceBack, (0, 0), deviceBack)
deviceBack = dropShadowImage
if screenSize:
image = image.resize(screenSize, Image.ANTIALIAS)
deviceBack.paste(image, screenPos)
if screenglare:
screenGlareImage = Image.open(deviceArtModelDir + '/%s%sfore.png' % (orientationName, separator))
deviceBack.paste(screenGlareImage, (0, 0), screenGlareImage)
image = deviceBack
except ImportError as ex:
warnings.warn('''PIL or Pillow is needed for image manipulation
On Ubuntu install
$ sudo apt-get install python-imaging python-imaging-tk
On OSX install
$ brew install homebrew/python/pillow
''')
else:
warnings.warn("ViewClient.writeImageToFile: Cannot add device art because STUDIO_DIR environment variable was not set")
image.save(filename, _format)
@staticmethod
def writeViewImageToFileInDir(view):
'''
Write the View image to the directory specified in C{ViewClient.imageDirectory}.
@type view: View
@param view: The view
'''
if not ViewClient.imageDirectory:
raise RuntimeError('You must set ViewClient.imageDiretory in order to use this method')
view.writeImageToFile(ViewClient.imageDirectory)
@staticmethod
def __pickleable(tree):
'''
Makes the tree pickleable.
'''
def removeDeviceReference(view):
'''
Removes the reference to a L{MonkeyDevice}.
'''
view.device = None
###########################################################################################
# FIXME: Unfortunatelly deepcopy does not work with MonkeyDevice objects, which is
# sadly the reason why we cannot pickle the tree and we need to remove the MonkeyDevice
# references.
# We wanted to copy the tree to preserve the original and make piclkleable the copy.
#treeCopy = copy.deepcopy(tree)
treeCopy = tree
# IMPORTANT:
# This assumes that the first element in the list is the tree root
ViewClient.__traverse(treeCopy[0], transform=removeDeviceReference)
###########################################################################################
return treeCopy
def distanceTo(self, tree):
'''
Calculates the distance between the current state and the tree passed as argument.
@type tree: list of Views
@param tree: Tree of Views
@return: the distance
'''
return ViewClient.distance(ViewClient.__pickleable(self.views), tree)
@staticmethod
def distance(tree1, tree2):
'''
Calculates the distance between the two trees.
@type tree1: list of Views
@param tree1: Tree of Views
@type tree2: list of Views
@param tree2: Tree of Views
@return: the distance
'''
################################################################
#FIXME: this should copy the entire tree and then transform it #
################################################################
pickleableTree1 = ViewClient.__pickleable(tree1)
pickleableTree2 = ViewClient.__pickleable(tree2)
s1 = pickle.dumps(pickleableTree1)
s2 = pickle.dumps(pickleableTree2)
if DEBUG_DISTANCE:
print >>sys.stderr, "distance: calculating distance between", s1[:20], "and", s2[:20]
l1 = len(s1)
l2 = len(s2)
t = float(max(l1, l2))
if l1 == l2:
if DEBUG_DISTANCE:
print >>sys.stderr, "distance: trees have same length, using Hamming distance"
return ViewClient.__hammingDistance(s1, s2)/t
else:
if DEBUG_DISTANCE:
print >>sys.stderr, "distance: trees have different length, using Levenshtein distance"
return ViewClient.__levenshteinDistance(s1, s2)/t
@staticmethod
def __hammingDistance(s1, s2):
'''
Finds the Hamming distance between two strings.
@param s1: string
@param s2: string
@return: the distance
@raise ValueError: if the lenght of the strings differ
'''
l1 = len(s1)
l2 = len(s2)
if l1 != l2:
raise ValueError("Hamming distance requires strings of same size.")
return sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2))
def hammingDistance(self, tree):
'''
Finds the Hamming distance between this tree and the one passed as argument.
'''
s1 = ' '.join(map(View.__str__, self.views))
s2 = ' '.join(map(View.__str__, tree))
return ViewClient.__hammingDistance(s1, s2)
@staticmethod
def __levenshteinDistance(s, t):
'''
Find the Levenshtein distance between two Strings.
Python version of Levenshtein distance method implemented in Java at
U{http://www.java2s.com/Code/Java/Data-Type/FindtheLevenshteindistancebetweentwoStrings.htm}.
This is the number of changes needed to change one String into
another, where each change is a single character modification (deletion,
insertion or substitution).
The previous implementation of the Levenshtein distance algorithm
was from U{http://www.merriampark.com/ld.htm}
Chas Emerick has written an implementation in Java, which avoids an OutOfMemoryError
which can occur when my Java implementation is used with very large strings.
This implementation of the Levenshtein distance algorithm
is from U{http://www.merriampark.com/ldjava.htm}::
StringUtils.getLevenshteinDistance(null, *) = IllegalArgumentException
StringUtils.getLevenshteinDistance(*, null) = IllegalArgumentException
StringUtils.getLevenshteinDistance("","") = 0
StringUtils.getLevenshteinDistance("","a") = 1
StringUtils.getLevenshteinDistance("aaapppp", "") = 7
StringUtils.getLevenshteinDistance("frog", "fog") = 1
StringUtils.getLevenshteinDistance("fly", "ant") = 3
StringUtils.getLevenshteinDistance("elephant", "hippo") = 7
StringUtils.getLevenshteinDistance("hippo", "elephant") = 7
StringUtils.getLevenshteinDistance("hippo", "zzzzzzzz") = 8
StringUtils.getLevenshteinDistance("hello", "hallo") = 1
@param s: the first String, must not be null
@param t: the second String, must not be null
@return: result distance
@raise ValueError: if either String input C{null}
'''
if s is None or t is None:
raise ValueError("Strings must not be null")
n = len(s)
m = len(t)
if n == 0:
return m
elif m == 0:
return n
if n > m:
tmp = s
s = t
t = tmp
n = m;
m = len(t)
p = [None]*(n+1)
d = [None]*(n+1)
for i in range(0, n+1):
p[i] = i
for j in range(1, m+1):
if DEBUG_DISTANCE:
if j % 100 == 0:
print >>sys.stderr, "DEBUG:", int(j/(m+1.0)*100),"%\r",
t_j = t[j-1]
d[0] = j
for i in range(1, n+1):
cost = 0 if s[i-1] == t_j else 1
# minimum of cell to the left+1, to the top+1, diagonally left and up +cost
d[i] = min(min(d[i-1]+1, p[i]+1), p[i-1]+cost)
_d = p
p = d
d = _d
if DEBUG_DISTANCE:
print >> sys.stderr, "\n"
return p[n]
def levenshteinDistance(self, tree):
'''
Finds the Levenshtein distance between this tree and the one passed as argument.
'''
s1 = ' '.join(map(View.__microStr__, self.views))
s2 = ' '.join(map(View.__microStr__, tree))
return ViewClient.__levenshteinDistance(s1, s2)
@staticmethod
def excerpt(_str, execute=False):
code = Excerpt2Code().Parse(_str)
if execute:
exec code
else:
return code
class ConnectedDevice:
def __init__(self, device, vc, serialno):
self.device = device
self.vc = vc
self.serialno = serialno
class CulebraOptions:
'''
Culebra options helper class
'''
HELP = 'help'
VERBOSE = 'verbose'
VERSION = 'version'
IGNORE_SECURE_DEVICE = 'ignore-secure-device'
IGNORE_VERSION_CHECK = 'ignore-version-check'
FORCE_VIEW_SERVER_USE = 'force-view-server-use' # Same a ViewClientOptions.FORCE_VIEW_SERVER_USE but with dashes
DO_NOT_START_VIEW_SERVER = 'do-not-start-view-server'
DO_NOT_IGNORE_UIAUTOMATOR_KILLED = 'do-not-ignore-uiautomator-killed'
FIND_VIEWS_BY_ID = 'find-views-by-id'
FIND_VIEWS_WITH_TEXT = 'find-views-with-text'
FIND_VIEWS_WITH_CONTENT_DESCRIPTION = 'find-views-with-content-description'
USE_REGEXPS = 'use-regexps'
VERBOSE_COMMENTS = 'verbose-comments'
UNIT_TEST_CLASS = 'unit-test-class'
UNIT_TEST_METHOD = 'unit-test-method'
USE_JAR = 'use-jar'
USE_DICTIONARY = 'use-dictionary'
DICTIONARY_KEYS_FROM = 'dictionary-keys-from'
AUTO_REGEXPS = 'auto-regexps'
START_ACTIVITY = 'start-activity'
OUTPUT = 'output'
INTERACTIVE = 'interactive'
WINDOW = 'window'
APPEND_TO_SYS_PATH = 'append-to-sys-path'
PREPEND_TO_SYS_PATH = 'prepend-to-sys-path'
SAVE_SCREENSHOT = 'save-screenshot'
SAVE_VIEW_SCREENSHOTS = 'save-view-screenshots'
GUI = 'gui'
SCALE = 'scale'
DO_NOT_VERIFY_SCREEN_DUMP = 'do-not-verify-screen-dump'
ORIENTATION_LOCKED = 'orientation-locked'
SERIALNO = 'serialno'
MULTI_DEVICE = 'multi-device'
LOG_ACTIONS = 'log-actions'
DEVICE_ART = 'device-art'
DROP_SHADOW = 'drop-shadow'
SCREEN_GLARE = 'glare'
NULL_BACK_END = 'null-back-end'
USE_UIAUTOMATOR_HELPER = 'use-uiautomator-helper'
CONCERTINA = 'concertina'
SHORT_OPTS = 'HVvIEFSkw:i:t:d:rCUM:j:D:K:R:a:o:pf:W:GuP:Os:mLA:ZB0hc'
LONG_OPTS = [HELP, VERBOSE, VERSION, IGNORE_SECURE_DEVICE, IGNORE_VERSION_CHECK, FORCE_VIEW_SERVER_USE,
DO_NOT_START_VIEW_SERVER,
DO_NOT_IGNORE_UIAUTOMATOR_KILLED,
WINDOW + '=',
FIND_VIEWS_BY_ID + '=', FIND_VIEWS_WITH_TEXT + '=', FIND_VIEWS_WITH_CONTENT_DESCRIPTION + '=',
USE_REGEXPS, VERBOSE_COMMENTS, UNIT_TEST_CLASS, UNIT_TEST_METHOD + '=',
USE_JAR + '=', USE_DICTIONARY + '=', DICTIONARY_KEYS_FROM + '=', AUTO_REGEXPS + '=',
START_ACTIVITY + '=',
OUTPUT + '=', PREPEND_TO_SYS_PATH,
SAVE_SCREENSHOT + '=', SAVE_VIEW_SCREENSHOTS + '=',
GUI,
DO_NOT_VERIFY_SCREEN_DUMP,
SCALE + '=',
ORIENTATION_LOCKED,
SERIALNO + '=',
MULTI_DEVICE,
LOG_ACTIONS,
DEVICE_ART + '=', DROP_SHADOW, SCREEN_GLARE,
NULL_BACK_END,
USE_UIAUTOMATOR_HELPER,
CONCERTINA
]
LONG_OPTS_ARG = {WINDOW: 'WINDOW',
FIND_VIEWS_BY_ID: 'BOOL', FIND_VIEWS_WITH_TEXT: 'BOOL', FIND_VIEWS_WITH_CONTENT_DESCRIPTION: 'BOOL',
USE_JAR: 'BOOL', USE_DICTIONARY: 'BOOL', DICTIONARY_KEYS_FROM: 'VALUE', AUTO_REGEXPS: 'LIST',
START_ACTIVITY: 'COMPONENT',
OUTPUT: 'FILENAME',
SAVE_SCREENSHOT: 'FILENAME', SAVE_VIEW_SCREENSHOTS: 'DIR',
UNIT_TEST_METHOD: 'NAME',
SCALE: 'FLOAT',
SERIALNO: 'LIST',
DEVICE_ART: 'MODEL'}
OPTS_HELP = {
'H': 'prints this help',
'V': 'verbose comments',
'v': 'prints version number and exists',
'k': 'don\'t ignore UiAutomator killed',
'w': 'use WINDOW content (default: -1, all windows)',
'i': 'whether to use findViewById() in script',
't': 'whether to use findViewWithText() in script',
'd': 'whether to use findViewWithContentDescription',
'r': 'use regexps in matches',
'U': 'generates unit test class and script',
'M': 'generates unit test method. Can be used with or without -U',
'j': 'use jar and appropriate shebang to run script (deprecated)',
'D': 'use a dictionary to store the Views found',
'K': 'dictionary keys from: id, text, content-description',
'R': 'auto regexps (i.e. clock), implies -r. help list options',
'a': 'starts Activity before dump',
'o': 'output filename',
'p': 'prepend environment variables values to sys.path',
'f': 'save screenshot to file',
'W': 'save View screenshots to files in directory',
'E': 'ignores ADB version check',
'G': 'presents the GUI (EXPERIMENTAL)',
'P': 'scale percentage (i.e. 0.5)',
'u': 'do not verify screen state after dump',
'O': 'orientation locked in generated test',
's': 'device serial number (can be more than 1)',
'm': 'enables multi-device test generation',
'L': 'log actions using logcat',
'A': 'device art model to frame screenshot (auto: autodetected)',
'Z': 'drop shadow for device art screenshot',
'B': 'screen glare over screenshot',
'0': 'use a null back-end (no View tree obtained)',
'h': 'use UiAutomatorHelper',
'c': 'enable concertina mode (EXPERIMENTAL)'
}
class CulebraTestCase(unittest.TestCase):
'''
The base class for all CulebraTests.
Class variables
---------------
There are some class variables that can be used to change the behavior of the tests.
B{serialno}: The serial number of the device. This can also be a list of devices for I{mutli-devices}
tests or the keyword C{all} to run the tests on all available devices or C{default} to run the tests
only on the default (first) device.
When a I{multi-device} test is running the available devices are available in a list named
L{self.devices} which has the corresponding L{ConnectedDevices} entries.
Also, in the case of I{multi-devices} tests and to be backward compatible with I{single-device} tests
the default device, the first one in the devices list, is assigned to L{self.device}, L{self.vc} and
L{self.serialno} too.
B{verbose}: The verbosity of the tests. This can be changed from the test command line using the
command line option C{-v} or C{--verbose}.
'''
kwargs1 = None
kwargs2 = None
devices = None
''' The list of connected devices '''
defaultDevice = None
''' The default L{ConnectedDevice}. Set to the first one found for multi-device cases '''
serialno = None
''' The default connected device C{serialno} '''
device = None
''' The default connected device '''
vc = None
''' The default connected device C{ViewClient} '''
verbose = False
options = {}
@classmethod
def setUpClass(cls):
cls.kwargs1 = {'ignoreversioncheck': False, 'verbose': False, 'ignoresecuredevice': False}
cls.kwargs2 = {'startviewserver': True, 'forceviewserveruse': False, 'autodump': False, 'ignoreuiautomatorkilled': True}
def __init__(self, methodName='runTest'):
self.Log = CulebraTestCase.__Log(self)
unittest.TestCase.__init__(self, methodName=methodName)
def setUp(self):
__devices = None
if self.serialno:
# serialno can be 1 serialno, multiple serialnos, 'all' or 'default'
if self.serialno.lower() == 'all':
__devices = [d.serialno for d in adbclient.AdbClient().getDevices()]
elif self.serialno.lower() == 'default':
__devices = [adbclient.AdbClient().getDevices()[0].serialno]
else:
__devices = self.serialno.split()
if len(__devices) > 1:
self.devices = __devices
# FIXME: both cases should be unified
if self.devices:
__devices = self.devices
self.devices = []
for serialno in __devices:
device, serialno = ViewClient.connectToDeviceOrExit(serialno=serialno, **self.kwargs1)
if self.options[CulebraOptions.START_ACTIVITY]:
device.startActivity(component=self.options[CulebraOptions.START_ACTIVITY])
vc = ViewClient(device, serialno, **self.kwargs2)
self.devices.append(ConnectedDevice(serialno=serialno, device=device, vc=vc))
# Select the first devices as default
self.defaultDevice = self.devices[0]
self.device = self.defaultDevice.device
self.serialno = self.defaultDevice.serialno
self.vc = self.defaultDevice.vc
else:
self.devices = []
if __devices:
# A list containing only one device was specified
self.serialno = __devices[0]
self.device, self.serialno = ViewClient.connectToDeviceOrExit(serialno=self.serialno, **self.kwargs1)
if self.options[CulebraOptions.START_ACTIVITY]:
self.device.startActivity(component=self.options[CulebraOptions.START_ACTIVITY])
self.vc = ViewClient(self.device, self.serialno, **self.kwargs2)
# Set the default device, to be consistent with multi-devices case
self.devices.append(ConnectedDevice(serialno=self.serialno, device=self.device, vc=self.vc))
def tearDown(self):
pass
def preconditions(self):
if self.options[CulebraOptions.ORIENTATION_LOCKED] is not None:
# If orientation locked was set to a valid orientation value then use it to compare
# against current orientation (when the test is run)
return (self.device.display['orientation'] == self.options[CulebraOptions.ORIENTATION_LOCKED])
return True
def isTestRunningOnMultipleDevices(self):
return (len(self.devices) > 1)
@staticmethod
def __passAll(arg):
return True
def all(self, arg, _filter=None):
# CulebraTestCase.__passAll cannot be specified as the default argument value
if _filter is None:
_filter = CulebraTestCase.__passAll
if DEBUG_MULTI:
print >> sys.stderr, "all(%s, %s)" % (arg, _filter)
l = (getattr(d, arg) for d in self.devices)
for i in l:
print >> sys.stderr, " i=", i
return filter(_filter, (getattr(d, arg) for d in self.devices))
def allVcs(self, _filter=None):
return self.all('vc', _filter)
def allDevices(self, _filter=None):
return self.all('device', _filter)
def allSerialnos(self, _filter=None):
return self.all('serialno', _filter)
def log(self, message, priority='D'):
'''
Logs a message with the specified priority.
'''
self.device.log('CULEBRA', message, priority, CulebraTestCase.verbose)
class __Log():
'''
Log class to simulate C{android.util.Log}
'''
def __init__(self, culebraTestCase):
self.culebraTestCase = culebraTestCase
def __getattr__(self, attr):
'''
Returns the corresponding log method or @C{AttributeError}.
'''
if attr in ['v', 'd', 'i', 'w', 'e']:
return lambda message: self.culebraTestCase.log(message, priority=attr.upper())
raise AttributeError(self.__class__.__name__ + ' has no attribute "%s"' % attr)
@staticmethod
def main():
# If you want to specify tests classes and methods in the command line you will be forced
# to include -s or --serialno and the serial number of the device (could be a regexp)
# as ViewClient would have no way of determine what it is.
# This could be also a list of devices (delimited by whitespaces) and in such case all of
# them will be used.
# The special argument 'all' means all the connected devices.
ser = ['-s', '--serialno']
old = '%(failfast)'
new = ' %s s The serial number[s] to connect to or \'all\'\n%s' % (', '.join(ser), old)
unittest.TestProgram.USAGE = unittest.TestProgram.USAGE.replace(old, new)
argsToRemove = []
i = 0
while i < len(sys.argv):
a = sys.argv[i]
if a in ['-v', '--verbose']:
# make CulebraTestCase.verbose the same as unittest verbose
CulebraTestCase.verbose = True
elif a in ser:
# remove arguments not handled by unittest
if len(sys.argv) > (i+1):
argsToRemove.append(sys.argv[i])
CulebraTestCase.serialno = sys.argv[i+1]
argsToRemove.append(CulebraTestCase.serialno)
i += 1
else:
raise RuntimeError('serial number missing')
i += 1
for a in argsToRemove:
sys.argv.remove(a)
unittest.main()
if __name__ == "__main__":
try:
vc = ViewClient(None)
except:
print "%s: Don't expect this to do anything" % __file__
|
akimo12345/AndroidViewClient
|
src/com/dtmilano/android/viewclient.py
|
Python
|
apache-2.0
| 183,943 | 0.007245 |
from django.conf import settings
from django.db.models import Q
from libs.django_utils import render_to_response
from django.views.generic import ListView
from springboard.models import IntranetApplication
from django.contrib.auth.decorators import login_required
from alerts.models import Alert
class SpringBoard(ListView):
context_object_name = "applications"
template_name = "springboard/springboard.html"
def get_queryset(self):
# Check the groups the user is allowed to see
return IntranetApplication.objects.filter(Q(groups__in = self.request.user.groups.all()) | Q(groups__isnull=True)).distinct()
def get_context_data(self, **kwargs):
# Temporary message for testing
from django.contrib import messages
# Call the base implementation first to get a context
context = super(SpringBoard, self).get_context_data(**kwargs)
# Get all the alerts for the user
context['alerts'] = Alert.objects.filter(sent_to = self.request.user)
return context
|
instituteofdesign/django-lms
|
apps/springboard/views.py
|
Python
|
bsd-3-clause
| 1,051 | 0.005709 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class GoogleAppSetup(Document):
pass
|
saurabh6790/google_integration
|
google_integration/google_connect/doctype/google_app_setup/google_app_setup.py
|
Python
|
mit
| 280 | 0.007143 |
from logan.runner import run_app, configure_app
import sys
import base64
import os
KEY_LENGTH = 40
CONFIG_TEMPLATE = """
from fabric_bolt.core.settings.base import *
CONF_ROOT = os.path.dirname(__file__)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(CONF_ROOT, 'fabric-bolt.db'),
'USER': 'sqlite3',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
SECRET_KEY = %(default_key)r
"""
def generate_settings():
output = CONFIG_TEMPLATE % dict(
default_key=base64.b64encode(os.urandom(KEY_LENGTH)),
)
return output
def configure():
configure_app(
project='fabric-bolt',
default_config_path='~/.fabric-bolt/settings.py',
default_settings='fabric_bolt.core.settings.base',
settings_initializer=generate_settings,
settings_envvar='FABRIC_BOLT_CONF',
)
def main(progname=sys.argv[0]):
run_app(
project='fabric-bolt',
default_config_path='~/.fabric-bolt/settings.py',
default_settings='fabric_bolt.core.settings.base',
settings_initializer=generate_settings,
settings_envvar='FABRIC_BOLT_CONF',
)
if __name__ == '__main__':
main()
|
brajput24/fabric-bolt
|
fabric_bolt/utils/runner.py
|
Python
|
mit
| 1,251 | 0.000799 |
# -*- coding: utf-8 -*-
from __future__ import division
# -*- coding: utf-8 -*-
#
# Sphinx documentation build configuration file, created by
# sphinx-quickstart.py on Sat Mar 8 21:47:50 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os, re
# If your extensions are in another directory, add it here.
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.addons.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'SpiffWorkflow'
copyright = '2012 ' + ', '.join(open('../AUTHORS').readlines())
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
import SpiffWorkflow
version = SpiffWorkflow.__version__
# The full version, including alpha/beta/rc tags.
release = version
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'sphinxdoc.css'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['figures']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
html_index = 'index.html'
# Custom sidebar templates, maps page names to templates.
html_sidebars = {'index': 'indexsidebar.html'}
# Additional templates that should be rendered to pages, maps page names to
# templates.
html_additional_pages = {'index': 'index.html'}
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
html_use_opensearch = 'http://sphinx.pocoo.org'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Sphinxdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [('contents', 'sphinx.tex', 'Sphinx Documentation',
'Georg Brandl', 'manual', 1)]
latex_logo = '_static/sphinx.png'
#latex_use_parts = True
# Additional stuff for the LaTeX preamble.
latex_elements = {
'fontpkg': '\\usepackage{palatino}'
}
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# Extension interface
# -------------------
from sphinx import addnodes
dir_sig_re = re.compile(r'\.\. ([^:]+)::(.*)$')
def parse_directive(env, sig, signode):
if not sig.startswith('.'):
dec_sig = '.. %s::' % sig
signode += addnodes.desc_name(dec_sig, dec_sig)
return sig
m = dir_sig_re.match(sig)
if not m:
signode += addnodes.desc_name(sig, sig)
return sig
name, args = m.groups()
dec_name = '.. %s::' % name
signode += addnodes.desc_name(dec_name, dec_name)
signode += addnodes.desc_addname(args, args)
return name
def parse_role(env, sig, signode):
signode += addnodes.desc_name(':%s:' % sig, ':%s:' % sig)
return sig
event_sig_re = re.compile(r'([a-zA-Z-]+)\s*\((.*)\)')
def parse_event(env, sig, signode):
m = event_sig_re.match(sig)
if not m:
signode += addnodes.desc_name(sig, sig)
return sig
name, args = m.groups()
signode += addnodes.desc_name(name, name)
plist = addnodes.desc_parameterlist()
for arg in args.split(','):
arg = arg.strip()
plist += addnodes.desc_parameter(arg, arg)
signode += plist
return name
def setup(app):
from sphinx.ext.autodoc import cut_lines
app.connect('autodoc-process-docstring', cut_lines(4, what=['module']))
app.add_description_unit('directive', 'dir', 'pair: %s; directive', parse_directive)
app.add_description_unit('role', 'role', 'pair: %s; role', parse_role)
app.add_description_unit('confval', 'confval', 'pair: %s; configuration value')
app.add_description_unit('event', 'event', 'pair: %s; event', parse_event)
|
zetaops/SpiffWorkflow
|
doc/conf.py
|
Python
|
lgpl-3.0
| 5,907 | 0.003894 |
# -*- coding: utf-8 -*-
import unittest
class PilhaVaziaErro(Exception):
pass
class Pilha():
def __init__(self):
self.lista = []
def topo(self):
if self.lista:
return self.lista[-1]
raise PilhaVaziaErro()
def vazia(self):
return not bool(self.lista)
def empilhar(self, valor):
self.lista.append(valor)
def desempilhar(self):
try:
return self.lista.pop()
except IndexError:
raise PilhaVaziaErro
def esta_balanceada(expressao):
"""
Função que calcula se expressão possui parenteses, colchetes e chaves balanceados
O Aluno deverá informar a complexidade de tempo e espaço da função
Deverá ser usada como estrutura de dados apenas a pilha feita na aula anterior
:param expressao: string com expressao a ser balanceada
:return: boleano verdadeiro se expressao está balanceada e falso caso contrário
Complexidade
Tempo: O(n)
Memoria: O(n)
"""
if expressao:
pilha = Pilha()
if expressao[0] in '}])':
return False
for i in expressao:
if i in '{[(':
pilha.empilhar(i)
elif i in '}])':
if i=='}' and pilha.desempilhar() != '{':
return False
elif i==']' and pilha.desempilhar() != '[':
return False
elif i==')' and pilha.desempilhar() != '(':
return False
if pilha.vazia():
return True
return False
else:
return True
class BalancearTestes(unittest.TestCase):
def test_expressao_vazia(self):
self.assertTrue(esta_balanceada(''))
def test_parenteses(self):
self.assertTrue(esta_balanceada('()'))
def test_chaves(self):
self.assertTrue(esta_balanceada('{}'))
def test_colchetes(self):
self.assertTrue(esta_balanceada('[]'))
def test_todos_caracteres(self):
self.assertTrue(esta_balanceada('({[]})'))
self.assertTrue(esta_balanceada('[({})]'))
self.assertTrue(esta_balanceada('{[()]}'))
def test_chave_nao_fechada(self):
self.assertFalse(esta_balanceada('{'))
def test_colchete_nao_fechado(self):
self.assertFalse(esta_balanceada('['))
def test_parentese_nao_fechado(self):
self.assertFalse(esta_balanceada('('))
def test_chave_nao_aberta(self):
self.assertFalse(esta_balanceada('}{'))
def test_colchete_nao_aberto(self):
self.assertFalse(esta_balanceada(']['))
def test_parentese_nao_aberto(self):
self.assertFalse(esta_balanceada(')('))
def test_falta_de_caracter_de_fechamento(self):
self.assertFalse(esta_balanceada('({[]}'))
def test_falta_de_caracter_de_abertura(self):
self.assertFalse(esta_balanceada('({]})'))
def test_expressao_matematica_valida(self):
self.assertTrue(esta_balanceada('({[1+3]*5}/7)+9'))
|
trgomes/estrutura-de-dados
|
Exercicios/5-balanceamento.py
|
Python
|
mit
| 3,019 | 0.002659 |
import os
import sys
from setuptools import setup, find_packages
version = '0.3.3'
def get_package_manifest(filename):
packages = []
with open(filename) as package_file:
for line in package_file.readlines():
line = line.strip()
if not line:
continue
if line.startswith('#'):
# comment
continue
if line.startswith('-e '):
# not a valid package
continue
packages.append(line)
return packages
def get_install_requires():
"""
:returns: A list of packages required for installation.
"""
return get_package_manifest('requirements.txt')
def get_tests_requires():
"""
:returns: A list of packages required for running the tests.
"""
packages = get_package_manifest('requirements_dev.txt')
try:
from unittest import mock
except ImportError:
packages.append('mock')
if sys.version_info[:2] < (2, 7):
packages.append('unittest2')
return packages
def read(f):
with open(os.path.join(os.path.dirname(__file__), f)) as f:
return f.read().strip()
setup(
name='sockjs-gevent',
version=version,
description=('gevent base sockjs server'),
long_description='\n\n'.join((read('README.md'), read('CHANGES.txt'))),
classifiers=[
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Internet :: WWW/HTTP",
'Topic :: Internet :: WWW/HTTP :: WSGI'
],
author='Nick Joyce',
author_email='nick.joyce@realkinetic.com',
url='https://github.com/njoyce/sockjs-gevent',
license='MIT',
install_requires=get_install_requires(),
tests_require=get_tests_requires(),
setup_requires=['nose>=1.0'],
test_suite='nose.collector',
include_package_data = True,
packages=find_packages(exclude=["examples", "tests"]),
zip_safe = False,
)
|
njoyce/sockjs-gevent
|
setup.py
|
Python
|
mit
| 2,151 | 0.00186 |
################################ FIG J.2 P.683 ################################
import matplotlib.pyplot as plt
def freqplot(fdata, ydata, symbol='', ttl='', xlab='Frequency (Hz)', ylab=''):
""" FREQPLOT - Plot a function of frequency. See myplot for more features."""
#not sure what this means
#if nargin<2, fdata=0:length(ydata)-1; end
plt.plot(fdata, ydata, symbol);
plt.grid()
plt.title(ttl)
plt.ylabel(ylab)
plt.xlabel(xlab);
|
vberthiaume/digitalFilters
|
ch2/freqplot.py
|
Python
|
gpl-3.0
| 469 | 0.01919 |
# Copyright 2013. Amazon Web Services, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import application
import unittest
from application import application
from flask import Flask, current_app, request, Response
""" Main test cases for our application """
class AppTestCase(unittest.TestCase):
#application = Flask(__name__)
def setUp(self):
application.testing = True
with application.app_context():
self.client = current_app.test_client()
def test_load_config(self):
""" Test that we can load our config properly """
self.assertTrue(1)
def test_get_test(self):
""" Test hitting /test and that we get a correct HTTP response """
self.assertTrue(1)
def test_get_form(self):
""" Test that we can get a signup form """
self.assertTrue(1)
def test_get_user(self):
""" Test that we can get a user context """
self.assertTrue(1)
def test_login(self):
""" Test that we can authenticate as a user """
self.assertTrue(1)
if __name__ == '__main__':
unittest.main()
|
baida21/py-flask-signup
|
tests/application-tests.py
|
Python
|
apache-2.0
| 1,642 | 0.004263 |
# -*- coding: utf-8 -*-
#
# Misaka documentation build configuration file, created by
# sphinx-quickstart on Sun Jul 12 11:37:42 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Misaka'
copyright = u'2011-2017, Frank Smit'
author = u'Frank Smit'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.1.0'
# The full version, including alpha/beta/rc tags.
release = '2.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = [alabaster.get_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {
# '**': [
# 'about.html',
# # 'navigation.html',
# # 'relations.html',
# # 'searchbox.html',
# # 'donate.html',
# ]
# }
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Misakadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Misaka.tex', u'Misaka Documentation',
u'Frank Smit', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'misaka', u'Misaka Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Misaka', u'Misaka Documentation',
author, 'Misaka', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Extras ---------------------------------------------------------------
def setup(app):
app.add_stylesheet('customizations.css')
|
hepochen/hoedown_misaka
|
docs/conf.py
|
Python
|
mit
| 9,639 | 0.005602 |
# Copyright (C) 2016 - Yevgen Muntyan
# Copyright (C) 2016 - Ignacio Casal Quinteiro
# Copyright (C) 2016 - Arnavion
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
from gvsbuild.utils.base_builders import Meson
from gvsbuild.utils.base_expanders import Tarball
from gvsbuild.utils.base_project import project_add
@project_add
class Graphene(Tarball, Meson):
def __init__(self):
Meson.__init__(
self,
"graphene",
archive_url="https://github.com/ebassi/graphene/archive/refs/tags/1.10.6.tar.gz",
hash="7eba972751d404316a9b59a7c1e0782de263c3cf9dd5ebf1503ba9b8354cc948",
dependencies=["ninja", "meson", "pkg-config", "glib"],
)
if self.opts.enable_gi:
self.add_dependency("gobject-introspection")
enable_gi = "enabled"
else:
enable_gi = "disabled"
self.add_param("-Dintrospection={}".format(enable_gi))
def build(self):
Meson.build(self, make_tests=True)
self.install(r".\LICENSE share\doc\graphene")
|
wingtk/gvsbuild
|
gvsbuild/projects/graphene.py
|
Python
|
gpl-2.0
| 1,663 | 0.001203 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Скрипт извлекает слова из текстового файла и сортирует их по частоте.
# С помощью модуля pymorphy2 можно привести слова к начальной форме (единственное число, именительный падеж).
# Нужен pymorphy2 и русскоязычный словарь для него!
# pip install --user pymorphy2
# Примеры:
# ./wordfreq-morph.py ./text-file.txt | less
# xclip -o | ./wordfreq-morph.py -m
# Проверялся на интерпретаторе:
# Python 3.6.1 on linux
import sys
import sqlite3
import os
import re
import argparse
# Сортировка вывода словарей:
from collections import OrderedDict
#------------------------------------------------------------------------------
# Опции:
# Проверочный морфологический словарь (в каталоге скрипта):
NORMAL_DICT_PATH = 'dict.opencorpora-sing-nom.txt'
NORMAL_DICT_DIR = 'word-length-dicts'
database_name = 'opencorpora-sing-nom.sqlite'
#-------------------------------------------------------------------------
# Аргументы командной строки:
def create_parser():
"""Список доступных параметров скрипта."""
parser = argparse.ArgumentParser()
parser.add_argument('file',
nargs='*',
help='Русскоязычный текстовый файл в UTF-8'
)
parser.add_argument('-m', '--morph',
action='store_true', default='False',
help='Преобразование слов в начальную форму (нужен pymorphy2)'
)
return parser
#-------------------------------------------------------------------------
# Функции:
def metadict_path (metadict_dir):
"""Возвращает абсолютный путь к каталогу словарей."""
# Получаем абсолютный путь к каталогу скрипта:
script_path = os.path.dirname(os.path.abspath(__file__))
# Добавляем к пути каталог словарей:
metadict_path = script_path + '/' + metadict_dir
return metadict_path
def find_files (directory):
"""Возвращает список путей ко всем файлам каталога, включая подкаталоги."""
path_f = []
for d, dirs, files in os.walk(directory):
for f in files:
# Формирование адреса:
path = os.path.join(d,f)
# Добавление адреса в список:
path_f.append(path)
return path_f
def lowercase (text):
"""Создаёт из текста список слов в нижнем регистре"""
# Переводим текст в нижний регистр:
text = str(text.lower())
# Регексп вытаскивает из текста слова:
words = re.findall(r"(\w+)", text, re.UNICODE)
# Восстанавливаются ссылки:
urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', text)
words = words + urls
return words
def wordfreq_old (words):
"""Создаёт словарь с частотой слов"""
stats = {}
# Слово -- ключ словаря, значение, это его частота:
for word in words:
stats[word] = stats.get(word, 0) + 1
return stats
def word_test_slow (word):
"""Светяет слово со словарём, выбирая словарь по длине слова."""
# Определяем длину слова:
search_string = '-' + str(len(word)) + '.txt'
dicts_list = find_files(metadict_path(NORMAL_DICT_DIR))
test = False
# Подключаем словарь для проверки:
for dict in dicts_list:
if search_string in dict:
normal_dict_file = open(dict, "r")
normal_dict = normal_dict_file.read()
normal_dict_file.close()
if word in normal_dict:
return True
else:
return False
def word_test_sql (word,cursor):
"""Проверяет, есть ли слово в базе данных"""
# Номер таблицы, это длина слова:
word_lenght = len(word)
# А вот не нужно хардкодить (число таблиц в базе данных может измениться)
if word_lenght > 32:
word_lenght = 32
table_name = 'opencorpora' + str(word_lenght)
#database = sqlite3.connect(metadict_path(database_name))
#cursor = database.cursor()
cursor.execute("SELECT words FROM "+table_name+" WHERE words=?",(word,))
result = cursor.fetchall()
#database.close()
if result:
return True
else:
return False
def wordfreq_morph (words):
"""Создаёт словарь с частотой слов (в начальной форме)"""
# Морфологический анализатор:
import pymorphy2
stats = {}
n_stats = {}
for word in words:
stats[word] = stats.get(word, 0) + 1
morph = pymorphy2.MorphAnalyzer()
for item in stats:
# Слово приводится к начальной форме:
n_word = morph.parse(item)[0].normal_form
# Неологизмы оставляем без изменений:
if word_test_sql(n_word,cursor) is not True:
n_word = item
# Создаётся новый ключ, или прибавляется значение к существующему:
if n_word not in n_stats:
n_stats[n_word] = stats[item]
else:
n_stats[n_word] = n_stats[n_word] + stats[item]
return n_stats
def dict_sort (stats):
"""Сортировка словаря по частоте и алфавиту"""
stats_sort = OrderedDict(sorted(stats.items(), key=lambda x: x[0], reverse=False))
stats_list = OrderedDict(sorted(stats_sort.items(), key=lambda x: x[1], reverse=True))
return stats_list
#-------------------------------------------------------------------------
# Тело программы:
# Создаётся список аргументов скрипта:
parser = create_parser()
namespace = parser.parse_args()
# Проверяем, существует ли указанный файл:
file_patch = ' '.join(namespace.file)
if namespace.file is not None and os.path.exists(file_patch):
file = open(file_patch, "r")
text = file.read()
file.close()
# Если нет, читаем стандартный ввод:
else:
text = sys.stdin.read()
# Извлекаем из текста слова:
words = lowercase(text)
# Подключение к базе данных:
database = sqlite3.connect(metadict_path(database_name))
cursor = database.cursor()
# Если указано преобразование слов:
if namespace.morph is True:
wordfreq = wordfreq_morph(words)
else:
wordfreq = wordfreq_old(words)
# Отключаемся от базы данных:
database.close()
# Вывод словаря:
wordfreq_sort=dict_sort(wordfreq)
for word, count in wordfreq_sort.items():
print (count, word)
|
Shadybloom/synch-profiler
|
wordfreq-morph.py
|
Python
|
mit
| 7,594 | 0.006508 |
# -*- coding: utf-8 -*-
# Copyright (c) 2014, OneLogin, Inc.
# All rights reserved.
from base64 import b64decode
import json
from lxml import etree
from os.path import dirname, join, exists
import unittest
from xml.dom.minidom import parseString
from onelogin.saml2 import compat
from onelogin.saml2.constants import OneLogin_Saml2_Constants
from onelogin.saml2.settings import OneLogin_Saml2_Settings
from onelogin.saml2.utils import OneLogin_Saml2_Utils
class OneLogin_Saml2_Utils_Test(unittest.TestCase):
data_path = join(dirname(__file__), '..', '..', '..', 'data')
def loadSettingsJSON(self, filename=None):
if filename:
filename = join(dirname(__file__), '..', '..', '..', 'settings', filename)
else:
filename = join(dirname(__file__), '..', '..', '..', 'settings', 'settings1.json')
if exists(filename):
stream = open(filename, 'r')
settings = json.load(stream)
stream.close()
return settings
else:
raise Exception('Settings json file does not exist')
def file_contents(self, filename):
f = open(filename, 'r')
content = f.read()
f.close()
return content
def testFormatCert(self):
"""
Tests the format_cert method of the OneLogin_Saml2_Utils
"""
settings_info = self.loadSettingsJSON()
cert = settings_info['idp']['x509cert']
self.assertNotIn('-----BEGIN CERTIFICATE-----', cert)
self.assertNotIn('-----END CERTIFICATE-----', cert)
self.assertEqual(len(cert), 860)
formated_cert1 = OneLogin_Saml2_Utils.format_cert(cert)
self.assertIn('-----BEGIN CERTIFICATE-----', formated_cert1)
self.assertIn('-----END CERTIFICATE-----', formated_cert1)
formated_cert2 = OneLogin_Saml2_Utils.format_cert(cert, True)
self.assertEqual(formated_cert1, formated_cert2)
formated_cert3 = OneLogin_Saml2_Utils.format_cert(cert, False)
self.assertNotIn('-----BEGIN CERTIFICATE-----', formated_cert3)
self.assertNotIn('-----END CERTIFICATE-----', formated_cert3)
self.assertEqual(len(formated_cert3), 860)
def testFormatPrivateKey(self):
"""
Tests the format_private_key method of the OneLogin_Saml2_Utils
"""
key = "-----BEGIN RSA PRIVATE KEY-----\nMIICXgIBAAKBgQDivbhR7P516x/S3BqKxupQe0LONoliupiBOesCO3SHbDrl3+q9\nIbfnfmE04rNuMcPsIxB161TdDpIesLCn7c8aPHISKOtPlAeTZSnb8QAu7aRjZq3+\nPbrP5uW3TcfCGPtKTytHOge/OlJbo078dVhXQ14d1EDwXJW1rRXuUt4C8QIDAQAB\nAoGAD4/Z4LWVWV6D1qMIp1Gzr0ZmdWTE1SPdZ7Ej8glGnCzPdguCPuzbhGXmIg0V\nJ5D+02wsqws1zd48JSMXXM8zkYZVwQYIPUsNn5FetQpwxDIMPmhHg+QNBgwOnk8J\nK2sIjjLPL7qY7Itv7LT7Gvm5qSOkZ33RCgXcgz+okEIQMYkCQQDzbTOyDL0c5WQV\n6A2k06T/azdhUdGXF9C0+WkWSfNaovmTgRXh1G+jMlr82Snz4p4/STt7P/XtyWzF\n3pkVgZr3AkEA7nPjXwHlttNEMo6AtxHd47nizK2NUN803ElIUT8P9KSCoERmSXq6\n6PDekGNic4ldpsSvOeYCk8MAYoDBy9kvVwJBAMLgX4xg6lzhv7hR5+pWjTb1rIY6\nrCHbrPfU264+UZXz9v2BT/VUznLF81WMvStD9xAPHpFS6R0OLghSZhdzhI0CQQDL\n8Duvfxzrn4b9QlmduV8wLERoT6rEVxKLsPVz316TGrxJvBZLk/cV0SRZE1cZf4uk\nXSWMfEcJ/0Zt+LdG1CqjAkEAqwLSglJ9Dy3HpgMz4vAAyZWzAxvyA1zW0no9GOLc\nPQnYaNUN/Fy2SYtETXTb0CQ9X1rt8ffkFP7ya+5TC83aMg==\n-----END RSA PRIVATE KEY-----\n"
formated_key = OneLogin_Saml2_Utils.format_private_key(key, True)
self.assertIn('-----BEGIN RSA PRIVATE KEY-----', formated_key)
self.assertIn('-----END RSA PRIVATE KEY-----', formated_key)
self.assertEqual(len(formated_key), 891)
formated_key = OneLogin_Saml2_Utils.format_private_key(key, False)
self.assertNotIn('-----BEGIN RSA PRIVATE KEY-----', formated_key)
self.assertNotIn('-----END RSA PRIVATE KEY-----', formated_key)
self.assertEqual(len(formated_key), 816)
key_2 = "-----BEGIN PRIVATE KEY-----\nMIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAM62buSW9Zgh7CmZ\nouJekK0ac9sgEZkspemjv7SyE6Hbdz+KmUr3C7MI6JuPfVyJbxvMDf3FbgBBK7r5\nyfGgehXwplLMZj8glvV3NkdLMLPWmaw9U5sOzRoym46pVvsEo1PUL2qDK5Wrsm1g\nuY1KIDSHL59NQ7PzDKgm1dxioeXFAgMBAAECgYA/fvRzTReloo3rfWD2Tfv84EpE\nPgaJ2ZghO4Zwl97F8icgIo/R4i760Lq6xgnI+gJiNHz7vcB7XYl0RrRMf3HgbA7z\npJxREmOVltESDHy6lH0TmCdv9xMmHltB+pbGOhqBvuGgFbEOR73lDDV0ln2rEITJ\nA2zjYF+hWe8b0JFeQQJBAOsIIIlHAMngjhCQDD6kla/vce972gCFU7ZeFw16ZMmb\n8W4rGRfQoQWYxSLAFIFsYewSBTccanyYbBNe3njki3ECQQDhJ4cgV6VpTwez4dkp\nU/xCHKoReedAEJhXucTNGpiIqu+TDgIz9aRbrgnUKkS1s06UJhcDRTl/+pCSRRt/\nCA2VAkBkPw4pn1hNwvK1S8t9OJQD+5xcKjZcvIFtKoqonAi7GUGL3OQSDVFw4q1K\n2iSk40aM+06wJ/WfeR+3z2ISrGBxAkAJ20YiF1QpcQlASbHNCl0vs7uKOlDyUAer\nR3mjFPf6e6kzQdi815MTZGIPxK3vWmMlPymgvgYPYTO1A4t5myulAkEA1QioAWcJ\noO26qhUlFRBCR8BMJoVPImV7ndVHE7usHdJvP7V2P9RyuRcMCTVul8RRmyoh/+yG\n4ghMaHo/v0YY5Q==\n-----END PRIVATE KEY-----\n"
formated_key_2 = OneLogin_Saml2_Utils.format_private_key(key_2, True)
self.assertIn('-----BEGIN PRIVATE KEY-----', formated_key_2)
self.assertIn('-----END PRIVATE KEY-----', formated_key_2)
self.assertEqual(len(formated_key_2), 916)
formated_key_2 = OneLogin_Saml2_Utils.format_private_key(key_2, False)
self.assertNotIn('-----BEGIN PRIVATE KEY-----', formated_key_2)
self.assertNotIn('-----END PRIVATE KEY-----', formated_key_2)
self.assertEqual(len(formated_key_2), 848)
key_3 = 'MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAM62buSW9Zgh7CmZouJekK0ac9sgEZkspemjv7SyE6Hbdz+KmUr3C7MI6JuPfVyJbxvMDf3FbgBBK7r5yfGgehXwplLMZj8glvV3NkdLMLPWmaw9U5sOzRoym46pVvsEo1PUL2qDK5Wrsm1guY1KIDSHL59NQ7PzDKgm1dxioeXFAgMBAAECgYA/fvRzTReloo3rfWD2Tfv84EpEPgaJ2ZghO4Zwl97F8icgIo/R4i760Lq6xgnI+gJiNHz7vcB7XYl0RrRMf3HgbA7zpJxREmOVltESDHy6lH0TmCdv9xMmHltB+pbGOhqBvuGgFbEOR73lDDV0ln2rEITJA2zjYF+hWe8b0JFeQQJBAOsIIIlHAMngjhCQDD6kla/vce972gCFU7ZeFw16ZMmb8W4rGRfQoQWYxSLAFIFsYewSBTccanyYbBNe3njki3ECQQDhJ4cgV6VpTwez4dkpU/xCHKoReedAEJhXucTNGpiIqu+TDgIz9aRbrgnUKkS1s06UJhcDRTl/+pCSRRt/CA2VAkBkPw4pn1hNwvK1S8t9OJQD+5xcKjZcvIFtKoqonAi7GUGL3OQSDVFw4q1K2iSk40aM+06wJ/WfeR+3z2ISrGBxAkAJ20YiF1QpcQlASbHNCl0vs7uKOlDyUAerR3mjFPf6e6kzQdi815MTZGIPxK3vWmMlPymgvgYPYTO1A4t5myulAkEA1QioAWcJoO26qhUlFRBCR8BMJoVPImV7ndVHE7usHdJvP7V2P9RyuRcMCTVul8RRmyoh/+yG4ghMaHo/v0YY5Q=='
formated_key_3 = OneLogin_Saml2_Utils.format_private_key(key_3, True)
self.assertIn('-----BEGIN RSA PRIVATE KEY-----', formated_key_3)
self.assertIn('-----END RSA PRIVATE KEY-----', formated_key_3)
self.assertEqual(len(formated_key_3), 924)
formated_key_3 = OneLogin_Saml2_Utils.format_private_key(key_3, False)
self.assertNotIn('-----BEGIN PRIVATE KEY-----', formated_key_3)
self.assertNotIn('-----END PRIVATE KEY-----', formated_key_3)
self.assertNotIn('-----BEGIN RSA PRIVATE KEY-----', formated_key_3)
self.assertNotIn('-----END RSA PRIVATE KEY-----', formated_key_3)
self.assertEqual(len(formated_key_3), 848)
def testRedirect(self):
"""
Tests the redirect method of the OneLogin_Saml2_Utils
"""
request_data = {
'http_host': 'example.com'
}
# Check relative and absolute
hostname = OneLogin_Saml2_Utils.get_self_host(request_data)
url = 'http://%s/example' % hostname
url2 = '/example'
target_url = OneLogin_Saml2_Utils.redirect(url, {}, request_data)
target_url2 = OneLogin_Saml2_Utils.redirect(url2, {}, request_data)
self.assertEqual(target_url, target_url2)
# Check that accept http/https and reject other protocols
url3 = 'https://%s/example?test=true' % hostname
url4 = 'ftp://%s/example' % hostname
target_url3 = OneLogin_Saml2_Utils.redirect(url3, {}, request_data)
self.assertIn('test=true', target_url3)
self.assertRaisesRegexp(Exception, 'Redirect to invalid URL',
OneLogin_Saml2_Utils.redirect, url4, {}, request_data)
# Review parameter prefix
parameters1 = {
'value1': 'a'
}
target_url5 = OneLogin_Saml2_Utils.redirect(url, parameters1, request_data)
self.assertEqual('http://%s/example?value1=a' % hostname, target_url5)
target_url6 = OneLogin_Saml2_Utils.redirect(url3, parameters1, request_data)
self.assertEqual('https://%s/example?test=true&value1=a' % hostname, target_url6)
# Review parameters
parameters2 = {
'alphavalue': 'a',
'numvaluelist': ['1', '2'],
'testing': None
}
target_url7 = OneLogin_Saml2_Utils.redirect(url, parameters2, request_data)
parameters2_decoded = {"alphavalue": "alphavalue=a", "numvaluelist": "numvaluelist[]=1&numvaluelist[]=2", "testing": "testing"}
parameters2_str = "&".join(parameters2_decoded[x] for x in parameters2)
self.assertEqual('http://%s/example?%s' % (hostname, parameters2_str), target_url7)
parameters3 = {
'alphavalue': 'a',
'emptynumvaluelist': [],
'numvaluelist': [''],
}
parameters3_decoded = {"alphavalue": "alphavalue=a", "numvaluelist": "numvaluelist[]="}
parameters3_str = "&".join((parameters3_decoded[x] for x in parameters3.keys() if x in parameters3_decoded))
target_url8 = OneLogin_Saml2_Utils.redirect(url, parameters3, request_data)
self.assertEqual('http://%s/example?%s' % (hostname, parameters3_str), target_url8)
def testGetselfhost(self):
"""
Tests the get_self_host method of the OneLogin_Saml2_Utils
"""
request_data = {}
self.assertRaisesRegexp(Exception, 'No hostname defined',
OneLogin_Saml2_Utils.get_self_host, request_data)
request_data = {
'server_name': 'example.com'
}
self.assertEqual('example.com', OneLogin_Saml2_Utils.get_self_host(request_data))
request_data = {
'http_host': 'example.com'
}
self.assertEqual('example.com', OneLogin_Saml2_Utils.get_self_host(request_data))
request_data = {
'http_host': 'example.com:443'
}
self.assertEqual('example.com', OneLogin_Saml2_Utils.get_self_host(request_data))
request_data = {
'http_host': 'example.com:ok'
}
self.assertEqual('example.com:ok', OneLogin_Saml2_Utils.get_self_host(request_data))
def testisHTTPS(self):
"""
Tests the is_https method of the OneLogin_Saml2_Utils
"""
request_data = {
'https': 'off'
}
self.assertFalse(OneLogin_Saml2_Utils.is_https(request_data))
request_data = {
'https': 'on'
}
self.assertTrue(OneLogin_Saml2_Utils.is_https(request_data))
request_data = {
'server_port': '80'
}
self.assertFalse(OneLogin_Saml2_Utils.is_https(request_data))
request_data = {
'server_port': '443'
}
self.assertTrue(OneLogin_Saml2_Utils.is_https(request_data))
def testGetSelfURLhost(self):
"""
Tests the get_self_url_host method of the OneLogin_Saml2_Utils
"""
request_data = {
'http_host': 'example.com'
}
self.assertEqual('http://example.com', OneLogin_Saml2_Utils.get_self_url_host(request_data))
request_data['server_port'] = '80'
self.assertEqual('http://example.com', OneLogin_Saml2_Utils.get_self_url_host(request_data))
request_data['server_port'] = '81'
self.assertEqual('http://example.com:81', OneLogin_Saml2_Utils.get_self_url_host(request_data))
request_data['server_port'] = '443'
self.assertEqual('https://example.com', OneLogin_Saml2_Utils.get_self_url_host(request_data))
del request_data['server_port']
request_data['https'] = 'on'
self.assertEqual('https://example.com', OneLogin_Saml2_Utils.get_self_url_host(request_data))
request_data['server_port'] = '444'
self.assertEqual('https://example.com:444', OneLogin_Saml2_Utils.get_self_url_host(request_data))
request_data['server_port'] = '443'
request_data['request_uri'] = ''
self.assertEqual('https://example.com', OneLogin_Saml2_Utils.get_self_url_host(request_data))
request_data['request_uri'] = '/'
self.assertEqual('https://example.com', OneLogin_Saml2_Utils.get_self_url_host(request_data))
request_data['request_uri'] = 'onelogin/'
self.assertEqual('https://example.com', OneLogin_Saml2_Utils.get_self_url_host(request_data))
request_data['request_uri'] = '/onelogin'
self.assertEqual('https://example.com', OneLogin_Saml2_Utils.get_self_url_host(request_data))
request_data['request_uri'] = 'https://example.com/onelogin/sso'
self.assertEqual('https://example.com', OneLogin_Saml2_Utils.get_self_url_host(request_data))
request_data2 = {
'request_uri': 'example.com/onelogin/sso'
}
self.assertRaisesRegexp(Exception, 'No hostname defined',
OneLogin_Saml2_Utils.get_self_url_host, request_data2)
def testGetSelfURL(self):
"""
Tests the get_self_url method of the OneLogin_Saml2_Utils
"""
request_data = {
'http_host': 'example.com'
}
url = OneLogin_Saml2_Utils.get_self_url_host(request_data)
self.assertEqual(url, OneLogin_Saml2_Utils.get_self_url(request_data))
request_data['request_uri'] = ''
self.assertEqual(url, OneLogin_Saml2_Utils.get_self_url(request_data))
request_data['request_uri'] = '/'
self.assertEqual(url + '/', OneLogin_Saml2_Utils.get_self_url(request_data))
request_data['request_uri'] = 'index.html'
self.assertEqual(url + 'index.html', OneLogin_Saml2_Utils.get_self_url(request_data))
request_data['request_uri'] = '?index.html'
self.assertEqual(url + '?index.html', OneLogin_Saml2_Utils.get_self_url(request_data))
request_data['request_uri'] = '/index.html'
self.assertEqual(url + '/index.html', OneLogin_Saml2_Utils.get_self_url(request_data))
request_data['request_uri'] = '/index.html?testing'
self.assertEqual(url + '/index.html?testing', OneLogin_Saml2_Utils.get_self_url(request_data))
request_data['request_uri'] = '/test/index.html?testing'
self.assertEqual(url + '/test/index.html?testing', OneLogin_Saml2_Utils.get_self_url(request_data))
request_data['request_uri'] = 'https://example.com/testing'
self.assertEqual(url + '/testing', OneLogin_Saml2_Utils.get_self_url(request_data))
def testGetSelfURLNoQuery(self):
"""
Tests the get_self_url_no_query method of the OneLogin_Saml2_Utils
"""
request_data = {
'http_host': 'example.com',
'script_name': '/index.html'
}
url = OneLogin_Saml2_Utils.get_self_url_host(request_data) + request_data['script_name']
self.assertEqual(url, OneLogin_Saml2_Utils.get_self_url_no_query(request_data))
request_data['path_info'] = '/test'
self.assertEqual(url + '/test', OneLogin_Saml2_Utils.get_self_url_no_query(request_data))
def testGetSelfRoutedURLNoQuery(self):
"""
Tests the get_self_routed_url_no_query method of the OneLogin_Saml2_Utils
"""
request_data = {
'http_host': 'example.com',
'request_uri': '/example1/route?x=test',
'query_string': '?x=test'
}
url = OneLogin_Saml2_Utils.get_self_url_host(request_data) + '/example1/route'
self.assertEqual(url, OneLogin_Saml2_Utils.get_self_routed_url_no_query(request_data))
request_data_2 = {
'http_host': 'example.com',
'request_uri': '',
}
url_2 = OneLogin_Saml2_Utils.get_self_url_host(request_data_2)
self.assertEqual(url_2, OneLogin_Saml2_Utils.get_self_routed_url_no_query(request_data_2))
request_data_3 = {
'http_host': 'example.com',
}
url_3 = OneLogin_Saml2_Utils.get_self_url_host(request_data_3)
self.assertEqual(url_3, OneLogin_Saml2_Utils.get_self_routed_url_no_query(request_data_3))
request_data_4 = {
'http_host': 'example.com',
'request_uri': '/example1/route/test/',
'query_string': '?invalid=1'
}
url_4 = OneLogin_Saml2_Utils.get_self_url_host(request_data_4) + '/example1/route/test/'
self.assertEqual(url_4, OneLogin_Saml2_Utils.get_self_routed_url_no_query(request_data_4))
request_data_5 = {
'http_host': 'example.com',
'request_uri': '/example1/route/test/',
'query_string': ''
}
url_5 = OneLogin_Saml2_Utils.get_self_url_host(request_data_5) + '/example1/route/test/'
self.assertEqual(url_5, OneLogin_Saml2_Utils.get_self_routed_url_no_query(request_data_5))
request_data_6 = {
'http_host': 'example.com',
'request_uri': '/example1/route/test/',
}
url_6 = OneLogin_Saml2_Utils.get_self_url_host(request_data_6) + '/example1/route/test/'
self.assertEqual(url_6, OneLogin_Saml2_Utils.get_self_routed_url_no_query(request_data_6))
def testGetStatus(self):
"""
Gets the status of a message
"""
xml = self.file_contents(join(self.data_path, 'responses', 'response1.xml.base64'))
xml = b64decode(xml)
dom = etree.fromstring(xml)
status = OneLogin_Saml2_Utils.get_status(dom)
self.assertEqual(OneLogin_Saml2_Constants.STATUS_SUCCESS, status['code'])
xml2 = self.file_contents(join(self.data_path, 'responses', 'invalids', 'status_code_responder.xml.base64'))
xml2 = b64decode(xml2)
dom2 = etree.fromstring(xml2)
status2 = OneLogin_Saml2_Utils.get_status(dom2)
self.assertEqual(OneLogin_Saml2_Constants.STATUS_RESPONDER, status2['code'])
self.assertEqual('', status2['msg'])
xml3 = self.file_contents(join(self.data_path, 'responses', 'invalids', 'status_code_responer_and_msg.xml.base64'))
xml3 = b64decode(xml3)
dom3 = etree.fromstring(xml3)
status3 = OneLogin_Saml2_Utils.get_status(dom3)
self.assertEqual(OneLogin_Saml2_Constants.STATUS_RESPONDER, status3['code'])
self.assertEqual('something_is_wrong', status3['msg'])
xml_inv = self.file_contents(join(self.data_path, 'responses', 'invalids', 'no_status.xml.base64'))
xml_inv = b64decode(xml_inv)
dom_inv = etree.fromstring(xml_inv)
self.assertRaisesRegexp(Exception, 'Missing Status on response',
OneLogin_Saml2_Utils.get_status, dom_inv)
xml_inv2 = self.file_contents(join(self.data_path, 'responses', 'invalids', 'no_status_code.xml.base64'))
xml_inv2 = b64decode(xml_inv2)
dom_inv2 = etree.fromstring(xml_inv2)
self.assertRaisesRegexp(Exception, 'Missing Status Code on response',
OneLogin_Saml2_Utils.get_status, dom_inv2)
def testParseDuration(self):
"""
Tests the parse_duration method of the OneLogin_Saml2_Utils
"""
duration = 'PT1393462294S'
timestamp = 1393876825
parsed_duration = OneLogin_Saml2_Utils.parse_duration(duration, timestamp)
self.assertEqual(2787339119, parsed_duration)
parsed_duration_2 = OneLogin_Saml2_Utils.parse_duration(duration)
self.assertTrue(parsed_duration_2 > parsed_duration)
invalid_duration = 'PT1Y'
self.assertRaisesRegexp(Exception, 'Unrecognised ISO 8601 date format',
OneLogin_Saml2_Utils.parse_duration, invalid_duration)
new_duration = 'P1Y1M'
parsed_duration_4 = OneLogin_Saml2_Utils.parse_duration(new_duration, timestamp)
self.assertEqual(1428091225, parsed_duration_4)
neg_duration = '-P14M'
parsed_duration_5 = OneLogin_Saml2_Utils.parse_duration(neg_duration, timestamp)
self.assertEqual(1357243225, parsed_duration_5)
def testParseSAML2Time(self):
"""
Tests the parse_SAML_to_time method of the OneLogin_Saml2_Utils
"""
time = 1386650371
saml_time = '2013-12-10T04:39:31Z'
self.assertEqual(time, OneLogin_Saml2_Utils.parse_SAML_to_time(saml_time))
self.assertRaisesRegexp(Exception, 'does not match format',
OneLogin_Saml2_Utils.parse_SAML_to_time, 'invalidSAMLTime')
# Now test if toolkit supports miliseconds
saml_time2 = '2013-12-10T04:39:31.120Z'
self.assertEqual(time, OneLogin_Saml2_Utils.parse_SAML_to_time(saml_time2))
def testParseTime2SAML(self):
"""
Tests the parse_time_to_SAML method of the OneLogin_Saml2_Utils
"""
time = 1386650371
saml_time = '2013-12-10T04:39:31Z'
self.assertEqual(saml_time, OneLogin_Saml2_Utils.parse_time_to_SAML(time))
self.assertRaisesRegexp(Exception, 'could not convert string to float',
OneLogin_Saml2_Utils.parse_time_to_SAML, 'invalidtime')
def testGetExpireTime(self):
"""
Tests the get_expire_time method of the OneLogin_Saml2_Utils
"""
self.assertEqual(None, OneLogin_Saml2_Utils.get_expire_time())
self.assertNotEqual(None, OneLogin_Saml2_Utils.get_expire_time('PT360000S'))
self.assertEqual('1291955971', OneLogin_Saml2_Utils.get_expire_time('PT360000S', '2010-12-10T04:39:31Z'))
self.assertEqual('1291955971', OneLogin_Saml2_Utils.get_expire_time('PT360000S', 1291955971))
self.assertNotEqual('3311642371', OneLogin_Saml2_Utils.get_expire_time('PT360000S', '2074-12-10T04:39:31Z'))
self.assertNotEqual('3311642371', OneLogin_Saml2_Utils.get_expire_time('PT360000S', 1418186371))
def testGenerateNameIdWithSPNameQualifier(self):
"""
Tests the generateNameId method of the OneLogin_Saml2_Utils
"""
name_id_value = 'ONELOGIN_ce998811003f4e60f8b07a311dc641621379cfde'
entity_id = 'http://stuff.com/endpoints/metadata.php'
name_id_format = 'urn:oasis:names:tc:SAML:2.0:nameid-format:unspecified'
name_id = OneLogin_Saml2_Utils.generate_name_id(name_id_value, entity_id, name_id_format)
expected_name_id = '<saml:NameID SPNameQualifier="http://stuff.com/endpoints/metadata.php" Format="urn:oasis:names:tc:SAML:2.0:nameid-format:unspecified">ONELOGIN_ce998811003f4e60f8b07a311dc641621379cfde</saml:NameID>'
self.assertEqual(expected_name_id, name_id)
settings_info = self.loadSettingsJSON()
x509cert = settings_info['idp']['x509cert']
key = OneLogin_Saml2_Utils.format_cert(x509cert)
name_id_enc = OneLogin_Saml2_Utils.generate_name_id(name_id_value, entity_id, name_id_format, key)
expected_name_id_enc = '<saml:EncryptedID><xenc:EncryptedData xmlns:xenc="http://www.w3.org/2001/04/xmlenc#" xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" Type="http://www.w3.org/2001/04/xmlenc#Element">\n<xenc:EncryptionMethod Algorithm="http://www.w3.org/2001/04/xmlenc#aes128-cbc"/>\n<dsig:KeyInfo xmlns:dsig="http://www.w3.org/2000/09/xmldsig#">\n<xenc:EncryptedKey>\n<xenc:EncryptionMethod Algorithm="http://www.w3.org/2001/04/xmlenc#rsa-oaep-mgf1p"/>\n<xenc:CipherData>\n<xenc:CipherValue>'
self.assertIn(expected_name_id_enc, name_id_enc)
def testGenerateNameIdWithSPNameQualifier(self):
"""
Tests the generateNameId method of the OneLogin_Saml2_Utils
"""
name_id_value = 'ONELOGIN_ce998811003f4e60f8b07a311dc641621379cfde'
name_id_format = 'urn:oasis:names:tc:SAML:2.0:nameid-format:unspecified'
name_id = OneLogin_Saml2_Utils.generate_name_id(name_id_value, None, name_id_format)
expected_name_id = '<saml:NameID Format="urn:oasis:names:tc:SAML:2.0:nameid-format:unspecified">ONELOGIN_ce998811003f4e60f8b07a311dc641621379cfde</saml:NameID>'
self.assertEqual(expected_name_id, name_id)
settings_info = self.loadSettingsJSON()
x509cert = settings_info['idp']['x509cert']
key = OneLogin_Saml2_Utils.format_cert(x509cert)
name_id_enc = OneLogin_Saml2_Utils.generate_name_id(name_id_value, None, name_id_format, key)
expected_name_id_enc = '<saml:EncryptedID><xenc:EncryptedData xmlns:xenc="http://www.w3.org/2001/04/xmlenc#" xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" Type="http://www.w3.org/2001/04/xmlenc#Element">\n<xenc:EncryptionMethod Algorithm="http://www.w3.org/2001/04/xmlenc#aes128-cbc"/>\n<dsig:KeyInfo xmlns:dsig="http://www.w3.org/2000/09/xmldsig#">\n<xenc:EncryptedKey>\n<xenc:EncryptionMethod Algorithm="http://www.w3.org/2001/04/xmlenc#rsa-oaep-mgf1p"/>\n<xenc:CipherData>\n<xenc:CipherValue>'
self.assertIn(expected_name_id_enc, name_id_enc)
def testCalculateX509Fingerprint(self):
"""
Tests the calculateX509Fingerprint method of the OneLogin_Saml2_Utils
"""
settings = OneLogin_Saml2_Settings(self.loadSettingsJSON())
cert_path = settings.get_cert_path()
key = self.file_contents(cert_path + 'sp.key')
cert = self.file_contents(cert_path + 'sp.crt')
self.assertEqual(None, OneLogin_Saml2_Utils.calculate_x509_fingerprint(key))
self.assertEqual('afe71c28ef740bc87425be13a2263d37971da1f9', OneLogin_Saml2_Utils.calculate_x509_fingerprint(cert))
self.assertEqual('afe71c28ef740bc87425be13a2263d37971da1f9', OneLogin_Saml2_Utils.calculate_x509_fingerprint(cert, 'sha1'))
self.assertEqual('c51cfa06c7a49767f6eab18238eae1c56708e29264da3d11f538a12cd2c357ba', OneLogin_Saml2_Utils.calculate_x509_fingerprint(cert, 'sha256'))
self.assertEqual('bc5826e6f9429247254bae5e3c650e6968a36a62d23075eb168134978d88600559c10830c28711b2c29c7947c0c2eb1d', OneLogin_Saml2_Utils.calculate_x509_fingerprint(cert, 'sha384'))
self.assertEqual('3db29251b97559c67988ea0754cb0573fc409b6f75d89282d57cfb75089539b0bbdb2dcd9ec6e032549ecbc466439d5992e18db2cf5494ca2fe1b2e16f348dff', OneLogin_Saml2_Utils.calculate_x509_fingerprint(cert, 'sha512'))
def testDeleteLocalSession(self):
"""
Tests the delete_local_session method of the OneLogin_Saml2_Utils
"""
global local_session_test
local_session_test = 1
OneLogin_Saml2_Utils.delete_local_session()
self.assertEqual(1, local_session_test)
dscb = lambda: self.session_cear()
OneLogin_Saml2_Utils.delete_local_session(dscb)
self.assertEqual(0, local_session_test)
def session_cear(self):
"""
Auxiliar method to test the delete_local_session method of the OneLogin_Saml2_Utils
"""
global local_session_test
local_session_test = 0
def testFormatFingerPrint(self):
"""
Tests the format_finger_print method of the OneLogin_Saml2_Utils
"""
finger_print_1 = 'AF:E7:1C:28:EF:74:0B:C8:74:25:BE:13:A2:26:3D:37:97:1D:A1:F9'
self.assertEqual('afe71c28ef740bc87425be13a2263d37971da1f9', OneLogin_Saml2_Utils.format_finger_print(finger_print_1))
finger_print_2 = 'afe71c28ef740bc87425be13a2263d37971da1f9'
self.assertEqual('afe71c28ef740bc87425be13a2263d37971da1f9', OneLogin_Saml2_Utils.format_finger_print(finger_print_2))
def testDecryptElement(self):
"""
Tests the decrypt_element method of the OneLogin_Saml2_Utils
"""
settings = OneLogin_Saml2_Settings(self.loadSettingsJSON())
key = settings.get_sp_key()
xml_nameid_enc = b64decode(self.file_contents(join(self.data_path, 'responses', 'response_encrypted_nameid.xml.base64')))
dom_nameid_enc = etree.fromstring(xml_nameid_enc)
encrypted_nameid_nodes = dom_nameid_enc.find('.//saml:EncryptedID', namespaces=OneLogin_Saml2_Constants.NSMAP)
encrypted_data = encrypted_nameid_nodes[0]
decrypted_nameid = OneLogin_Saml2_Utils.decrypt_element(encrypted_data, key)
self.assertEqual('{%s}NameID' % OneLogin_Saml2_Constants.NS_SAML, decrypted_nameid.tag)
self.assertEqual('2de11defd199f8d5bb63f9b7deb265ba5c675c10', decrypted_nameid.text)
xml_assertion_enc = b64decode(self.file_contents(join(self.data_path, 'responses', 'valid_encrypted_assertion_encrypted_nameid.xml.base64')))
dom_assertion_enc = etree.fromstring(xml_assertion_enc)
encrypted_assertion_enc_nodes = dom_assertion_enc.find('.//saml:EncryptedAssertion', namespaces=OneLogin_Saml2_Constants.NSMAP)
encrypted_data_assert = encrypted_assertion_enc_nodes[0]
decrypted_assertion = OneLogin_Saml2_Utils.decrypt_element(encrypted_data_assert, key)
self.assertEqual('{%s}Assertion' % OneLogin_Saml2_Constants.NS_SAML, decrypted_assertion.tag)
self.assertEqual('_6fe189b1c241827773902f2b1d3a843418206a5c97', decrypted_assertion.get('ID'))
encrypted_nameid_nodes = decrypted_assertion.xpath('./saml:Subject/saml:EncryptedID', namespaces=OneLogin_Saml2_Constants.NSMAP)
encrypted_data = encrypted_nameid_nodes[0][0]
decrypted_nameid = OneLogin_Saml2_Utils.decrypt_element(encrypted_data, key)
self.assertEqual('{%s}NameID' % OneLogin_Saml2_Constants.NS_SAML, decrypted_nameid.tag)
self.assertEqual('457bdb600de717891c77647b0806ce59c089d5b8', decrypted_nameid.text)
key_2_file_name = join(self.data_path, 'misc', 'sp2.key')
f = open(key_2_file_name, 'r')
key2 = f.read()
f.close()
self.assertRaises(Exception, OneLogin_Saml2_Utils.decrypt_element, encrypted_data, key2)
key_3_file_name = join(self.data_path, 'misc', 'sp2.key')
f = open(key_3_file_name, 'r')
key3 = f.read()
f.close()
self.assertRaises(Exception, OneLogin_Saml2_Utils.decrypt_element, encrypted_data, key3)
xml_nameid_enc_2 = b64decode(self.file_contents(join(self.data_path, 'responses', 'invalids', 'encrypted_nameID_without_EncMethod.xml.base64')))
dom_nameid_enc_2 = etree.fromstring(xml_nameid_enc_2)
encrypted_nameid_nodes_2 = dom_nameid_enc_2.find('.//saml:EncryptedID', namespaces=OneLogin_Saml2_Constants.NSMAP)
encrypted_data_2 = encrypted_nameid_nodes_2[0]
self.assertRaises(Exception, OneLogin_Saml2_Utils.decrypt_element, encrypted_data_2, key)
xml_nameid_enc_3 = b64decode(self.file_contents(join(self.data_path, 'responses', 'invalids', 'encrypted_nameID_without_keyinfo.xml.base64')))
dom_nameid_enc_3 = etree.fromstring(xml_nameid_enc_3)
encrypted_nameid_nodes_3 = dom_nameid_enc_3.find('.//saml:EncryptedID', namespaces=OneLogin_Saml2_Constants.NSMAP)
encrypted_data_3 = encrypted_nameid_nodes_3[0]
self.assertRaises(Exception, OneLogin_Saml2_Utils.decrypt_element, encrypted_data_3, key)
def testAddSign(self):
"""
Tests the add_sign method of the OneLogin_Saml2_Utils
"""
settings = OneLogin_Saml2_Settings(self.loadSettingsJSON())
key = settings.get_sp_key()
cert = settings.get_sp_cert()
xml_authn = b64decode(self.file_contents(join(self.data_path, 'requests', 'authn_request.xml.base64')))
xml_authn_signed = compat.to_string(OneLogin_Saml2_Utils.add_sign(xml_authn, key, cert))
self.assertIn('<ds:SignatureValue>', xml_authn_signed)
res = parseString(xml_authn_signed)
ds_signature = res.firstChild.firstChild.nextSibling.nextSibling
self.assertIn('ds:Signature', ds_signature.tagName)
xml_authn_dom = parseString(xml_authn)
xml_authn_signed_2 = compat.to_string(OneLogin_Saml2_Utils.add_sign(xml_authn_dom.toxml(), key, cert))
self.assertIn('<ds:SignatureValue>', xml_authn_signed_2)
res_2 = parseString(xml_authn_signed_2)
ds_signature_2 = res_2.firstChild.firstChild.nextSibling.nextSibling
self.assertIn('ds:Signature', ds_signature_2.tagName)
xml_authn_signed_3 = compat.to_string(OneLogin_Saml2_Utils.add_sign(xml_authn_dom.firstChild.toxml(), key, cert))
self.assertIn('<ds:SignatureValue>', xml_authn_signed_3)
res_3 = parseString(xml_authn_signed_3)
ds_signature_3 = res_3.firstChild.firstChild.nextSibling.nextSibling
self.assertIn('ds:Signature', ds_signature_3.tagName)
xml_authn_etree = etree.fromstring(xml_authn)
xml_authn_signed_4 = compat.to_string(OneLogin_Saml2_Utils.add_sign(xml_authn_etree, key, cert))
self.assertIn('<ds:SignatureValue>', xml_authn_signed_4)
res_4 = parseString(xml_authn_signed_4)
ds_signature_4 = res_4.firstChild.firstChild.nextSibling.nextSibling
self.assertIn('ds:Signature', ds_signature_4.tagName)
xml_authn_signed_5 = compat.to_string(OneLogin_Saml2_Utils.add_sign(xml_authn_etree, key, cert))
self.assertIn('<ds:SignatureValue>', xml_authn_signed_5)
res_5 = parseString(xml_authn_signed_5)
ds_signature_5 = res_5.firstChild.firstChild.nextSibling.nextSibling
self.assertIn('ds:Signature', ds_signature_5.tagName)
xml_logout_req = b64decode(self.file_contents(join(self.data_path, 'logout_requests', 'logout_request.xml.base64')))
xml_logout_req_signed = compat.to_string(OneLogin_Saml2_Utils.add_sign(xml_logout_req, key, cert))
self.assertIn('<ds:SignatureValue>', xml_logout_req_signed)
res_6 = parseString(xml_logout_req_signed)
ds_signature_6 = res_6.firstChild.firstChild.nextSibling.nextSibling
self.assertIn('ds:Signature', ds_signature_6.tagName)
xml_logout_res = b64decode(self.file_contents(join(self.data_path, 'logout_responses', 'logout_response.xml.base64')))
xml_logout_res_signed = compat.to_string(OneLogin_Saml2_Utils.add_sign(xml_logout_res, key, cert))
self.assertIn('<ds:SignatureValue>', xml_logout_res_signed)
res_7 = parseString(xml_logout_res_signed)
ds_signature_7 = res_7.firstChild.firstChild.nextSibling.nextSibling
self.assertIn('ds:Signature', ds_signature_7.tagName)
xml_metadata = self.file_contents(join(self.data_path, 'metadata', 'metadata_settings1.xml'))
xml_metadata_signed = compat.to_string(OneLogin_Saml2_Utils.add_sign(xml_metadata, key, cert))
self.assertIn('<ds:SignatureValue>', xml_metadata_signed)
res_8 = parseString(xml_metadata_signed)
ds_signature_8 = res_8.firstChild.firstChild.nextSibling.firstChild.nextSibling
self.assertIn('ds:Signature', ds_signature_8.tagName)
def testValidateSign(self):
"""
Tests the validate_sign method of the OneLogin_Saml2_Utils
"""
settings = OneLogin_Saml2_Settings(self.loadSettingsJSON())
idp_data = settings.get_idp_data()
cert = idp_data['x509cert']
settings_2 = OneLogin_Saml2_Settings(self.loadSettingsJSON('settings2.json'))
idp_data2 = settings_2.get_idp_data()
cert_2 = idp_data2['x509cert']
fingerprint_2 = OneLogin_Saml2_Utils.calculate_x509_fingerprint(cert_2)
fingerprint_2_256 = OneLogin_Saml2_Utils.calculate_x509_fingerprint(cert_2, 'sha256')
try:
self.assertFalse(OneLogin_Saml2_Utils.validate_sign('', cert))
except Exception as e:
self.assertEqual('Empty string supplied as input', str(e))
# expired cert
xml_metadata_signed = self.file_contents(join(self.data_path, 'metadata', 'signed_metadata_settings1.xml'))
self.assertTrue(OneLogin_Saml2_Utils.validate_sign(xml_metadata_signed, cert))
# expired cert, verified it
self.assertFalse(OneLogin_Saml2_Utils.validate_sign(xml_metadata_signed, cert, validatecert=True))
xml_metadata_signed_2 = self.file_contents(join(self.data_path, 'metadata', 'signed_metadata_settings2.xml'))
self.assertTrue(OneLogin_Saml2_Utils.validate_sign(xml_metadata_signed_2, cert_2))
self.assertTrue(OneLogin_Saml2_Utils.validate_sign(xml_metadata_signed_2, None, fingerprint_2))
xml_response_msg_signed = b64decode(self.file_contents(join(self.data_path, 'responses', 'signed_message_response.xml.base64')))
# expired cert
self.assertTrue(OneLogin_Saml2_Utils.validate_sign(xml_response_msg_signed, cert))
# expired cert, verified it
self.assertFalse(OneLogin_Saml2_Utils.validate_sign(xml_response_msg_signed, cert, validatecert=True))
# modified cert
other_cert_path = join(dirname(__file__), '..', '..', '..', 'certs')
f = open(other_cert_path + '/certificate1', 'r')
cert_x = f.read()
f.close()
self.assertFalse(OneLogin_Saml2_Utils.validate_sign(xml_response_msg_signed, cert_x))
self.assertFalse(OneLogin_Saml2_Utils.validate_sign(xml_response_msg_signed, cert_x, validatecert=True))
xml_response_msg_signed_2 = b64decode(self.file_contents(join(self.data_path, 'responses', 'signed_message_response2.xml.base64')))
self.assertTrue(OneLogin_Saml2_Utils.validate_sign(xml_response_msg_signed_2, cert_2))
self.assertTrue(OneLogin_Saml2_Utils.validate_sign(xml_response_msg_signed_2, None, fingerprint_2))
self.assertTrue(OneLogin_Saml2_Utils.validate_sign(xml_response_msg_signed_2, None, fingerprint_2, 'sha1'))
self.assertTrue(OneLogin_Saml2_Utils.validate_sign(xml_response_msg_signed_2, None, fingerprint_2_256, 'sha256'))
xml_response_assert_signed = b64decode(self.file_contents(join(self.data_path, 'responses', 'signed_assertion_response.xml.base64')))
# expired cert
self.assertTrue(OneLogin_Saml2_Utils.validate_sign(xml_response_assert_signed, cert))
# expired cert, verified it
self.assertFalse(OneLogin_Saml2_Utils.validate_sign(xml_response_assert_signed, cert, validatecert=True))
xml_response_assert_signed_2 = b64decode(self.file_contents(join(self.data_path, 'responses', 'signed_assertion_response2.xml.base64')))
self.assertTrue(OneLogin_Saml2_Utils.validate_sign(xml_response_assert_signed_2, cert_2))
self.assertTrue(OneLogin_Saml2_Utils.validate_sign(xml_response_assert_signed_2, None, fingerprint_2))
xml_response_double_signed = b64decode(self.file_contents(join(self.data_path, 'responses', 'double_signed_response.xml.base64')))
# expired cert
self.assertTrue(OneLogin_Saml2_Utils.validate_sign(xml_response_double_signed, cert))
# expired cert, verified it
self.assertFalse(OneLogin_Saml2_Utils.validate_sign(xml_response_double_signed, cert, validatecert=True))
xml_response_double_signed_2 = b64decode(self.file_contents(join(self.data_path, 'responses', 'double_signed_response2.xml.base64')))
self.assertTrue(OneLogin_Saml2_Utils.validate_sign(xml_response_double_signed_2, cert_2))
self.assertTrue(OneLogin_Saml2_Utils.validate_sign(xml_response_double_signed_2, None, fingerprint_2))
dom = parseString(xml_response_msg_signed_2)
self.assertTrue(OneLogin_Saml2_Utils.validate_sign(dom.toxml(), cert_2))
dom.firstChild.firstChild.firstChild.nodeValue = 'https://idp.example.com/simplesaml/saml2/idp/metadata.php'
dom.firstChild.getAttributeNode('ID').nodeValue = u'_34fg27g212d63k1f923845324475802ac0fc24530b'
# Reference validation failed
self.assertFalse(OneLogin_Saml2_Utils.validate_sign(dom.toxml(), cert_2))
invalid_fingerprint = 'afe71c34ef740bc87434be13a2263d31271da1f9'
# Wrong fingerprint
self.assertFalse(OneLogin_Saml2_Utils.validate_sign(xml_metadata_signed_2, None, invalid_fingerprint))
dom_2 = parseString(xml_response_double_signed_2)
self.assertTrue(OneLogin_Saml2_Utils.validate_sign(dom_2.toxml(), cert_2))
dom_2.firstChild.firstChild.firstChild.nodeValue = 'https://example.com/other-idp'
# Modified message
self.assertFalse(OneLogin_Saml2_Utils.validate_sign(dom_2.toxml(), cert_2))
dom_3 = parseString(xml_response_double_signed_2)
assert_elem_3 = dom_3.firstChild.firstChild.nextSibling.nextSibling.nextSibling
assert_elem_3.setAttributeNS(OneLogin_Saml2_Constants.NS_SAML, 'xmlns:saml', OneLogin_Saml2_Constants.NS_SAML)
self.assertTrue(OneLogin_Saml2_Utils.validate_sign(assert_elem_3.toxml(), cert_2))
no_signed = b64decode(self.file_contents(join(self.data_path, 'responses', 'invalids', 'no_signature.xml.base64')))
self.assertFalse(OneLogin_Saml2_Utils.validate_sign(no_signed, cert))
no_key = b64decode(self.file_contents(join(self.data_path, 'responses', 'invalids', 'no_key.xml.base64')))
self.assertFalse(OneLogin_Saml2_Utils.validate_sign(no_key, cert))
|
pitbulk/python3-saml
|
tests/src/OneLogin/saml2_tests/utils_test.py
|
Python
|
bsd-3-clause
| 40,950 | 0.004274 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v10.enums",
marshal="google.ads.googleads.v10",
manifest={"PaymentModeEnum",},
)
class PaymentModeEnum(proto.Message):
r"""Container for enum describing possible payment modes.
"""
class PaymentMode(proto.Enum):
r"""Enum describing possible payment modes."""
UNSPECIFIED = 0
UNKNOWN = 1
CLICKS = 4
CONVERSION_VALUE = 5
CONVERSIONS = 6
GUEST_STAY = 7
__all__ = tuple(sorted(__protobuf__.manifest))
|
googleads/google-ads-python
|
google/ads/googleads/v10/enums/types/payment_mode.py
|
Python
|
apache-2.0
| 1,172 | 0.000853 |
# -*- Mode: Python; test-case-name: -*-
# vi:si:et:sw=4:sts=4:ts=4
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.
import time
from twisted.internet import reactor
from flumotion.common import log
# Minimum size to take in account when calculating mean file read
MIN_REQUEST_SIZE = 64 * 1024 + 1
# Statistics update period
STATS_UPDATE_PERIOD = 10
class RequestStatistics(object):
def __init__(self, serverStats):
self._stats = serverStats
self.bytesSent = 0L
self._stats._onRequestStart(self)
def onDataSent(self, size):
self.bytesSent += size
self._stats._onRequestDataSent(self, size)
def onCompleted(self, size):
self._stats._onRequestComplete(self, size)
class ServerStatistics(object):
_updater = None
_callId = None
def __init__(self):
now = time.time()
self.startTime = now
self.currentRequestCount = 0
self.totalRequestCount = 0
self.requestCountPeak = 0
self.requestCountPeakTime = now
self.finishedRequestCount = 0
self.totalBytesSent = 0L
# Updated by a call to the update method
self.meanRequestCount = 0
self.currentRequestRate = 0
self.requestRatePeak = 0
self.requestRatePeakTime = now
self.meanRequestRate = 0.0
self.currentBitrate = 0
self.meanBitrate = 0
self.bitratePeak = 0
self.bitratePeakTime = now
self._fileReadRatios = 0.0
self._lastUpdateTime = now
self._lastRequestCount = 0
self._lastBytesSent = 0L
def startUpdates(self, updater):
self._updater = updater
self._set("bitrate-peak-time", self.bitratePeakTime)
self._set("request-rate-peak-time", self.requestRatePeakTime)
self._set("request-count-peak-time", self.requestCountPeakTime)
if self._callId is None:
self._callId = reactor.callLater(STATS_UPDATE_PERIOD, self._update)
def stopUpdates(self):
self._updater = None
if self._callId is not None:
self._callId.cancel()
self._callId = None
def getMeanFileReadRatio(self):
if self.finishedRequestCount > 0:
return self._fileReadRatios / self.finishedRequestCount
return 0.0
meanFileReadRatio = property(getMeanFileReadRatio)
def _update(self):
now = time.time()
updateDelta = now - self._lastUpdateTime
# Update average concurrent request
meanReqCount = self._updateAverage(self._lastUpdateTime, now,
self.meanRequestCount,
self.currentRequestCount)
# Calculate Request rate
countDiff = self.totalRequestCount - self._lastRequestCount
newReqRate = float(countDiff) / updateDelta
# Calculate average request rate
meanReqRate = self._updateAverage(self._lastUpdateTime, now,
self.currentRequestRate, newReqRate)
# Calculate current bitrate
bytesDiff = (self.totalBytesSent - self._lastBytesSent) * 8
newBitrate = bytesDiff / updateDelta
# calculate average bitrate
meanBitrate = self._updateAverage(self._lastUpdateTime, now,
self.currentBitrate, newBitrate)
# Update Values
self.meanRequestCount = meanReqCount
self.currentRequestRate = newReqRate
self.meanRequestRate = meanReqRate
self.currentBitrate = newBitrate
self.meanBitrate = meanBitrate
# Update the statistics keys with the new values
self._set("mean-request-count", meanReqCount)
self._set("current-request-rate", newReqRate)
self._set("mean-request-rate", meanReqRate)
self._set("current-bitrate", newBitrate)
self._set("mean-bitrate", meanBitrate)
# Update request rate peak
if newReqRate > self.requestRatePeak:
self.requestRatePeak = newReqRate
self.requestRatePeakTime = now
# update statistic keys
self._set("request-rate-peak", newReqRate)
self._set("request-rate-peak-time", now)
# Update bitrate peak
if newBitrate > self.bitratePeak:
self.bitratePeak = newBitrate
self.bitratePeakTime = now
# update statistic keys
self._set("bitrate-peak", newBitrate)
self._set("bitrate-peak-time", now)
# Update bytes read statistic key too
self._set("total-bytes-sent", self.totalBytesSent)
self._lastRequestCount = self.totalRequestCount
self._lastBytesSent = self.totalBytesSent
self._lastUpdateTime = now
# Log the stats
self._logStatsLine()
self._callId = reactor.callLater(STATS_UPDATE_PERIOD, self._update)
def _set(self, key, value):
if self._updater is not None:
self._updater.update(key, value)
def _onRequestStart(self, stats):
# Update counters
self.currentRequestCount += 1
self.totalRequestCount += 1
self._set("current-request-count", self.currentRequestCount)
self._set("total-request-count", self.totalRequestCount)
# Update concurrent request peak
if self.currentRequestCount > self.requestCountPeak:
now = time.time()
self.requestCountPeak = self.currentRequestCount
self.requestCountPeakTime = now
self._set("request-count-peak", self.currentRequestCount)
self._set("request-count-peak-time", now)
def _onRequestDataSent(self, stats, size):
self.totalBytesSent += size
def _onRequestComplete(self, stats, size):
self.currentRequestCount -= 1
self.finishedRequestCount += 1
self._set("current-request-count", self.currentRequestCount)
if (size > 0) and (stats.bytesSent > MIN_REQUEST_SIZE):
self._fileReadRatios += float(stats.bytesSent) / size
self._set("mean-file-read-ratio", self.meanFileReadRatio)
def _updateAverage(self, lastTime, newTime, lastValue, newValue):
lastDelta = lastTime - self.startTime
newDelta = newTime - lastTime
if lastDelta > 0:
delta = lastDelta + newDelta
before = (lastValue * lastDelta) / delta
after = (newValue * newDelta) / delta
return before + after
return lastValue
def _logStatsLine(self):
"""
Statistic fields names:
TRC: Total Request Count
CRC: Current Request Count
CRR: Current Request Rate
MRR: Mean Request Rate
FRR: File Read Ratio
MBR: Mean Bitrate
CBR: Current Bitrate
"""
log.debug("stats-http-server",
"TRC: %s; CRC: %d; CRR: %.2f; MRR: %.2f; "
"FRR: %.4f; MBR: %d; CBR: %d",
self.totalRequestCount, self.currentRequestCount,
self.currentRequestRate, self.meanRequestRate,
self.meanFileReadRatio, self.meanBitrate,
self.currentBitrate)
|
flumotion-mirror/flumotion
|
flumotion/component/misc/httpserver/serverstats.py
|
Python
|
lgpl-2.1
| 7,784 | 0 |
# -*- coding: utf-8 -*-
__all__ = ["inet_aton", "record_by_ip", "record_by_request", "get_ip",
"record_by_ip_as_dict", "record_by_request_as_dict"]
import struct
import socket
from geoip.defaults import BACKEND, REDIS_TYPE
from geoip.redis_wrapper import RedisClient
from geoip.models import Range
_RECORDS_KEYS = ('country', 'area', 'city', 'isp', 'provider')
def _from_redis(ip):
r = RedisClient()
data = r.zrangebyscore("geoip", ip, 'inf', 0, 1, withscores=True)
if not data:
return
res, score = data[0]
geo_id, junk, prefix = res.decode().split(":", 2)
if prefix == "s" and score > ip:
return
info = r.get("geoip:%s" % junk)
if info is not None:
return info.decode('utf-8', 'ignore').split(':')
def _from_db(ip):
obj = Range.objects.select_related().filter(
start_ip__lte=ip, end_ip__gte=ip
).order_by('end_ip', '-start_ip')[:1][0]
if REDIS_TYPE == 'pk':
return map(lambda k: str(getattr(obj, k).pk), _RECORDS_KEYS)
return map(lambda k: str(getattr(obj, k)), _RECORDS_KEYS)
def inet_aton(ip):
return struct.unpack('!L', socket.inet_aton(ip))[0]
def get_ip(request):
ip = request.META['REMOTE_ADDR']
if 'HTTP_X_FORWARDED_FOR' in request.META:
ip = request.META['HTTP_X_FORWARDED_FOR'].split(',')[0]
return ip
def record_by_ip(ip):
return (_from_redis if BACKEND == 'redis' else _from_db)(inet_aton(ip))
def record_by_request(request):
return record_by_ip(get_ip(request))
def record_by_ip_as_dict(ip):
return dict(zip(_RECORDS_KEYS, record_by_ip(ip)))
def record_by_request_as_dict(request):
return dict(zip(_RECORDS_KEYS, record_by_ip(get_ip(request))))
|
gotlium/django-geoip-redis
|
geoip/geo.py
|
Python
|
gpl-3.0
| 1,723 | 0 |
# -*- coding: utf-8 -*-
"""Tests that use cross-checks for generic methods
Should be easy to check consistency across models
Does not cover tsa
Initial cases copied from test_shrink_pickle
Created on Wed Oct 30 14:01:27 2013
Author: Josef Perktold
"""
from statsmodels.compat.python import range
import numpy as np
import statsmodels.api as sm
from statsmodels.compat.scipy import NumpyVersion
from numpy.testing import assert_, assert_allclose
from nose import SkipTest
import platform
iswin = platform.system() == 'Windows'
npversionless15 = NumpyVersion(np.__version__) < '1.5.0'
winoldnp = iswin & npversionless15
class CheckGenericMixin(object):
def __init__(self):
self.predict_kwds = {}
@classmethod
def setup_class(self):
nobs = 500
np.random.seed(987689)
x = np.random.randn(nobs, 3)
x = sm.add_constant(x)
self.exog = x
self.xf = 0.25 * np.ones((2, 4))
def test_ttest_tvalues(self):
# test that t_test has same results a params, bse, tvalues, ...
res = self.results
mat = np.eye(len(res.params))
tt = res.t_test(mat)
assert_allclose(tt.effect, res.params, rtol=1e-12)
# TODO: tt.sd and tt.tvalue are 2d also for single regressor, squeeze
assert_allclose(np.squeeze(tt.sd), res.bse, rtol=1e-10)
assert_allclose(np.squeeze(tt.tvalue), res.tvalues, rtol=1e-12)
assert_allclose(tt.pvalue, res.pvalues, rtol=5e-10)
assert_allclose(tt.conf_int(), res.conf_int(), rtol=1e-10)
# test params table frame returned by t_test
table_res = np.column_stack((res.params, res.bse, res.tvalues,
res.pvalues, res.conf_int()))
table1 = np.column_stack((tt.effect, tt.sd, tt.tvalue, tt.pvalue,
tt.conf_int()))
table2 = tt.summary_frame().values
assert_allclose(table2, table_res, rtol=1e-12)
# move this to test_attributes ?
assert_(hasattr(res, 'use_t'))
tt = res.t_test(mat[0])
tt.summary() # smoke test for #1323
assert_allclose(tt.pvalue, res.pvalues[0], rtol=5e-10)
def test_ftest_pvalues(self):
res = self.results
use_t = res.use_t
k_vars = len(res.params)
# check default use_t
pvals = [res.wald_test(np.eye(k_vars)[k], use_f=use_t).pvalue
for k in range(k_vars)]
assert_allclose(pvals, res.pvalues, rtol=5e-10, atol=1e-25)
# sutomatic use_f based on results class use_t
pvals = [res.wald_test(np.eye(k_vars)[k]).pvalue
for k in range(k_vars)]
assert_allclose(pvals, res.pvalues, rtol=5e-10, atol=1e-25)
# label for pvalues in summary
string_use_t = 'P>|z|' if use_t is False else 'P>|t|'
summ = str(res.summary())
assert_(string_use_t in summ)
# try except for models that don't have summary2
try:
summ2 = str(res.summary2())
except AttributeError:
summ2 = None
if summ2 is not None:
assert_(string_use_t in summ2)
# TODO The following is not (yet) guaranteed across models
#@knownfailureif(True)
def test_fitted(self):
# ignore wrapper for isinstance check
from statsmodels.genmod.generalized_linear_model import GLMResults
from statsmodels.discrete.discrete_model import DiscreteResults
# FIXME: work around GEE has no wrapper
if hasattr(self.results, '_results'):
results = self.results._results
else:
results = self.results
if (isinstance(results, GLMResults) or
isinstance(results, DiscreteResults)):
raise SkipTest
res = self.results
fitted = res.fittedvalues
assert_allclose(res.model.endog - fitted, res.resid, rtol=1e-12)
assert_allclose(fitted, res.predict(), rtol=1e-12)
def test_predict_types(self):
res = self.results
# squeeze to make 1d for single regressor test case
p_exog = np.squeeze(np.asarray(res.model.exog[:2]))
# ignore wrapper for isinstance check
from statsmodels.genmod.generalized_linear_model import GLMResults
from statsmodels.discrete.discrete_model import DiscreteResults
# FIXME: work around GEE has no wrapper
if hasattr(self.results, '_results'):
results = self.results._results
else:
results = self.results
if (isinstance(results, GLMResults) or
isinstance(results, DiscreteResults)):
# SMOKE test only TODO
res.predict(p_exog)
res.predict(p_exog.tolist())
res.predict(p_exog[0].tolist())
else:
fitted = res.fittedvalues[:2]
assert_allclose(fitted, res.predict(p_exog), rtol=1e-12)
# this needs reshape to column-vector:
assert_allclose(fitted, res.predict(np.squeeze(p_exog).tolist()),
rtol=1e-12)
# only one prediction:
assert_allclose(fitted[:1], res.predict(p_exog[0].tolist()),
rtol=1e-12)
assert_allclose(fitted[:1], res.predict(p_exog[0]),
rtol=1e-12)
# predict doesn't preserve DataFrame, e.g. dot converts to ndarray
# import pandas
# predicted = res.predict(pandas.DataFrame(p_exog))
# assert_(isinstance(predicted, pandas.DataFrame))
# assert_allclose(predicted, fitted, rtol=1e-12)
######### subclasses for individual models, unchanged from test_shrink_pickle
# TODO: check if setup_class is faster than setup
class TestGenericOLS(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y = x.sum(1) + np.random.randn(x.shape[0])
self.results = sm.OLS(y, self.exog).fit()
class TestGenericOLSOneExog(CheckGenericMixin):
# check with single regressor (no constant)
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog[:, 1]
np.random.seed(987689)
y = x + np.random.randn(x.shape[0])
self.results = sm.OLS(y, x).fit()
class TestGenericWLS(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y = x.sum(1) + np.random.randn(x.shape[0])
self.results = sm.WLS(y, self.exog, weights=np.ones(len(y))).fit()
class TestGenericPoisson(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y_count = np.random.poisson(np.exp(x.sum(1) - x.mean()))
model = sm.Poisson(y_count, x) #, exposure=np.ones(nobs), offset=np.zeros(nobs)) #bug with default
# use start_params to converge faster
start_params = np.array([0.75334818, 0.99425553, 1.00494724, 1.00247112])
self.results = model.fit(start_params=start_params, method='bfgs',
disp=0)
#TODO: temporary, fixed in master
self.predict_kwds = dict(exposure=1, offset=0)
class TestGenericNegativeBinomial(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
np.random.seed(987689)
data = sm.datasets.randhie.load()
exog = sm.add_constant(data.exog, prepend=False)
mod = sm.NegativeBinomial(data.endog, data.exog)
start_params = np.array([-0.0565406 , -0.21213599, 0.08783076,
-0.02991835, 0.22901974, 0.0621026,
0.06799283, 0.08406688, 0.18530969,
1.36645452])
self.results = mod.fit(start_params=start_params, disp=0)
class TestGenericLogit(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
nobs = x.shape[0]
np.random.seed(987689)
y_bin = (np.random.rand(nobs) < 1.0 / (1 + np.exp(x.sum(1) - x.mean()))).astype(int)
model = sm.Logit(y_bin, x) #, exposure=np.ones(nobs), offset=np.zeros(nobs)) #bug with default
# use start_params to converge faster
start_params = np.array([-0.73403806, -1.00901514, -0.97754543, -0.95648212])
self.results = model.fit(start_params=start_params, method='bfgs', disp=0)
class TestGenericRLM(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y = x.sum(1) + np.random.randn(x.shape[0])
self.results = sm.RLM(y, self.exog).fit()
class TestGenericGLM(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y = x.sum(1) + np.random.randn(x.shape[0])
self.results = sm.GLM(y, self.exog).fit()
class TestGenericGEEPoisson(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
y_count = np.random.poisson(np.exp(x.sum(1) - x.mean()))
groups = np.random.randint(0, 4, size=x.shape[0])
# use start_params to speed up test, difficult convergence not tested
start_params = np.array([0., 1., 1., 1.])
# no sm. import
# vi = sm.dependence_structures.Independence()
from statsmodels.genmod.dependence_structures import Independence
vi = Independence()
family = sm.families.Poisson()
self.results = sm.GEE(y_count, self.exog, groups, family=family,
cov_struct=vi).fit(start_params=start_params)
class TestGenericGEEPoissonNaive(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
#y_count = np.random.poisson(np.exp(x.sum(1) - x.mean()))
y_count = np.random.poisson(np.exp(x.sum(1) - x.sum(1).mean(0)))
groups = np.random.randint(0, 4, size=x.shape[0])
# use start_params to speed up test, difficult convergence not tested
start_params = np.array([0., 1., 1., 1.])
# no sm. import
# vi = sm.dependence_structures.Independence()
from statsmodels.genmod.dependence_structures import Independence
vi = Independence()
family = sm.families.Poisson()
self.results = sm.GEE(y_count, self.exog, groups, family=family,
cov_struct=vi).fit(start_params=start_params,
cov_type='naive')
class TestGenericGEEPoissonBC(CheckGenericMixin):
def setup(self):
#fit for each test, because results will be changed by test
x = self.exog
np.random.seed(987689)
#y_count = np.random.poisson(np.exp(x.sum(1) - x.mean()))
y_count = np.random.poisson(np.exp(x.sum(1) - x.sum(1).mean(0)))
groups = np.random.randint(0, 4, size=x.shape[0])
# use start_params to speed up test, difficult convergence not tested
start_params = np.array([0., 1., 1., 1.])
# params_est = np.array([-0.0063238 , 0.99463752, 1.02790201, 0.98080081])
# no sm. import
# vi = sm.dependence_structures.Independence()
from statsmodels.genmod.dependence_structures import Independence
vi = Independence()
family = sm.families.Poisson()
mod = sm.GEE(y_count, self.exog, groups, family=family, cov_struct=vi)
self.results = mod.fit(start_params=start_params,
cov_type='bias_reduced')
if __name__ == '__main__':
pass
|
rgommers/statsmodels
|
statsmodels/base/tests/test_generic_methods.py
|
Python
|
bsd-3-clause
| 12,158 | 0.003208 |
#!/usr/bin/env python
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Replays web pages under simulated network conditions.
Must be run as administrator (sudo).
To record web pages:
1. Start the program in record mode.
$ sudo ./replay.py --record archive.wpr
2. Load the web pages you want to record in a web browser. It is important to
clear browser caches before this so that all subresources are requested
from the network.
3. Kill the process to stop recording.
To replay web pages:
1. Start the program in replay mode with a previously recorded archive.
$ sudo ./replay.py archive.wpr
2. Load recorded pages in a web browser. A 404 will be served for any pages or
resources not in the recorded archive.
Network simulation examples:
# 128KByte/s uplink bandwidth, 4Mbps/s downlink bandwidth with 100ms RTT time
$ sudo ./replay.py --up 128KByte/s --down 4Mbit/s --delay_ms=100 archive.wpr
# 1% packet loss rate
$ sudo ./replay.py --packet_loss_rate=0.01 archive.wpr
"""
import logging
import optparse
import os
import sys
import traceback
import cachemissarchive
import customhandlers
import dnsproxy
import httparchive
import httpclient
import httpproxy
import platformsettings
import replayspdyserver
import servermanager
import trafficshaper
if sys.version < '2.6':
print 'Need Python 2.6 or greater.'
sys.exit(1)
def configure_logging(log_level_name, log_file_name=None):
"""Configure logging level and format.
Args:
log_level_name: 'debug', 'info', 'warning', 'error', or 'critical'.
log_file_name: a file name
"""
if logging.root.handlers:
logging.critical('A logging method (e.g. "logging.warn(...)")'
' was called before logging was configured.')
log_level = getattr(logging, log_level_name.upper())
log_format = '%(asctime)s %(levelname)s %(message)s'
logging.basicConfig(level=log_level, format=log_format)
logger = logging.getLogger()
if log_file_name:
fh = logging.FileHandler(log_file_name)
fh.setLevel(log_level)
fh.setFormatter(logging.Formatter(log_format))
logger.addHandler(fh)
system_handler = platformsettings.get_system_logging_handler()
if system_handler:
logger.addHandler(system_handler)
def AddDnsForward(server_manager, host):
"""Forward DNS traffic."""
server_manager.Append(platformsettings.set_temporary_primary_nameserver, host)
def AddDnsProxy(server_manager, options, host, real_dns_lookup, http_archive):
dns_filters = []
if options.dns_private_passthrough:
private_filter = dnsproxy.PrivateIpFilter(real_dns_lookup, http_archive)
dns_filters.append(private_filter)
server_manager.AppendRecordCallback(private_filter.InitializeArchiveHosts)
server_manager.AppendReplayCallback(private_filter.InitializeArchiveHosts)
if options.shaping_dns:
delay_filter = dnsproxy.DelayFilter(options.record, **options.shaping_dns)
dns_filters.append(delay_filter)
server_manager.AppendRecordCallback(delay_filter.SetRecordMode)
server_manager.AppendReplayCallback(delay_filter.SetReplayMode)
server_manager.Append(dnsproxy.DnsProxyServer, host,
dns_lookup=dnsproxy.ReplayDnsLookup(host, dns_filters))
def AddWebProxy(server_manager, options, host, real_dns_lookup, http_archive,
cache_misses):
inject_script = httpclient.GetInjectScript(options.inject_scripts)
custom_handlers = customhandlers.CustomHandlers(options.screenshot_dir)
if options.spdy:
assert not options.record, 'spdy cannot be used with --record.'
archive_fetch = httpclient.ReplayHttpArchiveFetch(
http_archive,
inject_script,
options.diff_unknown_requests,
cache_misses=cache_misses,
use_closest_match=options.use_closest_match)
server_manager.Append(
replayspdyserver.ReplaySpdyServer, archive_fetch,
custom_handlers, host=host, port=options.port,
certfile=options.certfile)
else:
custom_handlers.add_server_manager_handler(server_manager)
archive_fetch = httpclient.ControllableHttpArchiveFetch(
http_archive, real_dns_lookup,
inject_script,
options.diff_unknown_requests, options.record,
cache_misses=cache_misses, use_closest_match=options.use_closest_match)
server_manager.AppendRecordCallback(archive_fetch.SetRecordMode)
server_manager.AppendReplayCallback(archive_fetch.SetReplayMode)
server_manager.Append(
httpproxy.HttpProxyServer,
archive_fetch, custom_handlers,
host=host, port=options.port, **options.shaping_http)
if options.ssl:
server_manager.Append(
httpproxy.HttpsProxyServer,
archive_fetch, custom_handlers, options.certfile,
host=host, port=options.ssl_port, **options.shaping_http)
def AddTrafficShaper(server_manager, options, host):
if options.shaping_dummynet:
ssl_port = options.ssl_shaping_port if options.ssl else None
kwargs = dict(
host=host, port=options.shaping_port, ssl_port=ssl_port,
use_loopback=not options.server_mode, **options.shaping_dummynet)
if not options.dns_forwarding:
kwargs['dns_port'] = None
server_manager.Append(trafficshaper.TrafficShaper, **kwargs)
class OptionsWrapper(object):
"""Add checks, updates, and methods to option values.
Example:
options, args = option_parser.parse_args()
options = OptionsWrapper(options, option_parser) # run checks and updates
if options.record and options.HasTrafficShaping():
[...]
"""
_TRAFFICSHAPING_OPTIONS = set(
['down', 'up', 'delay_ms', 'packet_loss_rate', 'init_cwnd', 'net'])
_CONFLICTING_OPTIONS = (
('record', ('down', 'up', 'delay_ms', 'packet_loss_rate', 'net',
'spdy', 'use_server_delay')),
('append', ('down', 'up', 'delay_ms', 'packet_loss_rate', 'net',
'spdy', 'use_server_delay')), # same as --record
('net', ('down', 'up', 'delay_ms')),
('server', ('server_mode',)),
)
# The --net values come from http://www.webpagetest.org/.
# https://sites.google.com/a/webpagetest.org/docs/other-resources/2011-fcc-broadband-data
_NET_CONFIGS = (
# key --down --up --delay_ms
('dsl', ('1536Kbit/s', '384Kbit/s', '50')),
('cable', ( '5Mbit/s', '1Mbit/s', '28')),
('fios', ( '20Mbit/s', '5Mbit/s', '4')),
)
NET_CHOICES = [key for key, values in _NET_CONFIGS]
def __init__(self, options, parser):
self._options = options
self._parser = parser
self._nondefaults = set([
name for name, value in parser.defaults.items()
if getattr(options, name) != value])
self._CheckConflicts()
self._MassageValues()
def _CheckConflicts(self):
"""Give an error if mutually exclusive options are used."""
for option, bad_options in self._CONFLICTING_OPTIONS:
if option in self._nondefaults:
for bad_option in bad_options:
if bad_option in self._nondefaults:
self._parser.error('Option --%s cannot be used with --%s.' %
(bad_option, option))
def _ShapingKeywordArgs(self, shaping_key):
"""Return the shaping keyword args for |shaping_key|.
Args:
shaping_key: one of 'dummynet', 'dns', 'http'.
Returns:
{} # if shaping_key does not apply, or options have default values.
{k: v, ...}
"""
kwargs = {}
def AddItemIfSet(d, kw_key, opt_key=None):
opt_key = opt_key or kw_key
if opt_key in self._nondefaults:
d[kw_key] = getattr(self, opt_key)
if ((self.shaping_type == 'proxy' and shaping_key in ('dns', 'http')) or
self.shaping_type == shaping_key):
AddItemIfSet(kwargs, 'delay_ms')
if shaping_key in ('dummynet', 'http'):
AddItemIfSet(kwargs, 'down_bandwidth', opt_key='down')
AddItemIfSet(kwargs, 'up_bandwidth', opt_key='up')
if shaping_key == 'dummynet':
AddItemIfSet(kwargs, 'packet_loss_rate')
AddItemIfSet(kwargs, 'init_cwnd')
elif self.shaping_type != 'none':
if 'packet_loss_rate' in self._nondefaults:
logging.warn('Shaping type, %s, ignores --packet_loss_rate=%s',
self.shaping_type, self.packet_loss_rate)
if 'init_cwnd' in self._nondefaults:
logging.warn('Shaping type, %s, ignores --init_cwnd=%s',
self.shaping_type, self.init_cwnd)
return kwargs
def _MassageValues(self):
"""Set options that depend on the values of other options."""
if self.append and not self.record:
self._options.record = True
for net_choice, values in self._NET_CONFIGS:
if net_choice == self.net:
self._options.down, self._options.up, self._options.delay_ms = values
self._nondefaults.update(['down', 'up', 'delay_ms'])
if not self.shaping_port:
self._options.shaping_port = self.port
if not self.ssl_shaping_port:
self._options.ssl_shaping_port = self.ssl_port
if not self.ssl:
self._options.certfile = None
self.shaping_dns = self._ShapingKeywordArgs('dns')
self.shaping_http = self._ShapingKeywordArgs('http')
self.shaping_dummynet = self._ShapingKeywordArgs('dummynet')
def __getattr__(self, name):
"""Make the original option values available."""
return getattr(self._options, name)
def IsRootRequired(self):
"""Returns True iff the options require root access."""
return (self.shaping_dummynet or
self.dns_forwarding or
self.port < 1024 or
self.ssl_port < 1024) and self.admin_check
def replay(options, replay_filename):
if options.IsRootRequired():
platformsettings.rerun_as_administrator()
configure_logging(options.log_level, options.log_file)
server_manager = servermanager.ServerManager(options.record)
cache_misses = None
if options.cache_miss_file:
if os.path.exists(options.cache_miss_file):
logging.warning('Cache Miss Archive file %s already exists; '
'replay will load and append entries to archive file',
options.cache_miss_file)
cache_misses = cachemissarchive.CacheMissArchive.Load(
options.cache_miss_file)
else:
cache_misses = cachemissarchive.CacheMissArchive(
options.cache_miss_file)
if options.server:
AddDnsForward(server_manager, options.server)
else:
host = platformsettings.get_server_ip_address(options.server_mode)
real_dns_lookup = dnsproxy.RealDnsLookup(
name_servers=[platformsettings.get_original_primary_nameserver()])
if options.record:
httparchive.HttpArchive.AssertWritable(replay_filename)
if options.append and os.path.exists(replay_filename):
http_archive = httparchive.HttpArchive.Load(replay_filename)
logging.info('Appending to %s (loaded %d existing responses)',
replay_filename, len(http_archive))
else:
http_archive = httparchive.HttpArchive()
else:
http_archive = httparchive.HttpArchive.Load(replay_filename)
logging.info('Loaded %d responses from %s',
len(http_archive), replay_filename)
server_manager.AppendRecordCallback(real_dns_lookup.ClearCache)
server_manager.AppendRecordCallback(http_archive.clear)
if options.dns_forwarding:
if not options.server_mode:
AddDnsForward(server_manager, host)
AddDnsProxy(server_manager, options, host, real_dns_lookup, http_archive)
if options.ssl and options.certfile is None:
options.certfile = os.path.join(os.path.dirname(__file__), 'wpr_cert.pem')
AddWebProxy(server_manager, options, host, real_dns_lookup,
http_archive, cache_misses)
AddTrafficShaper(server_manager, options, host)
exit_status = 0
try:
server_manager.Run()
except KeyboardInterrupt:
logging.info('Shutting down.')
except (dnsproxy.DnsProxyException,
trafficshaper.TrafficShaperException,
platformsettings.NotAdministratorError,
platformsettings.DnsUpdateError) as e:
logging.critical('%s: %s', e.__class__.__name__, e)
exit_status = 1
except:
logging.critical(traceback.format_exc())
exit_status = 2
if options.record:
http_archive.Persist(replay_filename)
logging.info('Saved %d responses to %s', len(http_archive), replay_filename)
if cache_misses:
cache_misses.Persist()
logging.info('Saved %d cache misses and %d requests to %s',
cache_misses.get_total_cache_misses(),
len(cache_misses.request_counts.keys()),
options.cache_miss_file)
return exit_status
def GetOptionParser():
class PlainHelpFormatter(optparse.IndentedHelpFormatter):
def format_description(self, description):
if description:
return description + '\n'
else:
return ''
option_parser = optparse.OptionParser(
usage='%prog [options] replay_file',
formatter=PlainHelpFormatter(),
description=__doc__,
epilog='http://code.google.com/p/web-page-replay/')
option_parser.add_option('--spdy', default=False,
action='store_true',
help='Replay via SPDY. (Can be combined with --no-ssl).')
option_parser.add_option('-r', '--record', default=False,
action='store_true',
help='Download real responses and record them to replay_file')
option_parser.add_option('--append', default=False,
action='store_true',
help='Append responses to replay_file.')
option_parser.add_option('-l', '--log_level', default='debug',
action='store',
type='choice',
choices=('debug', 'info', 'warning', 'error', 'critical'),
help='Minimum verbosity level to log')
option_parser.add_option('-f', '--log_file', default=None,
action='store',
type='string',
help='Log file to use in addition to writting logs to stderr.')
option_parser.add_option('-e', '--cache_miss_file', default=None,
action='store',
dest='cache_miss_file',
type='string',
help='Archive file to record cache misses as pickled objects.'
'Cache misses occur when a request cannot be served in replay mode.')
network_group = optparse.OptionGroup(option_parser,
'Network Simulation Options',
'These options configure the network simulation in replay mode')
network_group.add_option('-u', '--up', default='0',
action='store',
type='string',
help='Upload Bandwidth in [K|M]{bit/s|Byte/s}. Zero means unlimited.')
network_group.add_option('-d', '--down', default='0',
action='store',
type='string',
help='Download Bandwidth in [K|M]{bit/s|Byte/s}. Zero means unlimited.')
network_group.add_option('-m', '--delay_ms', default='0',
action='store',
type='string',
help='Propagation delay (latency) in milliseconds. Zero means no delay.')
network_group.add_option('-p', '--packet_loss_rate', default='0',
action='store',
type='string',
help='Packet loss rate in range [0..1]. Zero means no loss.')
network_group.add_option('-w', '--init_cwnd', default='0',
action='store',
type='string',
help='Set initial cwnd (linux only, requires kernel patch)')
network_group.add_option('--net', default=None,
action='store',
type='choice',
choices=OptionsWrapper.NET_CHOICES,
help='Select a set of network options: %s.' % ', '.join(
OptionsWrapper.NET_CHOICES))
network_group.add_option('--shaping_type', default='dummynet',
action='store',
choices=('dummynet', 'proxy'),
help='When shaping is configured (i.e. --up, --down, etc.) decides '
'whether to use |dummynet| (default), or |proxy| servers.')
option_parser.add_option_group(network_group)
harness_group = optparse.OptionGroup(option_parser,
'Replay Harness Options',
'These advanced options configure various aspects of the replay harness')
harness_group.add_option('-S', '--server', default=None,
action='store',
type='string',
help='IP address of host running "replay.py --server_mode". '
'This only changes the primary DNS nameserver to use the given IP.')
harness_group.add_option('-M', '--server_mode', default=False,
action='store_true',
help='Run replay DNS & http proxies, and trafficshaping on --port '
'without changing the primary DNS nameserver. '
'Other hosts may connect to this using "replay.py --server" '
'or by pointing their DNS to this server.')
harness_group.add_option('-i', '--inject_scripts', default='deterministic.js',
action='store',
dest='inject_scripts',
help='A comma separated list of JavaScript sources to inject in all '
'pages. By default a script is injected that eliminates sources '
'of entropy such as Date() and Math.random() deterministic. '
'CAUTION: Without deterministic.js, many pages will not replay.')
harness_group.add_option('-D', '--no-diff_unknown_requests', default=True,
action='store_false',
dest='diff_unknown_requests',
help='During replay, do not show a diff of unknown requests against '
'their nearest match in the archive.')
harness_group.add_option('-C', '--use_closest_match', default=False,
action='store_true',
dest='use_closest_match',
help='During replay, if a request is not found, serve the closest match'
'in the archive instead of giving a 404.')
harness_group.add_option('-U', '--use_server_delay', default=False,
action='store_true',
dest='use_server_delay',
help='During replay, simulate server delay by delaying response time to'
'requests.')
harness_group.add_option('-I', '--screenshot_dir', default=None,
action='store',
type='string',
help='Save PNG images of the loaded page in the given directory.')
harness_group.add_option('-P', '--no-dns_private_passthrough', default=True,
action='store_false',
dest='dns_private_passthrough',
help='Don\'t forward DNS requests that resolve to private network '
'addresses. CAUTION: With this option important services like '
'Kerberos will resolve to the HTTP proxy address.')
harness_group.add_option('-x', '--no-dns_forwarding', default=True,
action='store_false',
dest='dns_forwarding',
help='Don\'t forward DNS requests to the local replay server. '
'CAUTION: With this option an external mechanism must be used to '
'forward traffic to the replay server.')
harness_group.add_option('-o', '--port', default=80,
action='store',
type='int',
help='Port number to listen on.')
harness_group.add_option('--ssl_port', default=443,
action='store',
type='int',
help='SSL port number to listen on.')
harness_group.add_option('--shaping_port', default=None,
action='store',
type='int',
help='Port on which to apply traffic shaping. Defaults to the '
'listen port (--port)')
harness_group.add_option('--ssl_shaping_port', default=None,
action='store',
type='int',
help='SSL port on which to apply traffic shaping. Defaults to the '
'SSL listen port (--ssl_port)')
harness_group.add_option('-c', '--certfile', default=None,
action='store',
type='string',
help='Certificate file to use with SSL (gets auto-generated if needed).')
harness_group.add_option('--no-ssl', default=True,
action='store_false',
dest='ssl',
help='Do not setup an SSL proxy.')
option_parser.add_option_group(harness_group)
harness_group.add_option('--no-admin-check', default=True,
action='store_false',
dest='admin_check',
help='Do not check if administrator access is needed.')
return option_parser
def main():
option_parser = GetOptionParser()
options, args = option_parser.parse_args()
options = OptionsWrapper(options, option_parser)
if options.server:
replay_filename = None
elif len(args) != 1:
option_parser.error('Must specify a replay_file')
else:
replay_filename = args[0]
return replay(options, replay_filename)
if __name__ == '__main__':
sys.exit(main())
|
leighpauls/k2cro4
|
third_party/webpagereplay/replay.py
|
Python
|
bsd-3-clause
| 20,951 | 0.011503 |
from RemoteFlSys import *
class gfeReader:#GetFilesExts Reader
def __init__(self, Reader, Params, AddInfo, OneTime = True):
self.Params = Params
self.Reader = Reader
self.AddInfo = AddInfo
self.Once = OneTime
class fsDirInf:
def __init__(self, SuperDir, Name, DictFlInfs, DictDirs, CaseSens = True):
self.SuperDir = SuperDir
self.Name = Name
self.FlInfs = DictFlInfs
self.Dirs = DictDirs
self.MatchCase = CaseSens
self.GfeCached = False
if not CaseSens:
Name = Name.lower()
if self.SuperDir != None:
self.SuperDir.AddDir(Name, self)
def AddDir(self, Name, AddMe):#Done
if not self.MatchCase:
Name = Name.lower()
self.Dirs[Name] = AddMe
def GetFlInf(self, fName):#Done
if not self.MatchCase:
fName = fName.lower()
Pos = fName.find("/", 1)
if Pos > 0:
return self.Dirs[fName[1:Pos]].GetFlInf(fName[Pos:])
else: return self.FlInfs[fName[1:]][1]
def SetFlInf(self, fName, Data, MkDirs = True):#Done
CmpName = fName
if not self.MatchCase:
CmpName = fName.lower()
Pos = fName.find("/", 1)
if Pos > 0:
if MkDirs and not self.Dirs.has_key(CmpName[1:Pos]):
self.Dirs[CmpName[1:Pos]] = fsDirInf(self, fName[1:Pos], dict(), dict(), self.MatchCase)
self.Dirs[CmpName[1:Pos]].SetFlInf(fName[Pos:], Data, MkDirs)
else: self.FlInfs[CmpName[1:]] = fName[1:], Data
def GetDir(self, Path):#Done
if not self.MatchCase: Path = Path.lower()
Pos = Path.find("/", 1)
if Pos > 0:
return self.Dirs[Path[1:Pos]].GetDir(Path[Pos:])
else: return self.Dirs[Path[1:]]
def MkInfCached(self):
self.GfeCached = True
for Dir in self.Dirs.values():
Dir.MkInfCached()
def SetFlInfLst(self, LstFlDat, InfCached = False, MkDirs = True):#Done
CurDir = self
CurDirPath = ""
for fName, Data in LstFlDat:
if fName[0] != '/': fName = "/" + fName
CmpName = fName
if not self.MatchCase:
CmpName = fName.lower()
Pos = CmpName.find(CurDirPath)
while Pos < 0:
Pos = CurDirPath.find("/")
CurDirPath = CurDirPath[0:Pos]
Pos = CmpName.find(CurDirPath)
CurDirPath = CmpName[0:Pos + len(CurDirPath)]
if len(CurDirPath) != 0:
fName = fName[Pos + len(CurDirPath):]
CurDir = self.GetDir(CurDirPath)
else: CurDir = self
CurDir.SetFlInf(fName, Data, MkDirs)
if InfCached: self.MkInfCached()
def GfeInfCache(self, LstExts, BegDots, Prepend = ""):
Rtn = list()
for fName in self.FlInfs:
FlInf = self.FlInfs[fName]
if fName[fName.rfind('.') + 1:] in LstExts:
Rtn.append((Prepend + FlInf[0], FlInf[1]))
for Dir in self.Dirs.values():
Rtn.extend(Dir.GfeInfCache(LstExts, BegDots, Prepend + Dir.Name + "/"))
return Rtn
def GfeCheckCache(self, Func, Cmd, Path, LstExts, BegDots = False):
TheDir = None
Rtn = None
try:
TheDir = self.GetDir(Path)
Rtn = TheDir.GfeInfCache(LstExts, BegDots)
except:
Rtn = Func(Cmd, Path, LstExts, BegDots)
TmpRtn = [0] * len(Rtn)
if len(Rtn) > 0 and Rtn[0][0][0] != '/':
Path += "/"
for c in xrange(len(Rtn)):
TmpRtn[c] = (Path + Rtn[c][0], Rtn[c][1])
self.SetFlInfLst(TmpRtn, True)
return Rtn
DRV_TYPE_REAL = 0
DRV_TYPE_RFS = 1
class FileDrv:#InfoGetters: Reader takes filename
AllDrv = dict()
CurDir = os.getcwd()
def __init__(self, DrvName, PrependName):
FileDrv.AllDrv[DrvName] = self
self.Name = PrependName
self.InfoGetters = dict()
self.InfCache = dict()
self.SnglReaders = dict()
self.Type = DRV_TYPE_REAL
def Open(self, fName, Mode):
return open(self.Name + fName, Mode)
def NativeOpen(self, fName, Mode):
return File(self.Name + fName, Mode)
def ListDir(self, Path):
return os.listdir(self.Name + Path)
def IsDir(self, Path):
return os.path.isdir(self.Name + Path)
def Exists(self, Path):
return os.path.exists(self.Name + Path)
def IsFile(self, Path):
return os.path.isfile(self.Name + Path)
def GetFilesExts(self, Path, LstExts, Invert = False, RtnBegDots = False):
for c in xrange(len(LstExts)):
LstExts[c] = LstExts[c].lower()
CurPath = self.Name + Path + "/"
Next = [""]
Cur = []
Rtn = list()
while len(Next) > 0:
Cur = Next
Next = list()
for TestPath in Cur:
LstPaths = list()
try:
LstPaths = os.listdir(CurPath + TestPath)
except WindowsError:
continue
for PathName in LstPaths:
Add = TestPath + PathName
if os.path.isdir(CurPath + Add):
Next.append(Add + "/")
elif not RtnBegDots and PathName[0] == '.':
continue
else:
Pos = PathName.rfind('.')
if Pos < 0 and (Invert ^ ("" in LstExts)): Rtn.append(Add)
elif Pos >= 0 and (Invert ^ (PathName[Pos + 1:].lower() in LstExts)):
Rtn.append(Add)
return Rtn
def UseGfeReader(self, Cmd, Path, LstExts = None, BegDots = False):
CurReader = self.InfoGetters[Cmd]
if LstExts == None: LstExts = CurReader.Params[0]
LstFls = self.GetFilesExts(Path, list(LstExts), CurReader.Params[1], BegDots)
Rtn = [None] * len(LstFls)
Prepend = self.Name + Path + "/"
for c in xrange(len(LstFls)):
Rtn[c] = LstFls[c], CurReader.Reader(Prepend + LstFls[c])
return Rtn
def GetInfSingle(self, Cmd, fName):
Rtn = self.SnglReader[Cmd](fName)
self.InfCache[Cmd].SetFlInf(fName, Rtn)
return Rtn
if os.name == "nt":
for c in xrange(26):
CurDrv = chr(ord('A') + c)
if os.path.isdir(CurDrv + ":"):
FileDrv(CurDrv, CurDrv + ":")
elif os.name == "posix":
FileDrv("C", "")
def EvalPath(Path):
Pos = Path.find(":")
if Pos == -1:
Path = CurDir + "/" + Path
Pos = Path.find(":")
return Path[0:Pos], Path[Pos + 1:]
def OpenFile(fName, Mode):
DrvName, fName = EvalPath(fName)
return FileDrv.AllDrv[DrvName].Open(fName, Mode)
def NativeOpenFile(fName, Mode):
DrvName, fName = EvalPath(fName)
return FileDrv.AllDrv[DrvName].NativeOpen(fName, Mode)
def ListDir(Path):
if Path == "": return [OutPath + ":" for OutPath in FileDrv.AllDrv.keys()]
DrvName, Path = EvalPath(Path)
return FileDrv.AllDrv[DrvName].ListDir(Path)
def Exists(Path):
DrvName, Path = EvalPath(Path)
if not FileDrv.AllDrv.has_key(DrvName): return False
return FileDrv.AllDrv[DrvName].Exists(Path)
def IsDir(Path):
DrvName, Path = EvalPath(Path)
if not FileDrv.AllDrv.has_key(DrvName): return False
return FileDrv.AllDrv[DrvName].IsDir(Path)
def IsFile(Path):
DrvName, Path = EvalPath(Path)
if not FileDrv.AllDrv.has_key(DrvName): return False
return FileDrv.AllDrv[DrvName].IsFile(Path)
def GetFilesExts(Path, LstExt, Invert = False, RtnBegDots = False):
DrvName, Path = EvalPath(Path)
return FileDrv.AllDrv[DrvName].GetFilesExts(Path, LstExt, Invert, RtnBegDots)
def GetInfGfe(Cmd, Path, LstExts = None, BegDots = False):#Warning BegDots functionality is in question
print Path
DrvName, Path = EvalPath(Path)
Drv = FileDrv.AllDrv[DrvName]
return Drv.InfCache[Cmd].GfeCheckCache(Drv.UseGfeReader, Cmd, Path, LstExts, BegDots)
def GetInfFile(Cmd, Path):#TODO: Execute specified callback to retreive info if info is not cached like with the gfe readers
DrvName, Path = EvalPath(Path)
Drv = FileDrv.AllDrv[DrvName]
try:
return Drv.InfCache[Cmd].GetFlInf(Path)
except:
return Drv.GetInfSingle(Cmd, Path)
class TxtUsrIface:
def __init__(self, Ostream, Istream, Err):
self.Os = Ostream
self.Is = Istream
self.Err = Err
def getpass(self, Msg):
self.Os.write(Msg)
return self.Is.readline().replace("\n", "")
def get(self, Msg):
self.Os.write(Msg)
return self.Is.readline().replace("\n", "")
def out(self, Msg):
self.Os.write(Msg + "\n")
def success(self, Msg):
self.Os.write(Msg + "\n")
def error(self, Msg):
self.Err.write(Msg + "\n")
class RfsDrv(FileDrv):#InfoGetters have AddInfo = (Formating Sender, Is Data received encrypted), Reader takes bytes
def __init__(self, ServIface):
self.Name = ServIface.Username
self.Iface = ServIface
try:
self.FsLock = SingleMutex()
except:
self.FsLock = threading.Lock()
self.InfoGetters = dict()
self.SnglReaders = dict()
self.InfCache = dict()
self.Type = DRV_TYPE_RFS
FileDrv.AllDrv[self.Name] = self
def Open(self, fName, Mode):
self.FsLock.acquire()
Rtn = CltFile(self.Iface.Prot, fName, Mode)
self.FsLock.release()
Rtn.FsLock = self.FsLock
return Rtn
def NativeOpen(self, fName, Mode):
if not (isinstance(self.FsLock, KyleObj) or isinstance(self.Iface.Prot.Prot, KyleObj)):
raise Exception("This Drive does not support native KyleUtils Files in the current state")
return RfsFile(self.Iface.Prot.Prot, fName, Mode, self.FsLock)
def ListDir(self, Path):
with self.FsLock: return self.Iface.ListDir(Path)
def IsDir(self, Path):
with self.FsLock: return self.Iface.PathIsDir(Path)
def Exists(self, Path):
with self.FsLock: return self.Iface.PathExists(Path)
def IsFile(self, Path):
with self.FsLock: return self.Iface.PathIsFile(Path)
def GetFilesExts(self, Path, LstExts, Invert = False, RtnBegDots = False):
with self.FsLock: return self.Iface.GetFilesExts(Path, LstExts, Invert, RtnBegDots)
def UseGfeReader(self, Cmd, Path, LstExts = None, BegDots = False):
Cur = self.InfoGetters[Cmd]
if LstExts == None: LstExts = Cur.Params[0]
Cur.AddInfo[0](Iface.Prot, Path, LstExts, Cur.Params[1], BegDots)
Rtn, NoEncDat, EncDat = Iface.Prot.Recv()
if NoEncDat[0] != '\0': raise Exception(NoEncDat[1:])
CurStr = NoEncDat[1:]
if Cur.AddInfo[1]:
CurStr = EncDat
NumElem = GetLongStrBytes(CurStr[0:2])
CurStr = CurStr[2:]
RtnLst = [0] * NumElem
for c in xrange(NumElem):
Info, Pos = Cur.Reader(CurStr)
PathName, Pos = UnpackStrLen(CurStr, 2, Pos)
CurStr = CurStr[Pos:]
RtnLst[c] = PathName, Info
return RtnLst
def GetInfSingle(self, Cmd, fName):
Reader = self.SnglReaders[Cmd]
Rtn = self.Iface.UseService(Reader[0], Reader[1], [fName])
self.InfCache[Cmd].SetFlInf(fName, Rtn)
return Rtn
BaseDir = __file__.replace("\\", "/")
BaseDir = __file__[0:BaseDir.rfind("/")]
def FlSysInit(LstModules, Inet = True):
global TheTUI
global Iface
global LstFirst
Iface = None
if Inet:
Iface = GetCltReFiSyS("ReFiSyS", BaseDir + "/CltRoots.cert")
RfsDrv(GetUserIface(Iface, TheTUI, LstFirst))
for Module in LstModules:
if Inet: Iface.LinkService(Module)
ModReal = dict()
ModRfs = dict()
SnglReal = dict()
SnglRfs = dict()
for k, v in Module.RealGfeReaders.iteritems():
ModReal[k] = gfeReader(*v)
for k, v in Module.RfsGfeReaders.iteritems():
ModRfs[k] = gfeReader(*v)
for k, v in Module.RealInfGetters.iteritems():
SnglReal[k] = v
for k, v in Module.RfsInfGetters.iteritems():
SnglRfs[k] = v
for Drv in FileDrv.AllDrv.values():
if Drv.Type == DRV_TYPE_REAL:
Drv.InfoGetters.update(ModReal)
Drv.SnglReaders.update(SnglReal)
elif Drv.Type == DRV_TYPE_RFS:
Drv.InfoGetters.update(ModRfs)
Drv.SnglReaders.update(SnglRfs)
for Drv in FileDrv.AllDrv.values():
for k in Drv.InfoGetters:
Drv.InfCache[k] = fsDirInf(None, Drv.Name, dict(), dict())
def DeInit():
global Iface
if Iface != None:
Iface.Logout()
Iface.Exit()
def GetFlLen(FlObj):
if hasattr(FlObj, "GetLen"):
return FlObj.GetLen()
Prev = FlObj.tell()
FlObj.seek(0, os.SEEK_END)
Rtn = FlObj.tell()
FlObj.seek(Prev, os.SEEK_SET)
return Rtn
ProtInit()
TheTUI = TxtUsrIface(sys.stdout, sys.stdin, sys.stderr)
Iface = None
|
KyleTen2/ReFiSys
|
FlSys.py
|
Python
|
mit
| 13,483 | 0.011941 |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility for creating well-formed pull request merges and pushing them to Apache.
# usage: ./apache-pr-merge.py (see config env vars below)
#
# This utility assumes you already have local a Spark git folder and that you
# have added remotes corresponding to both (i) the github apache Spark
# mirror and (ii) the apache git repo.
import json
import os
import re
import subprocess
import sys
import urllib2
try:
import jira.client
JIRA_IMPORTED = True
except ImportError:
JIRA_IMPORTED = False
# Location of your Spark git development area
SPARK_HOME = os.environ.get("SPARK_REDSHIFT_HOME", os.getcwd())
# Remote name which points to the Gihub site
PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "origin")
# Remote name which points to Apache git
PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "origin")
# ASF JIRA username
JIRA_USERNAME = os.environ.get("JIRA_USERNAME", "")
# ASF JIRA password
JIRA_PASSWORD = os.environ.get("JIRA_PASSWORD", "")
# OAuth key used for issuing requests against the GitHub API. If this is not defined, then requests
# will be unauthenticated. You should only need to configure this if you find yourself regularly
# exceeding your IP's unauthenticated request rate limit. You can create an OAuth key at
# https://github.com/settings/tokens. This script only requires the "public_repo" scope.
GITHUB_OAUTH_KEY = os.environ.get("GITHUB_OAUTH_KEY")
GITHUB_BASE = "https://github.com/databricks/spark-redshift/pull"
GITHUB_API_BASE = "https://api.github.com/repos/databricks/spark-redshift"
JIRA_BASE = "https://issues.apache.org/jira/browse"
JIRA_API_BASE = "https://issues.apache.org/jira"
# Prefix added to temporary branches
BRANCH_PREFIX = "PR_TOOL"
def get_json(url):
try:
request = urllib2.Request(url)
if GITHUB_OAUTH_KEY:
request.add_header('Authorization', 'token %s' % GITHUB_OAUTH_KEY)
return json.load(urllib2.urlopen(request))
except urllib2.HTTPError as e:
if "X-RateLimit-Remaining" in e.headers and e.headers["X-RateLimit-Remaining"] == '0':
print "Exceeded the GitHub API rate limit; see the instructions in " + \
"dev/merge_spark_pr.py to configure an OAuth token for making authenticated " + \
"GitHub requests."
else:
print "Unable to fetch URL, exiting: %s" % url
sys.exit(-1)
def fail(msg):
print msg
clean_up()
sys.exit(-1)
def run_cmd(cmd):
print cmd
if isinstance(cmd, list):
return subprocess.check_output(cmd)
else:
return subprocess.check_output(cmd.split(" "))
def continue_maybe(prompt):
result = raw_input("\n%s (y/n): " % prompt)
if result.lower() != "y":
fail("Okay, exiting")
def clean_up():
print "Restoring head pointer to %s" % original_head
run_cmd("git checkout %s" % original_head)
branches = run_cmd("git branch").replace(" ", "").split("\n")
for branch in filter(lambda x: x.startswith(BRANCH_PREFIX), branches):
print "Deleting local branch %s" % branch
run_cmd("git branch -D %s" % branch)
# merge the requested PR and return the merge hash
def merge_pr(pr_num, target_ref, title, body, pr_repo_desc):
pr_branch_name = "%s_MERGE_PR_%s" % (BRANCH_PREFIX, pr_num)
target_branch_name = "%s_MERGE_PR_%s_%s" % (BRANCH_PREFIX, pr_num, target_ref.upper())
run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num, pr_branch_name))
run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, target_ref, target_branch_name))
run_cmd("git checkout %s" % target_branch_name)
had_conflicts = False
try:
run_cmd(['git', 'merge', pr_branch_name, '--squash'])
except Exception as e:
msg = "Error merging: %s\nWould you like to manually fix-up this merge?" % e
continue_maybe(msg)
msg = "Okay, please fix any conflicts and 'git add' conflicting files... Finished?"
continue_maybe(msg)
had_conflicts = True
commit_authors = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%an <%ae>']).split("\n")
distinct_authors = sorted(set(commit_authors),
key=lambda x: commit_authors.count(x), reverse=True)
primary_author = raw_input(
"Enter primary author in the format of \"name <email>\" [%s]: " %
distinct_authors[0])
if primary_author == "":
primary_author = distinct_authors[0]
commits = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%h [%an] %s']).split("\n\n")
merge_message_flags = []
merge_message_flags += ["-m", title]
if body is not None:
# We remove @ symbols from the body to avoid triggering e-mails
# to people every time someone creates a public fork of Spark.
merge_message_flags += ["-m", body.replace("@", "")]
authors = "\n".join(["Author: %s" % a for a in distinct_authors])
merge_message_flags += ["-m", authors]
if had_conflicts:
committer_name = run_cmd("git config --get user.name").strip()
committer_email = run_cmd("git config --get user.email").strip()
message = "This patch had conflicts when merged, resolved by\nCommitter: %s <%s>" % (
committer_name, committer_email)
merge_message_flags += ["-m", message]
# The string "Closes #%s" string is required for GitHub to correctly close the PR
merge_message_flags += ["-m", "Closes #%s from %s." % (pr_num, pr_repo_desc)]
run_cmd(['git', 'commit', '--author="%s"' % primary_author] + merge_message_flags)
continue_maybe("Merge complete (local ref %s). Push to %s?" % (
target_branch_name, PUSH_REMOTE_NAME))
try:
run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, target_branch_name, target_ref))
except Exception as e:
clean_up()
fail("Exception while pushing: %s" % e)
merge_hash = run_cmd("git rev-parse %s" % target_branch_name)[:8]
clean_up()
print("Pull request #%s merged!" % pr_num)
print("Merge hash: %s" % merge_hash)
return merge_hash
def cherry_pick(pr_num, merge_hash, default_branch):
pick_ref = raw_input("Enter a branch name [%s]: " % default_branch)
if pick_ref == "":
pick_ref = default_branch
pick_branch_name = "%s_PICK_PR_%s_%s" % (BRANCH_PREFIX, pr_num, pick_ref.upper())
run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, pick_ref, pick_branch_name))
run_cmd("git checkout %s" % pick_branch_name)
try:
run_cmd("git cherry-pick -sx %s" % merge_hash)
except Exception as e:
msg = "Error cherry-picking: %s\nWould you like to manually fix-up this merge?" % e
continue_maybe(msg)
msg = "Okay, please fix any conflicts and finish the cherry-pick. Finished?"
continue_maybe(msg)
continue_maybe("Pick complete (local ref %s). Push to %s?" % (
pick_branch_name, PUSH_REMOTE_NAME))
try:
run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, pick_branch_name, pick_ref))
except Exception as e:
clean_up()
fail("Exception while pushing: %s" % e)
pick_hash = run_cmd("git rev-parse %s" % pick_branch_name)[:8]
clean_up()
print("Pull request #%s picked into %s!" % (pr_num, pick_ref))
print("Pick hash: %s" % pick_hash)
return pick_ref
def fix_version_from_branch(branch, versions):
# Note: Assumes this is a sorted (newest->oldest) list of un-released versions
if branch == "master":
return versions[0]
else:
branch_ver = branch.replace("branch-", "")
return filter(lambda x: x.name.startswith(branch_ver), versions)[-1]
def resolve_jira_issue(merge_branches, comment, default_jira_id=""):
asf_jira = jira.client.JIRA({'server': JIRA_API_BASE},
basic_auth=(JIRA_USERNAME, JIRA_PASSWORD))
jira_id = raw_input("Enter a JIRA id [%s]: " % default_jira_id)
if jira_id == "":
jira_id = default_jira_id
try:
issue = asf_jira.issue(jira_id)
except Exception as e:
fail("ASF JIRA could not find %s\n%s" % (jira_id, e))
cur_status = issue.fields.status.name
cur_summary = issue.fields.summary
cur_assignee = issue.fields.assignee
if cur_assignee is None:
cur_assignee = "NOT ASSIGNED!!!"
else:
cur_assignee = cur_assignee.displayName
if cur_status == "Resolved" or cur_status == "Closed":
fail("JIRA issue %s already has status '%s'" % (jira_id, cur_status))
print ("=== JIRA %s ===" % jira_id)
print ("summary\t\t%s\nassignee\t%s\nstatus\t\t%s\nurl\t\t%s/%s\n" % (
cur_summary, cur_assignee, cur_status, JIRA_BASE, jira_id))
versions = asf_jira.project_versions("SPARK")
versions = sorted(versions, key=lambda x: x.name, reverse=True)
versions = filter(lambda x: x.raw['released'] is False, versions)
# Consider only x.y.z versions
versions = filter(lambda x: re.match('\d+\.\d+\.\d+', x.name), versions)
default_fix_versions = map(lambda x: fix_version_from_branch(x, versions).name, merge_branches)
for v in default_fix_versions:
# Handles the case where we have forked a release branch but not yet made the release.
# In this case, if the PR is committed to the master branch and the release branch, we
# only consider the release branch to be the fix version. E.g. it is not valid to have
# both 1.1.0 and 1.0.0 as fix versions.
(major, minor, patch) = v.split(".")
if patch == "0":
previous = "%s.%s.%s" % (major, int(minor) - 1, 0)
if previous in default_fix_versions:
default_fix_versions = filter(lambda x: x != v, default_fix_versions)
default_fix_versions = ",".join(default_fix_versions)
fix_versions = raw_input("Enter comma-separated fix version(s) [%s]: " % default_fix_versions)
if fix_versions == "":
fix_versions = default_fix_versions
fix_versions = fix_versions.replace(" ", "").split(",")
def get_version_json(version_str):
return filter(lambda v: v.name == version_str, versions)[0].raw
jira_fix_versions = map(lambda v: get_version_json(v), fix_versions)
resolve = filter(lambda a: a['name'] == "Resolve Issue", asf_jira.transitions(jira_id))[0]
resolution = filter(lambda r: r.raw['name'] == "Fixed", asf_jira.resolutions())[0]
asf_jira.transition_issue(
jira_id, resolve["id"], fixVersions = jira_fix_versions,
comment = comment, resolution = {'id': resolution.raw['id']})
print "Successfully resolved %s with fixVersions=%s!" % (jira_id, fix_versions)
def resolve_jira_issues(title, merge_branches, comment):
jira_ids = re.findall("SPARK-[0-9]{4,5}", title)
if len(jira_ids) == 0:
resolve_jira_issue(merge_branches, comment)
for jira_id in jira_ids:
resolve_jira_issue(merge_branches, comment, jira_id)
def standardize_jira_ref(text):
"""
Standardize the [SPARK-XXXXX] [MODULE] prefix
Converts "[SPARK-XXX][mllib] Issue", "[MLLib] SPARK-XXX. Issue" or "SPARK XXX [MLLIB]: Issue" to "[SPARK-XXX] [MLLIB] Issue"
>>> standardize_jira_ref("[SPARK-5821] [SQL] ParquetRelation2 CTAS should check if delete is successful")
'[SPARK-5821] [SQL] ParquetRelation2 CTAS should check if delete is successful'
>>> standardize_jira_ref("[SPARK-4123][Project Infra][WIP]: Show new dependencies added in pull requests")
'[SPARK-4123] [PROJECT INFRA] [WIP] Show new dependencies added in pull requests'
>>> standardize_jira_ref("[MLlib] Spark 5954: Top by key")
'[SPARK-5954] [MLLIB] Top by key'
>>> standardize_jira_ref("[SPARK-979] a LRU scheduler for load balancing in TaskSchedulerImpl")
'[SPARK-979] a LRU scheduler for load balancing in TaskSchedulerImpl'
>>> standardize_jira_ref("SPARK-1094 Support MiMa for reporting binary compatibility accross versions.")
'[SPARK-1094] Support MiMa for reporting binary compatibility accross versions.'
>>> standardize_jira_ref("[WIP] [SPARK-1146] Vagrant support for Spark")
'[SPARK-1146] [WIP] Vagrant support for Spark'
>>> standardize_jira_ref("SPARK-1032. If Yarn app fails before registering, app master stays aroun...")
'[SPARK-1032] If Yarn app fails before registering, app master stays aroun...'
>>> standardize_jira_ref("[SPARK-6250][SPARK-6146][SPARK-5911][SQL] Types are now reserved words in DDL parser.")
'[SPARK-6250] [SPARK-6146] [SPARK-5911] [SQL] Types are now reserved words in DDL parser.'
>>> standardize_jira_ref("Additional information for users building from source code")
'Additional information for users building from source code'
"""
jira_refs = []
components = []
# If the string is compliant, no need to process any further
if (re.search(r'^\[SPARK-[0-9]{3,6}\] (\[[A-Z0-9_\s,]+\] )+\S+', text)):
return text
# Extract JIRA ref(s):
pattern = re.compile(r'(SPARK[-\s]*[0-9]{3,6})+', re.IGNORECASE)
for ref in pattern.findall(text):
# Add brackets, replace spaces with a dash, & convert to uppercase
jira_refs.append('[' + re.sub(r'\s+', '-', ref.upper()) + ']')
text = text.replace(ref, '')
# Extract spark component(s):
# Look for alphanumeric chars, spaces, dashes, periods, and/or commas
pattern = re.compile(r'(\[[\w\s,-\.]+\])', re.IGNORECASE)
for component in pattern.findall(text):
components.append(component.upper())
text = text.replace(component, '')
# Cleanup any remaining symbols:
pattern = re.compile(r'^\W+(.*)', re.IGNORECASE)
if (pattern.search(text) is not None):
text = pattern.search(text).groups()[0]
# Assemble full text (JIRA ref(s), module(s), remaining text)
clean_text = ' '.join(jira_refs).strip() + " " + ' '.join(components).strip() + " " + text.strip()
# Replace multiple spaces with a single space, e.g. if no jira refs and/or components were included
clean_text = re.sub(r'\s+', ' ', clean_text.strip())
return clean_text
def main():
global original_head
os.chdir(SPARK_HOME)
original_head = run_cmd("git rev-parse HEAD")[:8]
branches = get_json("%s/branches" % GITHUB_API_BASE)
#branch_names = filter(lambda x: x.startswith("branch-"), [x['name'] for x in branches])
# Assumes branch names can be sorted lexicographically
latest_branch = "master"
pr_num = raw_input("Which pull request would you like to merge? (e.g. 34): ")
pr = get_json("%s/pulls/%s" % (GITHUB_API_BASE, pr_num))
pr_events = get_json("%s/issues/%s/events" % (GITHUB_API_BASE, pr_num))
url = pr["url"]
# Decide whether to use the modified title or not
modified_title = standardize_jira_ref(pr["title"])
if modified_title != pr["title"]:
print "I've re-written the title as follows to match the standard format:"
print "Original: %s" % pr["title"]
print "Modified: %s" % modified_title
result = raw_input("Would you like to use the modified title? (y/n): ")
if result.lower() == "y":
title = modified_title
print "Using modified title:"
else:
title = pr["title"]
print "Using original title:"
print title
else:
title = pr["title"]
body = pr["body"]
target_ref = pr["base"]["ref"]
user_login = pr["user"]["login"]
base_ref = pr["head"]["ref"]
pr_repo_desc = "%s/%s" % (user_login, base_ref)
# Merged pull requests don't appear as merged in the GitHub API;
# Instead, they're closed by asfgit.
merge_commits = \
[e for e in pr_events if e["actor"]["login"] == "asfgit" and e["event"] == "closed"]
if merge_commits:
merge_hash = merge_commits[0]["commit_id"]
message = get_json("%s/commits/%s" % (GITHUB_API_BASE, merge_hash))["commit"]["message"]
print "Pull request %s has already been merged, assuming you want to backport" % pr_num
commit_is_downloaded = run_cmd(['git', 'rev-parse', '--quiet', '--verify',
"%s^{commit}" % merge_hash]).strip() != ""
if not commit_is_downloaded:
fail("Couldn't find any merge commit for #%s, you may need to update HEAD." % pr_num)
print "Found commit %s:\n%s" % (merge_hash, message)
cherry_pick(pr_num, merge_hash, latest_branch)
sys.exit(0)
if not bool(pr["mergeable"]):
msg = "Pull request %s is not mergeable in its current form.\n" % pr_num + \
"Continue? (experts only!)"
continue_maybe(msg)
print ("\n=== Pull Request #%s ===" % pr_num)
print ("title\t%s\nsource\t%s\ntarget\t%s\nurl\t%s" % (
title, pr_repo_desc, target_ref, url))
continue_maybe("Proceed with merging pull request #%s?" % pr_num)
merged_refs = [target_ref]
merge_hash = merge_pr(pr_num, target_ref, title, body, pr_repo_desc)
pick_prompt = "Would you like to pick %s into another branch?" % merge_hash
while raw_input("\n%s (y/n): " % pick_prompt).lower() == "y":
merged_refs = merged_refs + [cherry_pick(pr_num, merge_hash, latest_branch)]
if JIRA_IMPORTED:
if JIRA_USERNAME and JIRA_PASSWORD:
continue_maybe("Would you like to update an associated JIRA?")
jira_comment = "Issue resolved by pull request %s\n[%s/%s]" % (pr_num, GITHUB_BASE, pr_num)
resolve_jira_issues(title, merged_refs, jira_comment)
else:
print "JIRA_USERNAME and JIRA_PASSWORD not set"
print "Exiting without trying to close the associated JIRA."
else:
print "Could not find jira-python library. Run 'sudo pip install jira' to install."
print "Exiting without trying to close the associated JIRA."
if __name__ == "__main__":
import doctest
(failure_count, test_count) = doctest.testmod()
if failure_count:
exit(-1)
main()
|
snowflakedb/spark-snowflake
|
legacy/dev/merge_pr.py
|
Python
|
apache-2.0
| 18,840 | 0.004299 |
import unittest
import traceback
from time import perf_counter
class CodewarsTestRunner(object):
def __init__(self): pass
def run(self, test):
r = CodewarsTestResult()
s = perf_counter()
print("\n<DESCRIBE::>Tests")
try:
test(r)
finally:
pass
print("\n<COMPLETEDIN::>{:.4f}".format(1000*(perf_counter() - s)))
return r
__unittest = True
class CodewarsTestResult(unittest.TestResult):
def __init__(self):
super().__init__()
self.start = 0.0
def startTest(self, test):
print("\n<IT::>" + test._testMethodName)
super().startTest(test)
self.start = perf_counter()
def stopTest(self, test):
print("\n<COMPLETEDIN::>{:.4f}".format(1000*(perf_counter() - self.start)))
super().stopTest(test)
def addSuccess(self, test):
print("\n<PASSED::>Test Passed")
super().addSuccess(test)
def addError(self, test, err):
print("\n<ERROR::>Unhandled Exception")
print("\n<LOG:ESC:Error>" + esc(''.join(traceback.format_exception_only(err[0], err[1]))))
print("\n<LOG:ESC:Traceback>" + esc(self._exc_info_to_string(err, test)))
super().addError(test, err)
def addFailure(self, test, err):
print("\n<FAILED::>Test Failed")
print("\n<LOG:ESC:Failure>" + esc(''.join(traceback.format_exception_only(err[0], err[1]))))
super().addFailure(test, err)
# from unittest/result.py
def _exc_info_to_string(self, err, test):
exctype, value, tb = err
# Skip test runner traceback levels
while tb and self._is_relevant_tb_level(tb):
tb = tb.tb_next
if exctype is test.failureException:
length = self._count_relevant_tb_levels(tb) # Skip assert*() traceback levels
else:
length = None
return ''.join(traceback.format_tb(tb, limit=length))
def _is_relevant_tb_level(self, tb):
return '__unittest' in tb.tb_frame.f_globals
def _count_relevant_tb_levels(self, tb):
length = 0
while tb and not self._is_relevant_tb_level(tb):
length += 1
tb = tb.tb_next
return length
def esc(s):
return s.replace("\n", "<:LF:>")
|
Codewars/codewars-runner
|
frameworks/python/codewars.py
|
Python
|
mit
| 2,284 | 0.004816 |
# Copyright (c) 2011 Nick Hurley <hurley at todesschaf dot org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Helpers for git extensions written in python
"""
import inspect
import os
import subprocess
import sys
import traceback
config = {}
def __extract_name_email(info, type_):
"""Extract a name and email from a string in the form:
User Name <user@example.com> tstamp offset
Stick that into our config dict for either git committer or git author.
"""
val = ' '.join(info.split(' ')[:-2])
angle = val.find('<')
if angle > -1:
config['GIT_%s_NAME' % type_] = val[:angle - 1]
config['GIT_%s_EMAIL' % type_] = val[angle + 1:-1]
else:
config['GIT_%s_NAME' % type_] = val
def __create_config():
"""Create our configuration dict from git and the env variables we're given.
"""
devnull = file('/dev/null', 'w')
# Stick all our git variables in our dict, just in case anyone needs them
gitvar = subprocess.Popen(['git', 'var', '-l'], stdout=subprocess.PIPE,
stderr=devnull)
for line in gitvar.stdout:
k, v = line.split('=', 1)
if k == 'GIT_COMMITTER_IDENT':
__extract_name_email(v, 'COMMITTER')
elif k == 'GIT_AUTHOR_IDENT':
__extract_name_email(v, 'AUTHOR')
elif v == 'true':
v = True
elif v == 'false':
v = False
else:
try:
v = int(v)
except:
pass
config[k] = v
gitvar.wait()
# Find out where git's sub-exes live
gitexec = subprocess.Popen(['git', '--exec-path'], stdout=subprocess.PIPE,
stderr=devnull)
config['GIT_LIBEXEC'] = gitexec.stdout.readlines()[0].strip()
gitexec.wait()
# Figure out the git dir in our repo, if applicable
gitdir = subprocess.Popen(['git', 'rev-parse', '--git-dir'],
stdout=subprocess.PIPE, stderr=devnull)
lines = gitdir.stdout.readlines()
if gitdir.wait() == 0:
config['GIT_DIR'] = lines[0].strip()
# Figure out the top level of our repo, if applicable
gittoplevel = subprocess.Popen(['git', 'rev-parse', '--show-toplevel'],
stdout=subprocess.PIPE, stderr=devnull)
lines = gittoplevel.stdout.readlines()
if gittoplevel.wait() == 0:
config['GIT_TOPLEVEL'] = lines[0].strip()
# We may have been called by a wrapper that passes us some info through the
# environment. Use it if it's there
for k, v in os.environ.iteritems():
if k.startswith('PY_GIT_'):
config[k[3:]] = v
elif k == 'PGL_OK':
config['PGL_OK'] = True
# Make sure our git dir and toplevel are fully-qualified
if 'GIT_DIR' in config and not os.path.isabs(config['GIT_DIR']):
git_dir = os.path.join(config['GIT_TOPLEVEL'], config['GIT_DIR'])
config['GIT_DIR'] = os.path.abspath(git_dir)
def warn(msg):
"""Print a warning
"""
sys.stderr.write('%s\n' % (msg,))
def die(msg):
"""Print an error message and exit the program
"""
sys.stderr.write('%s\n' % (msg,))
sys.exit(1)
def do_checks():
"""Check to ensure we've got everything we expect
"""
try:
import argparse
except:
die('Your python must support the argparse module')
def main(_main):
"""Mark a function as the main function for our git subprogram. Based
very heavily on automain by Gerald Kaszuba, but with modifications to make
it work better for our purposes.
"""
parent = inspect.stack()[1][0]
name = parent.f_locals.get('__name__', None)
if name == '__main__':
__create_config()
if 'PGL_OK' not in config:
do_checks()
rval = 1
try:
rval = _main()
except Exception, e:
sys.stdout.write('%s\n' % str(e))
f = file('pygit.tb', 'w')
traceback.print_tb(sys.exc_info()[2], None, f)
f.close()
sys.exit(rval)
return _main
if __name__ == '__main__':
"""If we get run as a script, check to make sure it's all ok and exit with
an appropriate error code
"""
do_checks()
sys.exit(0)
|
todesschaf/pgl
|
pgl.py
|
Python
|
gpl-2.0
| 4,786 | 0.002925 |
#!/usr/bin/env python
"""
fla.gr user model
Given a userID or a username or a email, return the users couchc.database ORM
http://xkcd.com/353/
Josh Ashby
2013
http://joshashby.com
joshuaashby@joshashby.com
"""
from couchdb.mapping import Document, TextField, DateTimeField, \
BooleanField, IntegerField
import bcrypt
from datetime import datetime
import config.config as c
import utils.markdownUtils as mdu
from models.modelExceptions.userModelExceptions import \
multipleUsersError, passwordError, userError
from models.couch.baseCouchModel import baseCouchModel
class userORM(Document, baseCouchModel):
"""
Base ORM for users in fla.gr, this one currently uses couchc.database to store
the data.
TODO: Flesh this doc out a lot more
"""
_name = "users"
username = TextField()
email = TextField()
about = TextField(default="")
disable = BooleanField(default=False)
emailVisibility = BooleanField(default=False)
level = IntegerField(default=1)
password = TextField()
joined = DateTimeField(default=datetime.now)
docType = TextField(default="user")
formatedAbout = ""
_view = 'typeViews/user'
@classmethod
def new(cls, username, password):
"""
Make a new user, checking for username conflicts. If no conflicts are
found the password is encrypted with bcrypt and the resulting `userORM` returned.
:param username: The username that should be used for the new user
:param password: The plain text password that should be used for the password.
:return: `userORM` if the username is available,
"""
if password == "":
raise passwordError("Password cannot be null")
elif not cls.find(username):
passwd = bcrypt.hashpw(password, bcrypt.gensalt())
user = cls(username=username, password=passwd)
return user
else:
raise userError("That username is taken, please choose again.",
username)
def setPassword(self, password):
"""
Sets the users password to `password`
:param password: plain text password to hash
"""
self.password = bcrypt.hashpw(password, bcrypt.gensalt())
self.store(c.database.couchServer)
@staticmethod
def _search(items, value):
"""
Searches the list `items` for the given value
:param items: A list of ORM objects to search
:param value: The value to search for, in this case
value can be a username or an email, or an id
"""
foundUser = []
for user in items:
if user.email == value \
or user.username == value \
or user.id == value:
foundUser.append(user)
if not foundUser:
return None
if len(foundUser)>1:
raise multipleUsersError("Multiple Users", value)
else:
user = foundUser[0]
user.formatedAbout = mdu.markClean(user.about)
return user
@property
def hasAdmin(self):
return self.level > 50
def format(self):
"""
Formats markdown and dates into the right stuff
"""
self.formatedAbout = mdu.markClean(self.about)
self.formatedJoined = datetime.strftime(self.joined, "%a %b %d, %Y @ %H:%I%p")
|
JoshAshby/Fla.gr
|
app/models/couch/user/userModel.py
|
Python
|
mit
| 3,412 | 0.001758 |
from rsk_mind.datasource import *
from rsk_mind.classifier import *
from transformer import CustomTransformer
PROJECT_NAME = 'test'
DATASOURCE= {
'IN' : {
'class' : CSVDataSource,
'params' : ('in.csv', )
},
'OUT' : {
'class' : CSVDataSource,
'params' : ('out.csv', )
}
}
ANALYSIS = {
'persist': True,
'out': 'info.json'
}
TRANSFORMER = CustomTransformer
TRAINING = {
'algorithms' : [
{
'classifier': XGBoostClassifier,
'parameters' : {
'bst:max_depth': 7,
'bst:eta': 0.3,
'bst:subsample': 0.5,
'silent': 0,
'objective': 'binary:logistic',
'nthread': 4,
'eval_metric': 'auc'
},
'dataset': DATASOURCE['IN']
}
],
'ensemble': 'max',
'dataset': DATASOURCE['IN']
}
ENGINE = {
}
|
rsk-mind/rsk-mind-framework
|
test/setting.py
|
Python
|
mit
| 927 | 0.010787 |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from six.moves import zip
import unittest
import numpy as np
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_array_equal, assert_array_almost_equal)
import pytest
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.transforms as mtransforms
from matplotlib.path import Path
from matplotlib.scale import LogScale
from matplotlib.testing.decorators import image_comparison
def test_non_affine_caching():
class AssertingNonAffineTransform(mtransforms.Transform):
"""
This transform raises an assertion error when called when it
shouldn't be and self.raise_on_transform is True.
"""
input_dims = output_dims = 2
is_affine = False
def __init__(self, *args, **kwargs):
mtransforms.Transform.__init__(self, *args, **kwargs)
self.raise_on_transform = False
self.underlying_transform = mtransforms.Affine2D().scale(10, 10)
def transform_path_non_affine(self, path):
assert not self.raise_on_transform, \
'Invalidated affine part of transform unnecessarily.'
return self.underlying_transform.transform_path(path)
transform_path = transform_path_non_affine
def transform_non_affine(self, path):
assert not self.raise_on_transform, \
'Invalidated affine part of transform unnecessarily.'
return self.underlying_transform.transform(path)
transform = transform_non_affine
my_trans = AssertingNonAffineTransform()
ax = plt.axes()
plt.plot(np.arange(10), transform=my_trans + ax.transData)
plt.draw()
# enable the transform to raise an exception if it's non-affine transform
# method is triggered again.
my_trans.raise_on_transform = True
ax.transAxes.invalidate()
plt.draw()
def test_external_transform_api():
class ScaledBy(object):
def __init__(self, scale_factor):
self._scale_factor = scale_factor
def _as_mpl_transform(self, axes):
return (mtransforms.Affine2D().scale(self._scale_factor)
+ axes.transData)
ax = plt.axes()
line, = plt.plot(np.arange(10), transform=ScaledBy(10))
ax.set_xlim(0, 100)
ax.set_ylim(0, 100)
# assert that the top transform of the line is the scale transform.
assert_allclose(line.get_transform()._a.get_matrix(),
mtransforms.Affine2D().scale(10).get_matrix())
@image_comparison(baseline_images=['pre_transform_data'],
tol=0.08)
def test_pre_transform_plotting():
# a catch-all for as many as possible plot layouts which handle
# pre-transforming the data NOTE: The axis range is important in this
# plot. It should be x10 what the data suggests it should be
ax = plt.axes()
times10 = mtransforms.Affine2D().scale(10)
ax.contourf(np.arange(48).reshape(6, 8), transform=times10 + ax.transData)
ax.pcolormesh(np.linspace(0, 4, 7),
np.linspace(5.5, 8, 9),
np.arange(48).reshape(8, 6),
transform=times10 + ax.transData)
ax.scatter(np.linspace(0, 10), np.linspace(10, 0),
transform=times10 + ax.transData)
x = np.linspace(8, 10, 20)
y = np.linspace(1, 5, 20)
u = 2*np.sin(x) + np.cos(y[:, np.newaxis])
v = np.sin(x) - np.cos(y[:, np.newaxis])
df = 25. / 30. # Compatibility factor for old test image
ax.streamplot(x, y, u, v, transform=times10 + ax.transData,
density=(df, df), linewidth=u**2 + v**2)
# reduce the vector data down a bit for barb and quiver plotting
x, y = x[::3], y[::3]
u, v = u[::3, ::3], v[::3, ::3]
ax.quiver(x, y + 5, u, v, transform=times10 + ax.transData)
ax.barbs(x - 3, y + 5, u**2, v**2, transform=times10 + ax.transData)
def test_contour_pre_transform_limits():
ax = plt.axes()
xs, ys = np.meshgrid(np.linspace(15, 20, 15), np.linspace(12.4, 12.5, 20))
ax.contourf(xs, ys, np.log(xs * ys),
transform=mtransforms.Affine2D().scale(0.1) + ax.transData)
expected = np.array([[1.5, 1.24],
[2., 1.25]])
assert_almost_equal(expected, ax.dataLim.get_points())
def test_pcolor_pre_transform_limits():
# Based on test_contour_pre_transform_limits()
ax = plt.axes()
xs, ys = np.meshgrid(np.linspace(15, 20, 15), np.linspace(12.4, 12.5, 20))
ax.pcolor(xs, ys, np.log(xs * ys),
transform=mtransforms.Affine2D().scale(0.1) + ax.transData)
expected = np.array([[1.5, 1.24],
[2., 1.25]])
assert_almost_equal(expected, ax.dataLim.get_points())
def test_pcolormesh_pre_transform_limits():
# Based on test_contour_pre_transform_limits()
ax = plt.axes()
xs, ys = np.meshgrid(np.linspace(15, 20, 15), np.linspace(12.4, 12.5, 20))
ax.pcolormesh(xs, ys, np.log(xs * ys),
transform=mtransforms.Affine2D().scale(0.1) + ax.transData)
expected = np.array([[1.5, 1.24],
[2., 1.25]])
assert_almost_equal(expected, ax.dataLim.get_points())
def test_Affine2D_from_values():
points = np.array([[0, 0],
[10, 20],
[-1, 0],
])
t = mtransforms.Affine2D.from_values(1, 0, 0, 0, 0, 0)
actual = t.transform(points)
expected = np.array([[0, 0], [10, 0], [-1, 0]])
assert_almost_equal(actual, expected)
t = mtransforms.Affine2D.from_values(0, 2, 0, 0, 0, 0)
actual = t.transform(points)
expected = np.array([[0, 0], [0, 20], [0, -2]])
assert_almost_equal(actual, expected)
t = mtransforms.Affine2D.from_values(0, 0, 3, 0, 0, 0)
actual = t.transform(points)
expected = np.array([[0, 0], [60, 0], [0, 0]])
assert_almost_equal(actual, expected)
t = mtransforms.Affine2D.from_values(0, 0, 0, 4, 0, 0)
actual = t.transform(points)
expected = np.array([[0, 0], [0, 80], [0, 0]])
assert_almost_equal(actual, expected)
t = mtransforms.Affine2D.from_values(0, 0, 0, 0, 5, 0)
actual = t.transform(points)
expected = np.array([[5, 0], [5, 0], [5, 0]])
assert_almost_equal(actual, expected)
t = mtransforms.Affine2D.from_values(0, 0, 0, 0, 0, 6)
actual = t.transform(points)
expected = np.array([[0, 6], [0, 6], [0, 6]])
assert_almost_equal(actual, expected)
def test_clipping_of_log():
# issue 804
M, L, C = Path.MOVETO, Path.LINETO, Path.CLOSEPOLY
points = [(0.2, -99), (0.4, -99), (0.4, 20), (0.2, 20), (0.2, -99)]
codes = [M, L, L, L, C]
path = Path(points, codes)
# something like this happens in plotting logarithmic histograms
trans = mtransforms.BlendedGenericTransform(mtransforms.Affine2D(),
LogScale.Log10Transform('clip'))
tpath = trans.transform_path_non_affine(path)
result = tpath.iter_segments(trans.get_affine(),
clip=(0, 0, 100, 100),
simplify=False)
tpoints, tcodes = list(zip(*result))
assert_allclose(tcodes, [M, L, L, L, C])
class NonAffineForTest(mtransforms.Transform):
"""
A class which looks like a non affine transform, but does whatever
the given transform does (even if it is affine). This is very useful
for testing NonAffine behaviour with a simple Affine transform.
"""
is_affine = False
output_dims = 2
input_dims = 2
def __init__(self, real_trans, *args, **kwargs):
self.real_trans = real_trans
mtransforms.Transform.__init__(self, *args, **kwargs)
def transform_non_affine(self, values):
return self.real_trans.transform(values)
def transform_path_non_affine(self, path):
return self.real_trans.transform_path(path)
class BasicTransformTests(unittest.TestCase):
def setUp(self):
self.ta1 = mtransforms.Affine2D(shorthand_name='ta1').rotate(np.pi / 2)
self.ta2 = mtransforms.Affine2D(shorthand_name='ta2').translate(10, 0)
self.ta3 = mtransforms.Affine2D(shorthand_name='ta3').scale(1, 2)
self.tn1 = NonAffineForTest(mtransforms.Affine2D().translate(1, 2),
shorthand_name='tn1')
self.tn2 = NonAffineForTest(mtransforms.Affine2D().translate(1, 2),
shorthand_name='tn2')
self.tn3 = NonAffineForTest(mtransforms.Affine2D().translate(1, 2),
shorthand_name='tn3')
# creates a transform stack which looks like ((A, (N, A)), A)
self.stack1 = (self.ta1 + (self.tn1 + self.ta2)) + self.ta3
# creates a transform stack which looks like (((A, N), A), A)
self.stack2 = self.ta1 + self.tn1 + self.ta2 + self.ta3
# creates a transform stack which is a subset of stack2
self.stack2_subset = self.tn1 + self.ta2 + self.ta3
# when in debug, the transform stacks can produce dot images:
# self.stack1.write_graphviz(file('stack1.dot', 'w'))
# self.stack2.write_graphviz(file('stack2.dot', 'w'))
# self.stack2_subset.write_graphviz(file('stack2_subset.dot', 'w'))
def test_transform_depth(self):
assert self.stack1.depth == 4
assert self.stack2.depth == 4
assert self.stack2_subset.depth == 3
def test_left_to_right_iteration(self):
stack3 = (self.ta1 + (self.tn1 + (self.ta2 + self.tn2))) + self.ta3
# stack3.write_graphviz(file('stack3.dot', 'w'))
target_transforms = [stack3,
(self.tn1 + (self.ta2 + self.tn2)) + self.ta3,
(self.ta2 + self.tn2) + self.ta3,
self.tn2 + self.ta3,
self.ta3,
]
r = [rh for _, rh in stack3._iter_break_from_left_to_right()]
assert len(r) == len(target_transforms)
for target_stack, stack in zip(target_transforms, r):
assert target_stack == stack
def test_transform_shortcuts(self):
assert self.stack1 - self.stack2_subset == self.ta1
assert self.stack2 - self.stack2_subset == self.ta1
assert self.stack2_subset - self.stack2 == self.ta1.inverted()
assert (self.stack2_subset - self.stack2).depth == 1
with pytest.raises(ValueError):
self.stack1 - self.stack2
aff1 = self.ta1 + (self.ta2 + self.ta3)
aff2 = self.ta2 + self.ta3
assert aff1 - aff2 == self.ta1
assert aff1 - self.ta2 == aff1 + self.ta2.inverted()
assert self.stack1 - self.ta3 == self.ta1 + (self.tn1 + self.ta2)
assert self.stack2 - self.ta3 == self.ta1 + self.tn1 + self.ta2
assert ((self.ta2 + self.ta3) - self.ta3 + self.ta3 ==
self.ta2 + self.ta3)
def test_contains_branch(self):
r1 = (self.ta2 + self.ta1)
r2 = (self.ta2 + self.ta1)
assert r1 == r2
assert r1 != self.ta1
assert r1.contains_branch(r2)
assert r1.contains_branch(self.ta1)
assert not r1.contains_branch(self.ta2)
assert not r1.contains_branch((self.ta2 + self.ta2))
assert r1 == r2
assert self.stack1.contains_branch(self.ta3)
assert self.stack2.contains_branch(self.ta3)
assert self.stack1.contains_branch(self.stack2_subset)
assert self.stack2.contains_branch(self.stack2_subset)
assert not self.stack2_subset.contains_branch(self.stack1)
assert not self.stack2_subset.contains_branch(self.stack2)
assert self.stack1.contains_branch((self.ta2 + self.ta3))
assert self.stack2.contains_branch((self.ta2 + self.ta3))
assert not self.stack1.contains_branch((self.tn1 + self.ta2))
def test_affine_simplification(self):
# tests that a transform stack only calls as much is absolutely
# necessary "non-affine" allowing the best possible optimization with
# complex transformation stacks.
points = np.array([[0, 0], [10, 20], [np.nan, 1], [-1, 0]],
dtype=np.float64)
na_pts = self.stack1.transform_non_affine(points)
all_pts = self.stack1.transform(points)
na_expected = np.array([[1., 2.], [-19., 12.],
[np.nan, np.nan], [1., 1.]], dtype=np.float64)
all_expected = np.array([[11., 4.], [-9., 24.],
[np.nan, np.nan], [11., 2.]],
dtype=np.float64)
# check we have the expected results from doing the affine part only
assert_array_almost_equal(na_pts, na_expected)
# check we have the expected results from a full transformation
assert_array_almost_equal(all_pts, all_expected)
# check we have the expected results from doing the transformation in
# two steps
assert_array_almost_equal(self.stack1.transform_affine(na_pts),
all_expected)
# check that getting the affine transformation first, then fully
# transforming using that yields the same result as before.
assert_array_almost_equal(self.stack1.get_affine().transform(na_pts),
all_expected)
# check that the affine part of stack1 & stack2 are equivalent
# (i.e. the optimization is working)
expected_result = (self.ta2 + self.ta3).get_matrix()
result = self.stack1.get_affine().get_matrix()
assert_array_equal(expected_result, result)
result = self.stack2.get_affine().get_matrix()
assert_array_equal(expected_result, result)
class TestTransformPlotInterface(unittest.TestCase):
def tearDown(self):
plt.close()
def test_line_extent_axes_coords(self):
# a simple line in axes coordinates
ax = plt.axes()
ax.plot([0.1, 1.2, 0.8], [0.9, 0.5, 0.8], transform=ax.transAxes)
assert_array_equal(ax.dataLim.get_points(),
np.array([[np.inf, np.inf],
[-np.inf, -np.inf]]))
def test_line_extent_data_coords(self):
# a simple line in data coordinates
ax = plt.axes()
ax.plot([0.1, 1.2, 0.8], [0.9, 0.5, 0.8], transform=ax.transData)
assert_array_equal(ax.dataLim.get_points(),
np.array([[0.1, 0.5], [1.2, 0.9]]))
def test_line_extent_compound_coords1(self):
# a simple line in data coordinates in the y component, and in axes
# coordinates in the x
ax = plt.axes()
trans = mtransforms.blended_transform_factory(ax.transAxes,
ax.transData)
ax.plot([0.1, 1.2, 0.8], [35, -5, 18], transform=trans)
assert_array_equal(ax.dataLim.get_points(),
np.array([[np.inf, -5.],
[-np.inf, 35.]]))
plt.close()
def test_line_extent_predata_transform_coords(self):
# a simple line in (offset + data) coordinates
ax = plt.axes()
trans = mtransforms.Affine2D().scale(10) + ax.transData
ax.plot([0.1, 1.2, 0.8], [35, -5, 18], transform=trans)
assert_array_equal(ax.dataLim.get_points(),
np.array([[1., -50.], [12., 350.]]))
plt.close()
def test_line_extent_compound_coords2(self):
# a simple line in (offset + data) coordinates in the y component, and
# in axes coordinates in the x
ax = plt.axes()
trans = mtransforms.blended_transform_factory(ax.transAxes,
mtransforms.Affine2D().scale(10) + ax.transData)
ax.plot([0.1, 1.2, 0.8], [35, -5, 18], transform=trans)
assert_array_equal(ax.dataLim.get_points(),
np.array([[np.inf, -50.], [-np.inf, 350.]]))
plt.close()
def test_line_extents_affine(self):
ax = plt.axes()
offset = mtransforms.Affine2D().translate(10, 10)
plt.plot(np.arange(10), transform=offset + ax.transData)
expected_data_lim = np.array([[0., 0.], [9., 9.]]) + 10
assert_array_almost_equal(ax.dataLim.get_points(), expected_data_lim)
def test_line_extents_non_affine(self):
ax = plt.axes()
offset = mtransforms.Affine2D().translate(10, 10)
na_offset = NonAffineForTest(mtransforms.Affine2D().translate(10, 10))
plt.plot(np.arange(10), transform=offset + na_offset + ax.transData)
expected_data_lim = np.array([[0., 0.], [9., 9.]]) + 20
assert_array_almost_equal(ax.dataLim.get_points(), expected_data_lim)
def test_pathc_extents_non_affine(self):
ax = plt.axes()
offset = mtransforms.Affine2D().translate(10, 10)
na_offset = NonAffineForTest(mtransforms.Affine2D().translate(10, 10))
pth = Path(np.array([[0, 0], [0, 10], [10, 10], [10, 0]]))
patch = mpatches.PathPatch(pth,
transform=offset + na_offset + ax.transData)
ax.add_patch(patch)
expected_data_lim = np.array([[0., 0.], [10., 10.]]) + 20
assert_array_almost_equal(ax.dataLim.get_points(), expected_data_lim)
def test_pathc_extents_affine(self):
ax = plt.axes()
offset = mtransforms.Affine2D().translate(10, 10)
pth = Path(np.array([[0, 0], [0, 10], [10, 10], [10, 0]]))
patch = mpatches.PathPatch(pth, transform=offset + ax.transData)
ax.add_patch(patch)
expected_data_lim = np.array([[0., 0.], [10., 10.]]) + 10
assert_array_almost_equal(ax.dataLim.get_points(), expected_data_lim)
def test_line_extents_for_non_affine_transData(self):
ax = plt.axes(projection='polar')
# add 10 to the radius of the data
offset = mtransforms.Affine2D().translate(0, 10)
plt.plot(np.arange(10), transform=offset + ax.transData)
# the data lim of a polar plot is stored in coordinates
# before a transData transformation, hence the data limits
# are not what is being shown on the actual plot.
expected_data_lim = np.array([[0., 0.], [9., 9.]]) + [0, 10]
assert_array_almost_equal(ax.dataLim.get_points(), expected_data_lim)
def assert_bbox_eq(bbox1, bbox2):
assert_array_equal(bbox1.bounds, bbox2.bounds)
def test_bbox_intersection():
bbox_from_ext = mtransforms.Bbox.from_extents
inter = mtransforms.Bbox.intersection
r1 = bbox_from_ext(0, 0, 1, 1)
r2 = bbox_from_ext(0.5, 0.5, 1.5, 1.5)
r3 = bbox_from_ext(0.5, 0, 0.75, 0.75)
r4 = bbox_from_ext(0.5, 1.5, 1, 2.5)
r5 = bbox_from_ext(1, 1, 2, 2)
# self intersection -> no change
assert_bbox_eq(inter(r1, r1), r1)
# simple intersection
assert_bbox_eq(inter(r1, r2), bbox_from_ext(0.5, 0.5, 1, 1))
# r3 contains r2
assert_bbox_eq(inter(r1, r3), r3)
# no intersection
assert inter(r1, r4) is None
# single point
assert_bbox_eq(inter(r1, r5), bbox_from_ext(1, 1, 1, 1))
def test_bbox_as_strings():
b = mtransforms.Bbox([[.5, 0], [.75, .75]])
assert_bbox_eq(b, eval(repr(b), {'Bbox': mtransforms.Bbox}))
asdict = eval(str(b), {'Bbox': dict})
for k, v in asdict.items():
assert getattr(b, k) == v
fmt = '.1f'
asdict = eval(format(b, fmt), {'Bbox': dict})
for k, v in asdict.items():
assert eval(format(getattr(b, k), fmt)) == v
def test_transform_single_point():
t = mtransforms.Affine2D()
r = t.transform_affine((1, 1))
assert r.shape == (2,)
def test_log_transform():
# Tests that the last line runs without exception (previously the
# transform would fail if one of the axes was logarithmic).
fig, ax = plt.subplots()
ax.set_yscale('log')
ax.transData.transform((1, 1))
def test_nan_overlap():
a = mtransforms.Bbox([[0, 0], [1, 1]])
b = mtransforms.Bbox([[0, 0], [1, np.nan]])
assert not a.overlaps(b)
def test_transform_angles():
t = mtransforms.Affine2D() # Identity transform
angles = np.array([20, 45, 60])
points = np.array([[0, 0], [1, 1], [2, 2]])
# Identity transform does not change angles
new_angles = t.transform_angles(angles, points)
assert_array_almost_equal(angles, new_angles)
# points missing a 2nd dimension
with pytest.raises(ValueError):
t.transform_angles(angles, points[0:2, 0:1])
# Number of angles != Number of points
with pytest.raises(ValueError):
t.transform_angles(angles, points[0:2, :])
def test_nonsingular():
# test for zero-expansion type cases; other cases may be added later
zero_expansion = np.array([-0.001, 0.001])
cases = [(0, np.nan), (0, 0), (0, 7.9e-317)]
for args in cases:
out = np.array(mtransforms.nonsingular(*args))
assert_array_equal(out, zero_expansion)
def test_invalid_arguments():
t = mtransforms.Affine2D()
# There are two different exceptions, since the wrong number of
# dimensions is caught when constructing an array_view, and that
# raises a ValueError, and a wrong shape with a possible number
# of dimensions is caught by our CALL_CPP macro, which always
# raises the less precise RuntimeError.
with pytest.raises(ValueError):
t.transform(1)
with pytest.raises(ValueError):
t.transform([[[1]]])
with pytest.raises(RuntimeError):
t.transform([])
with pytest.raises(RuntimeError):
t.transform([1])
with pytest.raises(RuntimeError):
t.transform([[1]])
with pytest.raises(RuntimeError):
t.transform([[1, 2, 3]])
def test_transformed_path():
points = [(0, 0), (1, 0), (1, 1), (0, 1)]
codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]
path = Path(points, codes)
trans = mtransforms.Affine2D()
trans_path = mtransforms.TransformedPath(path, trans)
assert_allclose(trans_path.get_fully_transformed_path().vertices, points)
# Changing the transform should change the result.
r2 = 1 / np.sqrt(2)
trans.rotate(np.pi / 4)
assert_allclose(trans_path.get_fully_transformed_path().vertices,
[(0, 0), (r2, r2), (0, 2 * r2), (-r2, r2)],
atol=1e-15)
# Changing the path does not change the result (it's cached).
path.points = [(0, 0)] * 4
assert_allclose(trans_path.get_fully_transformed_path().vertices,
[(0, 0), (r2, r2), (0, 2 * r2), (-r2, r2)],
atol=1e-15)
def test_transformed_patch_path():
trans = mtransforms.Affine2D()
patch = mpatches.Wedge((0, 0), 1, 45, 135, transform=trans)
tpatch = mtransforms.TransformedPatchPath(patch)
points = tpatch.get_fully_transformed_path().vertices
# Changing the transform should change the result.
trans.scale(2)
assert_allclose(tpatch.get_fully_transformed_path().vertices, points * 2)
# Changing the path should change the result (and cancel out the scaling
# from the transform).
patch.set_radius(0.5)
assert_allclose(tpatch.get_fully_transformed_path().vertices, points)
@pytest.mark.parametrize('locked_element', ['x0', 'y0', 'x1', 'y1'])
def test_lockable_bbox(locked_element):
other_elements = ['x0', 'y0', 'x1', 'y1']
other_elements.remove(locked_element)
orig = mtransforms.Bbox.unit()
locked = mtransforms.LockableBbox(orig, **{locked_element: 2})
# LockableBbox should keep its locked element as specified in __init__.
assert getattr(locked, locked_element) == 2
assert getattr(locked, 'locked_' + locked_element) == 2
for elem in other_elements:
assert getattr(locked, elem) == getattr(orig, elem)
# Changing underlying Bbox should update everything but locked element.
orig.set_points(orig.get_points() + 10)
assert getattr(locked, locked_element) == 2
assert getattr(locked, 'locked_' + locked_element) == 2
for elem in other_elements:
assert getattr(locked, elem) == getattr(orig, elem)
# Unlocking element should revert values back to the underlying Bbox.
setattr(locked, 'locked_' + locked_element, None)
assert getattr(locked, 'locked_' + locked_element) is None
assert np.all(orig.get_points() == locked.get_points())
# Relocking an element should change its value, but not others.
setattr(locked, 'locked_' + locked_element, 3)
assert getattr(locked, locked_element) == 3
assert getattr(locked, 'locked_' + locked_element) == 3
for elem in other_elements:
assert getattr(locked, elem) == getattr(orig, elem)
|
jonyroda97/redbot-amigosprovaveis
|
lib/matplotlib/tests/test_transforms.py
|
Python
|
gpl-3.0
| 24,823 | 0.000081 |
"""
This inline scripts makes it possible to use mitmproxy in scenarios where IP spoofing has been used to redirect
connections to mitmproxy. The way this works is that we rely on either the TLS Server Name Indication (SNI) or the
Host header of the HTTP request.
Of course, this is not foolproof - if an HTTPS connection comes without SNI, we don't
know the actual target and cannot construct a certificate that looks valid.
Similarly, if there's no Host header or a spoofed Host header, we're out of luck as well.
Using transparent mode is the better option most of the time.
Usage:
mitmproxy
-p 80
-R http://example.com/ // Used as the target location if no Host header is present
mitmproxy
-p 443
-R https://example.com/ // Used as the target locaction if neither SNI nor host header are present.
mitmproxy will always connect to the default location first, so it must be reachable.
As a workaround, you can spawn an arbitrary HTTP server and use that for both endpoints, e.g.
mitmproxy -p 80 -R http://localhost:8000
mitmproxy -p 443 -R https2http://localhost:8000
"""
def request(context, flow):
if flow.client_conn.ssl_established:
# TLS SNI or Host header
flow.request.host = flow.client_conn.connection.get_servername(
) or flow.request.pretty_host(hostheader=True)
# If you use a https2http location as default destination, these
# attributes need to be corrected as well:
flow.request.port = 443
flow.request.scheme = "https"
else:
# Host header
flow.request.host = flow.request.pretty_host(hostheader=True)
|
noikiy/mitmproxy
|
examples/dns_spoofing.py
|
Python
|
mit
| 1,652 | 0.004843 |
from datetime import datetime
from django.contrib import messages
from django.http import Http404, HttpResponseRedirect, JsonResponse
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext, ugettext_lazy
from django.views import View
from couchdbkit import ResourceNotFound
from djng.views.mixins import JSONResponseMixin, allow_remote_invocation
from memoized import memoized
from corehq.apps.accounting.models import BillingAccount
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.analytics.tasks import track_workflow
from corehq.apps.app_manager.dbaccessors import (
get_app,
get_brief_app_docs_in_domain,
get_brief_apps_in_domain,
get_build_doc_by_version,
get_latest_released_app,
get_latest_released_app_versions_by_app_id,
)
from corehq.apps.app_manager.decorators import require_can_edit_apps
from corehq.apps.app_manager.util import is_linked_app
from corehq.apps.case_search.models import CaseSearchConfig
from corehq.apps.domain.dbaccessors import domain_exists
from corehq.apps.domain.decorators import (
domain_admin_required,
login_or_api_key,
)
from corehq.apps.domain.exceptions import DomainDoesNotExist
from corehq.apps.domain.views.base import DomainViewMixin
from corehq.apps.domain.views.settings import BaseAdminProjectSettingsView
from corehq.apps.fixtures.dbaccessors import get_fixture_data_type_by_tag
from corehq.apps.hqwebapp.decorators import use_multiselect
from corehq.apps.hqwebapp.doc_info import get_doc_info_by_id
from corehq.apps.hqwebapp.templatetags.hq_shared_tags import pretty_doc_info
from corehq.apps.linked_domain.const import (
LINKED_MODELS_MAP,
MODEL_APP,
MODEL_FIXTURE,
MODEL_KEYWORD,
MODEL_REPORT,
SUPERUSER_DATA_MODELS,
)
from corehq.apps.linked_domain.dbaccessors import (
get_active_domain_link,
get_available_domains_to_link,
get_available_upstream_domains,
get_linked_domains,
get_upstream_domain_link,
)
from corehq.apps.linked_domain.decorators import (
require_access_to_linked_domains,
require_linked_domain,
)
from corehq.apps.linked_domain.exceptions import (
DomainLinkAlreadyExists,
DomainLinkError,
DomainLinkNotAllowed,
UnsupportedActionError,
)
from corehq.apps.linked_domain.local_accessors import (
get_auto_update_rules,
get_custom_data_models,
get_data_dictionary,
get_dialer_settings,
get_enabled_toggles_and_previews,
get_fixture,
get_hmac_callout_settings,
get_otp_settings,
get_tableau_server_and_visualizations,
get_user_roles,
)
from corehq.apps.linked_domain.models import (
DomainLink,
DomainLinkHistory,
wrap_detail,
)
from corehq.apps.linked_domain.remote_accessors import get_remote_linkable_ucr
from corehq.apps.linked_domain.tasks import (
pull_missing_multimedia_for_app_and_notify_task,
push_models,
)
from corehq.apps.linked_domain.ucr import create_linked_ucr
from corehq.apps.linked_domain.updates import update_model_type
from corehq.apps.linked_domain.util import (
convert_app_for_remote_linking,
pull_missing_multimedia_for_app,
server_to_user_time,
user_has_admin_access_in_all_domains,
)
from corehq.apps.linked_domain.view_helpers import (
build_domain_link_view_model,
build_pullable_view_models_from_data_models,
build_view_models_from_data_models,
get_upstream_and_downstream_apps,
get_upstream_and_downstream_fixtures,
get_upstream_and_downstream_keywords,
get_upstream_and_downstream_reports,
)
from corehq.apps.reports.datatables import DataTablesColumn, DataTablesHeader
from corehq.apps.reports.dispatcher import ReleaseManagementReportDispatcher
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.sms.models import Keyword
from corehq.apps.userreports.dbaccessors import get_report_configs_for_domain
from corehq.apps.userreports.models import (
DataSourceConfiguration,
ReportConfiguration,
)
from corehq.apps.users.decorators import require_permission
from corehq.apps.users.models import Permissions, WebUser
from corehq.privileges import RELEASE_MANAGEMENT
from corehq.util.timezones.utils import get_timezone_for_request
@login_or_api_key
@require_linked_domain
def tableau_server_and_visualizations(request, domain):
return JsonResponse(get_tableau_server_and_visualizations(domain))
@login_or_api_key
@require_linked_domain
def toggles_and_previews(request, domain):
return JsonResponse(get_enabled_toggles_and_previews(domain))
@login_or_api_key
@require_linked_domain
def auto_update_rules(request, domain):
return JsonResponse(get_auto_update_rules(domain))
@login_or_api_key
@require_linked_domain
def custom_data_models(request, domain):
limit_types = request.GET.getlist('type')
return JsonResponse(get_custom_data_models(domain, limit_types))
@login_or_api_key
@require_linked_domain
def fixture(request, domain, tag):
return JsonResponse(get_fixture(domain, tag))
@login_or_api_key
@require_linked_domain
def user_roles(request, domain):
return JsonResponse({'user_roles': get_user_roles(domain)})
@login_or_api_key
@require_linked_domain
def brief_apps(request, domain):
return JsonResponse({'brief_apps': get_brief_app_docs_in_domain(domain, include_remote=False)})
@login_or_api_key
@require_linked_domain
def app_by_version(request, domain, app_id, version):
return JsonResponse({'app': get_build_doc_by_version(domain, app_id, version)})
@login_or_api_key
@require_linked_domain
def released_app_versions(request, domain):
return JsonResponse({'versions': get_latest_released_app_versions_by_app_id(domain)})
@login_or_api_key
@require_linked_domain
def case_search_config(request, domain):
try:
config = CaseSearchConfig.objects.get(domain=domain).to_json()
except CaseSearchConfig.DoesNotExist:
config = None
return JsonResponse({'config': config})
@login_or_api_key
@require_linked_domain
@require_permission(Permissions.view_reports)
def linkable_ucr(request, domain):
"""Returns a list of reports to be used by the downstream
domain on a remote server to create linked reports by calling the
`ucr_config` view below
"""
reports = get_report_configs_for_domain(domain)
return JsonResponse({
"reports": [
{"id": report._id, "title": report.title} for report in reports]
})
@login_or_api_key
@require_linked_domain
def ucr_config(request, domain, config_id):
report_config = ReportConfiguration.get(config_id)
if report_config.domain != domain:
return Http404
datasource_id = report_config.config_id
datasource_config = DataSourceConfiguration.get(datasource_id)
return JsonResponse({
"report": report_config.to_json(),
"datasource": datasource_config.to_json(),
})
@login_or_api_key
@require_linked_domain
def get_latest_released_app_source(request, domain, app_id):
master_app = get_app(None, app_id)
if master_app.domain != domain:
raise Http404
latest_master_build = get_latest_released_app(domain, app_id)
if not latest_master_build:
raise Http404
return JsonResponse(convert_app_for_remote_linking(latest_master_build))
@login_or_api_key
@require_linked_domain
def data_dictionary(request, domain):
return JsonResponse(get_data_dictionary(domain))
@login_or_api_key
@require_linked_domain
def dialer_settings(request, domain):
return JsonResponse(get_dialer_settings(domain))
@login_or_api_key
@require_linked_domain
def otp_settings(request, domain):
return JsonResponse(get_otp_settings(domain))
@login_or_api_key
@require_linked_domain
def hmac_callout_settings(request, domain):
return JsonResponse(get_hmac_callout_settings(domain))
@require_can_edit_apps
def pull_missing_multimedia(request, domain, app_id):
async_update = request.POST.get('notify') == 'on'
force = request.POST.get('force') == 'on'
if async_update:
pull_missing_multimedia_for_app_and_notify_task.delay(domain, app_id, request.user.email, force)
messages.success(request,
ugettext('Your request has been submitted. '
'We will notify you via email once completed.'))
else:
app = get_app(domain, app_id)
pull_missing_multimedia_for_app(app, force=force)
return HttpResponseRedirect(reverse('app_settings', args=[domain, app_id]))
@method_decorator(require_access_to_linked_domains, name='dispatch')
class DomainLinkView(BaseAdminProjectSettingsView):
urlname = 'domain_links'
page_title = ugettext_lazy("Linked Project Spaces")
template_name = 'linked_domain/domain_links.html'
@use_multiselect
def dispatch(self, request, *args, **kwargs):
return super().dispatch(request, *args, **kwargs)
@property
def page_context(self):
"""
This view services both domains that are master domains and domains that are linked domains
(and legacy domains that are both).
"""
timezone = get_timezone_for_request()
upstream_link = get_upstream_domain_link(self.domain)
linked_domains = [build_domain_link_view_model(link, timezone) for link in get_linked_domains(self.domain)]
upstream_apps, downstream_apps = get_upstream_and_downstream_apps(self.domain)
upstream_fixtures, downstream_fixtures = get_upstream_and_downstream_fixtures(self.domain, upstream_link)
upstream_reports, downstream_reports = get_upstream_and_downstream_reports(self.domain)
upstream_keywords, downstream_keywords = get_upstream_and_downstream_keywords(self.domain)
is_superuser = self.request.couch_user.is_superuser
timezone = get_timezone_for_request()
view_models_to_pull = build_pullable_view_models_from_data_models(
self.domain, upstream_link, downstream_apps, downstream_fixtures, downstream_reports,
downstream_keywords, timezone, is_superuser=is_superuser
)
view_models_to_push = build_view_models_from_data_models(
self.domain, upstream_apps, upstream_fixtures, upstream_reports, upstream_keywords,
is_superuser=is_superuser
)
account = BillingAccount.get_account_by_domain(self.request.domain)
available_domains_to_link = get_available_domains_to_link(self.request.domain,
self.request.couch_user,
billing_account=account)
upstream_domain_urls = []
upstream_domains = get_available_upstream_domains(self.request.domain,
self.request.couch_user,
billing_account=account)
for domain in upstream_domains:
upstream_domain_urls.append({'name': domain, 'url': reverse('domain_links', args=[domain])})
if upstream_link and upstream_link.is_remote:
remote_linkable_ucr = get_remote_linkable_ucr(upstream_link)
else:
remote_linkable_ucr = None
return {
'domain': self.domain,
'timezone': timezone.localize(datetime.utcnow()).tzname(),
'has_release_management_privilege': domain_has_privilege(self.domain, RELEASE_MANAGEMENT),
'is_superuser': is_superuser,
'view_data': {
'is_downstream_domain': bool(upstream_link),
'upstream_domains': upstream_domain_urls,
'available_domains': available_domains_to_link,
'upstream_link': build_domain_link_view_model(upstream_link, timezone) if upstream_link else None,
'view_models_to_pull': sorted(view_models_to_pull, key=lambda m: m['name']),
'view_models_to_push': sorted(view_models_to_push, key=lambda m: m['name']),
'linked_domains': sorted(linked_domains, key=lambda d: d['downstream_domain']),
'linkable_ucr': remote_linkable_ucr,
},
}
@method_decorator(domain_admin_required, name='dispatch')
class DomainLinkRMIView(JSONResponseMixin, View, DomainViewMixin):
urlname = "domain_link_rmi"
@allow_remote_invocation
def update_linked_model(self, in_data):
model = in_data['model']
type_ = model['type']
detail = model['detail']
detail_obj = wrap_detail(type_, detail) if detail else None
upstream_link = get_upstream_domain_link(self.domain)
error = ""
try:
update_model_type(upstream_link, type_, detail_obj)
model_detail = detail_obj.to_json() if detail_obj else None
upstream_link.update_last_pull(type_, self.request.couch_user._id, model_detail=model_detail)
except (DomainLinkError, UnsupportedActionError) as e:
error = str(e)
track_workflow(
self.request.couch_user.username,
"Linked domain: pulled data model",
{"data_model": type_}
)
timezone = get_timezone_for_request()
return {
'success': not error,
'error': error,
'last_update': server_to_user_time(upstream_link.last_pull, timezone)
}
@allow_remote_invocation
def delete_domain_link(self, in_data):
linked_domain = in_data['linked_domain']
link = DomainLink.objects.filter(linked_domain=linked_domain, master_domain=self.domain).first()
link.deleted = True
link.save()
track_workflow(self.request.couch_user.username, "Linked domain: domain link deleted")
return {
'success': True,
}
@allow_remote_invocation
def create_release(self, in_data):
push_models.delay(self.domain, in_data['models'], in_data['linked_domains'],
in_data['build_apps'], self.request.couch_user.username)
track_workflow(
self.request.couch_user.username,
"Linked domain: pushed data models",
{"data_models": in_data['models']}
)
return {
'success': True,
'message': ugettext('''
Your release has begun. You will receive an email when it is complete.
Until then, to avoid linked domains receiving inconsistent content, please
avoid editing any of the data contained in the release.
'''),
}
@allow_remote_invocation
def create_domain_link(self, in_data):
domain_to_link = in_data['downstream_domain']
try:
domain_link = link_domains(self.request.couch_user, self.domain, domain_to_link)
except (DomainDoesNotExist, DomainLinkAlreadyExists, DomainLinkNotAllowed, DomainLinkError) as e:
return {'success': False, 'message': str(e)}
track_workflow(self.request.couch_user.username, "Linked domain: domain link created")
domain_link_view_model = build_domain_link_view_model(domain_link, get_timezone_for_request())
return {'success': True, 'domain_link': domain_link_view_model}
@allow_remote_invocation
def create_remote_report_link(self, in_data):
linked_domain = in_data['linked_domain']
master_domain = in_data['master_domain'].strip('/').split('/')[-1]
report_id = in_data['report_id']
link = DomainLink.objects.filter(
remote_base_url__isnull=False,
linked_domain=linked_domain,
master_domain=master_domain,
).first()
if link:
create_linked_ucr(link, report_id)
return {'success': True}
else:
return {'success': False}
def link_domains(couch_user, upstream_domain, downstream_domain):
if not domain_exists(downstream_domain):
error = ugettext("The project space {} does not exist. Verify that the name is correct, and that the "
"domain has not been deleted.").format(downstream_domain)
raise DomainDoesNotExist(error)
if get_active_domain_link(upstream_domain, downstream_domain):
error = ugettext(
"The project space {} is already a downstream project space of {}."
).format(downstream_domain, upstream_domain)
raise DomainLinkAlreadyExists(error)
if not user_has_admin_access_in_all_domains(couch_user, [upstream_domain, downstream_domain]):
error = ugettext("You must be an admin in both project spaces to create a link.")
raise DomainLinkNotAllowed(error)
return DomainLink.link_domains(downstream_domain, upstream_domain)
class DomainLinkHistoryReport(GenericTabularReport):
name = 'Linked Project Space History'
base_template = "reports/base_template.html"
section_name = 'Project Settings'
slug = 'project_link_report'
dispatcher = ReleaseManagementReportDispatcher
ajax_pagination = True
asynchronous = False
sortable = False
@property
def fields(self):
if self.upstream_link:
fields = []
else:
fields = ['corehq.apps.linked_domain.filters.DomainLinkFilter']
fields.append('corehq.apps.linked_domain.filters.DomainLinkModelFilter')
return fields
@property
def link_model(self):
return self.request.GET.get('domain_link_model')
@property
@memoized
def domain_link(self):
if self.request.GET.get('domain_link'):
try:
return DomainLink.all_objects.get(
pk=self.request.GET.get('domain_link'),
master_domain=self.domain
)
except DomainLink.DoesNotExist:
pass
@property
@memoized
def upstream_link(self):
return get_upstream_domain_link(self.domain)
@property
@memoized
def selected_link(self):
return self.upstream_link or self.domain_link
@property
def total_records(self):
query = self._base_query()
return query.count()
def _base_query(self):
query = DomainLinkHistory.objects.filter(link=self.selected_link)
# filter out superuser data models
if not self.request.couch_user.is_superuser:
query = query.exclude(model__in=dict(SUPERUSER_DATA_MODELS).keys())
if self.link_model:
query = query.filter(model=self.link_model)
return query
@property
def shared_pagination_GET_params(self):
link_id = str(self.selected_link.pk) if self.selected_link else ''
return [
{'name': 'domain_link', 'value': link_id},
{'name': 'domain_link_model', 'value': self.link_model},
]
@property
def rows(self):
if not self.selected_link:
return []
rows = self._base_query()[self.pagination.start:self.pagination.start + self.pagination.count + 1]
return [self._make_row(record, self.selected_link) for record in rows]
def _make_row(self, record, link):
row = [
'{} -> {}'.format(link.master_domain, link.linked_domain),
server_to_user_time(record.date, self.timezone),
self._make_model_cell(record),
self._make_user_cell(record)
]
return row
def _make_user_cell(self, record):
doc_info = get_doc_info_by_id(self.domain, record.user_id)
user = WebUser.get_by_user_id(record.user_id)
if self.domain not in user.get_domains() and 'link' in doc_info:
doc_info['link'] = None
return pretty_doc_info(doc_info)
@memoized
def linked_app_names(self, domain):
return {
app._id: app.name for app in get_brief_apps_in_domain(domain)
if is_linked_app(app)
}
def _make_model_cell(self, record):
name = LINKED_MODELS_MAP[record.model]
if record.model == MODEL_APP:
detail = record.wrapped_detail
app_name = ugettext_lazy('Unknown App')
if detail:
app_names = self.linked_app_names(self.selected_link.linked_domain)
app_name = app_names.get(detail.app_id, detail.app_id)
return '{} ({})'.format(name, app_name)
if record.model == MODEL_FIXTURE:
detail = record.wrapped_detail
tag = ugettext_lazy('Unknown')
if detail:
data_type = get_fixture_data_type_by_tag(self.selected_link.linked_domain, detail.tag)
if data_type:
tag = data_type.tag
return '{} ({})'.format(name, tag)
if record.model == MODEL_REPORT:
detail = record.wrapped_detail
report_name = ugettext_lazy('Unknown Report')
if detail:
try:
report_name = ReportConfiguration.get(detail.report_id).title
except ResourceNotFound:
pass
return '{} ({})'.format(name, report_name)
if record.model == MODEL_KEYWORD:
detail = record.wrapped_detail
keyword_name = ugettext_lazy('Unknown Keyword')
if detail:
try:
keyword_name = Keyword.objects.get(id=detail.keyword_id).keyword
except Keyword.DoesNotExist:
pass
return f'{name} ({keyword_name})'
return name
@property
def headers(self):
tzname = self.timezone.localize(datetime.utcnow()).tzname()
columns = [
DataTablesColumn(ugettext('Link')),
DataTablesColumn(ugettext('Date ({})'.format(tzname))),
DataTablesColumn(ugettext('Data Model')),
DataTablesColumn(ugettext('User')),
]
return DataTablesHeader(*columns)
|
dimagi/commcare-hq
|
corehq/apps/linked_domain/views.py
|
Python
|
bsd-3-clause
| 22,021 | 0.002089 |
# Copyright (c) 2017 Microsoft Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================================================================
from __future__ import absolute_import
from os import path
class Visualizable(object):
def __init__(self, visualizer=None):
if visualizer is not None:
assert isinstance(visualizer, BaseVisualizer), "visualizer should derive from BaseVisualizer"
self._visualizer = visualizer
def visualize(self, index, tag, value, **kwargs):
if self._visualizer is not None:
self._visualizer << (index, tag, value, kwargs)
@property
def can_visualize(self):
return self._visualizer is not None
class BaseVisualizer(object):
""" Provide a unified interface for observing the training progress """
def add_entry(self, index, key, result, **kwargs):
raise NotImplementedError()
def __lshift__(self, other):
if isinstance(other, tuple):
if len(other) >= 3:
self.add_entry(other[0], str(other[1]), other[2])
else:
raise ValueError("Provided tuple should be of the form (key, value)")
else:
raise ValueError("Trying to use stream operator without a tuple (key, value)")
class EmptyVisualizer(BaseVisualizer):
""" A boilerplate visualizer that does nothing """
def add_entry(self, index, key, result, **kwargs):
pass
class ConsoleVisualizer(BaseVisualizer):
""" Print visualization to stdout as:
key -> value
"""
CONSOLE_DEFAULT_FORMAT = "[%s] %d : %s -> %.3f"
def __init__(self, format=None, prefix=None):
self._format = format or ConsoleVisualizer.CONSOLE_DEFAULT_FORMAT
self._prefix = prefix or '-'
def add_entry(self, index, key, result, **kwargs):
print(self._format % (self._prefix, index, key, result))
class CsvVisualizer(BaseVisualizer):
""" Write data to file. The following formats are supported: CSV, JSON, Excel. """
def __init__(self, output_file, override=False):
if path.exists(output_file) and not override:
raise Exception('%s already exists and override is False' % output_file)
super(CsvVisualizer, self).__init__()
self._file = output_file
self._data = {}
def add_entry(self, index, key, result, **kwargs):
if key in self._data[index]:
print('Warning: Found previous value for %s in visualizer' % key)
self._data[index].update({key: result})
def close(self, format='csv'):
import pandas as pd
if format == 'csv':
pd.DataFrame.from_dict(self._data, orient='index').to_csv(self._file)
elif format == 'json':
pd.DataFrame.from_dict(self._data, orient='index').to_json(self._file)
else:
writer = pd.ExcelWriter(self._file)
pd.DataFrame.from_dict(self._data, orient='index').to_excel(writer)
writer.save()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
return self
|
village-people/flying-pig
|
malmopy/visualization/visualizer.py
|
Python
|
mit
| 4,199 | 0.00381 |
# Copyright 2020, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Differentially private optimizers for TensorFlow."""
from absl import logging
import tensorflow as tf
from tensorflow_privacy.privacy.dp_query import gaussian_query
def make_optimizer_class(cls):
"""Given a subclass of `tf.compat.v1.train.Optimizer`, returns a DP-SGD subclass of it.
Args:
cls: Class from which to derive a DP subclass. Should be a subclass of
`tf.compat.v1.train.Optimizer`.
Returns:
A DP-SGD subclass of `cls`.
"""
parent_code = tf.compat.v1.train.Optimizer.compute_gradients.__code__
has_compute_gradients = hasattr(cls, 'compute_gradients')
if has_compute_gradients:
child_code = cls.compute_gradients.__code__
GATE_OP = tf.compat.v1.train.Optimizer.GATE_OP # pylint: disable=invalid-name
if has_compute_gradients and child_code is not parent_code:
logging.warning(
'WARNING: Calling make_optimizer_class() on class %s that overrides '
'method compute_gradients(). Check to ensure that '
'make_optimizer_class() does not interfere with overridden version.',
cls.__name__)
class DPOptimizerClass(cls): # pylint: disable=empty-docstring
__doc__ = ("""Differentially private subclass of `{base_class}`.
You can use this as a differentially private replacement for
`{base_class}`. Note that you must ensure
that any loss processed by this optimizer comes in vector
form.
This is the fully general form of the optimizer that allows you
to define your own privacy mechanism. If you are planning to use
the standard Gaussian mechanism, it is simpler to use the more
specific `{gaussian_class}` class instead.
When instantiating this optimizer, you need to supply several
DP-related arguments followed by the standard arguments for
`{short_base_class}`.
Examples:
```python
# Create GaussianSumQuery.
dp_sum_query = gaussian_query.GaussianSumQuery(l2_norm_clip=1.0, stddev=0.5)
# Create optimizer.
opt = {dp_class}(dp_sum_query, 1, False, <standard arguments>)
```
When using the optimizer, be sure to pass in the loss as a
rank-one tensor with one entry for each example.
```python
# Compute loss as a tensor. Do not call tf.reduce_mean as you
# would with a standard optimizer.
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
train_op = opt.minimize(loss, global_step=global_step)
```
""").format(
base_class='tf.compat.v1.train.' + cls.__name__,
gaussian_class='DP' +
cls.__name__.replace('Optimizer', 'GaussianOptimizer'),
short_base_class=cls.__name__,
dp_class='DP' + cls.__name__)
def __init__(
self,
dp_sum_query,
num_microbatches=None,
unroll_microbatches=False,
while_loop_parallel_iterations=10,
*args, # pylint: disable=keyword-arg-before-vararg, g-doc-args
**kwargs):
"""Initializes the DPOptimizerClass.
Args:
dp_sum_query: `DPQuery` object, specifying differential privacy
mechanism to use.
num_microbatches: Number of microbatches into which each minibatch is
split. If `None`, will default to the size of the minibatch, and
per-example gradients will be computed.
unroll_microbatches: If true, processes microbatches within a Python
loop instead of a `tf.while_loop`. Can be used if using a
`tf.while_loop` raises an exception.
while_loop_parallel_iterations: The number of iterations allowed to run
in parallel. It must be a positive integer. Applicable only when
unroll_microbatches is set to False. It gives users some control over
memory consumption.
*args: These will be passed on to the base class `__init__` method.
**kwargs: These will be passed on to the base class `__init__` method.
"""
super().__init__(*args, **kwargs)
self._dp_sum_query = dp_sum_query
self._num_microbatches = num_microbatches
self._global_state = None
# TODO(b/122613513): Set unroll_microbatches=True to avoid this bug.
# Beware: When num_microbatches is large (>100), enabling this parameter
# may cause an OOM error.
self._unroll_microbatches = unroll_microbatches
self._while_loop_parallel_iterations = while_loop_parallel_iterations
self._was_compute_gradients_called = False
def compute_gradients(self,
loss,
var_list,
gate_gradients=GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
grad_loss=None,
gradient_tape=None):
"""DP-SGD version of base class method."""
self._was_compute_gradients_called = True
if self._global_state is None:
self._global_state = self._dp_sum_query.initial_global_state()
if callable(loss):
# TF is running in Eager mode, check we received a vanilla tape.
if not gradient_tape:
raise ValueError('When in Eager mode, a tape needs to be passed.')
vector_loss = loss()
if self._num_microbatches is None:
self._num_microbatches = tf.shape(input=vector_loss)[0]
sample_state = self._dp_sum_query.initial_sample_state(var_list)
microbatches_losses = tf.reshape(vector_loss,
[self._num_microbatches, -1])
sample_params = (
self._dp_sum_query.derive_sample_params(self._global_state))
def process_microbatch(i, sample_state):
"""Process one microbatch (record) with privacy helper."""
microbatch_loss = tf.reduce_mean(
input_tensor=tf.gather(microbatches_losses, [i]))
with gradient_tape.stop_recording():
grads = gradient_tape.gradient(microbatch_loss, var_list)
sample_state = self._dp_sum_query.accumulate_record(
sample_params, sample_state, grads)
return sample_state
for idx in range(self._num_microbatches):
sample_state = process_microbatch(idx, sample_state)
grad_sums, self._global_state, _ = (
self._dp_sum_query.get_noised_result(sample_state,
self._global_state))
def normalize(v):
return v / tf.cast(self._num_microbatches, tf.float32)
final_grads = tf.nest.map_structure(normalize, grad_sums)
grads_and_vars = list(zip(final_grads, var_list))
return grads_and_vars
else:
# Note: it would be closer to the correct i.i.d. sampling of records if
# we sampled each microbatch from the appropriate binomial distribution,
# although that still wouldn't be quite correct because it would be
# sampling from the dataset without replacement.
if self._num_microbatches is None:
self._num_microbatches = tf.shape(input=loss)[0]
microbatches_losses = tf.reshape(loss, [self._num_microbatches, -1])
sample_params = (
self._dp_sum_query.derive_sample_params(self._global_state))
def process_microbatch(i, sample_state):
"""Process one microbatch (record) with privacy helper."""
self_super = super(DPOptimizerClass, self)
mean_loss = tf.reduce_mean(
input_tensor=tf.gather(microbatches_losses, [i]))
if hasattr(self_super, 'compute_gradients'):
# This case covers optimizers in tf.train.
compute_gradients_fn = self_super.compute_gradients
else:
# This case covers Keras optimizers from optimizers_v2.
compute_gradients_fn = self_super._compute_gradients # pylint: disable=protected-access
if gradient_tape:
# This is intended to work for TF2 and may not work for TF1.
with gradient_tape.stop_recording():
grads_list = list(gradient_tape.gradient(mean_loss, var_list))
else:
grads, _ = zip(*compute_gradients_fn(
mean_loss, var_list, gate_gradients, aggregation_method,
colocate_gradients_with_ops, grad_loss))
grads_list = list(grads)
sample_state = self._dp_sum_query.accumulate_record(
sample_params, sample_state, grads_list)
return sample_state
if var_list is None:
var_list = (
tf.compat.v1.trainable_variables() + tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
sample_state = self._dp_sum_query.initial_sample_state(var_list)
if self._unroll_microbatches:
for idx in range(self._num_microbatches):
sample_state = process_microbatch(idx, sample_state)
else:
# Use of while_loop here requires that sample_state be a nested
# structure of tensors. In general, we would prefer to allow it to be
# an arbitrary opaque type.
cond_fn = lambda i, _: tf.less(i, self._num_microbatches)
body_fn = lambda i, state: [tf.add(i, 1), process_microbatch(i, state)] # pylint: disable=line-too-long
idx = tf.constant(0)
_, sample_state = tf.while_loop(
cond=cond_fn,
body=body_fn,
loop_vars=[idx, sample_state],
parallel_iterations=self._while_loop_parallel_iterations)
grad_sums, self._global_state, _ = (
self._dp_sum_query.get_noised_result(sample_state,
self._global_state))
def normalize(v):
try:
return tf.truediv(v, tf.cast(self._num_microbatches, tf.float32))
except TypeError:
return None
final_grads = tf.nest.map_structure(normalize, grad_sums)
return list(zip(final_grads, var_list))
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
# pylint: disable=g-doc-args, g-doc-return-or-yield
"""DP-SGD version of base class method."""
assert self._was_compute_gradients_called, (
'compute_gradients() on the differentially private optimizer was not'
' called. Which means that the training is not differentially '
'private. It happens for example in Keras training in TensorFlow '
'2.0+.')
return super(DPOptimizerClass, self).apply_gradients(
grads_and_vars=grads_and_vars, global_step=global_step, name=name)
return DPOptimizerClass
def make_gaussian_optimizer_class(cls):
"""Given a subclass of `tf.compat.v1.train.Optimizer`, returns a subclass using DP-SGD with Gaussian averaging.
Args:
cls: Class from which to derive a DP subclass. Should be a subclass of
`tf.compat.v1.train.Optimizer`.
Returns:
A subclass of `cls` using DP-SGD with Gaussian averaging.
"""
class DPGaussianOptimizerClass(make_optimizer_class(cls)): # pylint: disable=empty-docstring
__doc__ = ("""DP subclass of `{}`.
You can use this as a differentially private replacement for
`tf.compat.v1.train.{}`. This optimizer implements DP-SGD using
the standard Gaussian mechanism.
When instantiating this optimizer, you need to supply several
DP-related arguments followed by the standard arguments for
`{}`.
Examples:
```python
# Create optimizer.
opt = {}(l2_norm_clip=1.0, noise_multiplier=0.5, num_microbatches=1,
<standard arguments>)
```
When using the optimizer, be sure to pass in the loss as a
rank-one tensor with one entry for each example.
```python
# Compute loss as a tensor. Do not call tf.reduce_mean as you
# would with a standard optimizer.
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
train_op = opt.minimize(loss, global_step=global_step)
```
""").format(
'tf.compat.v1.train.' + cls.__name__, cls.__name__, cls.__name__,
'DP' + cls.__name__.replace('Optimizer', 'GaussianOptimizer'))
def __init__(
self,
l2_norm_clip,
noise_multiplier,
num_microbatches=None,
unroll_microbatches=False,
*args, # pylint: disable=keyword-arg-before-vararg
**kwargs):
"""Initializes the `DPGaussianOptimizerClass`.
Args:
l2_norm_clip: Clipping norm (max L2 norm of per microbatch gradients).
noise_multiplier: Ratio of the standard deviation to the clipping norm.
num_microbatches: Number of microbatches into which each minibatch is
split. If `None`, will default to the size of the minibatch, and
per-example gradients will be computed.
unroll_microbatches: If true, processes microbatches within a Python
loop instead of a `tf.while_loop`. Can be used if using a
`tf.while_loop` raises an exception.
*args: These will be passed on to the base class `__init__` method.
**kwargs: These will be passed on to the base class `__init__` method.
"""
self._l2_norm_clip = l2_norm_clip
self._noise_multiplier = noise_multiplier
self._num_microbatches = num_microbatches
self._base_optimizer_class = cls
dp_sum_query = gaussian_query.GaussianSumQuery(
l2_norm_clip, l2_norm_clip * noise_multiplier)
super(DPGaussianOptimizerClass,
self).__init__(dp_sum_query, num_microbatches, unroll_microbatches,
*args, **kwargs)
def get_config(self):
"""Creates configuration for Keras serialization.
This method will be called when Keras creates model checkpoints
and is necessary so that deserialization can be performed.
Returns:
A dict object storing arguments to be passed to the __init__ method
upon deserialization.
"""
config = self._base_optimizer_class.get_config(self)
config.update({
'l2_norm_clip': self._l2_norm_clip,
'noise_multiplier': self._noise_multiplier,
'num_microbatches': self._num_microbatches
})
return config
return DPGaussianOptimizerClass
AdagradOptimizer = tf.compat.v1.train.AdagradOptimizer
AdamOptimizer = tf.compat.v1.train.AdamOptimizer
GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
RMSPropOptimizer = tf.compat.v1.train.RMSPropOptimizer
DPAdagradOptimizer = make_optimizer_class(AdagradOptimizer)
DPAdamOptimizer = make_optimizer_class(AdamOptimizer)
DPGradientDescentOptimizer = make_optimizer_class(GradientDescentOptimizer)
DPRMSPropOptimizer = make_optimizer_class(RMSPropOptimizer)
DPAdagradGaussianOptimizer = make_gaussian_optimizer_class(AdagradOptimizer)
DPAdamGaussianOptimizer = make_gaussian_optimizer_class(AdamOptimizer)
DPGradientDescentGaussianOptimizer = make_gaussian_optimizer_class(
GradientDescentOptimizer)
DPRMSPropGaussianOptimizer = make_gaussian_optimizer_class(RMSPropOptimizer)
|
tensorflow/privacy
|
tensorflow_privacy/privacy/optimizers/dp_optimizer.py
|
Python
|
apache-2.0
| 15,920 | 0.005339 |
import wx
from gui.controller.CustomListCtrl import CustomListCtrl
from gui.controller.PlotCtrl import PlotCtrl
class WofSitesView(wx.Frame):
def __init__(self, parent, title, table_columns):
wx.Frame.__init__(self, parent=parent, id=-1, title=title, pos=wx.DefaultPosition, size=(680, 700),
style=wx.FRAME_FLOAT_ON_PARENT | wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER ^ wx.MAXIMIZE_BOX)
self.start_date = wx.DateTime_Now() - 7 * wx.DateSpan_Day()
self.end_date = wx.DateTime_Now()
self.parent = parent
self._data = None
panel = wx.Panel(self)
top_panel = wx.Panel(panel)
middle_panel = wx.Panel(panel, size=(-1, 30))
lower_panel = wx.Panel(panel)
hboxTopPanel = wx.BoxSizer(wx.HORIZONTAL)
self.plot = PlotCtrl(top_panel)
hboxTopPanel.Add(self.plot.canvas, 1, wx.EXPAND | wx.ALL, 2)
top_panel.SetSizer(hboxTopPanel)
hboxMidPanel = wx.BoxSizer(wx.HORIZONTAL)
self.startDateText = wx.StaticText(middle_panel, id=wx.ID_ANY, label="Start")
self.startDatePicker = wx.DatePickerCtrl(middle_panel, id=wx.ID_ANY, dt=self.start_date)
self.endDateText = wx.StaticText(middle_panel, id=wx.ID_ANY, label="End")
self.endDatePicker = wx.DatePickerCtrl(middle_panel, id=wx.ID_ANY, dt=self.end_date)
self.exportBtn = wx.Button(middle_panel, id=wx.ID_ANY, label="Export")
self.addToCanvasBtn = wx.Button(middle_panel, id=wx.ID_ANY, label="Add to Canvas")
self.PlotBtn = wx.Button(middle_panel, id=wx.ID_ANY, label="Preview")
self.line_style_combo = wx.ComboBox(middle_panel, value="Line style")
self.line_style_options = ["Line", "Scatter"]
self.line_style_combo.AppendItems(self.line_style_options)
hboxMidPanel.Add(self.startDateText, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL)
hboxMidPanel.Add(self.startDatePicker, 1, wx.EXPAND | wx.ALL, 2)
hboxMidPanel.Add(self.endDateText, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL)
hboxMidPanel.Add(self.endDatePicker, 1, wx.EXPAND | wx.ALL, 2)
hboxMidPanel.Add(self.PlotBtn, 1, wx.EXPAND | wx.ALL, 2)
hboxMidPanel.Add(self.exportBtn, 1, wx.EXPAND | wx.ALL, 2)
hboxMidPanel.Add(self.addToCanvasBtn, 1, wx.EXPAND | wx.ALL, 2)
hboxMidPanel.Add(self.line_style_combo, 1, wx.EXPAND | wx.ALL, 2)
middle_panel.SetSizer(hboxMidPanel)
hboxLowPanel = wx.BoxSizer(wx.HORIZONTAL)
# Build time series table
self.variableList = CustomListCtrl(lower_panel)
self.variableList.set_columns(table_columns)
hboxLowPanel.Add(self.variableList, 1, wx.EXPAND | wx.ALL, 2)
lower_panel.SetSizer(hboxLowPanel)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(top_panel, 1, wx.EXPAND | wx.ALL, 2)
vbox.Add(middle_panel, 0, wx.EXPAND | wx.ALL, 2)
vbox.Add(lower_panel, 1, wx.EXPAND | wx.ALL, 2)
panel.SetSizer(vbox)
self.status_bar = self.CreateStatusBar()
self.status_bar.SetStatusText("Ready")
self.Show()
|
Castronova/EMIT
|
gui/views/WofSitesView.py
|
Python
|
gpl-2.0
| 3,111 | 0.002893 |
#-*- coding: utf-8 -*-
from openerp.osv import fields, osv
class finance_contract_rachat(osv.osv_memory):
_name = "finance.contract.rachat"
_columns = {
'date': fields.date('Date de rachat'),
'date_dem': fields.date('Date de la demande'),
'motif': fields.text('Motif'),
'memo': fields.text('Memo'),
'act_rachat': fields.boolean('Rachat'),
'act_res': fields.boolean('Resiliation'),
'contract_id': fields.many2one('finance.contract', 'Contrat')
}
def set_rachat(self, cr, uid, ids, context=None):
obj = self.browse(cr, uid, ids[0], context=context)
vals = {
'res_reason': obj.motif and obj.motif or False,
'res_memo': obj.memo and obj.memo or False,
'res_date': obj.date and obj.date or False,
'res_dated': obj.date_dem and obj.date_dem or False,
'res': obj.act_res,
'rachat': obj.act_rachat
}
return self.pool.get('finance.contract').save_rachat(cr, uid, obj.contract_id.id, vals)
|
ATSTI/administra
|
open_corretora/brokerage/wizard/end_contract.py
|
Python
|
gpl-2.0
| 1,064 | 0.005639 |
# flake8: noqa
# -*- coding: utf-8 -*-
###############################################
# Geosite local settings
###############################################
import os
# Outside URL
SITEURL = 'http://$DOMAIN'
OGC_SERVER['default']['LOCATION'] = os.path.join(GEOSERVER_URL, 'geoserver/')
OGC_SERVER['default']['PUBLIC_LOCATION'] = os.path.join(SITEURL, 'geoserver/')
# databases unique to site if not defined in site settings
"""
SITE_DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_ROOT, '../development.db'),
},
}
"""
|
simonemurzilli/geonode
|
geonode/contrib/geosites/site_template/local_settings_template.py
|
Python
|
gpl-3.0
| 597 | 0 |
from core.serializers import ProjectSerializer
from rest_framework import generics
from core.models import Project
class ProjectList(generics.ListCreateAPIView):
queryset = Project.objects.all()
serializer_class = ProjectSerializer
class ProjectDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Project.objects.all()
serializer_class = ProjectSerializer
|
wathsalav/xos
|
xos/core/views/projects.py
|
Python
|
apache-2.0
| 382 | 0.007853 |
# Copyright 2014-2015 Facundo Batista, Nicolás Demarchi
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check https://github.com/PyAr/fades
"""A collection of utilities for fades."""
import os
import sys
import json
import logging
import subprocess
from urllib import request
from urllib.error import HTTPError
import pkg_resources
logger = logging.getLogger(__name__)
SHOW_VERSION_CMD = """
import sys, json
d = dict(path=sys.executable)
d.update(zip('major minor micro releaselevel serial'.split(), sys.version_info))
print(json.dumps(d))
"""
BASE_PYPI_URL = 'https://pypi.python.org/pypi/{name}/json'
STDOUT_LOG_PREFIX = ":: "
class ExecutionError(Exception):
"""Execution of subprocess ended not in 0."""
def __init__(self, retcode, cmd, collected_stdout):
"""Init."""
self._retcode = retcode
self._cmd = cmd
self._collected_stdout = collected_stdout
super().__init__()
def dump_to_log(self, logger):
"""Send the cmd info and collected stdout to logger."""
logger.error("Execution ended in %s for cmd %s", self._retcode, self._cmd)
for line in self._collected_stdout:
logger.error(STDOUT_LOG_PREFIX + line)
def logged_exec(cmd):
"""Execute a command, redirecting the output to the log."""
logger = logging.getLogger('fades.exec')
logger.debug("Executing external command: %r", cmd)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout = []
for line in p.stdout:
line = line[:-1].decode("utf8")
stdout.append(line)
logger.debug(STDOUT_LOG_PREFIX + line)
retcode = p.wait()
if retcode:
raise ExecutionError(retcode, cmd, stdout)
return stdout
def get_basedir():
"""Get the base fades directory, from xdg or kinda hardcoded."""
try:
from xdg import BaseDirectory # NOQA
return os.path.join(BaseDirectory.xdg_data_home, 'fades')
except ImportError:
logger.debug("Package xdg not installed; using ~/.fades folder")
from os.path import expanduser
return expanduser("~/.fades")
def get_confdir():
"""Get the config fades directory, from xdg or kinda hardcoded."""
try:
from xdg import BaseDirectory # NOQA
return os.path.join(BaseDirectory.xdg_config_home, 'fades')
except ImportError:
logger.debug("Package xdg not installed; using ~/.fades folder")
from os.path import expanduser
return expanduser("~/.fades")
def _get_interpreter_info(interpreter=None):
"""Return the interpreter's full path using pythonX.Y format."""
if interpreter is None:
# If interpreter is None by default returns the current interpreter data.
major, minor = sys.version_info[:2]
executable = sys.executable
else:
args = [interpreter, '-c', SHOW_VERSION_CMD]
try:
requested_interpreter_info = logged_exec(args)
except Exception as error:
logger.error("Error getting requested interpreter version: %s", error)
exit()
requested_interpreter_info = json.loads(requested_interpreter_info[0])
executable = requested_interpreter_info['path']
major = requested_interpreter_info['major']
minor = requested_interpreter_info['minor']
if executable[-1].isdigit():
executable = executable.split(".")[0][:-1]
interpreter = "{}{}.{}".format(executable, major, minor)
return interpreter
def get_interpreter_version(requested_interpreter):
"""Return a 'sanitized' interpreter and indicates if it is the current one."""
logger.debug('Getting interpreter version for: %s', requested_interpreter)
current_interpreter = _get_interpreter_info()
logger.debug('Current interpreter is %s', current_interpreter)
if requested_interpreter is None:
return(current_interpreter, True)
else:
requested_interpreter = _get_interpreter_info(requested_interpreter)
is_current = requested_interpreter == current_interpreter
logger.debug('Interpreter=%s. It is the same as fades?=%s',
requested_interpreter, is_current)
return (requested_interpreter, is_current)
def get_latest_version_number(project_name):
"""Return latest version of a package."""
try:
raw = request.urlopen(BASE_PYPI_URL.format(name=project_name)).read()
except HTTPError as error:
logger.warning("Network error. Error: %s", error)
raise error
try:
data = json.loads(raw.decode("utf8"))
latest_version = data["info"]["version"]
return latest_version
except (KeyError, ValueError) as error: # malformed json or empty string
logger.error("Could not get the version of the package. Error: %s", error)
raise error
def check_pypi_updates(dependencies):
"""Return a list of dependencies to upgrade."""
dependencies_up_to_date = []
for dependency in dependencies.get('pypi', []):
# get latest version from PyPI api
try:
latest_version = get_latest_version_number(dependency.project_name)
except Exception as error:
logger.warning("--check-updates command will be aborted. Error: %s", error)
return dependencies
# get required version
required_version = None
if dependency.specs:
_, required_version = dependency.specs[0]
if required_version:
dependencies_up_to_date.append(dependency)
if latest_version > required_version:
logger.info("There is a new version of %s: %s",
dependency.project_name, latest_version)
elif latest_version < required_version:
logger.warning("The requested version for %s is greater "
"than latest found in PyPI: %s",
dependency.project_name, latest_version)
else:
logger.info("The requested version for %s is the latest one in PyPI: %s",
dependency.project_name, latest_version)
else:
project_name_plus = "{}=={}".format(dependency.project_name, latest_version)
dependencies_up_to_date.append(pkg_resources.Requirement.parse(project_name_plus))
logger.info("There is a new version of %s: %s and will use it.",
dependency.project_name, latest_version)
dependencies["pypi"] = dependencies_up_to_date
return dependencies
|
arielrossanigo/fades
|
fades/helpers.py
|
Python
|
gpl-3.0
| 7,161 | 0.001397 |
#!/usr/bin/env python2
'''
EC2 external inventory script
=================================
Generates inventory that Ansible can understand by making API request to
AWS EC2 using the Boto library.
NOTE: This script assumes Ansible is being executed where the environment
variables needed for Boto have already been set:
export AWS_ACCESS_KEY_ID='AK123'
export AWS_SECRET_ACCESS_KEY='abc123'
This script also assumes there is an ec2.ini file alongside it. To specify a
different path to ec2.ini, define the EC2_INI_PATH environment variable:
export EC2_INI_PATH=/path/to/my_ec2.ini
If you're using eucalyptus you need to set the above variables and
you need to define:
export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
If you're using boto profiles (requires boto>=2.24.0) you can choose a profile
using the --boto-profile command line argument (e.g. ec2.py --boto-profile prod) or using
the AWS_PROFILE variable:
AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml
For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
When run against a specific host, this script returns the following variables:
- ec2_ami_launch_index
- ec2_architecture
- ec2_association
- ec2_attachTime
- ec2_attachment
- ec2_attachmentId
- ec2_client_token
- ec2_deleteOnTermination
- ec2_description
- ec2_deviceIndex
- ec2_dns_name
- ec2_eventsSet
- ec2_group_name
- ec2_hypervisor
- ec2_id
- ec2_image_id
- ec2_instanceState
- ec2_instance_type
- ec2_ipOwnerId
- ec2_ip_address
- ec2_item
- ec2_kernel
- ec2_key_name
- ec2_launch_time
- ec2_monitored
- ec2_monitoring
- ec2_networkInterfaceId
- ec2_ownerId
- ec2_persistent
- ec2_placement
- ec2_platform
- ec2_previous_state
- ec2_private_dns_name
- ec2_private_ip_address
- ec2_publicIp
- ec2_public_dns_name
- ec2_ramdisk
- ec2_reason
- ec2_region
- ec2_requester_id
- ec2_root_device_name
- ec2_root_device_type
- ec2_security_group_ids
- ec2_security_group_names
- ec2_shutdown_state
- ec2_sourceDestCheck
- ec2_spot_instance_request_id
- ec2_state
- ec2_state_code
- ec2_state_reason
- ec2_status
- ec2_subnet_id
- ec2_tenancy
- ec2_virtualization_type
- ec2_vpc_id
These variables are pulled out of a boto.ec2.instance object. There is a lack of
consistency with variable spellings (camelCase and underscores) since this
just loops through all variables the object exposes. It is preferred to use the
ones with underscores when multiple exist.
In addition, if an instance has AWS Tags associated with it, each tag is a new
variable named:
- ec2_tag_[Key] = [Value]
Security groups are comma-separated in 'ec2_security_group_ids' and
'ec2_security_group_names'.
'''
# (c) 2012, Peter Sankauskas
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import sys
import os
import argparse
import re
from time import time
import boto
from boto import ec2
from boto import rds
from boto import elasticache
from boto import route53
import six
from six.moves import configparser
from collections import defaultdict
try:
import json
except ImportError:
import simplejson as json
class Ec2Inventory(object):
def _empty_inventory(self):
return {"_meta" : {"hostvars" : {}}}
def __init__(self):
''' Main execution path '''
# Inventory grouped by instance IDs, tags, security groups, regions,
# and availability zones
self.inventory = self._empty_inventory()
# Index of hostname (address) to instance ID
self.index = {}
# Boto profile to use (if any)
self.boto_profile = None
# Read settings and parse CLI arguments
self.parse_cli_args()
self.read_settings()
# Make sure that profile_name is not passed at all if not set
# as pre 2.24 boto will fall over otherwise
if self.boto_profile:
if not hasattr(boto.ec2.EC2Connection, 'profile_name'):
self.fail_with_error("boto version must be >= 2.24 to use profile")
# Cache
if self.args.refresh_cache:
self.do_api_calls_update_cache()
elif not self.is_cache_valid():
self.do_api_calls_update_cache()
# Data to print
if self.args.host:
data_to_print = self.get_host_info()
elif self.args.list:
# Display list of instances for inventory
if self.inventory == self._empty_inventory():
data_to_print = self.get_inventory_from_cache()
else:
data_to_print = self.json_format_dict(self.inventory, True)
print(data_to_print)
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
def read_settings(self):
''' Reads the settings from the ec2.ini file '''
if six.PY3:
config = configparser.ConfigParser()
else:
config = configparser.SafeConfigParser()
ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')
ec2_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('EC2_INI_PATH', ec2_default_ini_path)))
config.read(ec2_ini_path)
# is eucalyptus?
self.eucalyptus_host = None
self.eucalyptus = False
if config.has_option('ec2', 'eucalyptus'):
self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):
self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
# Regions
self.regions = []
configRegions = config.get('ec2', 'regions')
configRegions_exclude = config.get('ec2', 'regions_exclude')
if (configRegions == 'all'):
if self.eucalyptus_host:
self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)
else:
for regionInfo in ec2.regions():
if regionInfo.name not in configRegions_exclude:
self.regions.append(regionInfo.name)
else:
self.regions = configRegions.split(",")
# Destination addresses
self.destination_variable = config.get('ec2', 'destination_variable')
self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
if config.has_option('ec2', 'destination_format') and \
config.has_option('ec2', 'destination_format_tags'):
self.destination_format = config.get('ec2', 'destination_format')
self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',')
else:
self.destination_format = None
self.destination_format_tags = None
# Route53
self.route53_enabled = config.getboolean('ec2', 'route53')
self.route53_excluded_zones = []
if config.has_option('ec2', 'route53_excluded_zones'):
self.route53_excluded_zones.extend(
config.get('ec2', 'route53_excluded_zones', '').split(','))
# Include RDS instances?
self.rds_enabled = True
if config.has_option('ec2', 'rds'):
self.rds_enabled = config.getboolean('ec2', 'rds')
# Include ElastiCache instances?
self.elasticache_enabled = True
if config.has_option('ec2', 'elasticache'):
self.elasticache_enabled = config.getboolean('ec2', 'elasticache')
# Return all EC2 instances?
if config.has_option('ec2', 'all_instances'):
self.all_instances = config.getboolean('ec2', 'all_instances')
else:
self.all_instances = False
# Instance states to be gathered in inventory. Default is 'running'.
# Setting 'all_instances' to 'yes' overrides this option.
ec2_valid_instance_states = [
'pending',
'running',
'shutting-down',
'terminated',
'stopping',
'stopped'
]
self.ec2_instance_states = []
if self.all_instances:
self.ec2_instance_states = ec2_valid_instance_states
elif config.has_option('ec2', 'instance_states'):
for instance_state in config.get('ec2', 'instance_states').split(','):
instance_state = instance_state.strip()
if instance_state not in ec2_valid_instance_states:
continue
self.ec2_instance_states.append(instance_state)
else:
self.ec2_instance_states = ['running']
# Return all RDS instances? (if RDS is enabled)
if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled:
self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
else:
self.all_rds_instances = False
# Return all ElastiCache replication groups? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled:
self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups')
else:
self.all_elasticache_replication_groups = False
# Return all ElastiCache clusters? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled:
self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters')
else:
self.all_elasticache_clusters = False
# Return all ElastiCache nodes? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled:
self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes')
else:
self.all_elasticache_nodes = False
# boto configuration profile (prefer CLI argument)
self.boto_profile = self.args.boto_profile
if config.has_option('ec2', 'boto_profile') and not self.boto_profile:
self.boto_profile = config.get('ec2', 'boto_profile')
# Cache related
cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
if self.boto_profile:
cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
self.cache_path_cache = cache_dir + "/ansible-ec2.cache"
self.cache_path_index = cache_dir + "/ansible-ec2.index"
self.cache_max_age = config.getint('ec2', 'cache_max_age')
# Configure nested groups instead of flat namespace.
if config.has_option('ec2', 'nested_groups'):
self.nested_groups = config.getboolean('ec2', 'nested_groups')
else:
self.nested_groups = False
# Replace dash or not in group names
if config.has_option('ec2', 'replace_dash_in_groups'):
self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups')
else:
self.replace_dash_in_groups = True
# Configure which groups should be created.
group_by_options = [
'group_by_instance_id',
'group_by_region',
'group_by_availability_zone',
'group_by_ami_id',
'group_by_instance_type',
'group_by_key_pair',
'group_by_vpc_id',
'group_by_security_group',
'group_by_tag_keys',
'group_by_tag_none',
'group_by_route53_names',
'group_by_rds_engine',
'group_by_rds_parameter_group',
'group_by_elasticache_engine',
'group_by_elasticache_cluster',
'group_by_elasticache_parameter_group',
'group_by_elasticache_replication_group',
]
for option in group_by_options:
if config.has_option('ec2', option):
setattr(self, option, config.getboolean('ec2', option))
else:
setattr(self, option, True)
# Do we need to just include hosts that match a pattern?
try:
pattern_include = config.get('ec2', 'pattern_include')
if pattern_include and len(pattern_include) > 0:
self.pattern_include = re.compile(pattern_include)
else:
self.pattern_include = None
except configparser.NoOptionError:
self.pattern_include = None
# Do we need to exclude hosts that match a pattern?
try:
pattern_exclude = config.get('ec2', 'pattern_exclude');
if pattern_exclude and len(pattern_exclude) > 0:
self.pattern_exclude = re.compile(pattern_exclude)
else:
self.pattern_exclude = None
except configparser.NoOptionError:
self.pattern_exclude = None
# Instance filters (see boto and EC2 API docs). Ignore invalid filters.
self.ec2_instance_filters = defaultdict(list)
if config.has_option('ec2', 'instance_filters'):
for instance_filter in config.get('ec2', 'instance_filters', '').split(','):
instance_filter = instance_filter.strip()
if not instance_filter or '=' not in instance_filter:
continue
filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)]
if not filter_key:
continue
self.ec2_instance_filters[filter_key].append(filter_value)
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
parser.add_argument('--boto-profile', action='store',
help='Use boto profile for connections to EC2')
self.args = parser.parse_args()
def do_api_calls_update_cache(self):
''' Do API calls to each region, and save data in cache files '''
if self.route53_enabled:
self.get_route53_records()
for region in self.regions:
self.get_instances_by_region(region)
if self.rds_enabled:
self.get_rds_instances_by_region(region)
if self.elasticache_enabled:
self.get_elasticache_clusters_by_region(region)
self.get_elasticache_replication_groups_by_region(region)
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
def connect(self, region):
''' create connection to api server'''
if self.eucalyptus:
conn = boto.connect_euca(host=self.eucalyptus_host)
conn.APIVersion = '2010-08-31'
else:
conn = self.connect_to_aws(ec2, region)
return conn
def boto_fix_security_token_in_profile(self, connect_args):
''' monkey patch for boto issue boto/boto#2100 '''
profile = 'profile ' + self.boto_profile
if boto.config.has_option(profile, 'aws_security_token'):
connect_args['security_token'] = boto.config.get(profile, 'aws_security_token')
return connect_args
def connect_to_aws(self, module, region):
connect_args = {}
# only pass the profile name if it's set (as it is not supported by older boto versions)
if self.boto_profile:
connect_args['profile_name'] = self.boto_profile
self.boto_fix_security_token_in_profile(connect_args)
conn = module.connect_to_region(region, **connect_args)
# connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
if conn is None:
self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
return conn
def get_instances_by_region(self, region):
''' Makes an AWS EC2 API call to the list of instances in a particular
region '''
try:
conn = self.connect(region)
reservations = []
if self.ec2_instance_filters:
for filter_key, filter_values in self.ec2_instance_filters.items():
reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values }))
else:
reservations = conn.get_all_instances()
for reservation in reservations:
for instance in reservation.instances:
self.add_instance(instance, region)
except boto.exception.BotoServerError as e:
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
else:
backend = 'Eucalyptus' if self.eucalyptus else 'AWS'
error = "Error connecting to %s backend.\n%s" % (backend, e.message)
self.fail_with_error(error, 'getting EC2 instances')
def get_rds_instances_by_region(self, region):
''' Makes an AWS API call to the list of RDS instances in a particular
region '''
try:
conn = self.connect_to_aws(rds, region)
if conn:
instances = conn.get_all_dbinstances()
for instance in instances:
self.add_rds_instance(instance, region)
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if not e.reason == "Forbidden":
error = "Looks like AWS RDS is down:\n%s" % e.message
self.fail_with_error(error, 'getting RDS instances')
def get_elasticache_clusters_by_region(self, region):
''' Makes an AWS API call to the list of ElastiCache clusters (with
nodes' info) in a particular region.'''
# ElastiCache boto module doesn't provide a get_all_intances method,
# that's why we need to call describe directly (it would be called by
# the shorthand method anyway...)
try:
conn = elasticache.connect_to_region(region)
if conn:
# show_cache_node_info = True
# because we also want nodes' information
response = conn.describe_cache_clusters(None, None, None, True)
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if not e.reason == "Forbidden":
error = "Looks like AWS ElastiCache is down:\n%s" % e.message
self.fail_with_error(error, 'getting ElastiCache clusters')
try:
# Boto also doesn't provide wrapper classes to CacheClusters or
# CacheNodes. Because of that wo can't make use of the get_list
# method in the AWSQueryConnection. Let's do the work manually
clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters']
except KeyError as e:
error = "ElastiCache query to AWS failed (unexpected format)."
self.fail_with_error(error, 'getting ElastiCache clusters')
for cluster in clusters:
self.add_elasticache_cluster(cluster, region)
def get_elasticache_replication_groups_by_region(self, region):
''' Makes an AWS API call to the list of ElastiCache replication groups
in a particular region.'''
# ElastiCache boto module doesn't provide a get_all_intances method,
# that's why we need to call describe directly (it would be called by
# the shorthand method anyway...)
try:
conn = elasticache.connect_to_region(region)
if conn:
response = conn.describe_replication_groups()
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if not e.reason == "Forbidden":
error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message
self.fail_with_error(error, 'getting ElastiCache clusters')
try:
# Boto also doesn't provide wrapper classes to ReplicationGroups
# Because of that wo can't make use of the get_list method in the
# AWSQueryConnection. Let's do the work manually
replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']
except KeyError as e:
error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)."
self.fail_with_error(error, 'getting ElastiCache clusters')
for replication_group in replication_groups:
self.add_elasticache_replication_group(replication_group, region)
def get_auth_error_message(self):
''' create an informative error message if there is an issue authenticating'''
errors = ["Authentication error retrieving ec2 inventory."]
if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]:
errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found')
else:
errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct')
boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials']
boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p)))
if len(boto_config_found) > 0:
errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found))
else:
errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths))
return '\n'.join(errors)
def fail_with_error(self, err_msg, err_operation=None):
'''log an error to std err for ansible-playbook to consume and exit'''
if err_operation:
err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format(
err_msg=err_msg, err_operation=err_operation)
sys.stderr.write(err_msg)
sys.exit(1)
def get_instance(self, region, instance_id):
conn = self.connect(region)
reservations = conn.get_all_instances([instance_id])
for reservation in reservations:
for instance in reservation.instances:
return instance
def add_instance(self, instance, region):
''' Adds an instance to the inventory and index, as long as it is
addressable '''
# Only return instances with desired instance states
if instance.state not in self.ec2_instance_states:
return
# Select the best destination address
if self.destination_format and self.destination_format_tags:
dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, 'nil') for tag in self.destination_format_tags ])
elif instance.subnet_id:
dest = getattr(instance, self.vpc_destination_variable, None)
if dest is None:
dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None)
else:
dest = getattr(instance, self.destination_variable, None)
if dest is None:
dest = getattr(instance, 'tags').get(self.destination_variable, None)
if not dest:
# Skip instances we cannot address (e.g. private VPC subnet)
return
# if we only want to include hosts that match a pattern, skip those that don't
if self.pattern_include and not self.pattern_include.match(dest):
return
# if we need to exclude hosts that match a pattern, skip those
if self.pattern_exclude and self.pattern_exclude.match(dest):
return
# Add to index
self.index[dest] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[instance.id] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', instance.id)
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
self.push(self.inventory, instance.placement, dest)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, instance.placement)
self.push_group(self.inventory, 'zones', instance.placement)
# Inventory: Group by Amazon Machine Image (AMI) ID
if self.group_by_ami_id:
ami_id = self.to_safe(instance.image_id)
self.push(self.inventory, ami_id, dest)
if self.nested_groups:
self.push_group(self.inventory, 'images', ami_id)
# Inventory: Group by instance type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + instance.instance_type)
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by key pair
if self.group_by_key_pair and instance.key_name:
key_name = self.to_safe('key_' + instance.key_name)
self.push(self.inventory, key_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'keys', key_name)
# Inventory: Group by VPC
if self.group_by_vpc_id and instance.vpc_id:
vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id)
self.push(self.inventory, vpc_id_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'vpcs', vpc_id_name)
# Inventory: Group by security group
if self.group_by_security_group:
try:
for group in instance.groups:
key = self.to_safe("security_group_" + group.name)
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
self.fail_with_error('\n'.join(['Package boto seems a bit older.',
'Please upgrade boto >= 2.3.0.']))
# Inventory: Group by tag keys
if self.group_by_tag_keys:
for k, v in instance.tags.items():
if v:
key = self.to_safe("tag_" + k + "=" + v)
else:
key = self.to_safe("tag_" + k)
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
if v:
self.push_group(self.inventory, self.to_safe("tag_" + k), key)
# Inventory: Group by Route53 domain names if enabled
if self.route53_enabled and self.group_by_route53_names:
route53_names = self.get_instance_route53_names(instance)
for name in route53_names:
self.push(self.inventory, name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'route53', name)
# Global Tag: instances without tags
if self.group_by_tag_none and len(instance.tags) == 0:
self.push(self.inventory, 'tag_none', dest)
if self.nested_groups:
self.push_group(self.inventory, 'tags', 'tag_none')
# Global Tag: tag all EC2 instances
self.push(self.inventory, 'ec2', dest)
self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
def add_rds_instance(self, instance, region):
''' Adds an RDS instance to the inventory and index, as long as it is
addressable '''
# Only want available instances unless all_rds_instances is True
if not self.all_rds_instances and instance.status != 'available':
return
# Select the best destination address
dest = instance.endpoint[0]
if not dest:
# Skip instances we cannot address (e.g. private VPC subnet)
return
# Add to index
self.index[dest] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[instance.id] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', instance.id)
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
self.push(self.inventory, instance.availability_zone, dest)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, instance.availability_zone)
self.push_group(self.inventory, 'zones', instance.availability_zone)
# Inventory: Group by instance type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + instance.instance_class)
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC
if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id:
vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id)
self.push(self.inventory, vpc_id_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'vpcs', vpc_id_name)
# Inventory: Group by security group
if self.group_by_security_group:
try:
if instance.security_group:
key = self.to_safe("security_group_" + instance.security_group.name)
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
self.fail_with_error('\n'.join(['Package boto seems a bit older.',
'Please upgrade boto >= 2.3.0.']))
# Inventory: Group by engine
if self.group_by_rds_engine:
self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest)
if self.nested_groups:
self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine))
# Inventory: Group by parameter group
if self.group_by_rds_parameter_group:
self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest)
if self.nested_groups:
self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))
# Global Tag: all RDS instances
self.push(self.inventory, 'rds', dest)
self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
def add_elasticache_cluster(self, cluster, region):
''' Adds an ElastiCache cluster to the inventory and index, as long as
it's nodes are addressable '''
# Only want available clusters unless all_elasticache_clusters is True
if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available':
return
# Select the best destination address
if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']:
# Memcached cluster
dest = cluster['ConfigurationEndpoint']['Address']
is_redis = False
else:
# Redis sigle node cluster
# Because all Redis clusters are single nodes, we'll merge the
# info from the cluster with info about the node
dest = cluster['CacheNodes'][0]['Endpoint']['Address']
is_redis = True
if not dest:
# Skip clusters we cannot address (e.g. private VPC subnet)
return
# Add to index
self.index[dest] = [region, cluster['CacheClusterId']]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[cluster['CacheClusterId']] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', cluster['CacheClusterId'])
# Inventory: Group by region
if self.group_by_region and not is_redis:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone and not is_redis:
self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
# Inventory: Group by node type
if self.group_by_instance_type and not is_redis:
type_name = self.to_safe('type_' + cluster['CacheNodeType'])
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC (information not available in the current
# AWS API version for ElastiCache)
# Inventory: Group by security group
if self.group_by_security_group and not is_redis:
# Check for the existence of the 'SecurityGroups' key and also if
# this key has some value. When the cluster is not placed in a SG
# the query can return None here and cause an error.
if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
for security_group in cluster['SecurityGroups']:
key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
# Inventory: Group by engine
if self.group_by_elasticache_engine and not is_redis:
self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine']))
# Inventory: Group by parameter group
if self.group_by_elasticache_parameter_group:
self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName']))
# Inventory: Group by replication group
if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']:
self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId']))
# Global Tag: all ElastiCache clusters
self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId'])
host_info = self.get_host_info_dict_from_describe_dict(cluster)
self.inventory["_meta"]["hostvars"][dest] = host_info
# Add the nodes
for node in cluster['CacheNodes']:
self.add_elasticache_node(node, cluster, region)
def add_elasticache_node(self, node, cluster, region):
''' Adds an ElastiCache node to the inventory and index, as long as
it is addressable '''
# Only want available nodes unless all_elasticache_nodes is True
if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available':
return
# Select the best destination address
dest = node['Endpoint']['Address']
if not dest:
# Skip nodes we cannot address (e.g. private VPC subnet)
return
node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId'])
# Add to index
self.index[dest] = [region, node_id]
# Inventory: Group by node ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[node_id] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', node_id)
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
# Inventory: Group by node type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + cluster['CacheNodeType'])
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC (information not available in the current
# AWS API version for ElastiCache)
# Inventory: Group by security group
if self.group_by_security_group:
# Check for the existence of the 'SecurityGroups' key and also if
# this key has some value. When the cluster is not placed in a SG
# the query can return None here and cause an error.
if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
for security_group in cluster['SecurityGroups']:
key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
# Inventory: Group by engine
if self.group_by_elasticache_engine:
self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine']))
# Inventory: Group by parameter group (done at cluster level)
# Inventory: Group by replication group (done at cluster level)
# Inventory: Group by ElastiCache Cluster
if self.group_by_elasticache_cluster:
self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest)
# Global Tag: all ElastiCache nodes
self.push(self.inventory, 'elasticache_nodes', dest)
host_info = self.get_host_info_dict_from_describe_dict(node)
if dest in self.inventory["_meta"]["hostvars"]:
self.inventory["_meta"]["hostvars"][dest].update(host_info)
else:
self.inventory["_meta"]["hostvars"][dest] = host_info
def add_elasticache_replication_group(self, replication_group, region):
''' Adds an ElastiCache replication group to the inventory and index '''
# Only want available clusters unless all_elasticache_replication_groups is True
if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available':
return
# Select the best destination address (PrimaryEndpoint)
dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address']
if not dest:
# Skip clusters we cannot address (e.g. private VPC subnet)
return
# Add to index
self.index[dest] = [region, replication_group['ReplicationGroupId']]
# Inventory: Group by ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[replication_group['ReplicationGroupId']] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId'])
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone (doesn't apply to replication groups)
# Inventory: Group by node type (doesn't apply to replication groups)
# Inventory: Group by VPC (information not available in the current
# AWS API version for replication groups
# Inventory: Group by security group (doesn't apply to replication groups)
# Check this value in cluster level
# Inventory: Group by engine (replication groups are always Redis)
if self.group_by_elasticache_engine:
self.push(self.inventory, 'elasticache_redis', dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', 'redis')
# Global Tag: all ElastiCache clusters
self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId'])
host_info = self.get_host_info_dict_from_describe_dict(replication_group)
self.inventory["_meta"]["hostvars"][dest] = host_info
def get_route53_records(self):
''' Get and store the map of resource records to domain names that
point to them. '''
r53_conn = route53.Route53Connection()
all_zones = r53_conn.get_zones()
route53_zones = [ zone for zone in all_zones if zone.name[:-1]
not in self.route53_excluded_zones ]
self.route53_records = {}
for zone in route53_zones:
rrsets = r53_conn.get_all_rrsets(zone.id)
for record_set in rrsets:
record_name = record_set.name
if record_name.endswith('.'):
record_name = record_name[:-1]
for resource in record_set.resource_records:
self.route53_records.setdefault(resource, set())
self.route53_records[resource].add(record_name)
def get_instance_route53_names(self, instance):
''' Check if an instance is referenced in the records we have from
Route53. If it is, return the list of domain names pointing to said
instance. If nothing points to it, return an empty list. '''
instance_attributes = [ 'public_dns_name', 'private_dns_name',
'ip_address', 'private_ip_address' ]
name_list = set()
for attrib in instance_attributes:
try:
value = getattr(instance, attrib)
except AttributeError:
continue
if value in self.route53_records:
name_list.update(self.route53_records[value])
return list(name_list)
def get_host_info_dict_from_instance(self, instance):
instance_vars = {}
for key in vars(instance):
value = getattr(instance, key)
key = self.to_safe('ec2_' + key)
# Handle complex types
# state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518
if key == 'ec2__state':
instance_vars['ec2_state'] = instance.state or ''
instance_vars['ec2_state_code'] = instance.state_code
elif key == 'ec2__previous_state':
instance_vars['ec2_previous_state'] = instance.previous_state or ''
instance_vars['ec2_previous_state_code'] = instance.previous_state_code
elif type(value) in [int, bool]:
instance_vars[key] = value
elif isinstance(value, six.string_types):
instance_vars[key] = value.strip()
elif type(value) == type(None):
instance_vars[key] = ''
elif key == 'ec2_region':
instance_vars[key] = value.name
elif key == 'ec2__placement':
instance_vars['ec2_placement'] = value.zone
elif key == 'ec2_tags':
for k, v in value.items():
key = self.to_safe('ec2_tag_' + k)
instance_vars[key] = v
elif key == 'ec2_groups':
group_ids = []
group_names = []
for group in value:
group_ids.append(group.id)
group_names.append(group.name)
instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids])
instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names])
else:
pass
# TODO Product codes if someone finds them useful
#print key
#print type(value)
#print value
return instance_vars
def get_host_info_dict_from_describe_dict(self, describe_dict):
''' Parses the dictionary returned by the API call into a flat list
of parameters. This method should be used only when 'describe' is
used directly because Boto doesn't provide specific classes. '''
# I really don't agree with prefixing everything with 'ec2'
# because EC2, RDS and ElastiCache are different services.
# I'm just following the pattern used until now to not break any
# compatibility.
host_info = {}
for key in describe_dict:
value = describe_dict[key]
key = self.to_safe('ec2_' + self.uncammelize(key))
# Handle complex types
# Target: Memcached Cache Clusters
if key == 'ec2_configuration_endpoint' and value:
host_info['ec2_configuration_endpoint_address'] = value['Address']
host_info['ec2_configuration_endpoint_port'] = value['Port']
# Target: Cache Nodes and Redis Cache Clusters (single node)
if key == 'ec2_endpoint' and value:
host_info['ec2_endpoint_address'] = value['Address']
host_info['ec2_endpoint_port'] = value['Port']
# Target: Redis Replication Groups
if key == 'ec2_node_groups' and value:
host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address']
host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port']
replica_count = 0
for node in value[0]['NodeGroupMembers']:
if node['CurrentRole'] == 'primary':
host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address']
host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']
host_info['ec2_primary_cluster_id'] = node['CacheClusterId']
elif node['CurrentRole'] == 'replica':
host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address']
host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port']
host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId']
replica_count += 1
# Target: Redis Replication Groups
if key == 'ec2_member_clusters' and value:
host_info['ec2_member_clusters'] = ','.join([str(i) for i in value])
# Target: All Cache Clusters
elif key == 'ec2_cache_parameter_group':
host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']])
host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName']
host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus']
# Target: Almost everything
elif key == 'ec2_security_groups':
# Skip if SecurityGroups is None
# (it is possible to have the key defined but no value in it).
if value is not None:
sg_ids = []
for sg in value:
sg_ids.append(sg['SecurityGroupId'])
host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids])
# Target: Everything
# Preserve booleans and integers
elif type(value) in [int, bool]:
host_info[key] = value
# Target: Everything
# Sanitize string values
elif isinstance(value, six.string_types):
host_info[key] = value.strip()
# Target: Everything
# Replace None by an empty string
elif type(value) == type(None):
host_info[key] = ''
else:
# Remove non-processed complex types
pass
return host_info
def get_host_info(self):
''' Get variables about a specific host '''
if len(self.index) == 0:
# Need to load index from cache
self.load_index_from_cache()
if not self.args.host in self.index:
# try updating the cache
self.do_api_calls_update_cache()
if not self.args.host in self.index:
# host might not exist anymore
return self.json_format_dict({}, True)
(region, instance_id) = self.index[self.args.host]
instance = self.get_instance(region, instance_id)
return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)
def push(self, my_dict, key, element):
''' Push an element onto an array that may not have been defined in
the dict '''
group_info = my_dict.setdefault(key, [])
if isinstance(group_info, dict):
host_list = group_info.setdefault('hosts', [])
host_list.append(element)
else:
group_info.append(element)
def push_group(self, my_dict, key, element):
''' Push a group as a child of another group. '''
parent_group = my_dict.setdefault(key, {})
if not isinstance(parent_group, dict):
parent_group = my_dict[key] = {'hosts': parent_group}
child_groups = parent_group.setdefault('children', [])
if element not in child_groups:
child_groups.append(element)
def get_inventory_from_cache(self):
''' Reads the inventory from the cache file and returns it as a JSON
object '''
cache = open(self.cache_path_cache, 'r')
json_inventory = cache.read()
return json_inventory
def load_index_from_cache(self):
''' Reads the index from the cache file sets self.index '''
cache = open(self.cache_path_index, 'r')
json_index = cache.read()
self.index = json.loads(json_index)
def write_to_cache(self, data, filename):
''' Writes data in JSON format to a file '''
json_data = self.json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def uncammelize(self, key):
temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
regex = "[^A-Za-z0-9\_"
if not self.replace_dash_in_groups:
regex += "\-"
return re.sub(regex + "]", "_", word)
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
# Run the script
Ec2Inventory()
|
appuio/ansible-role-openshift-zabbix-monitoring
|
vendor/openshift-tools/ansible/inventory/aws/ec2.py
|
Python
|
apache-2.0
| 55,406 | 0.002978 |
import json
import os
from ctypes import addressof, byref, c_double, c_void_p
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.driver import Driver
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.prototypes import raster as capi
from django.contrib.gis.gdal.raster.band import BandList
from django.contrib.gis.gdal.raster.const import GDAL_RESAMPLE_ALGORITHMS
from django.contrib.gis.gdal.srs import SpatialReference, SRSException
from django.contrib.gis.geometry.regex import json_regex
from django.utils import six
from django.utils.encoding import (
force_bytes, force_text, python_2_unicode_compatible,
)
from django.utils.functional import cached_property
class TransformPoint(list):
indices = {
'origin': (0, 3),
'scale': (1, 5),
'skew': (2, 4),
}
def __init__(self, raster, prop):
x = raster.geotransform[self.indices[prop][0]]
y = raster.geotransform[self.indices[prop][1]]
list.__init__(self, [x, y])
self._raster = raster
self._prop = prop
@property
def x(self):
return self[0]
@x.setter
def x(self, value):
gtf = self._raster.geotransform
gtf[self.indices[self._prop][0]] = value
self._raster.geotransform = gtf
@property
def y(self):
return self[1]
@y.setter
def y(self, value):
gtf = self._raster.geotransform
gtf[self.indices[self._prop][1]] = value
self._raster.geotransform = gtf
@python_2_unicode_compatible
class GDALRaster(GDALBase):
"""
Wraps a raster GDAL Data Source object.
"""
def __init__(self, ds_input, write=False):
self._write = 1 if write else 0
Driver.ensure_registered()
# Preprocess json inputs. This converts json strings to dictionaries,
# which are parsed below the same way as direct dictionary inputs.
if isinstance(ds_input, six.string_types) and json_regex.match(ds_input):
ds_input = json.loads(ds_input)
# If input is a valid file path, try setting file as source.
if isinstance(ds_input, six.string_types):
if not os.path.exists(ds_input):
raise GDALException('Unable to read raster source input "{}"'.format(ds_input))
try:
# GDALOpen will auto-detect the data source type.
self._ptr = capi.open_ds(force_bytes(ds_input), self._write)
except GDALException as err:
raise GDALException('Could not open the datasource at "{}" ({}).'.format(ds_input, err))
elif isinstance(ds_input, dict):
# A new raster needs to be created in write mode
self._write = 1
# Create driver (in memory by default)
driver = Driver(ds_input.get('driver', 'MEM'))
# For out of memory drivers, check filename argument
if driver.name != 'MEM' and 'name' not in ds_input:
raise GDALException('Specify name for creation of raster with driver "{}".'.format(driver.name))
# Check if width and height where specified
if 'width' not in ds_input or 'height' not in ds_input:
raise GDALException('Specify width and height attributes for JSON or dict input.')
# Check if srid was specified
if 'srid' not in ds_input:
raise GDALException('Specify srid for JSON or dict input.')
# Create GDAL Raster
self._ptr = capi.create_ds(
driver._ptr,
force_bytes(ds_input.get('name', '')),
ds_input['width'],
ds_input['height'],
ds_input.get('nr_of_bands', len(ds_input.get('bands', []))),
ds_input.get('datatype', 6),
None
)
# Set band data if provided
for i, band_input in enumerate(ds_input.get('bands', [])):
band = self.bands[i]
band.data(band_input['data'])
if 'nodata_value' in band_input:
band.nodata_value = band_input['nodata_value']
# Set SRID
self.srs = ds_input.get('srid')
# Set additional properties if provided
if 'origin' in ds_input:
self.origin.x, self.origin.y = ds_input['origin']
if 'scale' in ds_input:
self.scale.x, self.scale.y = ds_input['scale']
if 'skew' in ds_input:
self.skew.x, self.skew.y = ds_input['skew']
elif isinstance(ds_input, c_void_p):
# Instantiate the object using an existing pointer to a gdal raster.
self._ptr = ds_input
else:
raise GDALException('Invalid data source input type: "{}".'.format(type(ds_input)))
def __del__(self):
if self._ptr and capi:
capi.close_ds(self._ptr)
def __str__(self):
return self.name
def __repr__(self):
"""
Short-hand representation because WKB may be very large.
"""
return '<Raster object at %s>' % hex(addressof(self._ptr))
def _flush(self):
"""
Flush all data from memory into the source file if it exists.
The data that needs flushing are geotransforms, coordinate systems,
nodata_values and pixel values. This function will be called
automatically wherever it is needed.
"""
# Raise an Exception if the value is being changed in read mode.
if not self._write:
raise GDALException('Raster needs to be opened in write mode to change values.')
capi.flush_ds(self._ptr)
@property
def name(self):
"""
Returns the name of this raster. Corresponds to filename
for file-based rasters.
"""
return force_text(capi.get_ds_description(self._ptr))
@cached_property
def driver(self):
"""
Returns the GDAL Driver used for this raster.
"""
ds_driver = capi.get_ds_driver(self._ptr)
return Driver(ds_driver)
@property
def width(self):
"""
Width (X axis) in pixels.
"""
return capi.get_ds_xsize(self._ptr)
@property
def height(self):
"""
Height (Y axis) in pixels.
"""
return capi.get_ds_ysize(self._ptr)
@property
def srs(self):
"""
Returns the SpatialReference used in this GDALRaster.
"""
try:
wkt = capi.get_ds_projection_ref(self._ptr)
if not wkt:
return None
return SpatialReference(wkt, srs_type='wkt')
except SRSException:
return None
@srs.setter
def srs(self, value):
"""
Sets the spatial reference used in this GDALRaster. The input can be
a SpatialReference or any parameter accepted by the SpatialReference
constructor.
"""
if isinstance(value, SpatialReference):
srs = value
elif isinstance(value, six.integer_types + six.string_types):
srs = SpatialReference(value)
else:
raise ValueError('Could not create a SpatialReference from input.')
capi.set_ds_projection_ref(self._ptr, srs.wkt.encode())
self._flush()
@property
def geotransform(self):
"""
Returns the geotransform of the data source.
Returns the default geotransform if it does not exist or has not been
set previously. The default is [0.0, 1.0, 0.0, 0.0, 0.0, -1.0].
"""
# Create empty ctypes double array for data
gtf = (c_double * 6)()
capi.get_ds_geotransform(self._ptr, byref(gtf))
return list(gtf)
@geotransform.setter
def geotransform(self, values):
"Sets the geotransform for the data source."
if sum([isinstance(x, (int, float)) for x in values]) != 6:
raise ValueError('Geotransform must consist of 6 numeric values.')
# Create ctypes double array with input and write data
values = (c_double * 6)(*values)
capi.set_ds_geotransform(self._ptr, byref(values))
self._flush()
@property
def origin(self):
"""
Coordinates of the raster origin.
"""
return TransformPoint(self, 'origin')
@property
def scale(self):
"""
Pixel scale in units of the raster projection.
"""
return TransformPoint(self, 'scale')
@property
def skew(self):
"""
Skew of pixels (rotation parameters).
"""
return TransformPoint(self, 'skew')
@property
def extent(self):
"""
Returns the extent as a 4-tuple (xmin, ymin, xmax, ymax).
"""
# Calculate boundary values based on scale and size
xval = self.origin.x + self.scale.x * self.width
yval = self.origin.y + self.scale.y * self.height
# Calculate min and max values
xmin = min(xval, self.origin.x)
xmax = max(xval, self.origin.x)
ymin = min(yval, self.origin.y)
ymax = max(yval, self.origin.y)
return xmin, ymin, xmax, ymax
@property
def bands(self):
return BandList(self)
def warp(self, ds_input, resampling='NearestNeighbour', max_error=0.0):
"""
Returns a warped GDALRaster with the given input characteristics.
The input is expected to be a dictionary containing the parameters
of the target raster. Allowed values are width, height, SRID, origin,
scale, skew, datatype, driver, and name (filename).
By default, the warp functions keeps all parameters equal to the values
of the original source raster. For the name of the target raster, the
name of the source raster will be used and appended with
_copy. + source_driver_name.
In addition, the resampling algorithm can be specified with the "resampling"
input parameter. The default is NearestNeighbor. For a list of all options
consult the GDAL_RESAMPLE_ALGORITHMS constant.
"""
# Get the parameters defining the geotransform, srid, and size of the raster
if 'width' not in ds_input:
ds_input['width'] = self.width
if 'height' not in ds_input:
ds_input['height'] = self.height
if 'srid' not in ds_input:
ds_input['srid'] = self.srs.srid
if 'origin' not in ds_input:
ds_input['origin'] = self.origin
if 'scale' not in ds_input:
ds_input['scale'] = self.scale
if 'skew' not in ds_input:
ds_input['skew'] = self.skew
# Get the driver, name, and datatype of the target raster
if 'driver' not in ds_input:
ds_input['driver'] = self.driver.name
if 'name' not in ds_input:
ds_input['name'] = self.name + '_copy.' + self.driver.name
if 'datatype' not in ds_input:
ds_input['datatype'] = self.bands[0].datatype()
# Set the number of bands
ds_input['nr_of_bands'] = len(self.bands)
# Create target raster
target = GDALRaster(ds_input, write=True)
# Copy nodata values to warped raster
for index, band in enumerate(self.bands):
target.bands[index].nodata_value = band.nodata_value
# Select resampling algorithm
algorithm = GDAL_RESAMPLE_ALGORITHMS[resampling]
# Reproject image
capi.reproject_image(
self._ptr, self.srs.wkt.encode(),
target._ptr, target.srs.wkt.encode(),
algorithm, 0.0, max_error,
c_void_p(), c_void_p(), c_void_p()
)
# Make sure all data is written to file
target._flush()
return target
def transform(self, srid, driver=None, name=None, resampling='NearestNeighbour',
max_error=0.0):
"""
Returns a copy of this raster reprojected into the given SRID.
"""
# Convert the resampling algorithm name into an algorithm id
algorithm = GDAL_RESAMPLE_ALGORITHMS[resampling]
# Instantiate target spatial reference system
target_srs = SpatialReference(srid)
# Create warped virtual dataset in the target reference system
target = capi.auto_create_warped_vrt(
self._ptr, self.srs.wkt.encode(), target_srs.wkt.encode(),
algorithm, max_error, c_void_p()
)
target = GDALRaster(target)
# Construct the target warp dictionary from the virtual raster
data = {
'srid': srid,
'width': target.width,
'height': target.height,
'origin': [target.origin.x, target.origin.y],
'scale': [target.scale.x, target.scale.y],
'skew': [target.skew.x, target.skew.y],
}
# Set the driver and filepath if provided
if driver:
data['driver'] = driver
if name:
data['name'] = name
# Warp the raster into new srid
return self.warp(data, resampling=resampling, max_error=max_error)
|
Vvucinic/Wander
|
venv_2_7/lib/python2.7/site-packages/Django-1.9-py2.7.egg/django/contrib/gis/gdal/raster/source.py
|
Python
|
artistic-2.0
| 13,274 | 0.000904 |
# Copyright 2015 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boto
import boto.s3
import boto.s3.connection
import datetime
import logging
import re
import sys
import zlib
import six
# THIS MUST END IN A /
S3PREFIX = "logs/"
S3_KEY_RE = re.compile(r'.*/(?P<stream_name>[\w-]+)/(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d{2})/.+(?P<gz>\.gz)?$')
#----------------------- SCRIBE LOG CHUNK OBJECTS -----------------------#
class BadKeyError(Exception):
def __init__(self, key, keytype=""):
self.key = key
self.keytype = keytype
def __repr__(self):
return "<BadKeyError %s:%s>" % (self.keytype, self.key)
def __str__(self):
return "BadKeyError: %s key %s did not match the expected format" % (self.keytype, self.key)
class ScribeFile(object):
"""Base class for Scribe file objects. These represent a single log chunk,
and can be read or listed. Scribe File objects are equal if the combination of
their date, stream name, and aggregator are the same. This allows you to, for example,
create a set of files from both s3 and a local cache without reading the same
chunk twice.
Important methods:
read: adds a file's contents to the stream ostream, transparently handling gzip'd data
Properties:
sort_key: A key to sort or compare with
size: The length of the record in bytes
"""
def __init__(self, stream_name, year, month, day):
self.stream_name = stream_name
self.year = year
self.month = month
self.day = day
self.date = datetime.date(self.year, self.month, self.day)
@property
def size(self):
raise NotImplementedError
def read(self, ostream=sys.stdout):
raise NotImplementedError
def read_orig(self, ostream=sys.stdout):
raise NotImplementedError
class ScribeS3File(ScribeFile):
"""Represent scribe log chunks on S3"""
def __init__(self, key):
self.key = key
keymd = S3_KEY_RE.match(key.name)
if not keymd:
raise BadKeyError(key, "S3")
super(ScribeS3File, self).__init__(
keymd.group('stream_name'),
int(keymd.group('year')),
int(keymd.group('month')),
int(keymd.group('day')),
)
def read(self, ostream=sys.stdout):
"""Read self into the ostream"""
decompressor = zlib.decompressobj(31)
# Python 2 works with string, python 3 with bytes
remainder = "" if six.PY2 else b""
if self.key.name.endswith(".gz"):
for data in self.key:
remainder += data
try:
ostream.write(decompressor.decompress(remainder))
remainder = decompressor.unconsumed_tail
except zlib.error:
# maybe we didn't have enough data in this chunk to
# decompress any. if so, build up a string to decompress
pass
else:
for data in self.key:
ostream.write(data)
if len(remainder) > 0:
logging.error("Encountered %d extra bits in zlib output", len(remainder))
def read_orig(self, ostream=sys.stdout):
"""Read the original of self (compressed if applicable) to ostream"""
self.key.get_contents_to_file(ostream)
@property
def size(self):
return self.key.size
#----------------------- SCRIBE CONNECTION MANAGERS -----------------------#
class ScribeS3(object):
"""This class represents an S3 connection and abstracts scribe interactions"""
LOGS_BASE_PATH = "{prefix}{stream}/{year:=04d}/{month:=02d}/{day:=02d}"
LOG_FILE_PATH = LOGS_BASE_PATH + "/{aggregator}-{part:=05d}.gz"
COMPLETE_FILE_PATH = LOGS_BASE_PATH + "/COMPLETE"
def __init__(
self,
s3_host,
aws_access_key_id,
aws_secret_access_key,
s3_bucket,
s3_key_prefix=None,
):
self.s3_key_prefix = s3_key_prefix
if self.s3_key_prefix and self.s3_key_prefix[-1] != '/':
self.s3_key_prefix += '/'
self.s3_connection = boto.s3.connection.S3Connection(
host=s3_host,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
)
self.s3_bucket = self.s3_connection.get_bucket(s3_bucket)
logging.debug('connected to s3 with %s', self.s3_connection)
@property
def streams(self):
ret = set()
for prefix in self.s3_bucket.list(prefix=self.s3_key_prefix, delimiter="/"):
prefix = prefix.name.replace(self.s3_key_prefix or S3PREFIX, "", 1).rstrip('/')
ret.add(prefix)
return ret
def complete_for(self, stream_name, date):
"""Are the S3 uploads for the given stream_name on the given date marked as complete?"""
complete_key_name = self.COMPLETE_FILE_PATH.format(
prefix=self.s3_key_prefix,
stream=stream_name,
year=date.year,
month=date.month,
day=date.day,
)
key = self.s3_bucket.get_key(complete_key_name)
return bool(key)
def get_logs(self, stream_name, date):
prefix = self.LOGS_BASE_PATH.format(
prefix=self.s3_key_prefix,
stream=stream_name,
year=date.year,
month=date.month,
day=date.day,
)
ret = set()
for s3_name in self.s3_bucket.list(prefix=prefix):
if s3_name.name.endswith("COMPLETE"):
continue
if s3_name.name.endswith("_SUCCESS"):
continue
if s3_name.name.endswith(".bad"):
continue
ret.add(ScribeS3File(s3_name))
return ret
def get_log(self, stream_name, date, aggregator, part):
"""Get a specific log
.. warning:: This function is deprecated and should not be used.
"""
key_name = self.LOG_FILE_PATH.format(
prefix=self.s3_key_prefix,
stream=stream_name,
year=date.year,
month=date.month,
day=date.day,
aggregator=aggregator,
part=part,
)
key = self.s3_bucket.get_key(key_name)
if key:
return ScribeS3File(key)
return None
#----------------------- COMMAND OBJECTS -----------------------#
class ScribeReader(object):
"""
ScribeReader provides an interface for interacting with individual log elements
(ScribeFile objects) in Scribe
"""
def __init__(self, stream_name, s3_connections=None, fs_connection=None, ostream=sys.stdout, not_in_s3=False):
"""Initialize the ScribeReader
Args:
stream_name: The stream to read from
s3_connections: Optionally, an iterable of ScribeS3 objects
fs_connection: Optionally, a ScribeFS object
not_in_s3: Remove only keys unique to the fs_connection
Will read from s3_connection and/or fs_connection, depending on which are provided
"""
self.stream_name = stream_name
self.s3_connections = s3_connections
self.fs_connection = fs_connection
self.ostream = ostream
self.not_in_s3 = not_in_s3
def logs_for_date(self, date):
"""Write to the initial ostream for the given date"""
keys = set()
if self.fs_connection:
keys |= self.fs_connection.get_logs(self.stream_name, date)
if self.s3_connections:
for connection in self.s3_connections:
if connection is None:
continue
s3_keys = connection.get_logs(self.stream_name, date)
if self.not_in_s3:
keys -= s3_keys
else:
keys |= s3_keys
return sorted(keys, key=lambda key: key.key.last_modified)
def get_for_date(self, date):
for key in self.logs_for_date(date):
key.read(ostream=self.ostream)
yield
|
Yelp/yelp_clog
|
clog/scribe_net.py
|
Python
|
apache-2.0
| 8,613 | 0.001974 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017-06-27 michael_yin
#
"""
"""
from django.conf import settings
from django.conf.urls import include, url
from django.core.urlresolvers import reverse
|
hellowebbooks/hellowebbooks-website
|
blog/urls.py
|
Python
|
mit
| 237 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
import os
import datetime
AUTHOR = u'Jayson Stemmler'
SITENAME = u'Jayson Stemmler'
SITEURL = ''
SITENAME = "Jayson Stemmler's Blog"
SITETITLE = 'Jayson Stemmler'
SITESUBTITLE = 'Research / Data Scientist'
SITEDESCRIPTION = ''
# SITELOGO = SITEURL + '/images/profile.png'
# FAVICON = SITEURL + '/images/favicon.ico'
COPYRIGHT_NAME = "Jayson Stemmler"
COPYRIGHT_YEAR = datetime.datetime.today().strftime('%Y')
# THEME_DIR = os.path.join(os.getenv("HOME"), 'Documents/Blogging/pelican-themes')
# THEME = os.path.join(THEME_DIR, 'Flex')
THEME = 'themes/Flex'
USE_FOLDER_AS_CATEGORY = True
PATH = 'content'
PAGE_PATHS = ['pages']
ARTICLE_PATHS = ['articles']
TIMEZONE = 'America/Los_Angeles'
DEFAULT_LANG = u'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
DEFAULT_PAGINATION = 10
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
STATIC_PATHS = ['images', 'extra/CNAME']
EXTRA_PATH_METADATA = {'extra/CNAME': {'path': 'CNAME'},}
ARTICLE_URL = 'posts/{date:%Y}/{date:%b}/{slug}'
ARTICLE_SAVE_AS = 'posts/{date:%Y}/{date:%b}/{slug}.html'
PAGE_URL = 'pages/{slug}'
PAGE_SAVE_AS = 'pages/{slug}.html'
YEAR_ARCHIVE_SAVE_AS = 'posts/{date:%Y}/index.html'
MONTH_ARCHIVE_SAVE_AS = 'posts/{date:%Y}/{date:%b}/index.html'
## THEME OPTIONS
DISQUS_SITENAME = "jdstemmlerblog"
GOOGLE_ANALYTICS = "UA-99010895-1"
MAIN_MENU = True
SITELOGO = 'https://storage.googleapis.com/jdstemmler-blog-images/profile.png'
LINKS = (('Resume', 'https://represent.io/jdstemmler'),)
SOCIAL = (('linkedin', 'https://linkedin.com/in/jdstemmler/en'),
('github', 'https://github.com/jdstemmler'))
MENUITEMS = (('Archives', '/archives.html'),
('Categories', '/categories.html'),
('Tags', '/tags.html'),)
BROWSER_COLOR = '#333333'
ROBOTS = 'index, follow'
|
jdstemmler/jdstemmler.github.io
|
pelicanconf.py
|
Python
|
mit
| 2,048 | 0.001953 |
# -*- coding:Utf-8 -*-
#####################################################################
#This file is part of RGPA.
#Foobar is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#Foobar is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with Foobar. If not, see <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------
#authors :
#Nicolas AMIOT : nicolas.amiot@univ-rennes1.fr
#Bernard UGUEN : bernard.uguen@univ-rennes1.fr
#Mohamed LAARAIEDH : mohamed.laaraiedh@univ-rennes1.fr
#####################################################################
"""
.. curentmodule:: boxn
.. autosummary:
"""
from pylayers.util.project import *
import numpy as np
import os
import pdb
import copy
import time
try:
from tvtk.api import tvtk
from mayavi import mlab
except:
print('Layout:Mayavi is not installed')
#GeomNetType = np.dtype([('Id',np.uint64),
# ('time',np.uint64),
# ('p',float,(3)),
# ('v',float,(3)),
# ('a',float,(3))])
class LBoxN(PyLayers):
"""
class LBoxN
List of BoxN
Atributes
---------
box : np.array
array containing BoxN object. default void
vol : list
volume of each boxes from self.box. default void
bd : np.arrays 2*len(box) x ndim
contains all the boundaries of boxes from self.box. default void
ndim : int
dimension of the nbox
parmsh : dictionnary
keys ['display'] =True
['interactive'] =False
LB : LBoxN
Create a BoxN object from another one. default : None, the LBoxN obebject created is void.
Methods
-------
mesure(self):
measure intervals of box
append(self,b):
append a box 'b' to a Lbox
append_l(self,lb):
append a lbox 'lb' to a lbox
info(self):
display LBoxN information
bd2coord(self,Mapping = False):
convert boundaries of Lbox to their vertexes coordintates
octant(self):
quadtree on Lboxes
volume(self):
estimate volume of LBoxes
intersect(self,lb):
EXPERIMENTAL find the intersection of LBoxN
show3(self,col='b',Id=0):
required file generation for geomview display
"""
#__slots__=('box','vol','bd','ndim','ctr','grav','parmsh')
def __init__(self,Lb=None,ndim=3):
self.ctr= []
if Lb==None:
self.box = np.array([])
self.vol = []
self.bd=[]
self.ndim = ndim # !!! TO BE DONE
# self.bnum = []
else :
self.box = np.array([])
self.vol = []
self.bd=[]
for b in Lb:
self.append(b)
self.ndim=b.ndim
self.mesure()
self.parmsh={}
self.parmsh['display']=True
self.parmsh['interactive']=False
def mesure(self):
""" LMeasure BoxN
Obtain measure of :
- size of each interval from each dimension for each boxes
- center of each interval from each dimension for each boxes
- Volume of the BoxN for each boxes (NOT WORKING)
"""
if len(self.bd ) != 0:
lbd=len(self.bd)/2
self.ctr=np.zeros((lbd,self.ndim))
for i in xrange(lbd):self.ctr[i,:]=(self.bd[2*i]+self.bd[(2*i)+1])/2.
#########################FONTIONNE MAIS TROP LOURD/TENT quand bcp de box
# C =np.array(([[1/2.,1/2.]]))
# #M =np.array(([[-1.,1.]]))
# I = np.identity(len(self.bd)/2)
# CTR = np.kron(I,C)
# #MES = np.kron(I,M)
# #self.mes = np.dot(MES,self.bd)#self.bd[1,:]-self.bd[0,:]
# self.ctr = np.dot(CTR,self.bd)#(self.bd[1,:]+self.bd[0,:])/2.0
#self.vol = np.prod(self.mes,axis=1)
else :
self.ctr = []
def append(self,b):
"""append : Append a box to LboxN
Parameters
----------
b : BoxN
box to added
Returns
-------
Nothing but update self.box status
"""
self.box=np.append(self.box,b)
try:
self.bd=np.vstack((self.bd,b.bd[0]))
self.bd=np.vstack((self.bd,b.bd[1]))
except:
self.bd=b.bd[0]
self.bd=np.vstack((self.bd,b.bd[1]))
V1 = sum(self.vol)
V2 = b.vol
# uptate center of gravity
try:
self.grav = (V1*self.grav+V2*b.ctr)/(V1+V2)
except:
self.grav = b.ctr
self.vol.append(b.vol)
self.ctr.append(b.ctr)
def append_l(self,lb):
"""Append LBoxN to LBoxN
Parameters
----------
lb : LBoxN
lbox to be added
Returns
-------
Nothing but update self.box status
"""
# for i in xrange(len(lb.box)):self.append(lb.box[i])
self.box=np.append(self.box,lb.box)
try :
self.bd=np.vstack((self.bd,lb.bd))
self.ctr=np.vstack((self.ctr,lb.ctr))
self.vol=self.vol+lb.vol
except:
self.bd=lb.bd
self.ctr=lb.ctr
self.vol=lb.vol
# try:
# self.bd=np.vstack((self.bd,lb.bd[i][0]))
# self.bd=np.vstack((self.bd,lb.bd[i][1]))
# except:
# self.bd=lb.bd[i][0]
# self.bd=lb.bd[i][1]
# self.box = self.box + lb.box
# V1 = sum(self.vol)
# V2 = lb.box[0].vol*len(lb.box)
# # uptate center of gravity
# try:
# self.grav = (V1*self.grav+V2*lb.grav)/(V1+V2)
# except:
# self.grav = lb.grav
# self.vol = self.vol + lb.vol
def info(self):
""" display LBoxN information
"""
Vtot = 0
for k in range(len(self.box)):
#print "Box : ",k," Volume :",self.vol[k]
#print "ndim :",self.ndim
print("------------")
self.box[k].info()
Vtot = Vtot+self.box[k].vol
print("Volume : ",Vtot)
def bd2coord(self,Mapping = False):
"""Boundary to coordinates
Convert boundaries of Lbox to their vertexes coordintates
in :
[xmin ymin zmin]
[xmax ymax zmax]
out :
[xmin ymin zmin]
[xmin ymax zmin]
[xmax ymin zmin]
[xmax ymax zmin]
[xmin ymin zmax]
[xmin ymax zmax]
[xmax ymin zmax]
[xmax ymax zmax]
Parameters
----------
Mapping : Boolean
return a mapping of the vertex in regards of Lbox. Default = False
Returns
-------
P : array 2^ndim x ndim
coordinates of box vertex
"""
lbd=len(self.bd)
dimm1 = pow(2,self.ndim-1)
dim = pow(2,self.ndim)
P=np.zeros((lbd*dimm1,self.ndim)) #P=(len self.bd/2)*4
# organisation de P
if self.ndim == 3:
R=np.repeat(range(0,dimm1*lbd,dimm1),2)
R[range(1,len(R),2)]=R[range(1,len(R),2)]+1
if self.ndim == 2:
R=np.repeat(range(0,dimm1*lbd,dim),2)
R[range(1,len(R),2)]=R[range(1,len(R),2)]+1
if self.ndim == 3:
RZ=np.repeat(range(0,dimm1*lbd,dim),dimm1)+(lbd/2)*range(0,dimm1,1)
# aller chercher dans self.bd
R2a=np.repeat(self.bd[range(0,lbd,2),:],dimm1,axis=0)
R2b=np.repeat(self.bd[range(1,lbd,2),:],dimm1,axis=0)
# # X
# P[R,0]=R2a[:,0]#np.repeat(L.bd[range(0,lbd,2),0],4)
# P[R+2,0]=R2b[:,0]#np.repeat(L.bd[range(1,lbd,2),0],4)
# # Y
# P[np.sort(np.mod(R+3,4*lbd)),1]=R2a[:,1]#np.repeat(L.bd[range(0,lbd,2),1],4)
# P[R+1,1]=R2b[:,1]#np.repeat(L.bd[range(1,lbd,2),1],4)
# X
P[np.sort(np.mod(R+3,dimm1*lbd)),0]=R2a[:,0]#np.repeat(L.bd[range(0,lbd,2),1],4)
P[R+1,0]=R2b[:,0]#np.repeat(L.bd[range(1,lbd,2),1],4)
# Y
P[R,1]=R2a[:,1]#np.repeat(L.bd[range(0,lbd,2),0],4)
P[R+2,1]=R2b[:,1]#np.repeat(L.bd[range(1,lbd,2),0],4)
if self.ndim == 3:
# Z
P[RZ,2]=R2a[:,2]#np.repeat(L.bd[range(0,lbd,2),2],4)
P[RZ+4,2]=R2b[:,2]#np.repeat(L.bd[range(1,lbd,2),2],4)
# mapping coresponding box
if Mapping == True:
Map=np.repeat(range(0,lbd/2,1),dim)
# Map=10*np.repeat(self.bnum,8)+range(0,8,1)*(len(self.bnum))
return(P,Map)
# P=np.array((
# [self.bd[0,0],self.bd[0,1],self.bd[0,2]],
# [self.bd[0,0],self.bd[1,1],self.bd[0,2]],
# [self.bd[1,0],self.bd[1,1],self.bd[0,2]],
# [self.bd[1,0],self.bd[0,1],self.bd[0,2]],
# [self.bd[0,0],self.bd[0,1],self.bd[1,2]],
# [self.bd[0,0],self.bd[1,1],self.bd[1,2]],
# [self.bd[1,0],self.bd[1,1],self.bd[1,2]],
# [self.bd[1,0],self.bd[0,1],self.bd[1,2]]
# ))
else:
return(P)
def octant(self):
""" quadtree on boxes
Divide each Lboxes into 2^ndim equal parts
aka Split each interval from each dimension into 2 equal part
Returns
-------
lb : LBoxN 2^ndim*self.box x ndim
return theLBoxn from quadtree process
"""
if self.ndim == 3:
tlb = []
lbox = LBoxN([],ndim=self.ndim)
C=np.array(([[1,1/2.,0],[0,1/2.,1]])).T
I=np.identity(len(self.bd)/2)
CC=np.kron(I,C)
BD=np.dot(CC,self.bd)
M=np.repeat(3*np.arange(0,len(self.bd)/2),16) # groupe de 16 boundaries equivaut a 8 sous boites
X = (range(0,2)+range(1,3))*4*(len(self.bd)/2)
Y =(range(0,2)*2+range(1,3)*2)*2*(len(self.bd)/2)
Z = (range(0,2)*4+range(1,3)*4)*(len(self.bd)/2)
Rx = BD[X+M,0]
Ry = BD[Y+M,1]
Rz = BD[Z+M,2]
lbd = np.array((Rx,Ry,Rz )).T
llbd=len(lbd)
xr=xrange(0,llbd,2)
lb=LBoxN([BoxN(lbd[i:i+2],ndim=self.ndim) for i in xr])
lb.mesure()
if self.ndim == 2:
tlb = []
lbox = LBoxN(ndim=self.ndim)
C=np.array(([[1,1/2.,0],[0,1/2.,1]])).T
I=np.identity(len(self.bd)/2)
CC=np.kron(I,C)
BD=np.dot(CC,self.bd)
M=np.repeat(3*np.arange(0,len(self.bd)/2),8) # groupe de 16 boundaries equivaut a 8 sous boites
X = (range(0,2)+range(1,3))*2*(len(self.bd)/2)
Y =(range(0,2)*2+range(1,3)*2)*(len(self.bd)/2)
Rx = BD[X+M,0]
Ry = BD[Y+M,1]
lbd = np.array((Rx,Ry )).T
llbd=len(lbd)
lb=LBoxN(np.array([BoxN(lbd[i:i+2],ndim=self.ndim) for i in xrange(0,llbd,2)]))
return(lb)
def volume(self):
""" Evaluate Boxes volume
Compute the volume on each LBoxes
"""
self.vol=[]
for b in self.box:
vol = b.vol
if vol>=0:
self.vol.append(vol)
else:
pdb.set_trace()
def intersect(self,lb):
""" EXPERIMENTAL
Intersection of 2 LBOXN
"""
new_lb=LBoxN(ndim=self.ndim)
for k in range(len(self.box)):
for l in range(len(lb.box)):
b=self.box[k].intersect(lb.box[l])
if b.vol>0: # si intersection non vide
new_lb.append(b)
return(new_lb)
def _show3(self,col='r',Id=[0],H_Id=0,alpha=0.2):
# gett list of all boundaries
lbd = np.array([b.bd for b in self.box])
shb = lbd.shape
lbdr = lbd.reshape(shb[0]*shb[1],shb[2])
# mapping of single boundaries to get the 8 vertices of the boxN
mapp=np.array([[0,0,0],[0,1,0],[1,1,0],[1,0,0],[0,0,1],[0,1,1],[1,1,1],[1,0,1]],dtype='int')
# repeat the coordinates mapping for all the boxes + shift index
mappe=np.tile(mapp,(shb[0],1)) + np.repeat(np.arange(shb[0]),8)[:,None]
# get coordintaes of all verticesof all boxN
allpt = np.array([lbdr[mappe[:,0],0],lbdr[mappe[:,1],1],lbdr[mappe[:,2],2]])
edges=[[0,1,2],
[0,2,3],
[0,1,5],
[0,4,5],
[1,5,2],
[5,6,2],
[2,6,7],
[2,7,3],
[0,3,4],
[3,7,4],
[4,5,6],
[4,6,7]]
# as many edges as boxN
edgese = np.tile(edges,(shb[0],1)) + np.repeat(np.arange(shb[0])*8,12)[:,None]
mesh=tvtk.PolyData(points=allpt.T,polys=edgese)
if col =='r':
color=(1,0,0)
elif col =='b':
color=(0,0,1)
mlab.pipeline.surface(mesh,opacity=alpha,color=color)
# for b in self.box:
# b._show3(col='r',Id=[0],H_Id=0,alpha=0.2)
def show3(self,col='b',Id=0):
"""Show box into geomview
generate a geomview file which allow to represent a box.
Parameters
----------
col : string
choose box color. compliant with matplotlib colorConverter. default 'r'
Id : list
Identity of boxes to show.Default : [0]
Returns
-------
filename : string
name of the generated file
"""
filename = "lbox"+str(Id)+".list"
filename2 = basename +"/geom/"+filename
fd = open(filename2,"w")
fd.write("LIST\n")
for k in range(len(self.box)):
b = self.box[k]
b.parmsh['display']=False
filebox = b.show3(col=col,Id=[Id,k])
# filebox = b.show3(col=col[k],Id=[Id,k])
chaine = "{<"+filebox+"}\n"
fd.write(chaine)
#chaine = "{<cloud.list}\n"
#fd.write(chaine)
fd.close()
if self.parmsh['display']:
chaine = "geomview -nopanel -b 1 1 1 " + filename2 + " 2>/dev/null &"
os.system(chaine)
return(filename)
class BoxN(PyLayers):
"""BoxN Class
A box is determined by its boundary interval along each dimension
Attributes
----------
bd : numpy array 2 x ndim
box boundary
ndim : int
dimension of the box (2D, 3D,...)
self.parmsh : dictionnary
display dictionnary for show 3 method TO BE CHANGE !!!
keys :['display']=True
['interactive']=False
OBTAIN FROM mesure()
self.mes : array 1 x ndim
size of intervals of each dimension
self.ctr : array 1 x ndim
center of intervals of each dimension
self.vol : float
Volume of box
Methods
-------
info() : info about class
def mesure(self):
measure intervals of box
volume() : evaluate volume
inbox(p) : is p in box ?
intersect(box) : intersection of two boxes
show3() : geomview vizualization
cut() : cut a box along given direction
TODO
----
Remove parmsh and replace it by a ini file
"""
# __slots__=('bd','ndim','mes','ctr','vol','parmsh')
def __init__(self,bd=None,ndim=3):
# if bd==None:
# self.bd = np.array([]).astype('float')
# else:
# for i in range(np.shape(bd)[1]):
# assert bd[1,i]>=bd[0,i] , pdb.set_trace()
# self.bd = bd.astype('float')
self.bd=bd
self.ndim = ndim#np.shape(bd)[1]
self.mesure()
self.parmsh={}
self.parmsh['display']=True
self.parmsh['interactive']=False
# print "%s from %s" % (inspect.stack()[1][3],inspect.stack()[1][1])
def mesure(self):
""" Measure BoxN
Obtain measure of :
- size of each interval from each dimension
- center of each interval from each dimension
- Volume of the BoxN
"""
self.mes = self.bd[1,:]-self.bd[0,:]
self.ctr = (self.bd[1,:]+self.bd[0,:])/2.0
self.vol = np.prod(self.mes)
def setbd(self,vmin,vmax,axis=0):
"""
setbd : set boundary value on axis
"""
assert vmin<=vmax, "Incorrect bound"
self.bd[0,axis]= vmin.astype('float')
self.bd[1,axis]= vmax.astype('float')
self.mesure()
def void(self):
""" return True if box is void
"""
b = False
if self.meas==[]:
b = True
else:
pmes = np.prod(self.meas)
if pmes==0:
b = True
return(b)
def info(self):
""" Information on BoxN
"""
print( "Volume (.vol) :",self.vol)
print( "Center (.ctr) :",self.ctr)
def bd2coord(self):
"""Boundary to coordinates
Return an array containing of vertex from a box
3D case :
in :
[xmin ymin zmin]
[xmax ymax zmax]
out :
[xmin ymin zmin]
[xmin ymax zmin]
[xmax ymin zmin]
[xmax ymax zmin]
[xmin ymin zmax]
[xmin ymax zmax]
[xmax ymin zmax]
[xmax ymax zmax]
Returns
-------
P : array 2^ndim x ndim
coordinates of box vertex
"""
if self.ndim == 3:
P=np.array(([self.bd[0,0],self.bd[0,1],self.bd[0,2]],
[self.bd[0,0],self.bd[1,1],self.bd[0,2]],
[self.bd[1,0],self.bd[1,1],self.bd[0,2]],
[self.bd[1,0],self.bd[0,1],self.bd[0,2]],
[self.bd[0,0],self.bd[0,1],self.bd[1,2]],
[self.bd[0,0],self.bd[1,1],self.bd[1,2]],
[self.bd[1,0],self.bd[1,1],self.bd[1,2]],
[self.bd[1,0],self.bd[0,1],self.bd[1,2]]))
return(P)
if self.ndim == 2:
P=np.array(([self.bd[0,0],self.bd[0,1]],
[self.bd[0,0],self.bd[1,1]],
[self.bd[1,0],self.bd[1,1]],
[self.bd[1,0],self.bd[0,1]]))
return(P)
def coord2bd(self,coord):
"""
Coordinates to boundary
update boundary array from numpy array of coordinates
Parameters
----------
coord : array 2^ndim x ndim
vertexes coordinates of a boxN
Returns
-------
Nothing but fills self.bd
"""
self.bd[0,:]=np.min(coord,axis=0)
self.bd[1,:]=np.max(coord,axis=0)
# def octant(self):
# tlb = []
# lbox = LBoxN([])
# BD = np.array((self.bd[0],(self.bd[0]+self.bd[1])/2,self.bd[1] ))
## Rx = BD[(range(0,2)+range(1,3))*4,0]
## Ry = BD[(range(0,2)*2+range(1,3)*2)*2,1]
## Rz = BD[range(0,2)*4+range(1,3)*4,2]
## O = np.array((Rx,Ry,Rz )).T
## LB = LBoxN([BoxN(O[0:2]),BoxN(O[2:4]),BoxN(O[4:6]),BoxN(O[6:8]),BoxN(O[8:10]),BoxN(O[10:12]),BoxN(O[12:14]),BoxN(O[14:16])])
## # LB.bnum = range(00,010,1)
## return(LB)
##
def octant(self):
""" quadtree on boxes OBSOLETE
Divide boxes into 2^ndim equal parts
aka Split each interval from each dimension into 2 equal part
"""
tlb = []
lbox = LBoxN([])
for k in range(self.ndim):
tlb.append(self.cut(self.ctr[k],axis=k))
lbm = tlb[0]
for l in range(len(tlb)-1):
lbp=lbm.intersect(tlb[l+1])
lbm=lbp
return(lbm)
def intersect(self,box):
""" Find intersection between current box and a given one
Parameters
----------
box : BoxN
a BoxN object
Returns
-------
new_box : BoxN
a BoxN object
"""
new_box = BoxN(np.zeros((2,self.ndim)),ndim=self.ndim)
for k in range(self.ndim):
newmin = max(self.bd[0,k],box.bd[0,k])
newmax = min(self.bd[1,k],box.bd[1,k])
if (newmax>newmin):
new_box.bd[0,k]= newmin
new_box.bd[1,k]= newmax
new_box.mesure()
return(new_box)
def bdiff(self,box):
""" OBSOLETE
USE self.intersect instead !!!
"""
new_box = BoxN(np.zeros((2,self.ndim)),ndim=self.ndim)
for k in range(self.ndim):
newmin = max(self.bd[0,k],box.bd[0,k])
newmax = min(self.bd[1,k],box.bd[1,k])
if (newmax>newmin):
new_box.bd[0,k]= newmin
new_box.bd[1,k]= newmax
new_box.mesure()
return(new_box)
def _show3(self,col='r',Id=[0],H_Id=0,alpha=0.2):
mapp=np.array([[0,0,0],[0,1,0],[1,1,0],[1,0,0],[0,0,1],[0,1,1],[1,1,1],[1,0,1]],dtype='int')
b= np.array([self.bd[mapp[:,0],0],self.bd[mapp[:,1],1],self.bd[mapp[:,2],2]])
edges=[[0,1,2],
[0,2,3],
[0,1,5],
[0,4,5],
[1,5,2],
[5,6,2],
[2,6,7],
[2,7,3],
[0,3,4],
[3,7,4],
[4,5,6],
[4,6,7]]
# trick for correcting color assignement
mesh=tvtk.PolyData(points=b.T,polys=edges)
if col =='r':
color=(1,0,0)
elif col =='b':
color=(0,0,1)
mlab.pipeline.surface(mesh,opacity=alpha,color=color)
def show3(self,dim=(0,1,2),col='r',Id=[0],H_Id=0,alpha=0.2):
"""Show box into geomview
generate a geomview file which allow to represent a box.
Parameters
----------
dim : tuple
chose dimension to display. default : (0,1,2)
col : string
choose box color. compliant with matplotlib colorConverter. default 'r'
Id : list
Identity of boxes to show.Default : [0]
alpha : float
set transparency. Default 0.2
Returns
-------
fname : string
name of the generated file
"""
#for k in range(len(self.bb)):
b = np.zeros((6,4,3)).astype('int')
b[0,:,:] = np.array([[0,0,0],[1,0,0],[1,1,0],[0,1,0]])
b[1,:,:] = np.array([[1,0,0],[1,0,1],[1,1,1],[1,1,0]])
b[2,:,:] = np.array([[0,0,0],[0,0,1],[0,1,1],[0,1,0]])
b[3,:,:] = np.array([[0,1,0],[1,1,0],[1,1,1],[0,1,1]])
b[4,:,:] = np.array([[0,0,1],[1,0,1],[1,1,1],[0,1,1]])
b[5,:,:] = np.array([[0,0,0],[0,0,1],[1,0,1],[1,0,0]])
trans = -np.sort(-np.linspace(0.1,0.5,H_Id),-1)
if self.parmsh['interactive']==True:
print('(read geometry {define foo \n')
print("LIST\n")
print("{appearance {-edge material {alpha "+str(alpha)+ " diffuse 1 0 0" +" }}")
# 6 faces du cube
for k in range(6):
print("{QUAD ")
# 4 sommets de la face
for li in range(4):
x = str(self.bd[b[k,i,0],dim[0]])
y = str(self.bd[b[k,i,1],dim[1]])
z = str(self.bd[b[k,i,2],dim[2]])
print(x+" "+y+" "+z+" ")
print("} ")
print('}})')
else:
ch ='_'
for c in Id:
ch = ch+str(c)+'_'
fname = "box"+ch+".list"
filename = basename+"/geom/"+fname
fd = open(filename,"w")
fd.write("LIST\n")
if self.ndim==3 :
for k in range(6):
# 6 faces du cube
#filebbk = "face" +str(Id)+str(o)+str(l)+str(k)+".quad"
#fdk = open("./geom/"+filebbk,"w")
if col=='r':
col = " 1 0 0 "
elif col=='b':
col = " 0 0 1 "
elif col=='m':
col = " 1 0 1 "
elif col=='y':
col = " 1 1 0 "
elif col=='c':
col = " 0 1 1 "
elif col=='g':
col = " 0 1 0 "
elif col=='k':
col = " 0 0 0 "
elif col=='orange':
col = " 1 1 0.125 "
elif col=='skyblue':
col = " 0.91 1 1 "
fd.write("{appearance {-edge material {alpha "+str(alpha)+" diffuse "+col +" }}")
fd.write("{ QUAD\n")
# 4 points de la face
for i in range(4):
x = str(self.bd[b[k,i,0],dim[0]])
y = str(self.bd[b[k,i,1],dim[1]])
z = str(self.bd[b[k,i,2],dim[2]])
fd.write(x+" "+y+" "+z+"\n")
# if self.ndim==2 :
# for i in range(4):
# x = str(self.bd[b[k,i,0],dim[0]])
# y = str(self.bd[b[k,i,1],dim[1]])
# z = str(5.0)
# fd.write(x+" "+y+" "+z+"\n")
fd.write(" }}\n")
fd.close()
if self.ndim==2 :
Z=1.0*np.array(range(0,2)*len(self.bd))
for k in range(6):
# 6 faces du cube
#filebbk = "face" +str(Id)+str(o)+str(l)+str(k)+".quad"
#fdk = open("./geom/"+filebbk,"w")
if col=='r':
col = " 1 0 0 "
elif col=='b':
col = " 0 0 1 "
elif col=='m':
col = " 1 0 1 "
elif col=='y':
col = " 1 1 0 "
elif col=='c':
col = " 0 1 1 "
elif col=='g':
col = " 0 1 0 "
elif col=='k':
col = " 0 0 0 "
elif col=='orange':
col = " 1 0.59 0.125 "
elif col=='skyblue':
col = " 0.61 1 1 "
fd.write("{appearance {-edge material {alpha "+str(alpha)+" diffuse "+col +" }}")
fd.write("{ QUAD\n")
# 4 points de la face
for i in range(4):
x = str(self.bd[b[k,i,0],dim[0]])
y = str(self.bd[b[k,i,1],dim[1]])
# z = str(self.bd[b[k,i,2],dim[2]])
z = str(Z[b[k,i,2]])
fd.write(x+" "+y+" "+z+"\n")
# if self.ndim==2 :
# for i in range(4):
# x = str(self.bd[b[k,i,0],dim[0]])
# y = str(self.bd[b[k,i,1],dim[1]])
# z = str(5.0)
# fd.write(x+" "+y+" "+z+"\n")
fd.write(" }}\n")
fd.close()
if self.parmsh['display']:
chaine = "geomview -nopanel -b 1 1 1 " + filename + " 2>/dev/null &"
os.system(chaine)
return(fname)
|
pylayers/pylayers
|
pylayers/location/geometric/util/boxn.py
|
Python
|
mit
| 27,884 | 0.029373 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import date
from django.conf import settings
def settings_context(request):
"""
Makes available a template var for some interesting var in settings.py
"""
try:
ITEMS_PER_PAGE = settings.ITEMS_PER_PAGE
except AttributeError:
print "oooo"
ITEMS_PER_PAGE = 20
try:
TAGS_PER_PAGE = settings.TAGS_PER_PAGE
except AttributeError:
TAGS_PER_PAGE = 200
return {"ITEMS_PER_PAGE": ITEMS_PER_PAGE, "TAGS_PER_PAGE": TAGS_PER_PAGE}
|
matagus/django-jamendo
|
apps/jamendo/context_processors.py
|
Python
|
bsd-3-clause
| 558 | 0.005376 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('bookmarks', '0004_auto_20160901_2322'),
]
operations = [
migrations.RunSQL("DROP TABLE bookmarks_bookmark;"),
migrations.RunSQL("ALTER TABLE core_bookmark RENAME TO bookmarks_bookmark;"),
migrations.RunSQL("UPDATE django_content_type SET app_label='bookmarks' WHERE app_label='core';"),
]
|
tom-henderson/bookmarks
|
bookmarks/bookmarks/migrations/0005_rename_app.py
|
Python
|
mit
| 431 | 0.006961 |
'''
cloudminingstatus.py
@summary: Show selected API data from cloudhasher and miningpool.
@author: Andreas Krueger
@since: 12 Feb 2017
@contact: https://github.com/drandreaskrueger
@copyright: @author @since @license
@license: Donationware, see README.md. Plus see LICENSE.
@version: v0.1.0
@status: It is working well.
@todo: Make it into webservice?
'''
from __future__ import print_function
import time
import sys
import pprint
import requests # pip install requests
SLEEP_SECONDS= 5*60
SHOW_COMPOSITE_RESULTS = True
try:
from credentials_ME import POOL_API_USERNAME, HASHER_API_ID, HASHER_API_KEY
except:
from credentials import POOL_API_USERNAME, HASHER_API_ID, HASHER_API_KEY
POOL_API_URL="http://soil.miners-zone.net/apisoil/accounts/%s"
HASHER_ORDERS_API_URL="https://www.nicehash.com/api?method=orders.get&my&algo=20&location=0&id=%s&key=%s"
HASHER_BALANCE_API_URL="https://www.nicehash.com/api?method=balance&id=%s&key=%s" # unused
def humanTime(epoch):
return time.strftime("GMT %H:%M:%S %a %d %b %Y", time.gmtime(epoch))
POOL_JSON=[('currentHashrate', (lambda x: "%6.2f MHash/s 30m average" % (x/1000000.0))),
('hashrate' , (lambda x: "%6.2f MHash/s 3h average" % (x/1000000.0))),
('paymentsTotal' , (lambda x:x)),
('stats' , (lambda x: "%10.4f SOIL paid" % (float(x['paid'])/1000000000))),
('stats' , (lambda x: "%10.4f SOIL balance" % (float(x['balance'])/1000000000))),
('24hreward',(lambda x: "%10.4f SOIL" % (float(x)/1000000000))),
('stats' , (lambda x: "%d blocksFound" % (x['blocksFound']))),
('stats' , (lambda x: "%s lastShare" % (humanTime(x['lastShare'])))),
('workers' , (lambda x: "%s last beat" % (humanTime(x['0']['lastBeat'])))),
('workers' , (lambda x: "%s Online" % (not bool(x['0']['offline'])))),
('workersTotal', (lambda x:x)),
]
HASHER_JSON_PATH=('result', 'orders', 0)
HASHER_JSON=[
('alive', (lambda x: x)),
('workers', (lambda x: x)),
('id', (lambda x: x)),
('pool_host', (lambda x: x)),
('pool_user', (lambda x: x)),
('limit_speed', (lambda x: "%6.2f MHash/s" % (float(x)*1000))),
('accepted_speed', (lambda x: "%6.2f MHash/s" % (float(x)*1000))),
('btc_paid', (lambda x: x)),
('btc_avail', (lambda x: x)),
('price', (lambda x: "%s BTC/GH/Day" % x)),
('end', (lambda x: "%4.2f days order lifetime" % (x/1000.0/60/60/24))),
]
def getJsonData(url):
"""
get url, check for status_code==200, return as json
"""
try:
r=requests.get(url)
except Exception as e:
print ("no connection: ", e)
return False
if r.status_code != 200:
print ("not answered OK==200, but ", r.status_code)
return False
try:
j=r.json()
except Exception as e:
print ("no json, text:")
print (r.text)
# raise e
return False
return j
def showPoolData(url):
"""
gets all json data from pool, but shows only what is in POOL_JSON
"""
print ("Pool:")
j=getJsonData(url)
if not j:
return False
# pprint.pprint (j)
for Jkey, Jfn in POOL_JSON:
print (Jfn(j[Jkey]), "(%s)" % Jkey)
return j
def showHasherData(url):
"""
gets all json data from cloudhasher, but shows only what is in HASHER_JSON
"""
print ("CloudHasher:")
j=getJsonData(url)
if not j:
return False
# pprint.pprint (j)
# climb down into the one branch with all the interesting data:
j=j [HASHER_JSON_PATH[0]] [HASHER_JSON_PATH[1]] [HASHER_JSON_PATH[2]]
# pprint.pprint (j)
for Jkey, Jfn in HASHER_JSON:
print (Jfn(j[Jkey]), "(%s)" % Jkey)
estimate = (float(j['btc_avail']) / ( float(j['price'])*float(j['accepted_speed'])) )
print ("%.2f days" % estimate, end='')
print ("(remaining btc / order price / hashrate)")
return j
def showCompositeResults(pooldata, hasherdata):
"""
Estimates a coin prices by money spent versus money mined.
N.B.: In this form probably only be roughly correct
during first buy order? We'll see.
"""
coinsMined = float(pooldata['stats']['paid'])
coinsMined += float(pooldata['stats']['balance'])
coinsMined /= 1000000000
hashingCostsBtc = float(hasherdata['btc_paid'])
satoshiPrice = hashingCostsBtc / coinsMined * 100000000
print ("%.1f Satoshi/SOIL (mining price approx)" % satoshiPrice)
return satoshiPrice
def loop(sleepseconds):
"""
Shows both, then sleeps, the repeats.
"""
while True:
print ()
pooldata=showPoolData(url=POOL_API_URL%POOL_API_USERNAME)
print ()
hasherdata=showHasherData(url=HASHER_ORDERS_API_URL%(HASHER_API_ID, HASHER_API_KEY))
print ()
if SHOW_COMPOSITE_RESULTS and pooldata and hasherdata:
showCompositeResults(pooldata, hasherdata)
print ()
print (humanTime(time.time()), end='')
print ("... sleep %s seconds ..." % sleepseconds)
time.sleep(sleepseconds)
def checkCredentials():
"""
See credentials.py
"""
yourCredentials=(POOL_API_USERNAME, HASHER_API_ID, HASHER_API_KEY)
if "" in yourCredentials:
print ("You must fill in credentials.py first.")
print (yourCredentials)
return False
else:
return True
if __name__ == '__main__':
if not checkCredentials():
sys.exit()
try:
loop(sleepseconds=SLEEP_SECONDS)
except KeyboardInterrupt:
print ("Bye.")
sys.exit()
|
drandreaskrueger/cloudminingstatus
|
cloudminingstatus.py
|
Python
|
agpl-3.0
| 5,999 | 0.015503 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
except:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
|
haamoon/tensorpack
|
examples/DeepQNetwork/common.py
|
Python
|
apache-2.0
| 3,829 | 0.000261 |
# coding=utf-8
from kombu import Connection, Exchange, Queue, Consumer
from kombu.async import Hub
web_exchange = Exchange('web_develop', 'direct', durable=True)
standard_queue = Queue('standard', exchange=web_exchange,
routing_key='web.develop')
URI = 'librabbitmq://dongwm:123456@localhost:5672/web_develop'
hub = Hub()
def on_message(body, message):
print("Body:'%s', Headers:'%s', Payload:'%s'" % (
body, message.content_encoding, message.payload))
message.ack()
with Connection(URI) as connection:
connection.register_with_event_loop(hub)
with Consumer(connection, standard_queue, callbacks=[on_message]):
try:
hub.run_forever()
except KeyboardInterrupt:
exit(1)
|
dongweiming/web_develop
|
chapter9/section4/kombu_consumer.py
|
Python
|
gpl-3.0
| 761 | 0.001314 |
#!/usr/bin/env python
import os, sys
from polib import pofile
from config import CONFIGURATION
from extract import SOURCE_WARN
from execute import execute
TRANSIFEX_HEADER = 'Translations in this file have been downloaded from %s'
TRANSIFEX_URL = 'https://www.transifex.com/projects/p/edx-studio/'
def push():
execute('tx push -s')
def pull():
for locale in CONFIGURATION.locales:
if locale != CONFIGURATION.source_locale:
#execute('tx pull -l %s' % locale)
execute('tx pull --all')
clean_translated_locales()
def clean_translated_locales():
"""
Strips out the warning from all translated po files
about being an English source file.
"""
for locale in CONFIGURATION.locales:
if locale != CONFIGURATION.source_locale:
clean_locale(locale)
def clean_locale(locale):
"""
Strips out the warning from all of a locale's translated po files
about being an English source file.
Iterates over machine-generated files.
"""
dirname = CONFIGURATION.get_messages_dir(locale)
for filename in ('django-partial.po', 'djangojs.po', 'mako.po'):
clean_file(dirname.joinpath(filename))
def clean_file(file):
"""
Strips out the warning from a translated po file about being an English source file.
Replaces warning with a note about coming from Transifex.
"""
po = pofile(file)
if po.header.find(SOURCE_WARN) != -1:
new_header = get_new_header(po)
new = po.header.replace(SOURCE_WARN, new_header)
po.header = new
po.save()
def get_new_header(po):
team = po.metadata.get('Language-Team', None)
if not team:
return TRANSIFEX_HEADER % TRANSIFEX_URL
else:
return TRANSIFEX_HEADER % team
if __name__ == '__main__':
if len(sys.argv)<2:
raise Exception("missing argument: push or pull")
arg = sys.argv[1]
if arg == 'push':
push()
elif arg == 'pull':
pull()
else:
raise Exception("unknown argument: (%s)" % arg)
|
praveen-pal/edx-platform
|
i18n/transifex.py
|
Python
|
agpl-3.0
| 2,066 | 0.005808 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('organizations', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='organization',
name='logo',
field=models.ImageField(help_text='Please add only .PNG files for logo images. This logo will be used on certificates.', max_length=255, null=True, upload_to='organization_logos', blank=True),
),
]
|
edx/edx-organizations
|
organizations/migrations/0002_auto_20170117_1434.py
|
Python
|
agpl-3.0
| 490 | 0.002041 |
# Copyright (C) 2003-2009 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Some unit tests for utility functions.
"""
from binascii import hexlify
import errno
import os
from hashlib import sha1
import unittest
import paramiko.util
from paramiko.util import lookup_ssh_host_config as host_config, safe_string
from paramiko.py3compat import StringIO, byte_ord, b
# Note some lines in this configuration have trailing spaces on purpose
test_config_file = """\
Host *
User robey
IdentityFile =~/.ssh/id_rsa
# comment
Host *.example.com
\tUser bjork
Port=3333
Host *
"""
dont_strip_whitespace_please = "\t \t Crazy something dumb "
test_config_file += dont_strip_whitespace_please
test_config_file += """
Host spoo.example.com
Crazy something else
"""
test_hosts_file = """\
secure.example.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEA1PD6U2/TVxET6lkpKhOk5r\
9q/kAYG6sP9f5zuUYP8i7FOFp/6ncCEbbtg/lB+A3iidyxoSWl+9jtoyyDOOVX4UIDV9G11Ml8om3\
D+jrpI9cycZHqilK0HmxDeCuxbwyMuaCygU9gS2qoRvNLWZk70OpIKSSpBo0Wl3/XUmz9uhc=
happy.example.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEA8bP1ZA7DCZDB9J0s50l31M\
BGQ3GQ/Fc7SX6gkpXkwcZryoi4kNFhHu5LvHcZPdxXV1D+uTMfGS1eyd2Yz/DoNWXNAl8TI0cAsW\
5ymME3bQ4J/k1IKxCtz/bAlAqFgKoc+EolMziDYqWIATtW0rYTJvzGAzTmMj80/QpsFH+Pc2M=
"""
# for test 1:
from paramiko import *
class UtilTest(unittest.TestCase):
def test_import(self):
"""
verify that all the classes can be imported from paramiko.
"""
symbols = list(globals().keys())
self.assertTrue("Transport" in symbols)
self.assertTrue("SSHClient" in symbols)
self.assertTrue("MissingHostKeyPolicy" in symbols)
self.assertTrue("AutoAddPolicy" in symbols)
self.assertTrue("RejectPolicy" in symbols)
self.assertTrue("WarningPolicy" in symbols)
self.assertTrue("SecurityOptions" in symbols)
self.assertTrue("SubsystemHandler" in symbols)
self.assertTrue("Channel" in symbols)
self.assertTrue("RSAKey" in symbols)
self.assertTrue("DSSKey" in symbols)
self.assertTrue("Message" in symbols)
self.assertTrue("SSHException" in symbols)
self.assertTrue("AuthenticationException" in symbols)
self.assertTrue("PasswordRequiredException" in symbols)
self.assertTrue("BadAuthenticationType" in symbols)
self.assertTrue("ChannelException" in symbols)
self.assertTrue("SFTP" in symbols)
self.assertTrue("SFTPFile" in symbols)
self.assertTrue("SFTPHandle" in symbols)
self.assertTrue("SFTPClient" in symbols)
self.assertTrue("SFTPServer" in symbols)
self.assertTrue("SFTPError" in symbols)
self.assertTrue("SFTPAttributes" in symbols)
self.assertTrue("SFTPServerInterface" in symbols)
self.assertTrue("ServerInterface" in symbols)
self.assertTrue("BufferedFile" in symbols)
self.assertTrue("Agent" in symbols)
self.assertTrue("AgentKey" in symbols)
self.assertTrue("HostKeys" in symbols)
self.assertTrue("SSHConfig" in symbols)
self.assertTrue("util" in symbols)
def test_parse_config(self):
global test_config_file
f = StringIO(test_config_file)
config = paramiko.util.parse_ssh_config(f)
self.assertEqual(
config._config,
[
{"host": ["*"], "config": {}},
{
"host": ["*"],
"config": {
"identityfile": ["~/.ssh/id_rsa"],
"user": "robey",
},
},
{
"host": ["*.example.com"],
"config": {"user": "bjork", "port": "3333"},
},
{"host": ["*"], "config": {"crazy": "something dumb"}},
{
"host": ["spoo.example.com"],
"config": {"crazy": "something else"},
},
],
)
def test_host_config(self):
global test_config_file
f = StringIO(test_config_file)
config = paramiko.util.parse_ssh_config(f)
for host, values in {
"irc.danger.com": {
"crazy": "something dumb",
"hostname": "irc.danger.com",
"user": "robey",
},
"irc.example.com": {
"crazy": "something dumb",
"hostname": "irc.example.com",
"user": "robey",
"port": "3333",
},
"spoo.example.com": {
"crazy": "something dumb",
"hostname": "spoo.example.com",
"user": "robey",
"port": "3333",
},
}.items():
values = dict(
values,
hostname=host,
identityfile=[os.path.expanduser("~/.ssh/id_rsa")],
)
self.assertEqual(
paramiko.util.lookup_ssh_host_config(host, config), values
)
def test_generate_key_bytes(self):
x = paramiko.util.generate_key_bytes(
sha1, b"ABCDEFGH", "This is my secret passphrase.", 64
)
hex = "".join(["%02x" % byte_ord(c) for c in x])
self.assertEqual(
hex,
"9110e2f6793b69363e58173e9436b13a5a4b339005741d5c680e505f57d871347b4239f14fb5c46e857d5e100424873ba849ac699cea98d729e57b3e84378e8b",
)
def test_host_keys(self):
with open("hostfile.temp", "w") as f:
f.write(test_hosts_file)
try:
hostdict = paramiko.util.load_host_keys("hostfile.temp")
self.assertEqual(2, len(hostdict))
self.assertEqual(1, len(list(hostdict.values())[0]))
self.assertEqual(1, len(list(hostdict.values())[1]))
fp = hexlify(
hostdict["secure.example.com"]["ssh-rsa"].get_fingerprint()
).upper()
self.assertEqual(b"E6684DB30E109B67B70FF1DC5C7F1363", fp)
finally:
os.unlink("hostfile.temp")
def test_host_config_expose_issue_33(self):
test_config_file = """
Host www13.*
Port 22
Host *.example.com
Port 2222
Host *
Port 3333
"""
f = StringIO(test_config_file)
config = paramiko.util.parse_ssh_config(f)
host = "www13.example.com"
self.assertEqual(
paramiko.util.lookup_ssh_host_config(host, config),
{"hostname": host, "port": "22"},
)
def test_eintr_retry(self):
self.assertEqual("foo", paramiko.util.retry_on_signal(lambda: "foo"))
# Variables that are set by raises_intr
intr_errors_remaining = [3]
call_count = [0]
def raises_intr():
call_count[0] += 1
if intr_errors_remaining[0] > 0:
intr_errors_remaining[0] -= 1
raise IOError(errno.EINTR, "file", "interrupted system call")
self.assertTrue(paramiko.util.retry_on_signal(raises_intr) is None)
self.assertEqual(0, intr_errors_remaining[0])
self.assertEqual(4, call_count[0])
def raises_ioerror_not_eintr():
raise IOError(errno.ENOENT, "file", "file not found")
self.assertRaises(
IOError,
lambda: paramiko.util.retry_on_signal(raises_ioerror_not_eintr),
)
def raises_other_exception():
raise AssertionError("foo")
self.assertRaises(
AssertionError,
lambda: paramiko.util.retry_on_signal(raises_other_exception),
)
def test_proxycommand_config_equals_parsing(self):
"""
ProxyCommand should not split on equals signs within the value.
"""
conf = """
Host space-delimited
ProxyCommand foo bar=biz baz
Host equals-delimited
ProxyCommand=foo bar=biz baz
"""
f = StringIO(conf)
config = paramiko.util.parse_ssh_config(f)
for host in ("space-delimited", "equals-delimited"):
self.assertEqual(
host_config(host, config)["proxycommand"], "foo bar=biz baz"
)
def test_proxycommand_interpolation(self):
"""
ProxyCommand should perform interpolation on the value
"""
config = paramiko.util.parse_ssh_config(
StringIO(
"""
Host specific
Port 37
ProxyCommand host %h port %p lol
Host portonly
Port 155
Host *
Port 25
ProxyCommand host %h port %p
"""
)
)
for host, val in (
("foo.com", "host foo.com port 25"),
("specific", "host specific port 37 lol"),
("portonly", "host portonly port 155"),
):
self.assertEqual(host_config(host, config)["proxycommand"], val)
def test_proxycommand_tilde_expansion(self):
"""
Tilde (~) should be expanded inside ProxyCommand
"""
config = paramiko.util.parse_ssh_config(
StringIO(
"""
Host test
ProxyCommand ssh -F ~/.ssh/test_config bastion nc %h %p
"""
)
)
self.assertEqual(
"ssh -F %s/.ssh/test_config bastion nc test 22"
% os.path.expanduser("~"),
host_config("test", config)["proxycommand"],
)
def test_host_config_test_negation(self):
test_config_file = """
Host www13.* !*.example.com
Port 22
Host *.example.com !www13.*
Port 2222
Host www13.*
Port 8080
Host *
Port 3333
"""
f = StringIO(test_config_file)
config = paramiko.util.parse_ssh_config(f)
host = "www13.example.com"
self.assertEqual(
paramiko.util.lookup_ssh_host_config(host, config),
{"hostname": host, "port": "8080"},
)
def test_host_config_test_proxycommand(self):
test_config_file = """
Host proxy-with-equal-divisor-and-space
ProxyCommand = foo=bar
Host proxy-with-equal-divisor-and-no-space
ProxyCommand=foo=bar
Host proxy-without-equal-divisor
ProxyCommand foo=bar:%h-%p
"""
for host, values in {
"proxy-with-equal-divisor-and-space": {
"hostname": "proxy-with-equal-divisor-and-space",
"proxycommand": "foo=bar",
},
"proxy-with-equal-divisor-and-no-space": {
"hostname": "proxy-with-equal-divisor-and-no-space",
"proxycommand": "foo=bar",
},
"proxy-without-equal-divisor": {
"hostname": "proxy-without-equal-divisor",
"proxycommand": "foo=bar:proxy-without-equal-divisor-22",
},
}.items():
f = StringIO(test_config_file)
config = paramiko.util.parse_ssh_config(f)
self.assertEqual(
paramiko.util.lookup_ssh_host_config(host, config), values
)
def test_host_config_test_identityfile(self):
test_config_file = """
IdentityFile id_dsa0
Host *
IdentityFile id_dsa1
Host dsa2
IdentityFile id_dsa2
Host dsa2*
IdentityFile id_dsa22
"""
for host, values in {
"foo": {"hostname": "foo", "identityfile": ["id_dsa0", "id_dsa1"]},
"dsa2": {
"hostname": "dsa2",
"identityfile": ["id_dsa0", "id_dsa1", "id_dsa2", "id_dsa22"],
},
"dsa22": {
"hostname": "dsa22",
"identityfile": ["id_dsa0", "id_dsa1", "id_dsa22"],
},
}.items():
f = StringIO(test_config_file)
config = paramiko.util.parse_ssh_config(f)
self.assertEqual(
paramiko.util.lookup_ssh_host_config(host, config), values
)
def test_config_addressfamily_and_lazy_fqdn(self):
"""
Ensure the code path honoring non-'all' AddressFamily doesn't asplode
"""
test_config = """
AddressFamily inet
IdentityFile something_%l_using_fqdn
"""
config = paramiko.util.parse_ssh_config(StringIO(test_config))
assert config.lookup(
"meh"
) # will die during lookup() if bug regresses
def test_clamp_value(self):
self.assertEqual(32768, paramiko.util.clamp_value(32767, 32768, 32769))
self.assertEqual(32767, paramiko.util.clamp_value(32767, 32765, 32769))
self.assertEqual(32769, paramiko.util.clamp_value(32767, 32770, 32769))
def test_config_dos_crlf_succeeds(self):
config_file = StringIO("host abcqwerty\r\nHostName 127.0.0.1\r\n")
config = paramiko.SSHConfig()
config.parse(config_file)
self.assertEqual(config.lookup("abcqwerty")["hostname"], "127.0.0.1")
def test_get_hostnames(self):
f = StringIO(test_config_file)
config = paramiko.util.parse_ssh_config(f)
self.assertEqual(
config.get_hostnames(), {"*", "*.example.com", "spoo.example.com"}
)
def test_quoted_host_names(self):
test_config_file = """\
Host "param pam" param "pam"
Port 1111
Host "param2"
Port 2222
Host param3 parara
Port 3333
Host param4 "p a r" "p" "par" para
Port 4444
"""
res = {
"param pam": {"hostname": "param pam", "port": "1111"},
"param": {"hostname": "param", "port": "1111"},
"pam": {"hostname": "pam", "port": "1111"},
"param2": {"hostname": "param2", "port": "2222"},
"param3": {"hostname": "param3", "port": "3333"},
"parara": {"hostname": "parara", "port": "3333"},
"param4": {"hostname": "param4", "port": "4444"},
"p a r": {"hostname": "p a r", "port": "4444"},
"p": {"hostname": "p", "port": "4444"},
"par": {"hostname": "par", "port": "4444"},
"para": {"hostname": "para", "port": "4444"},
}
f = StringIO(test_config_file)
config = paramiko.util.parse_ssh_config(f)
for host, values in res.items():
self.assertEquals(
paramiko.util.lookup_ssh_host_config(host, config), values
)
def test_quoted_params_in_config(self):
test_config_file = """\
Host "param pam" param "pam"
IdentityFile id_rsa
Host "param2"
IdentityFile "test rsa key"
Host param3 parara
IdentityFile id_rsa
IdentityFile "test rsa key"
"""
res = {
"param pam": {"hostname": "param pam", "identityfile": ["id_rsa"]},
"param": {"hostname": "param", "identityfile": ["id_rsa"]},
"pam": {"hostname": "pam", "identityfile": ["id_rsa"]},
"param2": {"hostname": "param2", "identityfile": ["test rsa key"]},
"param3": {
"hostname": "param3",
"identityfile": ["id_rsa", "test rsa key"],
},
"parara": {
"hostname": "parara",
"identityfile": ["id_rsa", "test rsa key"],
},
}
f = StringIO(test_config_file)
config = paramiko.util.parse_ssh_config(f)
for host, values in res.items():
self.assertEquals(
paramiko.util.lookup_ssh_host_config(host, config), values
)
def test_quoted_host_in_config(self):
conf = SSHConfig()
correct_data = {
"param": ["param"],
'"param"': ["param"],
"param pam": ["param", "pam"],
'"param" "pam"': ["param", "pam"],
'"param" pam': ["param", "pam"],
'param "pam"': ["param", "pam"],
'param "pam" p': ["param", "pam", "p"],
'"param" pam "p"': ["param", "pam", "p"],
'"pa ram"': ["pa ram"],
'"pa ram" pam': ["pa ram", "pam"],
'param "p a m"': ["param", "p a m"],
}
incorrect_data = ['param"', '"param', 'param "pam', 'param "pam" "p a']
for host, values in correct_data.items():
self.assertEquals(conf._get_hosts(host), values)
for host in incorrect_data:
self.assertRaises(Exception, conf._get_hosts, host)
def test_safe_string(self):
vanilla = b"vanilla"
has_bytes = b"has \7\3 bytes"
safe_vanilla = safe_string(vanilla)
safe_has_bytes = safe_string(has_bytes)
expected_bytes = b"has %07%03 bytes"
err = "{!r} != {!r}"
msg = err.format(safe_vanilla, vanilla)
assert safe_vanilla == vanilla, msg
msg = err.format(safe_has_bytes, expected_bytes)
assert safe_has_bytes == expected_bytes, msg
def test_proxycommand_none_issue_418(self):
test_config_file = """
Host proxycommand-standard-none
ProxyCommand None
Host proxycommand-with-equals-none
ProxyCommand=None
"""
for host, values in {
"proxycommand-standard-none": {
"hostname": "proxycommand-standard-none"
},
"proxycommand-with-equals-none": {
"hostname": "proxycommand-with-equals-none"
},
}.items():
f = StringIO(test_config_file)
config = paramiko.util.parse_ssh_config(f)
self.assertEqual(
paramiko.util.lookup_ssh_host_config(host, config), values
)
def test_proxycommand_none_masking(self):
# Re: https://github.com/paramiko/paramiko/issues/670
source_config = """
Host specific-host
ProxyCommand none
Host other-host
ProxyCommand other-proxy
Host *
ProxyCommand default-proxy
"""
config = paramiko.SSHConfig()
config.parse(StringIO(source_config))
# When bug is present, the full stripping-out of specific-host's
# ProxyCommand means it actually appears to pick up the default
# ProxyCommand value instead, due to cascading. It should (for
# backwards compatibility reasons in 1.x/2.x) appear completely blank,
# as if the host had no ProxyCommand whatsoever.
# Threw another unrelated host in there just for sanity reasons.
self.assertFalse("proxycommand" in config.lookup("specific-host"))
self.assertEqual(
config.lookup("other-host")["proxycommand"], "other-proxy"
)
self.assertEqual(
config.lookup("some-random-host")["proxycommand"], "default-proxy"
)
|
mirrorcoder/paramiko
|
tests/test_util.py
|
Python
|
lgpl-2.1
| 19,156 | 0.000104 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from WebIDL import IDLExternalInterface, IDLInterface, WebIDLError
class Configuration:
"""
Represents global configuration state based on IDL parse data and
the configuration file.
"""
def __init__(self, filename, parseData):
# Read the configuration file.
glbl = {}
execfile(filename, glbl)
config = glbl['DOMInterfaces']
# Build descriptors for all the interfaces we have in the parse data.
# This allows callers to specify a subset of interfaces by filtering
# |parseData|.
self.descriptors = []
self.interfaces = {}
self.maxProtoChainLength = 0
for thing in parseData:
# Servo does not support external interfaces.
if isinstance(thing, IDLExternalInterface):
raise WebIDLError("Servo does not support external interfaces.",
[thing.location])
# Some toplevel things are sadly types, and those have an
# isInterface that doesn't mean the same thing as IDLObject's
# isInterface()...
if not isinstance(thing, IDLInterface):
continue
iface = thing
self.interfaces[iface.identifier.name] = iface
if iface.identifier.name not in config:
# Completely skip consequential interfaces with no descriptor
# if they have no interface object because chances are we
# don't need to do anything interesting with them.
if iface.isConsequential() and not iface.hasInterfaceObject():
continue
entry = {}
else:
entry = config[iface.identifier.name]
if not isinstance(entry, list):
assert isinstance(entry, dict)
entry = [entry]
self.descriptors.extend(
[Descriptor(self, iface, x) for x in entry])
# Mark the descriptors for which only a single nativeType implements
# an interface.
for descriptor in self.descriptors:
intefaceName = descriptor.interface.identifier.name
otherDescriptors = [d for d in self.descriptors
if d.interface.identifier.name == intefaceName]
descriptor.uniqueImplementation = len(otherDescriptors) == 1
self.enums = [e for e in parseData if e.isEnum()]
self.dictionaries = [d for d in parseData if d.isDictionary()]
self.callbacks = [c for c in parseData if
c.isCallback() and not c.isInterface()]
# Keep the descriptor list sorted for determinism.
self.descriptors.sort(lambda x, y: cmp(x.name, y.name))
def getInterface(self, ifname):
return self.interfaces[ifname]
def getDescriptors(self, **filters):
"""Gets the descriptors that match the given filters."""
curr = self.descriptors
for key, val in filters.iteritems():
if key == 'webIDLFile':
getter = lambda x: x.interface.filename()
elif key == 'hasInterfaceObject':
getter = lambda x: x.interface.hasInterfaceObject()
elif key == 'isCallback':
getter = lambda x: x.interface.isCallback()
elif key == 'isJSImplemented':
getter = lambda x: x.interface.isJSImplemented()
else:
getter = lambda x: getattr(x, key)
curr = filter(lambda x: getter(x) == val, curr)
return curr
def getEnums(self, webIDLFile):
return filter(lambda e: e.filename() == webIDLFile, self.enums)
@staticmethod
def _filterForFile(items, webIDLFile=""):
"""Gets the items that match the given filters."""
if not webIDLFile:
return items
return filter(lambda x: x.filename() == webIDLFile, items)
def getDictionaries(self, webIDLFile=""):
return self._filterForFile(self.dictionaries, webIDLFile=webIDLFile)
def getCallbacks(self, webIDLFile=""):
return self._filterForFile(self.callbacks, webIDLFile=webIDLFile)
def getDescriptor(self, interfaceName):
"""
Gets the appropriate descriptor for the given interface name.
"""
iface = self.getInterface(interfaceName)
descriptors = self.getDescriptors(interface=iface)
# We should have exactly one result.
if len(descriptors) != 1:
raise NoSuchDescriptorError("For " + interfaceName + " found " +
str(len(descriptors)) + " matches")
return descriptors[0]
def getDescriptorProvider(self):
"""
Gets a descriptor provider that can provide descriptors as needed.
"""
return DescriptorProvider(self)
class NoSuchDescriptorError(TypeError):
def __init__(self, str):
TypeError.__init__(self, str)
class DescriptorProvider:
"""
A way of getting descriptors for interface names
"""
def __init__(self, config):
self.config = config
def getDescriptor(self, interfaceName):
"""
Gets the appropriate descriptor for the given interface name given the
context of the current descriptor.
"""
return self.config.getDescriptor(interfaceName)
def MemberIsUnforgeable(member, descriptor):
return ((member.isAttr() or member.isMethod()) and
not member.isStatic() and
(member.isUnforgeable() or
bool(descriptor.interface.getExtendedAttribute("Unforgeable"))))
class Descriptor(DescriptorProvider):
"""
Represents a single descriptor for an interface. See Bindings.conf.
"""
def __init__(self, config, interface, desc):
DescriptorProvider.__init__(self, config)
self.interface = interface
# Read the desc, and fill in the relevant defaults.
ifaceName = self.interface.identifier.name
# Callback types do not use JS smart pointers, so we should not use the
# built-in rooting mechanisms for them.
if self.interface.isCallback():
self.needsRooting = False
ty = "%sBinding::%s" % (ifaceName, ifaceName)
self.returnType = "Rc<%s>" % ty
self.argumentType = "???"
self.nativeType = ty
else:
self.needsRooting = True
self.returnType = "Root<%s>" % ifaceName
self.argumentType = "&%s" % ifaceName
self.nativeType = "*const %s" % ifaceName
self.concreteType = ifaceName
self.register = desc.get('register', True)
self.outerObjectHook = desc.get('outerObjectHook', 'None')
self.proxy = False
self.weakReferenceable = desc.get('weakReferenceable', False)
# If we're concrete, we need to crawl our ancestor interfaces and mark
# them as having a concrete descendant.
self.concrete = (not self.interface.isCallback() and
not self.interface.getExtendedAttribute("Abstract"))
self.hasUnforgeableMembers = (self.concrete and
any(MemberIsUnforgeable(m, self) for m in
self.interface.members))
self.operations = {
'IndexedGetter': None,
'IndexedSetter': None,
'IndexedDeleter': None,
'NamedGetter': None,
'NamedSetter': None,
'NamedDeleter': None,
'Stringifier': None,
}
def addOperation(operation, m):
if not self.operations[operation]:
self.operations[operation] = m
# Since stringifiers go on the prototype, we only need to worry
# about our own stringifier, not those of our ancestor interfaces.
for m in self.interface.members:
if m.isMethod() and m.isStringifier():
addOperation('Stringifier', m)
if self.concrete:
iface = self.interface
while iface:
for m in iface.members:
if not m.isMethod():
continue
def addIndexedOrNamedOperation(operation, m):
self.proxy = True
if m.isIndexed():
operation = 'Indexed' + operation
else:
assert m.isNamed()
operation = 'Named' + operation
addOperation(operation, m)
if m.isGetter():
addIndexedOrNamedOperation('Getter', m)
if m.isSetter():
addIndexedOrNamedOperation('Setter', m)
if m.isCreator():
addIndexedOrNamedOperation('Creator', m)
if m.isDeleter():
addIndexedOrNamedOperation('Deleter', m)
iface = iface.parent
if iface:
iface.setUserData('hasConcreteDescendant', True)
if self.proxy:
iface = self.interface
while iface.parent:
iface = iface.parent
iface.setUserData('hasProxyDescendant', True)
self.name = interface.identifier.name
# self.extendedAttributes is a dict of dicts, keyed on
# all/getterOnly/setterOnly and then on member name. Values are an
# array of extended attributes.
self.extendedAttributes = {'all': {}, 'getterOnly': {}, 'setterOnly': {}}
def addExtendedAttribute(attribute, config):
def add(key, members, attribute):
for member in members:
self.extendedAttributes[key].setdefault(member, []).append(attribute)
if isinstance(config, dict):
for key in ['all', 'getterOnly', 'setterOnly']:
add(key, config.get(key, []), attribute)
elif isinstance(config, list):
add('all', config, attribute)
else:
assert isinstance(config, str)
if config == '*':
iface = self.interface
while iface:
add('all', map(lambda m: m.name, iface.members), attribute)
iface = iface.parent
else:
add('all', [config], attribute)
self._binaryNames = desc.get('binaryNames', {})
self._binaryNames.setdefault('__legacycaller', 'LegacyCall')
self._binaryNames.setdefault('__stringifier', 'Stringifier')
self._internalNames = desc.get('internalNames', {})
for member in self.interface.members:
if not member.isAttr() and not member.isMethod():
continue
binaryName = member.getExtendedAttribute("BinaryName")
if binaryName:
assert isinstance(binaryName, list)
assert len(binaryName) == 1
self._binaryNames.setdefault(member.identifier.name,
binaryName[0])
self._internalNames.setdefault(member.identifier.name,
member.identifier.name.replace('-', '_'))
# Build the prototype chain.
self.prototypeChain = []
parent = interface
while parent:
self.prototypeChain.insert(0, parent.identifier.name)
parent = parent.parent
config.maxProtoChainLength = max(config.maxProtoChainLength,
len(self.prototypeChain))
def binaryNameFor(self, name):
return self._binaryNames.get(name, name)
def internalNameFor(self, name):
return self._internalNames.get(name, name)
def getExtendedAttributes(self, member, getter=False, setter=False):
def maybeAppendInfallibleToAttrs(attrs, throws):
if throws is None:
attrs.append("infallible")
elif throws is True:
pass
else:
raise TypeError("Unknown value for 'Throws'")
name = member.identifier.name
if member.isMethod():
attrs = self.extendedAttributes['all'].get(name, [])
throws = member.getExtendedAttribute("Throws")
maybeAppendInfallibleToAttrs(attrs, throws)
return attrs
assert member.isAttr()
assert bool(getter) != bool(setter)
key = 'getterOnly' if getter else 'setterOnly'
attrs = self.extendedAttributes['all'].get(name, []) + self.extendedAttributes[key].get(name, [])
throws = member.getExtendedAttribute("Throws")
if throws is None:
throwsAttr = "GetterThrows" if getter else "SetterThrows"
throws = member.getExtendedAttribute(throwsAttr)
maybeAppendInfallibleToAttrs(attrs, throws)
return attrs
def getParentName(self):
assert self.interface.parent is not None
return self.interface.parent.identifier.name
def hasDescendants(self):
return (self.interface.getUserData("hasConcreteDescendant", False) or
self.interface.getUserData("hasProxyDescendant", False))
def isGlobal(self):
"""
Returns true if this is the primary interface for a global object
of some sort.
"""
return (self.interface.getExtendedAttribute("Global") or
self.interface.getExtendedAttribute("PrimaryGlobal"))
# Some utility methods
def getModuleFromObject(object):
return object.location.filename().split('/')[-1].split('.webidl')[0] + 'Binding'
def getTypesFromDescriptor(descriptor):
"""
Get all argument and return types for all members of the descriptor
"""
members = [m for m in descriptor.interface.members]
if descriptor.interface.ctor():
members.append(descriptor.interface.ctor())
members.extend(descriptor.interface.namedConstructors)
signatures = [s for m in members if m.isMethod() for s in m.signatures()]
types = []
for s in signatures:
assert len(s) == 2
(returnType, arguments) = s
types.append(returnType)
types.extend(a.type for a in arguments)
types.extend(a.type for a in members if a.isAttr())
return types
def getTypesFromDictionary(dictionary):
"""
Get all member types for this dictionary
"""
types = []
curDict = dictionary
while curDict:
types.extend([m.type for m in curDict.members])
curDict = curDict.parent
return types
def getTypesFromCallback(callback):
"""
Get the types this callback depends on: its return type and the
types of its arguments.
"""
sig = callback.signatures()[0]
types = [sig[0]] # Return type
types.extend(arg.type for arg in sig[1]) # Arguments
return types
|
nikkisquared/servo
|
components/script/dom/bindings/codegen/Configuration.py
|
Python
|
mpl-2.0
| 15,248 | 0.000787 |
import os
import sys
import skimage.transform
import skimage.exposure
import time
import glob
import numpy as np
import mahotas
import random
import matplotlib
import matplotlib.pyplot as plt
import scipy
import scipy.ndimage
import json
from scipy.ndimage.filters import maximum_filter
base_path = os.path.dirname(__file__)
sys.path.insert(1,os.path.join(base_path, '../../common'))
sys.path.insert(2,os.path.join(base_path, '../../database'))
from utility import Utility
from settings import Paths
from project import Project
from paths import Paths
from db import DB
# the idea is to grow the labels to cover the whole membrane
# image and label should be [0,1]
def adjust_imprecise_boundaries(image, label, number_iterations=5):
label = label.copy()
label_orig = label.copy()
for i in xrange(number_iterations):
# grow labels by one pixel
label = maximum_filter(label, 2)
# only keep pixels that are on dark membrane
non_valid_label = np.logical_and(label==1, image>0.7)
label[non_valid_label] = 0
# make sure original labels are preserved
label = np.logical_or(label==1, label_orig==1)
return label
def deform_images(image1, image2, image3=None):
# assumes image is uint8
def apply_deformation(image, coordinates):
# ndimage expects uint8 otherwise introduces artifacts. Don't ask me why, its stupid.
deformed = scipy.ndimage.map_coordinates(image, coordinates, mode='reflect')
deformed = np.reshape(deformed, image.shape)
return deformed
if np.max(image1) < 1.1:
image1 = np.uint8(image1*255)
image2 = np.uint8(image2*255)
if not image3 is None:
image3 = np.uint8(image3*255)
displacement_x = np.random.normal(size=image1.shape, scale=10)
displacement_y = np.random.normal(size=image1.shape, scale=10)
# smooth over image
coords_x, coords_y = np.meshgrid(np.arange(0,image1.shape[0]), np.arange(0,image1.shape[1]), indexing='ij')
displacement_x = coords_x.flatten() + scipy.ndimage.gaussian_filter(displacement_x, sigma=5).flatten()
displacement_y = coords_y.flatten() + scipy.ndimage.gaussian_filter(displacement_y, sigma=5).flatten()
coordinates = np.vstack([displacement_x, displacement_y])
deformed1 = apply_deformation(image1, coordinates) / 255.0
deformed2 = apply_deformation(image2, coordinates) / 255.0
if not image3 is None:
deformed3 = apply_deformation(image3, coordinates)
return (deformed1, deformed2, deformed3)
return (deformed1, deformed2)
def deform_images_list(images):
# assumes image is uint8
def apply_deformation(image, coordinates):
# ndimage expects uint8 otherwise introduces artifacts. Don't ask me why, its stupid.
deformed = scipy.ndimage.map_coordinates(image, coordinates, mode='reflect')
deformed = np.reshape(deformed, image.shape)
return deformed
displacement_x = np.random.normal(size=images.shape[:2], scale=10)
displacement_y = np.random.normal(size=images.shape[:2], scale=10)
# smooth over image
coords_x, coords_y = np.meshgrid(np.arange(0,images.shape[0]), np.arange(0,images.shape[1]), indexing='ij')
displacement_x = coords_x.flatten() #+ scipy.ndimage.gaussian_filter(displacement_x, sigma=5).flatten()
displacement_y = coords_y.flatten() #+ scipy.ndimage.gaussian_filter(displacement_y, sigma=5).flatten()
coordinates = np.vstack([displacement_x, displacement_y])
deformed = images.copy()
for i in xrange(images.shape[2]):
deformed[:,:,i] = apply_deformation(np.uint8(images[:,:,i]), coordinates)
return deformed
def normalizeImage(img, saturation_level=0.05, doClahe=False): #was 0.005
if not doClahe:
sortedValues = np.sort( img.ravel())
minVal = np.float32(sortedValues[np.int(len(sortedValues) * (saturation_level / 2))])
maxVal = np.float32(sortedValues[np.int(len(sortedValues) * (1 - saturation_level / 2))])
normImg = np.float32(img - minVal) * (255 / (maxVal-minVal))
normImg[normImg<0] = 0
normImg[normImg>255] = 255
output = (np.float32(normImg) / 255.0)
return output
else:
output = skimage.exposure.equalize_adapthist(img)
return output
def generate_experiment_data_supervised(purpose='train', nsamples=1000, patchSize=29, balanceRate=0.5, rng=np.random):
start_time = time.time()
if os.path.exists('/media/vkaynig/Data1/Cmor_paper_data/'):
pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/'
else:
pathPrefix = '/n/pfister_lab/vkaynig/'
img_search_string_membraneImages = pathPrefix + 'labels/membranes_nonDilate/' + purpose + '/*.tif'
img_search_string_backgroundMaskImages = pathPrefix + 'labels/background_nonDilate/' + purpose + '/*.tif'
img_search_string_grayImages = pathPrefix + 'images/' + purpose + '/*.tif'
img_files_gray = sorted( glob.glob( img_search_string_grayImages ) )
img_files_label = sorted( glob.glob( img_search_string_membraneImages ) )
img_files_backgroundMask = sorted( glob.glob( img_search_string_backgroundMaskImages ) )
whole_set_patches = np.zeros((nsamples, patchSize*patchSize), dtype=np.float)
whole_set_labels = np.zeros(nsamples, dtype=np.int32)
#how many samples per image?
nsamples_perImage = np.uint(np.ceil(
(nsamples) / np.float(np.shape(img_files_gray)[0])
))
print 'using ' + np.str(nsamples_perImage) + ' samples per image.'
counter = 0
img = mahotas.imread(img_files_gray[0])
grayImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
labelImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
maskImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
for img_index in xrange(np.shape(img_files_gray)[0]):
img = mahotas.imread(img_files_gray[img_index])
img = normalizeImage(img)
grayImages[:,:,img_index] = img
label_img = mahotas.imread(img_files_label[img_index])
labelImages[:,:,img_index] = label_img
mask_img = mahotas.imread(img_files_backgroundMask[img_index])
maskImages[:,:,img_index] = mask_img
for img_index in xrange(np.shape(img_files_gray)[0]):
img = grayImages[:,:,img_index]
label_img = labelImages[:,:,img_index]
mask_img = maskImages[:,:,img_index]
#get rid of invalid image borders
border_patch = np.int(np.ceil(patchSize/2.0))
border = np.int(np.ceil(np.sqrt(2*(border_patch**2))))
label_img[:border,:] = 0 #top
label_img[-border:,:] = 0 #bottom
label_img[:,:border] = 0 #left
label_img[:,-border:] = 0 #right
mask_img[:border,:] = 0
mask_img[-border:,:] = 0
mask_img[:,:border] = 0
mask_img[:,-border:] = 0
membrane_indices = np.nonzero(label_img)
non_membrane_indices = np.nonzero(mask_img)
positiveSample = True
for i in xrange(nsamples_perImage):
if counter >= nsamples:
break
if positiveSample:
randmem = random.choice(xrange(len(membrane_indices[0])))
(row,col) = (membrane_indices[0][randmem],
membrane_indices[1][randmem])
label = 1.0
positiveSample = False
else:
randmem = random.choice(xrange(len(non_membrane_indices[0])))
(row,col) = (non_membrane_indices[0][randmem],
non_membrane_indices[1][randmem])
label = 0.0
positiveSample = True
imgPatch = img[row-border+1:row+border, col-border+1:col+border]
imgPatch = skimage.transform.rotate(imgPatch, random.choice(xrange(360)))
imgPatch = imgPatch[border-border_patch:border+border_patch-1,border-border_patch:border+border_patch-1]
if random.random() < 0.5:
imgPatch = np.fliplr(imgPatch)
imgPatch = np.rot90(imgPatch, random.randint(0,3))
whole_set_patches[counter,:] = imgPatch.flatten()
whole_set_labels[counter] = label
counter += 1
#normalize data
whole_data = np.float32(whole_set_patches)
whole_data = whole_data - 0.5
data = whole_data.copy()
labels = whole_set_labels.copy()
#remove the sorting in image order
shuffleIndex = rng.permutation(np.shape(labels)[0])
for i in xrange(np.shape(labels)[0]):
whole_data[i,:] = data[shuffleIndex[i],:]
whole_set_labels[i] = labels[shuffleIndex[i]]
data_set = (whole_data, whole_set_labels)
end_time = time.time()
total_time = (end_time - start_time)
print 'Running time: ' + '%.2fm' % (total_time / 60.)
rval = data_set
return rval
def generate_image_data(img, patchSize=29, rows=1):
img = normalizeImage(img)
# pad image borders
border = np.int(np.ceil(patchSize/2.0))
img_padded = np.pad(img, border, mode='reflect')
whole_set_patches = np.zeros((len(rows)*img.shape[1], patchSize**2))
counter = 0
for row in rows:
for col in xrange(img.shape[1]):
imgPatch = img_padded[row+1:row+2*border, col+1:col+2*border]
whole_set_patches[counter,:] = imgPatch.flatten()
counter += 1
#normalize data
whole_set_patches = np.float32(whole_set_patches)
whole_set_patches = whole_set_patches - 0.5
return whole_set_patches
def stupid_map_wrapper(parameters):
f = parameters[0]
args = parameters[1:]
return f(*args)
def gen_annotated_image(path, dim):
image = np.zeros( (dim[0], dim[1]) )
# assumes
image[:,:] = 0
annotations = []
# load the annotations
with open( path ) as labels_f:
annotations = json.load( labels_f )
n_labels = len(annotations)
if n_labels == 0:
return
for i_label in range(n_labels):
i_coord = 0
coordinates = annotations[ i_label ]
for i in range(0, len(coordinates), 2):
x = min(coordinates[i], dim[1]-1)
y = min(coordinates[i+1], dim[0]-1)
image[x][y] = i_label + 1
return image, annotations
def gen_training_data(project, nsamples=1000, patchSize=29, outPatchSize=1):
def relabel(image):
id_list = np.unique(image)
for index, id in enumerate(id_list):
image[image==id] = index
return image
print 'gen_data'
if project == None:
return
n_labels = len( project.labels )
start_time = time.time()
files_gray = []
files_annotations = []
images = DB.getTrainingImages( project.id, new=False )
path = Paths.TrainGrayscale
# build the list of images to sample from while discarding those
# without annnotations.
for image in images:
d_path = '%s/%s.tif'%(path, image.id)
m_path = '%s/%s.%s.json'%(Paths.Labels, image.id, project.id)
if os.path.exists( d_path ) and os.path.exists( m_path ):
files_gray.append( d_path )
files_annotations.append( m_path )
whole_set_patches = np.zeros((nsamples, patchSize**2), dtype=np.float)
whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
# return nothing if images or annotations not found
if len( files_gray ) == 0 or len( files_annotations ) == 0:
return None
print files_gray
print files_annotations
whole_set_patches = np.zeros((nsamples, patchSize**2), dtype=np.float)
whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
#how many samples per image?
nsamples_perImage = np.uint(np.ceil( (nsamples) / np.float(np.shape(files_gray)[0]) ))
print 'using ' + np.str(nsamples_perImage) + ' samples per image.'
counter = 0
# pad image borders
border = np.int(np.ceil(patchSize/2.0))
pad = patchSize
n_samples_remaining = nsamples
n_images = len(files_gray)
n_samples_per_image = int(nsamples/n_images)
n_samples_per_label = [ int(nsamples/n_labels) for label in project.labels]
print 'n_samples_per_image:', n_samples_per_image
print 'n_samples_per_label:', n_samples_per_label
for i_image in range( n_images ):
img = mahotas.imread(files_gray[ i_image ])
ann, annotations = gen_annotated_image( files_annotations[ i_image ], img.shape )
img = np.pad(img, ((pad, pad), (pad, pad)), 'symmetric')
img = normalizeImage(img, doClahe=True)
# get the label indices
#indices = np.nonzero( ann )
ann = np.pad(ann, ((pad, pad), (pad, pad)), 'symmetric')
# set pixel values to label
ann = ann - 1
ann[ ann < 0 ] = 0
print ann.shape
print img.shape
print 'min-max'
print np.min( ann ), np.max( ann )
print np.min( img ), np.max( img )
#nsamples_perImage = int(n_samples_remaining/(n_images - img_index))
#n_img_samples = min( len(indices[0]), nsamples_perImage)
n_samples_per_image = int(n_samples_remaining/(n_images - i_image))
#n_n_img_samples = nsamples_perImage
print '--------'
print 'i_image:',i_image
print 'image',files_gray[i_image]
#print 'n_samples_remaining:', n_samples_remaining
#print 'n_samples_per_image:', n_samples_per_image
#print 'n_samples_per_image_label:',n_samples_per_image_label
print n_samples_per_label
# sample equally from each label
for i_label in range( n_labels ):
n_samples = int(n_samples_per_label[ i_label ]/(n_images - i_image))
if n_samples == 0:
continue
coordinates = annotations[ i_label ]
# stay within bounds of available labels
n_samples = min( len(coordinates)/2, n_samples )
print '--'
print 'i_label:', i_label
print 'n_samples:', n_samples
i_coord = 0
for i_sample in range( n_samples ):
(row, col) = (coordinates[i_coord], coordinates[i_coord+1])
print i_label, row, col
imgPatch = img[row:row+patchSize, col:col+patchSize]
annPatch = ann[row:row+patchSize, col:col+patchSize]
print 'sample#:', counter
print 'original'
print np.unique(annPatch)
print annPatch.flatten()
if random.random() < 0.5:
imgPatch = np.fliplr(imgPatch)
annPatch = np.fliplr(annPatch)
rotateInt = random.randint(0,3)
imgPatch = np.rot90(imgPatch, rotateInt)
annPatch = np.rot90(annPatch, rotateInt)
imgPatch, annPatch = deform_images( imgPatch, annPatch )
annPatch = np.round( annPatch )
imgPatch = imgPatch / np.double(np.max(imgPatch))
#annPatch = annPatch / np.double(np.max(annPatch))
offset_small_patch = int(np.ceil((patchSize - outPatchSize) / 2.0))
annPatch = annPatch[offset_small_patch:offset_small_patch+outPatchSize,
offset_small_patch:offset_small_patch+outPatchSize]
whole_set_patches[counter,:] = imgPatch.flatten()
whole_set_labels[counter] = np.int32(annPatch.flatten())
#whole_set_labels[counter] = np.int32(annPatch.flatten() > 0)
print 'modified:'
print 'row:', row, 'col:', col
print 'patch'
print whole_set_patches[counter,:]
print np.min( whole_set_patches[counter,:] ), np.max( whole_set_patches[counter,:] )
print 'labels'
print whole_set_labels[counter]
print np.unique( whole_set_labels[counter] )
counter += 1
# advance to next coordinate
i_coord += 2
#n_img_samples -= n_label_samples
#n_samples_remaining -= n_label_samples
n_samples_per_label[ i_label ] -= n_samples
if True:
continue
for i in range(n_img_samples):
randmem = random.choice(xrange(len(indices[0])))
(row,col) = (indices[0][randmem], indices[1][randmem])
print 'sampling from...', row, col
print 'img:', files_gray[img_index]
imgPatch = img[row:row+patchSize, col:col+patchSize]
annPatch = ann[row:row+patchSize, col:col+patchSize]
print 'original'
print np.unique(annPatch)
print annPatch.flatten()
if random.random() < 0.5:
imgPatch = np.fliplr(imgPatch)
annPatch = np.fliplr(annPatch)
rotateInt = random.randint(0,3)
imgPatch = np.rot90(imgPatch, rotateInt)
annPatch = np.rot90(annPatch, rotateInt)
imgPatch, annPatch = deform_images( imgPatch, annPatch )
annPatch = np.round( annPatch )
imgPatch = imgPatch / np.double(np.max(imgPatch))
#annPatch = annPatch / np.double(np.max(annPatch))
offset_small_patch = int(np.ceil((patchSize - outPatchSize) / 2.0))
annPatch = annPatch[offset_small_patch:offset_small_patch+outPatchSize,
offset_small_patch:offset_small_patch+outPatchSize]
whole_set_patches[counter,:] = imgPatch.flatten()
whole_set_labels[counter] = np.int32(annPatch.flatten())
#whole_set_labels[counter] = np.int32(annPatch.flatten() > 0)
print 'modified:'
print 'row:', row, 'col:', col
print 'patch'
print whole_set_patches[counter,:]
print np.min( whole_set_patches[counter,:] ), np.max( whole_set_patches[counter,:] )
print 'labels'
print whole_set_membranes[counter]
print np.unique( whole_set_membranes[counter] )
counter += 1
print counter
print '-----'
n_samples_remaining -= n_img_samples
print 'counter:', counter
print 'n_samples_per_label:', n_samples_per_label
#normalize data
whole_data = np.float32(whole_set_patches)
whole_data = whole_data - 0.5
print np.max(whole_data), np.min(whole_data)
print np.max(whole_set_labels), np.min(whole_set_labels)
data = whole_data.copy()
labels = whole_set_labels.copy()
#remove the sorting in image order
shuffleIndex = np.random.permutation(np.shape(labels)[0])
for i in xrange(np.shape(labels)[0]):
whole_data[i,:] = data[shuffleIndex[i],:]
whole_set_labels[i,:] = labels[shuffleIndex[i],:]
data_set = (whole_data, whole_set_labels)
end_time = time.time()
total_time = (end_time - start_time)
print 'Running time: ', total_time / 60.
print 'finished sampling data'
return data_set
# changed the patch sampling to use upper left corner instead of middle pixel
# for patch labels it doesn't matter and it makes sampling even and odd patches easier
def generate_experiment_data_patch_prediction(purpose='train', nsamples=1000, patchSize=29, outPatchSize=1):
print 'generate_experiment_data_patch_prediction'
exit(1)
def relabel(image):
id_list = np.unique(image)
for index, id in enumerate(id_list):
image[image==id] = index
return image
start_time = time.time()
pathPrefix = '/media/vkaynig/Data2/Cmor_paper_data/not_normalized/'
# pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/Thalamus-LGN/Data/25-175_train/'
#pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/Cerebellum-P7/Dense/'
# pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/Cortex-ECS/'
if not os.path.exists(pathPrefix):
pathPrefix = '/n/pfister_lab/vkaynig/'
# if purpose=='train':
# if np.random.random()>0.5:
# pathPrefix = pathPrefix + 'dense40_train/'
# else:
# pathPrefix = pathPrefix + 'dense49_train/'
# else:
# pathPrefix = pathPrefix + 'dense40_train/'
print "#################################"
print purpose
print pathPrefix
img_search_string_membraneImages = pathPrefix + 'labels/membranes_fullContour/' + purpose + '/*.tif'
img_search_string_labelImages = pathPrefix + 'labels/' + purpose + '/*.tif'
img_search_string_grayImages = pathPrefix + 'images/' + purpose + '/*.tif'
#<felix-addition>
pathPrefix = '/n/home00/fgonda/icon/data/reference/'
#img_search_string_membraneImages = pathPrefix + 'labels/membranes_fullContour/' + purpose + '/*.tif'
img_search_string_membraneImages = pathPrefix + 'labels/membranes/' + purpose + '/*.tif'
img_search_string_labelImages = pathPrefix + 'labels/' + purpose + '/*.tif'
img_search_string_grayImages = pathPrefix + 'images/' + purpose + '/*.tif'
#</felix-addition>
img_files_gray = sorted( glob.glob( img_search_string_grayImages ) )
img_files_membrane = sorted( glob.glob( img_search_string_membraneImages ) )
img_files_labels = sorted( glob.glob( img_search_string_labelImages ) )
print len(img_files_gray)
print len(img_files_membrane)
print len(img_files_labels)
whole_set_patches = np.zeros((nsamples, patchSize**2), dtype=np.float)
whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
whole_set_membranes = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
#how many samples per image?
nsamples_perImage = np.uint(np.ceil(
(nsamples) / np.float(np.shape(img_files_gray)[0])
))
print 'using ' + np.str(nsamples_perImage) + ' samples per image.'
img = mahotas.imread(img_files_gray[0])
grayImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
labelImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
membraneImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
maskImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
# read the data
# in random order
read_order = np.random.permutation(np.shape(img_files_gray)[0])
read_order = read_order[:nsamples]
for counter, img_index in enumerate(read_order):
#print img_files_gray[img_index]
img = mahotas.imread(img_files_gray[img_index])
# normalizes [0,1]
img = normalizeImage(img, doClahe=True)
grayImages[:,:,counter] = img
membrane_img = mahotas.imread(img_files_membrane[img_index])/255.
membraneImages[:,:,counter] = membrane_img
maskImages[:,:,counter] = 1.0
if purpose == 'validate':
label_img = mahotas.imread(img_files_labels[img_index])
label_img = np.double(label_img)
if label_img.ndim == 3:
label_img = label_img[:,:,0] + 256*label_img[:,:,1] + 256**2 * label_img[:,:,2]
labelImages[:,:,counter] = label_img
print
counter = 0
for img_index in xrange(nsamples):#np.shape(img_files_gray)[0]):
#print img_files_gray[read_order[img_index]]
img = grayImages[:,:,img_index]
label_img = labelImages[:,:,img_index]
membrane_img = membraneImages[:,:,img_index]
mask_img = maskImages[:,:,img_index]
if purpose=='train':
membrane_img = adjust_imprecise_boundaries(img, membrane_img, 0)
#get rid of invalid image borders
mask_img[:,-(patchSize-1):] = 0
mask_img[-(patchSize-1):,:] = 0
valid_indices = np.nonzero(mask_img)
for i in xrange(nsamples_perImage):
if counter >= nsamples:
break
randmem = random.choice(xrange(len(valid_indices[0])))
(row,col) = (valid_indices[0][randmem],
valid_indices[1][randmem])
imgPatch = img[row:row+patchSize, col:col+patchSize]
membranePatch = membrane_img[row:row+patchSize, col:col+patchSize]
labelPatch = label_img[row:row+patchSize, col:col+patchSize]
print 'sample#:', counter
print 'original'
print np.unique(imgPatch)
print imgPatch.flatten()
if random.random() < 0.5:
imgPatch = np.fliplr(imgPatch)
membranePatch = np.fliplr(membranePatch)
if purpose == 'validate':
labelPatch = np.fliplr(labelPatch)
rotateInt = random.randint(0,3)
imgPatch = np.rot90(imgPatch, rotateInt)
membranePatch = np.rot90(membranePatch, rotateInt)
if purpose=='validate':
labelPatch = np.rot90(labelPatch, rotateInt)
if purpose=='validate':
labelPatch = relabel(labelPatch)
imgPatch, membranePatch = deform_images(imgPatch, membranePatch)
# get rid of interpolation artifacts
membranePatch = np.round(membranePatch)
membranePatch, _ = mahotas.label(1-membranePatch)
else:
imgPatch, membranePatch = deform_images(imgPatch, membranePatch)
# get rid of interpolation artifacts
membranePatch = np.round(membranePatch)
imgPatch = imgPatch / np.double(np.max(imgPatch))
membranePatch = membranePatch / np.double(np.max(membranePatch))
# crop labelPatch to potentially smaller output size
offset_small_patch = int(np.ceil((patchSize - outPatchSize) / 2.0))
membranePatch = membranePatch[offset_small_patch:offset_small_patch+outPatchSize,
offset_small_patch:offset_small_patch+outPatchSize]
labelPatch = labelPatch[offset_small_patch:offset_small_patch+outPatchSize,
offset_small_patch:offset_small_patch+outPatchSize]
whole_set_patches[counter,:] = imgPatch.flatten()
whole_set_labels[counter] = labelPatch.flatten()
whole_set_membranes[counter] = np.int32(membranePatch.flatten() > 0)
print 'row:', row, 'col:', col
print 'patch'
print whole_set_patches[counter,:]
print np.min( whole_set_patches[counter,:] ), np.max( whole_set_patches[counter,:] )
print 'labels'
print whole_set_membranes[counter]
print np.unique( whole_set_membranes[counter] )
#print np.unique(whole_set_patches[counter,:])
counter += 1
#normalize data
whole_data = np.float32(whole_set_patches)
whole_data = whole_data - 0.5
print np.max(whole_data), np.min(whole_data)
print np.max(whole_set_membranes), np.min(whole_set_membranes)
data = whole_data.copy()
labels = whole_set_labels.copy()
membranes = whole_set_membranes.copy()
#remove the sorting in image order
shuffleIndex = np.random.permutation(np.shape(membranes)[0])
for i in xrange(np.shape(membranes)[0]):
whole_data[i,:] = data[shuffleIndex[i],:]
whole_set_labels[i,:] = labels[shuffleIndex[i],:]
whole_set_membranes[i,:] = membranes[shuffleIndex[i],:]
if purpose == 'validate':
data_set = (whole_data, whole_set_membranes, whole_set_labels)
else:
data_set = (whole_data, whole_set_membranes)
end_time = time.time()
total_time = (end_time - start_time)
print 'Running time: ', total_time / 60.
print 'finished sampling data'
return data_set
def gen_validation_data(project, nsamples=1000, patchSize=29, outPatchSize=1):
def relabel(image):
id_list = np.unique(image)
for index, id in enumerate(id_list):
image[image==id] = index
return image
return None
def generate_experiment_data_patch_prediction_layers(purpose='train', nsamples=1000, patchSize=29, outPatchSize=1, nr_layers=3):
def relabel(image):
id_list = np.unique(image)
for index, id in enumerate(id_list):
image[image==id] = index
return image
start_time = time.time()
if os.path.exists('/media/vkaynig/Data1/Cmor_paper_data/'):
pathPrefix = '/media/vkaynig/Data1/Cmor_paper_data/'
else:
pathPrefix = '/n/pfister_lab/vkaynig/'
img_search_string_membraneImages = pathPrefix + 'labels/membranes_fullContour/' + purpose + '/*.tif'
img_search_string_labelImages = pathPrefix + 'labels/' + purpose + '/*.tif'
img_search_string_grayImages = pathPrefix + 'images/' + purpose + '/*.tif'
img_files_gray = sorted( glob.glob( img_search_string_grayImages ) )
img_files_membrane = sorted( glob.glob( img_search_string_membraneImages ) )
img_files_labels = sorted( glob.glob( img_search_string_labelImages ) )
whole_set_patches = np.zeros((nsamples, nr_layers, patchSize**2), dtype=np.float)
whole_set_labels = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
whole_set_membranes = np.zeros((nsamples, outPatchSize**2), dtype=np.int32)
#how many samples per image?
nsamples_perImage = np.uint(np.ceil(
(nsamples) / np.float(np.shape(img_files_gray)[0])
))
print 'using ' + np.str(nsamples_perImage) + ' samples per image.'
counter = 0
img = mahotas.imread(img_files_gray[0])
grayImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
labelImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
membraneImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
maskImages = np.zeros((img.shape[0],img.shape[1], np.shape(img_files_gray)[0]))
# read the data
# in random order
#read_order = np.random.permutation(np.shape(img_files_gray)[0])
for img_index in range(np.shape(img_files_gray)[0]):
#print img_files_gray[img_index]
img = mahotas.imread(img_files_gray[img_index])
# normalizes [0,1]
img = normalizeImage(img)
grayImages[:,:,img_index] = img
membrane_img = mahotas.imread(img_files_membrane[img_index])/255.
membraneImages[:,:,img_index] = membrane_img
maskImages[:,:,img_index] = 1.0
if purpose == 'validate':
label_img = mahotas.imread(img_files_labels[img_index])
label_img = np.double(label_img)
labelImages[:,:,img_index] = label_img
for img_index in xrange(np.shape(img_files_gray)[0]):
img_cs = int(np.floor(nr_layers/2))
img_valid_range_indices = np.clip(range(img_index-img_cs,img_index+img_cs+1),0,np.shape(img_files_gray)[0]-1)
img = grayImages[:,:,img_valid_range_indices]
label_img = labelImages[:,:,img_index]
membrane_img = membraneImages[:,:,img_index]
mask_img = maskImages[:,:,img_index]
if purpose=='train':
# adjust according to middle image
membrane_img = adjust_imprecise_boundaries(img[:,:,img_cs], membrane_img, 0)
#get rid of invalid image borders
mask_img[:,-patchSize:] = 0
mask_img[-patchSize:,:] = 0
valid_indices = np.nonzero(mask_img)
for i in xrange(nsamples_perImage):
if counter >= nsamples:
break
randmem = random.choice(xrange(len(valid_indices[0])))
(row,col) = (valid_indices[0][randmem],
valid_indices[1][randmem])
imgPatch = img[row:row+patchSize, col:col+patchSize,:]
membranePatch = membrane_img[row:row+patchSize, col:col+patchSize]
labelPatch = label_img[row:row+patchSize, col:col+patchSize]
if random.random() < 0.5:
for flip_i in xrange(nr_layers):
imgPatch[:,:,flip_i] = np.fliplr(imgPatch[:,:,flip_i])
membranePatch = np.fliplr(membranePatch)
if purpose == 'validate':
labelPatch = np.fliplr(labelPatch)
rotateInt = random.randint(0,3)
for rot_i in xrange(nr_layers):
imgPatch[:,:,rot_i] = np.rot90(imgPatch[:,:,rot_i], rotateInt)
membranePatch = np.rot90(membranePatch, rotateInt)
if purpose=='validate':
labelPatch = np.rot90(labelPatch, rotateInt)
if purpose=='validate':
labelPatch = relabel(labelPatch)
deformed_images = deform_images_list(np.dstack([imgPatch*255, np.reshape(membranePatch*255,(patchSize,patchSize,1)), np.uint8(np.reshape(labelPatch,(patchSize,patchSize,1)))]))
imgPatch, membranePatch, labelPatch = np.split(deformed_images,[imgPatch.shape[2],imgPatch.shape[2]+1], axis=2)
else:
deformed_images = deform_images_list(np.dstack([imgPatch*255, np.reshape(membranePatch,(patchSize,patchSize,1))*255]))
imgPatch, membranePatch = np.split(deformed_images,[imgPatch.shape[2]], axis=2)
imgPatch = imgPatch / np.double(np.max(imgPatch))
membranePatch = membranePatch / np.double(np.max(membranePatch))
# crop labelPatch to potentially smaller output size
offset_small_patch = int(np.ceil((patchSize - outPatchSize) / 2.0))
membranePatch = membranePatch[offset_small_patch:offset_small_patch+outPatchSize,
offset_small_patch:offset_small_patch+outPatchSize]
labelPatch = labelPatch[offset_small_patch:offset_small_patch+outPatchSize,
offset_small_patch:offset_small_patch+outPatchSize]
#whole_set_patches = np.zeros((nsamples, nr_layers, patchSize**2), dtype=np.float)
for patch_i in xrange(nr_layers):
whole_set_patches[counter,patch_i,:] = imgPatch[:,:,patch_i].flatten()
whole_set_labels[counter] = labelPatch.flatten()
whole_set_membranes[counter] = np.int32(membranePatch.flatten() > 0)
counter += 1
#normalize data
whole_data = np.float32(whole_set_patches)
whole_data = whole_data - 0.5
data = whole_data.copy()
labels = whole_set_labels.copy()
membranes = whole_set_membranes.copy()
#remove the sorting in image order
shuffleIndex = np.random.permutation(np.shape(membranes)[0])
for i in xrange(np.shape(membranes)[0]):
whole_data[i,:,:] = data[shuffleIndex[i],:,:]
whole_set_labels[i,:] = labels[shuffleIndex[i],:]
whole_set_membranes[i,:] = membranes[shuffleIndex[i],:]
if purpose == 'validate':
data_set = (whole_data, whole_set_membranes, whole_set_labels)
else:
data_set = (whole_data, whole_set_membranes)
end_time = time.time()
total_time = (end_time - start_time)
print 'Running time: ', total_time / 60.
print 'finished sampling data'
return data_set
if __name__=="__main__":
import uuid
test = generate_experiment_data_patch_prediction(purpose='validate', nsamples=20, patchSize=1024, outPatchSize=1024)
# dir_path = './training_patches/'
# for i in xrange(30):
# unique_filename = str(uuid.uuid4())
# img = np.reshape(test[1][i],(388,388))
# img_gray = np.reshape(test[0][i],(572,572))
# mahotas.imsave(dir_path+unique_filename+'.tif', np.uint8(img*255))
# mahotas.imsave(dir_path+unique_filename+'_gray.tif', np.uint8((img_gray+0.5)*255))
#data_val = generate_experiment_data_supervised(purpose='validate', nsamples=10000, patchSize=65, balanceRate=0.5)
#data = generate_experiment_data_patch_prediction(purpose='validate', nsamples=2, patchSize=315, outPatchSize=215)
# plt.imshow(np.reshape(data[0][0],(315,315))); plt.figure()
# plt.imshow(np.reshape(data[1][0],(215,215))); plt.figure()
# plt.imshow(np.reshape(data[2][0],(215,215))); plt.show()
# image = mahotas.imread('ac3_input_0141.tif')
# image = normalizeImage(image)
# label = mahotas.imread('ac3_labels_0141.tif') / 255.
# test = adjust_imprecise_boundaries(image, label, 10)
# plt.imshow(label+image); plt.show()
# plt.imshow(test+image); plt.show()
|
fegonda/icon_demo
|
code/model/unet/ff.py
|
Python
|
mit
| 36,759 | 0.014935 |
"""
sum(2 * 2**i for i in range(i)) == 2 * (2**i - 1) == n
i == log_2(n // 2 + 1)
"""
from math import ceil, log
import time
def count_ways(n, current_power=None, memo=None):
if memo is None:
memo = {}
if current_power is None:
current_power = ceil(log(n // 2 + 1, 2))
key = (n, current_power)
if key in memo:
return memo[key]
current_term = 2 ** current_power
max_available = 2 * (2 ** (current_power + 1) - 1)
assert n <= max_available
next_max_available = 2 * (2 ** current_power - 1)
ans = 0
if n >= 2 * current_term:
if n == 2 * current_term:
ans += 1
else:
ans += count_ways(n - 2 * current_term, current_power - 1, memo)
if n >= current_term:
if n == current_term:
ans += 1
elif n - current_term <= next_max_available:
ans += count_ways(n - current_term, current_power - 1, memo)
if n <= next_max_available:
ans += count_ways(n, current_power - 1, memo)
memo[key] = ans
return ans
t0 = time.time()
print(count_ways(10 ** 25))
t1 = time.time()
print('Total time:', (t1 - t0) * 1000, 'ms')
|
simonolander/euler
|
euler-169-sum-of-powers-of-2.py
|
Python
|
mit
| 1,172 | 0 |
import sys
import bpy
from bpy.props import StringProperty
class ExportLog(object):
""" Class which tracks warnings and errors during export """
WARNING = "Warning"
ERROR = "Error"
MESSAGE_SEPERATOR = "\n"
SEVERITY_DIVIDER = "|#|"
EXPORTED_MESSAGE_QUEUE = []
def __init__(self):
self._message_queue = []
def info(self, *args):
""" Adds a new info, this will not be logged but just printed to stdout """
print("Info:", *args)
def warning(self, *args):
""" Adds a new warning to the log """
self._add_entry(self.WARNING, *args)
def error(self, *args):
""" Adds a new error to the log """
self._add_entry(self.ERROR, *args)
def _add_entry(self, severity, *args):
""" Internal method to append a new entry to the message queue """
content = ' '.join([str(i) for i in args])
self._message_queue.append((severity, content))
print(severity + ":", content, file=sys.stderr)
def report(self):
""" Shows a dialog with all warnings and errors, but only in case
there were some """
if self._message_queue:
ExportLog.EXPORTED_MESSAGE_QUEUE = self._message_queue
bpy.ops.pbe_export.status()
class OperatorExportStatus(bpy.types.Operator):
bl_idname = "pbe_export.status"
bl_label = "Export Status"
def execute(self, context):
wm = context.window_manager
return wm.invoke_popup(self, width=800, height=400)
def draw(self, context):
self.layout.row().label("Export status:")
self.layout.row()
for severity, message in ExportLog.EXPORTED_MESSAGE_QUEUE:
row = self.layout.row()
message = message.replace("\n", "")
row.label(message, icon="CANCEL" if severity == ExportLog.ERROR else "ERROR")
self.layout.row()
def register():
bpy.utils.register_class(OperatorExportStatus)
#bpy.utils.register_class(OperatorExportStatusOk)
def unregister():
bpy.utils.unregister_class(OperatorExportStatus)
#bpy.utils.unregister_class(OperatorExportStatusOk)
|
tobspr/Panda3D-Bam-Exporter
|
src/ExportLog.py
|
Python
|
mit
| 2,156 | 0.004174 |
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.db.transaction import atomic
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.template import RequestContext
from django.utils import timezone
from ultimate.leagues.models import Game, League
from ultimate.user.models import Player, PlayerRatings
from ultimate.forms import EditPlayerForm, EditPlayerRatingsForm, EditProfileForm, SignupForm
@login_required
def index(request):
leagues = League.objects.filter(state__in=['closed', 'open', 'preview']).order_by('league_start_date')
leagues = [l for l in leagues if l.is_visible(request.user)]
future_games = Game.objects.filter(
Q(league__in=leagues) &
Q(date__gte=timezone.now().date()) &
Q(teams__teammember__user=request.user)
).order_by('date')
future_games = [game for game in future_games if game.get_display_teams().exists()]
try:
next_game = future_games.pop(0)
except (IndexError, Game.DoesNotExist) as e:
next_game = None
try:
following_game = future_games.pop(0)
except (IndexError, Game.DoesNotExist) as e:
following_game = None
registrations = []
for league in leagues:
for registration in league.get_registrations_for_user(request.user):
registrations.append(registration)
return render(request, 'user/index.html',
{
'current_leagues': leagues,
'following_game': following_game,
'next_game': next_game,
'registrations': registrations
})
@atomic
def signup(request):
form = None
if request.method == 'POST':
form = SignupForm(request.POST)
if form.is_valid():
user = form.save()
Player.objects.get_or_create(user=user,
defaults={'date_of_birth': form.cleaned_data.get('date_of_birth'),
'gender': form.cleaned_data.get('gender')})
messages.success(request, 'Your account was created. You may now log in.')
return HttpResponseRedirect(reverse('user'))
else:
messages.error(request, 'There was an error on the form you submitted.')
if not form:
form = SignupForm()
return render(request, 'user/signup.html',
{'form': form})
@login_required
def editprofile(request):
try:
player = Player.objects.get(user=request.user)
except Player.DoesNotExist:
player = Player(user=request.user)
if request.method == 'POST':
form = EditProfileForm(request.POST, instance=request.user)
if form.is_valid():
form.save(commit=False)
player_form = EditPlayerForm(request.POST, instance=player)
if player_form.is_valid():
form.save()
player_form.save()
messages.success(request, 'Your profile was updated successfully.')
return HttpResponseRedirect(reverse('editprofile'))
else:
messages.error(request, 'There was an error on the form you submitted.')
else:
player_form = EditPlayerForm(request.POST, instance=player)
messages.error(request, 'There was an error on the form you submitted.')
else:
form = EditProfileForm(instance=request.user)
player_form = EditPlayerForm(instance=player)
return render(request, 'user/editprofile.html',
{'form': form, 'player_form': player_form})
@login_required
def editratings(request):
try:
ratings = PlayerRatings.objects.get(user=request.user, submitted_by=request.user, ratings_type=PlayerRatings.RATING_TYPE_USER)
except PlayerRatings.DoesNotExist:
ratings = None
if request.method == 'POST':
form = EditPlayerRatingsForm(request.POST, instance=ratings)
if form.is_valid():
instance = form.save(commit=False)
instance.ratings_type = PlayerRatings.RATING_TYPE_USER
instance.submitted_by = request.user
instance.updated = timezone.now()
instance.user = request.user
instance.save()
messages.success(request, 'Your ratings were updated successfully.')
return HttpResponseRedirect(reverse('editratings'))
else:
messages.error(request, 'There was an error on the form you submitted.')
else:
form = EditPlayerRatingsForm(instance=ratings)
return render(request, 'user/editratings.html',
{
'form': form
}
)
|
rdonnelly/ultimate-league-app
|
src/ultimate/user/views.py
|
Python
|
bsd-3-clause
| 4,712 | 0.004032 |
class RegMagic:
fixed_registers = []
regmagic = RegMagic()
__all__ = ['regmagic']
|
svp-dev/slcore
|
slc/tools/slc/mt/mipsel/regdefs.py
|
Python
|
gpl-3.0
| 87 | 0.034483 |
import unittest
from sympy import sqrt, exp, I, pi, IndexedBase, symbols, factorial
from qnet.algebra.core.abstract_algebra import _apply_rules
from qnet.algebra.core.scalar_algebra import (
ScalarValue, KroneckerDelta, Zero, One)
from qnet.algebra.toolbox.core import temporary_rules
from qnet.algebra.core.operator_algebra import (
OperatorSymbol, LocalSigma, IdentityOperator, OperatorPlus)
from qnet.algebra.library.spin_algebra import (
Jz, Jplus, Jminus, SpinSpace,SpinBasisKet)
from qnet.algebra.library.fock_operators import (
Destroy, Create, Phase,
Displace)
from qnet.algebra.core.hilbert_space_algebra import LocalSpace
from qnet.algebra.core.state_algebra import (
KetSymbol, ZeroKet, KetPlus, ScalarTimesKet, CoherentStateKet,
TrivialKet, TensorKet, BasisKet, Bra, OperatorTimesKet, BraKet,
KetBra, KetIndexedSum)
from qnet.algebra.core.exceptions import UnequalSpaces
from qnet.utils.indices import (
IdxSym, FockIndex, IntIndex, StrLabel, FockLabel, SymbolicLabelBase,
IndexOverFockSpace, IndexOverRange, SpinIndex)
from qnet.algebra.pattern_matching import wc
import pytest
class TestStateAddition(unittest.TestCase):
def testAdditionToZero(self):
hs = LocalSpace("hs")
a = KetSymbol("a", hs=hs)
z = ZeroKet
assert a+z == a
assert z+a == a
assert z+z == z
assert z != 0
assert z.is_zero
def testAdditionToOperator(self):
hs = LocalSpace("hs")
a = KetSymbol("a", hs=hs)
b = KetSymbol("b", hs=hs)
assert a + b == b + a
assert a + b == KetPlus(a,b)
def testSubtraction(self):
hs = LocalSpace("hs")
a = KetSymbol("a", hs=hs)
b = KetSymbol("b", hs=hs)
z = ZeroKet
lhs = a - a
assert lhs == z
lhs = a - b
rhs = KetPlus(a, ScalarTimesKet(-1,b))
assert lhs == rhs
def testHilbertSpace(self):
h1 = LocalSpace("h1")
h2 = LocalSpace("h2")
a = KetSymbol("a", hs=h1)
b = KetSymbol("b", hs=h2)
with pytest.raises(UnequalSpaces):
a.__add__(b)
def testEquality(self):
h1 = LocalSpace("h1")
assert (CoherentStateKet(10., hs=h1) + CoherentStateKet(20., hs=h1) ==
CoherentStateKet(20., hs=h1) + CoherentStateKet(10., hs=h1))
class TestTensorKet(unittest.TestCase):
def testIdentity(self):
h1 = LocalSpace("h1")
a = KetSymbol("a", hs=h1)
id = TrivialKet
assert a * id == a
assert id * a == a
def testOrdering(self):
h1 = LocalSpace("h1")
h2 = LocalSpace("h2")
a = KetSymbol("a", hs=h1)
b = KetSymbol("b", hs=h2)
assert a * b == TensorKet(a,b)
assert a * b == b * a
def testHilbertSpace(self):
h1 = LocalSpace("h1")
h2 = LocalSpace("h2")
a = KetSymbol("a", hs=h1)
b = KetSymbol("b", hs=h2)
assert a.space == h1
assert (a * b).space == h1*h2
def testEquality(self):
h1 = LocalSpace("h1")
h2 = LocalSpace("h2")
assert (CoherentStateKet(1, hs=h1) * CoherentStateKet(2, hs=h2) ==
CoherentStateKet(2, hs=h2) * CoherentStateKet(1, hs=h1))
class TestScalarTimesKet(unittest.TestCase):
def testZeroOne(self):
h1 = LocalSpace("h1")
h2 = LocalSpace("h2")
a = KetSymbol("a", hs=h1)
b = KetSymbol("b", hs=h2)
z = ZeroKet
assert a+a == 2*a
assert a*1 == a
assert 1*a == a
assert a*5 == ScalarTimesKet(5, a)
assert 5*a == a*5
assert 2*a*3 == 6*a
assert a*5*b == ScalarTimesKet(5, a*b)
assert a*(5*b) == ScalarTimesKet(5, a*b)
assert 0 * a == z
assert a * 0 == z
assert 10 * z == z
def testScalarCombination(self):
a = KetSymbol("a", hs="h1")
assert a+a == 2*a
assert 3 * a + 4 * a == 7 * a
assert (CoherentStateKet("1", hs=1) + CoherentStateKet("1", hs=1) ==
2 * CoherentStateKet("1", hs=1))
def testHilbertSpace(self):
h1 = LocalSpace("h1")
h2 = LocalSpace("h2")
a = KetSymbol("a", hs=h1)
b = KetSymbol("b", hs=h2)
assert (5*(a * b)).space == h1*h2
class TestOperatorTimesKet(unittest.TestCase):
def testZeroOne(self):
h1 = LocalSpace("h1")
h2 = LocalSpace("h2")
a = KetSymbol("a", hs=h1)
b = KetSymbol("b", hs=h2)
A = OperatorSymbol("A", hs=h1)
Ap = OperatorSymbol("Ap", hs=h1)
B = OperatorSymbol("B", hs=h2)
assert IdentityOperator*a == a
assert A * (Ap * a) == (A * Ap) * a
assert (A * B) * (a * b) == (A * a) * (B * b)
def testScalarCombination(self):
a = KetSymbol("a", hs="h1")
assert a+a == 2*a
assert 3 * a + 4 * a == 7 * a
assert (CoherentStateKet("1", hs=1) + CoherentStateKet("1", hs=1) ==
2 * CoherentStateKet("1", hs=1))
def testHilbertSpace(self):
h1 = LocalSpace("h1")
h2 = LocalSpace("h2")
a = KetSymbol("a", hs=h1)
b = KetSymbol("b", hs=h2)
assert (5*(a * b)).space == h1*h2
class TestLocalOperatorKetRelations(unittest.TestCase):
def testCreateDestroy(self):
hs1 = LocalSpace(1)
assert (
Create(hs=hs1) * BasisKet(2, hs=hs1) ==
sqrt(3) * BasisKet(3, hs=hs1))
assert (
Destroy(hs=hs1) * BasisKet(2, hs=hs1) ==
sqrt(2) * BasisKet(1, hs=hs1))
assert (
Destroy(hs=hs1) * BasisKet(0, hs=hs1) == ZeroKet)
coh = CoherentStateKet(10., hs=hs1)
a = Destroy(hs=hs1)
lhs = a * coh
rhs = 10 * coh
assert lhs == rhs
def testSpin(self):
j = 3
h = SpinSpace('j', spin=j)
assert (Jplus(hs=h) * BasisKet('+2', hs=h) ==
sqrt(j*(j+1)-2*(2+1)) * BasisKet('+3', hs=h))
assert (Jminus(hs=h) * BasisKet('+2', hs=h) ==
sqrt(j*(j+1)-2*(2-1)) * BasisKet('+1', hs=h))
assert Jz(hs=h) * BasisKet('+2', hs=h) == 2 * BasisKet('+2', hs=h)
tls = SpinSpace('tls', spin='1/2', basis=('-', '+'))
assert (
Jplus(hs=tls) * BasisKet('-', hs=tls) == BasisKet('+', hs=tls))
assert (
Jminus(hs=tls) * BasisKet('+', hs=tls) == BasisKet('-', hs=tls))
assert (
Jz(hs=tls) * BasisKet('+', hs=tls) == BasisKet('+', hs=tls) / 2)
assert (
Jz(hs=tls) * BasisKet('-', hs=tls) == -BasisKet('-', hs=tls) / 2)
def testPhase(self):
hs1 = LocalSpace(1)
assert (Phase(5, hs=hs1) * BasisKet(3, hs=hs1) ==
exp(I * 15) * BasisKet(3, hs=hs1))
lhs = Phase(pi, hs=hs1) * CoherentStateKet(3., hs=hs1)
rhs = CoherentStateKet(-3., hs=hs1)
assert lhs.__class__ == rhs.__class__
assert lhs.space == rhs.space
assert abs(lhs.ampl - rhs.ampl) < 1e-14
def testDisplace(self):
hs1 = LocalSpace(1)
assert (Displace(5 + 6j, hs=hs1) * CoherentStateKet(3., hs=hs1) ==
exp(I * ((5+6j)*3).imag) * CoherentStateKet(8 + 6j, hs=hs1))
assert (Displace(5 + 6j, hs=hs1) * BasisKet(0, hs=hs1) ==
CoherentStateKet(5+6j, hs=hs1))
def testLocalSigmaPi(self):
assert (LocalSigma(0, 1, hs=1) * BasisKet(1, hs=1) ==
BasisKet(0, hs=1))
assert (LocalSigma(0, 0, hs=1) * BasisKet(1, hs=1) ==
ZeroKet)
def testActLocally(self):
hs1 = LocalSpace(1)
hs2 = LocalSpace(2)
assert ((Create(hs=hs1) * Destroy(hs=hs2)) *
(BasisKet(2, hs=hs1) * BasisKet(1, hs=hs2)) ==
sqrt(3) * BasisKet(3, hs=hs1) * BasisKet(0, hs=hs2))
def testOperatorTensorProduct(self):
hs1 = LocalSpace(1)
hs2 = LocalSpace(2)
assert ((Create(hs=hs1)*Destroy(hs=hs2)) *
(BasisKet(0, hs=hs1) * BasisKet(1, hs=hs2)) ==
BasisKet(1, hs=hs1) * BasisKet(0, hs=hs2))
def testOperatorProduct(self):
hs1 = LocalSpace(1)
hs2 = LocalSpace(2)
assert ((Create(hs=hs1) * Destroy(hs=hs1)) *
(BasisKet(1, hs=hs1) * BasisKet(1, hs=hs2)) ==
BasisKet(1, hs=hs1) * BasisKet(1, hs=hs2))
assert ((Create(hs=hs1) * Destroy(hs=hs1) * Destroy(hs=hs1)) *
(BasisKet(2, hs=hs1)*BasisKet(1, hs=hs2)) ==
sqrt(2) * BasisKet(1, hs=hs1) * BasisKet(1, hs=hs2))
assert ((Create(hs=hs1) * Destroy(hs=hs1) * Destroy(hs=hs1)) *
BasisKet(2, hs=hs1) ==
sqrt(2) * BasisKet(1, hs=hs1))
assert ((Create(hs=hs1) * Destroy(hs=hs1)) * BasisKet(1, hs=hs1) ==
BasisKet(1, hs=hs1))
assert (
(Create(hs=hs1) * Destroy(hs=hs1)) * BasisKet(0, hs=hs1) ==
ZeroKet)
def test_expand_ketbra():
"""Test expansion of KetBra"""
hs = LocalSpace('0', basis=('0', '1'))
expr = KetBra(
KetPlus(BasisKet('0', hs=hs), BasisKet('1', hs=hs)),
KetPlus(BasisKet('0', hs=hs), BasisKet('1', hs=hs)))
with temporary_rules(KetBra, clear=True):
expr_expand = expr.expand()
assert expr_expand == OperatorPlus(
KetBra(BasisKet('0', hs=hs), BasisKet('0', hs=hs)),
KetBra(BasisKet('0', hs=hs), BasisKet('1', hs=hs)),
KetBra(BasisKet('1', hs=hs), BasisKet('0', hs=hs)),
KetBra(BasisKet('1', hs=hs), BasisKet('1', hs=hs)))
def test_orthonormality_fock():
"""Test orthonormality of Fock space BasisKets (including symbolic)"""
hs = LocalSpace('tls', basis=('g', 'e'))
i = IdxSym('i')
j = IdxSym('j')
ket_0 = BasisKet(0, hs=hs)
bra_0 = ket_0.dag()
ket_1 = BasisKet(1, hs=hs)
ket_g = BasisKet('g', hs=hs)
bra_g = ket_g.dag()
ket_e = BasisKet('e', hs=hs)
ket_i = BasisKet(FockIndex(i), hs=hs)
ket_j = BasisKet(FockIndex(j), hs=hs)
bra_i = ket_i.dag()
ket_i_lb = BasisKet(FockLabel(i, hs=hs), hs=hs)
ket_j_lb = BasisKet(FockLabel(j, hs=hs), hs=hs)
bra_i_lb = ket_i_lb.dag()
assert bra_0 * ket_1 == Zero
assert bra_0 * ket_0 == One
assert bra_g * ket_g == One
assert bra_g * ket_e == Zero
assert bra_0 * ket_g == One
assert bra_0 * ket_e == Zero
assert bra_g * ket_0 == One
assert bra_g * ket_1 == Zero
delta_ij = KroneckerDelta(i, j)
delta_i0 = KroneckerDelta(i, 0)
delta_0j = KroneckerDelta(0, j)
assert bra_i * ket_j == delta_ij
assert bra_i * ket_0 == delta_i0
assert bra_0 * ket_j == delta_0j
assert bra_i * ket_g == delta_i0
assert bra_g * ket_j == delta_0j
assert delta_ij.substitute({i: 0, j: 0}) == One
assert delta_ij.substitute({i: 0, j: 1}) == Zero
assert delta_i0.substitute({i: 0}) == One
assert delta_i0.substitute({i: 1}) == Zero
delta_ij = KroneckerDelta(i, j)
delta_ig = KroneckerDelta(i, 0)
delta_gj = KroneckerDelta(0, j)
assert bra_i_lb * ket_j_lb == delta_ij
assert bra_i_lb * ket_0 == delta_ig
assert bra_0 * ket_j_lb == delta_gj
assert bra_i_lb * ket_g == delta_ig
assert bra_g * ket_j_lb == delta_gj
assert delta_ij.substitute({i: 0, j: 0}) == One
assert delta_ij.substitute({i: 0, j: 1}) == Zero
assert delta_ig.substitute({i: 0}) == One
assert delta_ig.substitute({i: 1}) == Zero
def test_orthonormality_spin():
hs = SpinSpace('s', spin='1/2')
i = IdxSym('i')
j = IdxSym('j')
ket_dn = SpinBasisKet(-1, 2, hs=hs)
ket_up = SpinBasisKet(1, 2, hs=hs)
bra_dn = ket_dn.dag()
ket_i = BasisKet(SpinIndex(i/2, hs), hs=hs)
bra_i = ket_i.dag()
ket_j = BasisKet(SpinIndex(j/2, hs), hs=hs)
assert bra_dn * ket_dn == One
assert bra_dn * ket_up == Zero
delta_ij = KroneckerDelta(i, j, simplify=False)
delta_i_dn = KroneckerDelta(i, -1, simplify=False)
delta_dn_j = KroneckerDelta(-1, j, simplify=False)
assert bra_i * ket_j == delta_ij
assert bra_i * ket_dn == delta_i_dn
assert bra_dn * ket_j == delta_dn_j
assert delta_ij.substitute({i: 0, j: 0}) == One
assert delta_ij.substitute({i: 0, j: 1}) == Zero
def test_indexed_local_sigma():
"""Test that brakets involving indexed symbols evaluate to Kronecker
deltas"""
hs = LocalSpace('tls', basis=('g', 'e'))
i = IdxSym('i')
j = IdxSym('j')
ket_i = BasisKet(FockIndex(i), hs=hs)
ket_j = BasisKet(FockIndex(j), hs=hs)
expr = LocalSigma('g', 'e', hs=hs) * ket_i
expected = KroneckerDelta(i, 1) * BasisKet('g', hs=hs)
assert expr == expected
assert expr == LocalSigma(0, 1, hs=hs) * ket_i
braopket = BraKet(
ket_i, OperatorTimesKet(
(LocalSigma('g', 'e', hs=hs) + LocalSigma('e', 'g', hs=hs)),
ket_j))
expr = braopket.expand()
assert expr == (
KroneckerDelta(i, 0) * KroneckerDelta(1, j) +
KroneckerDelta(i, 1) * KroneckerDelta(0, j))
def eval_lb(expr, mapping):
"""Evaluate symbolic labels with the given mapping"""
return _apply_rules(expr, rules=[(
wc('label', head=SymbolicLabelBase),
lambda label: label.substitute(mapping))])
def test_ket_symbolic_labels():
"""Test that we can instantiate Kets with symbolic labels"""
i = IdxSym('i')
j = IdxSym('j')
hs0 = LocalSpace(0)
hs1 = LocalSpace(1)
Psi = IndexedBase('Psi')
assert (
eval_lb(BasisKet(FockIndex(2 * i), hs=hs0), {i: 2}) ==
BasisKet(4, hs=hs0))
with pytest.raises(TypeError) as exc_info:
BasisKet(IntIndex(2 * i), hs=hs0)
assert "not IntIndex" in str(exc_info.value)
with pytest.raises(TypeError) as exc_info:
BasisKet(StrLabel(2 * i), hs=hs0)
assert "not StrLabel" in str(exc_info.value)
with pytest.raises(TypeError) as exc_info:
BasisKet(2 * i, hs=hs0)
assert "not Mul" in str(exc_info.value)
assert(
eval_lb(KetSymbol(StrLabel(2 * i), hs=hs0), {i: 2}) ==
KetSymbol("4", hs=hs0))
with pytest.raises(TypeError) as exc_info:
eval_lb(KetSymbol(FockIndex(2 * i), hs=hs0), {i: 2})
assert "type of label must be str" in str(exc_info.value)
assert StrLabel(Psi[i, j]).substitute({i: 'i', j: 'j'}) == 'Psi_ij'
assert(
eval_lb(
KetSymbol(StrLabel(Psi[i, j]), hs=hs0*hs1), {i: 'i', j: 'j'}) ==
KetSymbol("Psi_ij", hs=hs0*hs1))
assert(
eval_lb(
KetSymbol(StrLabel(Psi[i, j]), hs=hs0*hs1), {i: 1, j: 2}) ==
KetSymbol("Psi_12", hs=hs0*hs1))
assert (
eval_lb(
LocalSigma(FockIndex(i), FockIndex(j), hs=hs0), {i: 1, j: 2}) ==
LocalSigma(1, 2, hs=hs0))
assert (
BasisKet(FockIndex(i), hs=hs0) * BasisKet(FockIndex(j), hs=hs0).dag() ==
LocalSigma(FockIndex(i), FockIndex(j), hs=hs0))
def test_coherent_state_to_fock_representation():
"""Test the representation of a coherent state in the Fock basis"""
alpha = symbols('alpha')
expr1 = CoherentStateKet(alpha, hs=1).to_fock_representation()
expr2 = CoherentStateKet(alpha, hs=1).to_fock_representation(max_terms=10)
expr3 = CoherentStateKet(alpha, hs=1).to_fock_representation(
index_symbol='i')
expr4 = CoherentStateKet(alpha, hs=1).to_fock_representation(
index_symbol=IdxSym('m', positive=True))
assert (
expr1.term.ranges[0] ==
IndexOverFockSpace(IdxSym('n'), LocalSpace('1')))
assert (
expr2.term.ranges[0] ==
IndexOverRange(IdxSym('n', integer=True), 0, 9))
assert (
expr3.term.ranges[0] ==
IndexOverFockSpace(IdxSym('i'), LocalSpace('1')))
assert (
expr4.term.ranges[0] ==
IndexOverFockSpace(IdxSym('m', positive=True), LocalSpace('1')))
for expr in (expr1, expr2):
assert expr.coeff == exp(-alpha*alpha.conjugate()/2)
sum = expr.term
assert len(sum.ranges) == 1
n = sum.ranges[0].index_symbol
assert sum.term.coeff == alpha**n/sqrt(factorial(n))
assert (
sum.term.term ==
BasisKet(FockIndex(IdxSym('n')), hs=LocalSpace('1')))
def test_scalar_times_bra():
"""Test that multiplication of a scalar with a bra is handled correctly"""
alpha_sym = symbols('alpha')
alpha = ScalarValue(alpha_sym)
ket = KetSymbol('Psi', hs=0)
bra = ket.bra
# first, let's try the ket case, just to establish a working baseline
expr = alpha * ket
assert expr == ScalarTimesKet(alpha, ket)
assert expr == alpha_sym * ket
assert isinstance((alpha_sym * ket).coeff, ScalarValue)
assert expr == ket * alpha
assert expr == ket * alpha_sym
# now, the bra
expr = alpha * bra
assert expr == Bra(ScalarTimesKet(alpha.conjugate(), ket))
assert expr == alpha_sym * bra
assert isinstance((alpha_sym * bra).ket.coeff, ScalarValue)
assert expr == bra * alpha
assert expr == bra * alpha_sym
def test_disallow_inner_bra():
"""Test that it is not possible to instantiate a State Opereration that has
a Bra as an operator: we accept Bra to be at the root of the expression
tree"""
alpha = symbols('alpha')
A = OperatorSymbol('A', hs=0)
ket1 = KetSymbol('Psi_1', hs=0)
ket2 = KetSymbol('Psi_1', hs=0)
bra1 = Bra(ket1)
bra2 = Bra(ket2)
bra1_hs1 = Bra(KetSymbol('Psi_1', hs=1))
with pytest.raises(TypeError) as exc_info:
KetPlus(bra1, bra2)
assert "must be Kets" in str(exc_info.value)
assert isinstance(KetPlus.create(bra1, bra2), Bra)
with pytest.raises(TypeError) as exc_info:
TensorKet(bra1, bra1_hs1)
assert "must be Kets" in str(exc_info.value)
assert isinstance(TensorKet.create(bra1, bra1_hs1), Bra)
with pytest.raises(TypeError) as exc_info:
ScalarTimesKet(alpha, bra1)
assert "must be Kets" in str(exc_info.value)
assert isinstance(ScalarTimesKet.create(alpha, bra1), Bra)
with pytest.raises(TypeError) as exc_info:
OperatorTimesKet(A, bra1)
assert "must be Kets" in str(exc_info.value)
with pytest.raises(TypeError) as exc_info:
OperatorTimesKet(bra1, A)
assert "must be Kets" in str(exc_info.value)
with pytest.raises(TypeError) as exc_info:
BraKet(bra1, ket2)
assert "must be Kets" in str(exc_info.value)
with pytest.raises(TypeError) as exc_info:
KetBra(ket1, bra2)
assert "must be Kets" in str(exc_info.value)
i = IdxSym('i')
Psi = IndexedBase('Psi')
psi_i = KetSymbol(StrLabel(Psi[i]), hs=0)
with pytest.raises(TypeError) as exc_info:
KetIndexedSum(Bra(psi_i), IndexOverFockSpace(i, hs=0))
assert "must be Kets" in str(exc_info.value)
assert isinstance(
KetIndexedSum.create(Bra(psi_i), IndexOverFockSpace(i, hs=0)),
Bra)
|
mabuchilab/QNET
|
tests/algebra/test_state_algebra.py
|
Python
|
mit
| 18,851 | 0.000265 |
from __future__ import print_function
from __future__ import division
ALIGN_LEFT = '<'
ALIGN_CENTER = '_'
ALIGN_RIGHT = '>'
def pprint(data, header=None, dictorder=None, align=None, output_file=None):
if ((dict is type(data[0])) and (dictorder is None)):
dictorder = data[0].keys()
if ((dict is type(data[0])) and (header is None)):
header = data[0].keys()
(sdata, align) = makeStrings(data, dictorder, align)
(widths, percents) = calcSize(sdata, header)
output = ''
if header:
for i in range(len(header)):
output += ((('|' + (' ' * (((widths[i] - len(header[i])) // 2) + 1))) + header[i]) + (' ' * (((widths[i] - len(header[i])) // 2) + 1)))
if ((widths[i] - len(header[i])) % 2):
output += ' '
if percents[i]:
output += (' ' * (percents[i] - header[i].count('%')))
output += '|'
output += '\n'
for i in range(len(widths)):
output += ('+-' + ('-' * ((widths[i] + 1) + percents[i])))
output += '+'
output += '\n'
for j in range(len(sdata)):
d = sdata[j]
a = align[j]
for i in range(len(d)):
if (a[i] == ALIGN_RIGHT):
output += ((('|' + (' ' * ((widths[i] - len(d[i])) + 1))) + d[i]) + ' ')
elif (a[i] == ALIGN_CENTER):
output += ((('|' + (' ' * (((widths[i] - len(d[i])) // 2) + 1))) + d[i]) + (' ' * (((widths[i] - len(d[i])) // 2) + 1)))
if ((widths[i] - len(d[i])) % 2):
output += ' '
else:
output += (('| ' + d[i]) + (' ' * ((widths[i] - len(d[i])) + 1)))
if percents[i]:
output += (' ' * (percents[i] - d[i].count('%')))
output += '|'
output += '\n'
if output_file:
with open(output_file, 'wb') as output_handle:
output_handle.write(output)
else:
print(output, end='')
def makeStrings(data, dictOrder, align):
r = []
a = ([] if (align is None) else None)
for i in data:
c = []
ac = []
if dictOrder:
for k in dictOrder:
c += ([i[k]] if (unicode is type(i[k])) else [(str(i[k]) if (i[k] is not None) else '')])
if (a is not None):
ac += ([ALIGN_RIGHT] if ((int is type(i[k])) or (float is type(i[k])) or (long is type(i[k]))) else [ALIGN_LEFT])
else:
for k in i:
c += ([k] if (unicode is type(k)) else [(str(k) if (k is not None) else '')])
if (a is not None):
ac += ([ALIGN_RIGHT] if ((int is type(k)) or (float is type(k)) or (long is type(k))) else [ALIGN_LEFT])
r += [c]
if (a is not None):
a += [ac]
return (r, (a if (a is not None) else align))
def calcSize(data, header):
widths = range(len(data[0]))
percents = range(len(data[0]))
for i in widths:
widths[i] = 0
percents[i] = 0
if header:
for i in range(len(header)):
r = len(header[i])
if (r > widths[i]):
widths[i] = r
r = header[i].count('%')
if (r > percents[i]):
percents[i] = r
for d in data:
for i in range(len(d)):
r = len(d[i])
if (r > widths[i]):
widths[i] = r
r = d[i].count('%')
if (r > percents[i]):
percents[i] = r
return (widths, percents)
|
DarthMaulware/EquationGroupLeaks
|
Leak #5 - Lost In Translation/windows/Resources/Ops/PyScripts/windows/sentinel/table_print.py
|
Python
|
unlicense
| 3,527 | 0.004536 |
# coding=utf-8
import os
import time
from gzip import GzipFile
from StringIO import StringIO
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from django.http import HttpResponse
from django.core.management import call_command
from django.views.decorators.csrf import csrf_exempt, csrf_protect
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from .forms import BackupImportForm
from .uploadhandler import TemporaryGzipFileUploadHandler
breadcrumbs = [
['warehouse.skill.views.home', 'главная'],
['warehouse.backup.views.home', 'резервное копирование'],
]
info = [
{
'view': 'warehouse.backup.views.export_gz',
'title': 'Экспорт',
'text': 'Позволяет сохранить данные из системы в файл для последующего восстановления.',
'cls': 'large-4',
},
{
'view': 'warehouse.backup.views.import_gz',
'title': 'Импорт',
'text': 'Позволяет восстановить данные из файла. Внимание! Все существующие записи будут безвозвратно утеряны!',
'cls': 'large-4',
},
]
##### HOME #####
@login_required
def home(request):
return render_to_response('backup/home.html', {'breadcrumbs': breadcrumbs, 'info': info}, RequestContext(request))
##### EXPORT #####
@login_required
def export_gz(request):
filename = 'skill__%s' % time.strftime('%Y%m%d_%H%M%S.gz')
response = HttpResponse(mimetype='application/force-download')
response['Content-Disposition'] = 'attachment; filename=%s' % filename
with GzipFile(fileobj=response, mode='w', filename='skill.json') as gz_stream:
call_command('dumpdata', 'auth', 'skill', stdout=gz_stream, natural=True, indent=2)
return response
##### IMPORT #####
@login_required
@csrf_exempt
def import_gz(request):
# changing suffix to '.gz' for temp file names
request.upload_handlers = [TemporaryGzipFileUploadHandler()]
return _import_gz(request)
@csrf_protect
def _import_gz(request):
if request.method == 'POST':
form = BackupImportForm(request.POST, request.FILES)
if form.is_valid():
message = _process_file(request.FILES['file'])
messages.success(request, message)
return redirect('warehouse.backup.views.home')
else:
form = BackupImportForm()
cur = ['warehouse.backup.views.import_gz', 'импорт']
return render_to_response(
'backup/import.html',
{'form': form, 'breadcrumbs': breadcrumbs + [cur]},
RequestContext(request)
)
def _process_file(f):
file_path = f.temporary_file_path()
if not f.closed:
f.close()
stream = StringIO()
call_command('loaddata', file_path, stdout=stream)
message = stream.getvalue()
stream.close()
os.unlink(file_path)
return message
|
hva/warehouse
|
warehouse/backup/views.py
|
Python
|
mit
| 3,061 | 0.003495 |
#!/usr/bin/env python
import os
from slackclient import SlackClient
BOT_NAME = 'chopbot3000'
slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
if __name__ == "__main__":
api_call = slack_client.api_call("users.list")
if api_call.get('ok'):
# retrieve all users so we can find our bot
users = api_call.get('members')
for user in users:
if 'name' in user and user.get('name') == BOT_NAME:
print("Bot ID for '" + user['name'] + "' is " + user.get('id'))
else:
print("could not find bot user with the name " + BOT_NAME)
|
baylesj/chopBot3000
|
scripts/print_bot_id.py
|
Python
|
mit
| 604 | 0 |
# Copyright 2016 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User friendly container for Cloud Spanner Database."""
import copy
import functools
import grpc
import logging
import re
import threading
import google.auth.credentials
from google.api_core.retry import Retry
from google.api_core.retry import if_exception_type
from google.cloud.exceptions import NotFound
from google.api_core.exceptions import Aborted
from google.api_core import gapic_v1
from google.cloud.spanner_admin_database_v1 import CreateDatabaseRequest
from google.cloud.spanner_admin_database_v1 import Database as DatabasePB
from google.cloud.spanner_admin_database_v1 import EncryptionConfig
from google.cloud.spanner_admin_database_v1 import RestoreDatabaseEncryptionConfig
from google.cloud.spanner_admin_database_v1 import RestoreDatabaseRequest
from google.cloud.spanner_admin_database_v1 import UpdateDatabaseDdlRequest
from google.cloud.spanner_v1 import ExecuteSqlRequest
from google.cloud.spanner_v1 import TransactionSelector
from google.cloud.spanner_v1 import TransactionOptions
from google.cloud.spanner_v1 import RequestOptions
from google.cloud.spanner_v1 import SpannerClient
from google.cloud.spanner_v1._helpers import _merge_query_options
from google.cloud.spanner_v1._helpers import _metadata_with_prefix
from google.cloud.spanner_v1.batch import Batch
from google.cloud.spanner_v1.keyset import KeySet
from google.cloud.spanner_v1.pool import BurstyPool
from google.cloud.spanner_v1.pool import SessionCheckout
from google.cloud.spanner_v1.session import Session
from google.cloud.spanner_v1.snapshot import _restart_on_unavailable
from google.cloud.spanner_v1.snapshot import Snapshot
from google.cloud.spanner_v1.streamed import StreamedResultSet
from google.cloud.spanner_v1.services.spanner.transports.grpc import (
SpannerGrpcTransport,
)
from google.cloud.spanner_v1.table import Table
SPANNER_DATA_SCOPE = "https://www.googleapis.com/auth/spanner.data"
_DATABASE_NAME_RE = re.compile(
r"^projects/(?P<project>[^/]+)/"
r"instances/(?P<instance_id>[a-z][-a-z0-9]*)/"
r"databases/(?P<database_id>[a-z][a-z0-9_\-]*[a-z0-9])$"
)
_DATABASE_METADATA_FILTER = "name:{0}/operations/"
_LIST_TABLES_QUERY = """SELECT TABLE_NAME
FROM INFORMATION_SCHEMA.TABLES
WHERE SPANNER_STATE = 'COMMITTED'
"""
DEFAULT_RETRY_BACKOFF = Retry(initial=0.02, maximum=32, multiplier=1.3)
class Database(object):
"""Representation of a Cloud Spanner Database.
We can use a :class:`Database` to:
* :meth:`create` the database
* :meth:`reload` the database
* :meth:`update` the database
* :meth:`drop` the database
:type database_id: str
:param database_id: The ID of the database.
:type instance: :class:`~google.cloud.spanner_v1.instance.Instance`
:param instance: The instance that owns the database.
:type ddl_statements: list of string
:param ddl_statements: (Optional) DDL statements, excluding the
CREATE DATABASE statement.
:type pool: concrete subclass of
:class:`~google.cloud.spanner_v1.pool.AbstractSessionPool`.
:param pool: (Optional) session pool to be used by database. If not
passed, the database will construct an instance of
:class:`~google.cloud.spanner_v1.pool.BurstyPool`.
:type logger: :class:`logging.Logger`
:param logger: (Optional) a custom logger that is used if `log_commit_stats`
is `True` to log commit statistics. If not passed, a logger
will be created when needed that will log the commit statistics
to stdout.
:type encryption_config:
:class:`~google.cloud.spanner_admin_database_v1.types.EncryptionConfig`
or :class:`~google.cloud.spanner_admin_database_v1.types.RestoreDatabaseEncryptionConfig`
or :class:`dict`
:param encryption_config:
(Optional) Encryption configuration for the database.
If a dict is provided, it must be of the same form as either of the protobuf
messages :class:`~google.cloud.spanner_admin_database_v1.types.EncryptionConfig`
or :class:`~google.cloud.spanner_admin_database_v1.types.RestoreDatabaseEncryptionConfig`
"""
_spanner_api = None
def __init__(
self,
database_id,
instance,
ddl_statements=(),
pool=None,
logger=None,
encryption_config=None,
):
self.database_id = database_id
self._instance = instance
self._ddl_statements = _check_ddl_statements(ddl_statements)
self._local = threading.local()
self._state = None
self._create_time = None
self._restore_info = None
self._version_retention_period = None
self._earliest_version_time = None
self._encryption_info = None
self._default_leader = None
self.log_commit_stats = False
self._logger = logger
self._encryption_config = encryption_config
if pool is None:
pool = BurstyPool()
self._pool = pool
pool.bind(self)
@classmethod
def from_pb(cls, database_pb, instance, pool=None):
"""Creates an instance of this class from a protobuf.
:type database_pb:
:class:`~google.cloud.spanner_admin_instance_v1.types.Instance`
:param database_pb: A instance protobuf object.
:type instance: :class:`~google.cloud.spanner_v1.instance.Instance`
:param instance: The instance that owns the database.
:type pool: concrete subclass of
:class:`~google.cloud.spanner_v1.pool.AbstractSessionPool`.
:param pool: (Optional) session pool to be used by database.
:rtype: :class:`Database`
:returns: The database parsed from the protobuf response.
:raises ValueError:
if the instance name does not match the expected format
or if the parsed project ID does not match the project ID
on the instance's client, or if the parsed instance ID does
not match the instance's ID.
"""
match = _DATABASE_NAME_RE.match(database_pb.name)
if match is None:
raise ValueError(
"Database protobuf name was not in the " "expected format.",
database_pb.name,
)
if match.group("project") != instance._client.project:
raise ValueError(
"Project ID on database does not match the "
"project ID on the instance's client"
)
instance_id = match.group("instance_id")
if instance_id != instance.instance_id:
raise ValueError(
"Instance ID on database does not match the "
"Instance ID on the instance"
)
database_id = match.group("database_id")
return cls(database_id, instance, pool=pool)
@property
def name(self):
"""Database name used in requests.
.. note::
This property will not change if ``database_id`` does not, but the
return value is not cached.
The database name is of the form
``"projects/../instances/../databases/{database_id}"``
:rtype: str
:returns: The database name.
"""
return self._instance.name + "/databases/" + self.database_id
@property
def state(self):
"""State of this database.
:rtype: :class:`~google.cloud.spanner_admin_database_v1.types.Database.State`
:returns: an enum describing the state of the database
"""
return self._state
@property
def create_time(self):
"""Create time of this database.
:rtype: :class:`datetime.datetime`
:returns: a datetime object representing the create time of
this database
"""
return self._create_time
@property
def restore_info(self):
"""Restore info for this database.
:rtype: :class:`~google.cloud.spanner_v1.types.RestoreInfo`
:returns: an object representing the restore info for this database
"""
return self._restore_info
@property
def version_retention_period(self):
"""The period in which Cloud Spanner retains all versions of data
for the database.
:rtype: str
:returns: a string representing the duration of the version retention period
"""
return self._version_retention_period
@property
def earliest_version_time(self):
"""The earliest time at which older versions of the data can be read.
:rtype: :class:`datetime.datetime`
:returns: a datetime object representing the earliest version time
"""
return self._earliest_version_time
@property
def encryption_config(self):
"""Encryption config for this database.
:rtype: :class:`~google.cloud.spanner_admin_instance_v1.types.EncryptionConfig`
:returns: an object representing the encryption config for this database
"""
return self._encryption_config
@property
def encryption_info(self):
"""Encryption info for this database.
:rtype: a list of :class:`~google.cloud.spanner_admin_instance_v1.types.EncryptionInfo`
:returns: a list of objects representing encryption info for this database
"""
return self._encryption_info
@property
def default_leader(self):
"""The read-write region which contains the database's leader replicas.
:rtype: str
:returns: a string representing the read-write region
"""
return self._default_leader
@property
def ddl_statements(self):
"""DDL Statements used to define database schema.
See
cloud.google.com/spanner/docs/data-definition-language
:rtype: sequence of string
:returns: the statements
"""
return self._ddl_statements
@property
def logger(self):
"""Logger used by the database.
The default logger will log commit stats at the log level INFO using
`sys.stderr`.
:rtype: :class:`logging.Logger` or `None`
:returns: the logger
"""
if self._logger is None:
self._logger = logging.getLogger(self.name)
self._logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
self._logger.addHandler(ch)
return self._logger
@property
def spanner_api(self):
"""Helper for session-related API calls."""
if self._spanner_api is None:
client_info = self._instance._client._client_info
client_options = self._instance._client._client_options
if self._instance.emulator_host is not None:
transport = SpannerGrpcTransport(
channel=grpc.insecure_channel(self._instance.emulator_host)
)
self._spanner_api = SpannerClient(
client_info=client_info, transport=transport
)
return self._spanner_api
credentials = self._instance._client.credentials
if isinstance(credentials, google.auth.credentials.Scoped):
credentials = credentials.with_scopes((SPANNER_DATA_SCOPE,))
self._spanner_api = SpannerClient(
credentials=credentials,
client_info=client_info,
client_options=client_options,
)
return self._spanner_api
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return (
other.database_id == self.database_id and other._instance == self._instance
)
def __ne__(self, other):
return not self == other
def create(self):
"""Create this database within its instance
Includes any configured schema assigned to :attr:`ddl_statements`.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase
:rtype: :class:`~google.api_core.operation.Operation`
:returns: a future used to poll the status of the create request
:raises Conflict: if the database already exists
:raises NotFound: if the instance owning the database does not exist
"""
api = self._instance._client.database_admin_api
metadata = _metadata_with_prefix(self.name)
db_name = self.database_id
if "-" in db_name:
db_name = "`%s`" % (db_name,)
if type(self._encryption_config) == dict:
self._encryption_config = EncryptionConfig(**self._encryption_config)
request = CreateDatabaseRequest(
parent=self._instance.name,
create_statement="CREATE DATABASE %s" % (db_name,),
extra_statements=list(self._ddl_statements),
encryption_config=self._encryption_config,
)
future = api.create_database(request=request, metadata=metadata)
return future
def exists(self):
"""Test whether this database exists.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDDL
:rtype: bool
:returns: True if the database exists, else false.
"""
api = self._instance._client.database_admin_api
metadata = _metadata_with_prefix(self.name)
try:
api.get_database_ddl(database=self.name, metadata=metadata)
except NotFound:
return False
return True
def reload(self):
"""Reload this database.
Refresh any configured schema into :attr:`ddl_statements`.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDDL
:raises NotFound: if the database does not exist
"""
api = self._instance._client.database_admin_api
metadata = _metadata_with_prefix(self.name)
response = api.get_database_ddl(database=self.name, metadata=metadata)
self._ddl_statements = tuple(response.statements)
response = api.get_database(name=self.name, metadata=metadata)
self._state = DatabasePB.State(response.state)
self._create_time = response.create_time
self._restore_info = response.restore_info
self._version_retention_period = response.version_retention_period
self._earliest_version_time = response.earliest_version_time
self._encryption_config = response.encryption_config
self._encryption_info = response.encryption_info
self._default_leader = response.default_leader
def update_ddl(self, ddl_statements, operation_id=""):
"""Update DDL for this database.
Apply any configured schema from :attr:`ddl_statements`.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase
:type ddl_statements: Sequence[str]
:param ddl_statements: a list of DDL statements to use on this database
:type operation_id: str
:param operation_id: (optional) a string ID for the long-running operation
:rtype: :class:`google.api_core.operation.Operation`
:returns: an operation instance
:raises NotFound: if the database does not exist
"""
client = self._instance._client
api = client.database_admin_api
metadata = _metadata_with_prefix(self.name)
request = UpdateDatabaseDdlRequest(
database=self.name, statements=ddl_statements, operation_id=operation_id,
)
future = api.update_database_ddl(request=request, metadata=metadata)
return future
def drop(self):
"""Drop this database.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase
"""
api = self._instance._client.database_admin_api
metadata = _metadata_with_prefix(self.name)
api.drop_database(database=self.name, metadata=metadata)
def execute_partitioned_dml(
self,
dml,
params=None,
param_types=None,
query_options=None,
request_options=None,
):
"""Execute a partitionable DML statement.
:type dml: str
:param dml: DML statement
:type params: dict, {str -> column value}
:param params: values for parameter replacement. Keys must match
the names used in ``dml``.
:type param_types: dict[str -> Union[dict, .types.Type]]
:param param_types:
(Optional) maps explicit types for one or more param values;
required if parameters are passed.
:type query_options:
:class:`~google.cloud.spanner_v1.types.ExecuteSqlRequest.QueryOptions`
or :class:`dict`
:param query_options:
(Optional) Query optimizer configuration to use for the given query.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_v1.types.QueryOptions`
:type request_options:
:class:`google.cloud.spanner_v1.types.RequestOptions`
:param request_options:
(Optional) Common options for this request.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_v1.types.RequestOptions`.
Please note, the `transactionTag` setting will be ignored as it is
not supported for partitioned DML.
:rtype: int
:returns: Count of rows affected by the DML statement.
"""
query_options = _merge_query_options(
self._instance._client._query_options, query_options
)
if request_options is None:
request_options = RequestOptions()
elif type(request_options) == dict:
request_options = RequestOptions(request_options)
request_options.transaction_tag = None
if params is not None:
from google.cloud.spanner_v1.transaction import Transaction
if param_types is None:
raise ValueError("Specify 'param_types' when passing 'params'.")
params_pb = Transaction._make_params_pb(params, param_types)
else:
params_pb = {}
api = self.spanner_api
txn_options = TransactionOptions(
partitioned_dml=TransactionOptions.PartitionedDml()
)
metadata = _metadata_with_prefix(self.name)
def execute_pdml():
with SessionCheckout(self._pool) as session:
txn = api.begin_transaction(
session=session.name, options=txn_options, metadata=metadata
)
txn_selector = TransactionSelector(id=txn.id)
request = ExecuteSqlRequest(
session=session.name,
sql=dml,
transaction=txn_selector,
params=params_pb,
param_types=param_types,
query_options=query_options,
request_options=request_options,
)
method = functools.partial(
api.execute_streaming_sql, metadata=metadata,
)
iterator = _restart_on_unavailable(method, request)
result_set = StreamedResultSet(iterator)
list(result_set) # consume all partials
return result_set.stats.row_count_lower_bound
return _retry_on_aborted(execute_pdml, DEFAULT_RETRY_BACKOFF)()
def session(self, labels=None):
"""Factory to create a session for this database.
:type labels: dict (str -> str) or None
:param labels: (Optional) user-assigned labels for the session.
:rtype: :class:`~google.cloud.spanner_v1.session.Session`
:returns: a session bound to this database.
"""
return Session(self, labels=labels)
def snapshot(self, **kw):
"""Return an object which wraps a snapshot.
The wrapper *must* be used as a context manager, with the snapshot
as the value returned by the wrapper.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.TransactionOptions.ReadOnly
:type kw: dict
:param kw:
Passed through to
:class:`~google.cloud.spanner_v1.snapshot.Snapshot` constructor.
:rtype: :class:`~google.cloud.spanner_v1.database.SnapshotCheckout`
:returns: new wrapper
"""
return SnapshotCheckout(self, **kw)
def batch(self, request_options=None):
"""Return an object which wraps a batch.
The wrapper *must* be used as a context manager, with the batch
as the value returned by the wrapper.
:type request_options:
:class:`google.cloud.spanner_v1.types.RequestOptions`
:param request_options:
(Optional) Common options for the commit request.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_v1.types.RequestOptions`.
:rtype: :class:`~google.cloud.spanner_v1.database.BatchCheckout`
:returns: new wrapper
"""
return BatchCheckout(self, request_options)
def batch_snapshot(self, read_timestamp=None, exact_staleness=None):
"""Return an object which wraps a batch read / query.
:type read_timestamp: :class:`datetime.datetime`
:param read_timestamp: Execute all reads at the given timestamp.
:type exact_staleness: :class:`datetime.timedelta`
:param exact_staleness: Execute all reads at a timestamp that is
``exact_staleness`` old.
:rtype: :class:`~google.cloud.spanner_v1.database.BatchSnapshot`
:returns: new wrapper
"""
return BatchSnapshot(
self, read_timestamp=read_timestamp, exact_staleness=exact_staleness
)
def run_in_transaction(self, func, *args, **kw):
"""Perform a unit of work in a transaction, retrying on abort.
:type func: callable
:param func: takes a required positional argument, the transaction,
and additional positional / keyword arguments as supplied
by the caller.
:type args: tuple
:param args: additional positional arguments to be passed to ``func``.
:type kw: dict
:param kw: (Optional) keyword arguments to be passed to ``func``.
If passed, "timeout_secs" will be removed and used to
override the default retry timeout which defines maximum timestamp
to continue retrying the transaction.
:rtype: Any
:returns: The return value of ``func``.
:raises Exception:
reraises any non-ABORT exceptions raised by ``func``.
"""
# Sanity check: Is there a transaction already running?
# If there is, then raise a red flag. Otherwise, mark that this one
# is running.
if getattr(self._local, "transaction_running", False):
raise RuntimeError("Spanner does not support nested transactions.")
self._local.transaction_running = True
# Check out a session and run the function in a transaction; once
# done, flip the sanity check bit back.
try:
with SessionCheckout(self._pool) as session:
return session.run_in_transaction(func, *args, **kw)
finally:
self._local.transaction_running = False
def restore(self, source):
"""Restore from a backup to this database.
:type source: :class:`~google.cloud.spanner_v1.backup.Backup`
:param source: the path of the source being restored from.
:rtype: :class:`~google.api_core.operation.Operation`
:returns: a future used to poll the status of the create request
:raises Conflict: if the database already exists
:raises NotFound:
if the instance owning the database does not exist, or
if the backup being restored from does not exist
:raises ValueError: if backup is not set
"""
if source is None:
raise ValueError("Restore source not specified")
if type(self._encryption_config) == dict:
self._encryption_config = RestoreDatabaseEncryptionConfig(
**self._encryption_config
)
if (
self.encryption_config
and self.encryption_config.kms_key_name
and self.encryption_config.encryption_type
!= RestoreDatabaseEncryptionConfig.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION
):
raise ValueError("kms_key_name only used with CUSTOMER_MANAGED_ENCRYPTION")
api = self._instance._client.database_admin_api
metadata = _metadata_with_prefix(self.name)
request = RestoreDatabaseRequest(
parent=self._instance.name,
database_id=self.database_id,
backup=source.name,
encryption_config=self._encryption_config or None,
)
future = api.restore_database(request=request, metadata=metadata,)
return future
def is_ready(self):
"""Test whether this database is ready for use.
:rtype: bool
:returns: True if the database state is READY_OPTIMIZING or READY, else False.
"""
return (
self.state == DatabasePB.State.READY_OPTIMIZING
or self.state == DatabasePB.State.READY
)
def is_optimized(self):
"""Test whether this database has finished optimizing.
:rtype: bool
:returns: True if the database state is READY, else False.
"""
return self.state == DatabasePB.State.READY
def list_database_operations(self, filter_="", page_size=None):
"""List database operations for the database.
:type filter_: str
:param filter_:
Optional. A string specifying a filter for which database operations to list.
:type page_size: int
:param page_size:
Optional. The maximum number of operations in each page of results from this
request. Non-positive values are ignored. Defaults to a sensible value set
by the API.
:type: :class:`~google.api_core.page_iterator.Iterator`
:returns:
Iterator of :class:`~google.api_core.operation.Operation`
resources within the current instance.
"""
database_filter = _DATABASE_METADATA_FILTER.format(self.name)
if filter_:
database_filter = "({0}) AND ({1})".format(filter_, database_filter)
return self._instance.list_database_operations(
filter_=database_filter, page_size=page_size
)
def table(self, table_id):
"""Factory to create a table object within this database.
Note: This method does not create a table in Cloud Spanner, but it can
be used to check if a table exists.
.. code-block:: python
my_table = database.table("my_table")
if my_table.exists():
print("Table with ID 'my_table' exists.")
else:
print("Table with ID 'my_table' does not exist.")
:type table_id: str
:param table_id: The ID of the table.
:rtype: :class:`~google.cloud.spanner_v1.table.Table`
:returns: a table owned by this database.
"""
return Table(table_id, self)
def list_tables(self):
"""List tables within the database.
:type: Iterable
:returns:
Iterable of :class:`~google.cloud.spanner_v1.table.Table`
resources within the current database.
"""
with self.snapshot() as snapshot:
results = snapshot.execute_sql(_LIST_TABLES_QUERY)
for row in results:
yield self.table(row[0])
class BatchCheckout(object):
"""Context manager for using a batch from a database.
Inside the context manager, checks out a session from the database,
creates a batch from it, making the batch available.
Caller must *not* use the batch to perform API requests outside the scope
of the context manager.
:type database: :class:`~google.cloud.spanner_v1.database.Database`
:param database: database to use
:type request_options:
:class:`google.cloud.spanner_v1.types.RequestOptions`
:param request_options:
(Optional) Common options for the commit request.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_v1.types.RequestOptions`.
"""
def __init__(self, database, request_options=None):
self._database = database
self._session = self._batch = None
if request_options is None:
self._request_options = RequestOptions()
elif type(request_options) == dict:
self._request_options = RequestOptions(request_options)
else:
self._request_options = request_options
def __enter__(self):
"""Begin ``with`` block."""
session = self._session = self._database._pool.get()
batch = self._batch = Batch(session)
if self._request_options.transaction_tag:
batch.transaction_tag = self._request_options.transaction_tag
return batch
def __exit__(self, exc_type, exc_val, exc_tb):
"""End ``with`` block."""
try:
if exc_type is None:
self._batch.commit(
return_commit_stats=self._database.log_commit_stats,
request_options=self._request_options,
)
finally:
if self._database.log_commit_stats and self._batch.commit_stats:
self._database.logger.info(
"CommitStats: {}".format(self._batch.commit_stats),
extra={"commit_stats": self._batch.commit_stats},
)
self._database._pool.put(self._session)
class SnapshotCheckout(object):
"""Context manager for using a snapshot from a database.
Inside the context manager, checks out a session from the database,
creates a snapshot from it, making the snapshot available.
Caller must *not* use the snapshot to perform API requests outside the
scope of the context manager.
:type database: :class:`~google.cloud.spanner_v1.database.Database`
:param database: database to use
:type kw: dict
:param kw:
Passed through to
:class:`~google.cloud.spanner_v1.snapshot.Snapshot` constructor.
"""
def __init__(self, database, **kw):
self._database = database
self._session = None
self._kw = kw
def __enter__(self):
"""Begin ``with`` block."""
session = self._session = self._database._pool.get()
return Snapshot(session, **self._kw)
def __exit__(self, exc_type, exc_val, exc_tb):
"""End ``with`` block."""
self._database._pool.put(self._session)
class BatchSnapshot(object):
"""Wrapper for generating and processing read / query batches.
:type database: :class:`~google.cloud.spanner_v1.database.Database`
:param database: database to use
:type read_timestamp: :class:`datetime.datetime`
:param read_timestamp: Execute all reads at the given timestamp.
:type exact_staleness: :class:`datetime.timedelta`
:param exact_staleness: Execute all reads at a timestamp that is
``exact_staleness`` old.
"""
def __init__(self, database, read_timestamp=None, exact_staleness=None):
self._database = database
self._session = None
self._snapshot = None
self._read_timestamp = read_timestamp
self._exact_staleness = exact_staleness
@classmethod
def from_dict(cls, database, mapping):
"""Reconstruct an instance from a mapping.
:type database: :class:`~google.cloud.spanner_v1.database.Database`
:param database: database to use
:type mapping: mapping
:param mapping: serialized state of the instance
:rtype: :class:`BatchSnapshot`
"""
instance = cls(database)
session = instance._session = database.session()
session._session_id = mapping["session_id"]
snapshot = instance._snapshot = session.snapshot()
snapshot._transaction_id = mapping["transaction_id"]
return instance
def to_dict(self):
"""Return state as a dictionary.
Result can be used to serialize the instance and reconstitute
it later using :meth:`from_dict`.
:rtype: dict
"""
session = self._get_session()
snapshot = self._get_snapshot()
return {
"session_id": session._session_id,
"transaction_id": snapshot._transaction_id,
}
def _get_session(self):
"""Create session as needed.
.. note::
Caller is responsible for cleaning up the session after
all partitions have been processed.
"""
if self._session is None:
session = self._session = self._database.session()
session.create()
return self._session
def _get_snapshot(self):
"""Create snapshot if needed."""
if self._snapshot is None:
self._snapshot = self._get_session().snapshot(
read_timestamp=self._read_timestamp,
exact_staleness=self._exact_staleness,
multi_use=True,
)
self._snapshot.begin()
return self._snapshot
def read(self, *args, **kw):
"""Convenience method: perform read operation via snapshot.
See :meth:`~google.cloud.spanner_v1.snapshot.Snapshot.read`.
"""
return self._get_snapshot().read(*args, **kw)
def execute_sql(self, *args, **kw):
"""Convenience method: perform query operation via snapshot.
See :meth:`~google.cloud.spanner_v1.snapshot.Snapshot.execute_sql`.
"""
return self._get_snapshot().execute_sql(*args, **kw)
def generate_read_batches(
self,
table,
columns,
keyset,
index="",
partition_size_bytes=None,
max_partitions=None,
*,
retry=gapic_v1.method.DEFAULT,
timeout=gapic_v1.method.DEFAULT,
):
"""Start a partitioned batch read operation.
Uses the ``PartitionRead`` API request to initiate the partitioned
read. Returns a list of batch information needed to perform the
actual reads.
:type table: str
:param table: name of the table from which to fetch data
:type columns: list of str
:param columns: names of columns to be retrieved
:type keyset: :class:`~google.cloud.spanner_v1.keyset.KeySet`
:param keyset: keys / ranges identifying rows to be retrieved
:type index: str
:param index: (Optional) name of index to use, rather than the
table's primary key
:type partition_size_bytes: int
:param partition_size_bytes:
(Optional) desired size for each partition generated. The service
uses this as a hint, the actual partition size may differ.
:type max_partitions: int
:param max_partitions:
(Optional) desired maximum number of partitions generated. The
service uses this as a hint, the actual number of partitions may
differ.
:type retry: :class:`~google.api_core.retry.Retry`
:param retry: (Optional) The retry settings for this request.
:type timeout: float
:param timeout: (Optional) The timeout for this request.
:rtype: iterable of dict
:returns:
mappings of information used perform actual partitioned reads via
:meth:`process_read_batch`.
"""
partitions = self._get_snapshot().partition_read(
table=table,
columns=columns,
keyset=keyset,
index=index,
partition_size_bytes=partition_size_bytes,
max_partitions=max_partitions,
retry=retry,
timeout=timeout,
)
read_info = {
"table": table,
"columns": columns,
"keyset": keyset._to_dict(),
"index": index,
}
for partition in partitions:
yield {"partition": partition, "read": read_info.copy()}
def process_read_batch(
self, batch, *, retry=gapic_v1.method.DEFAULT, timeout=gapic_v1.method.DEFAULT,
):
"""Process a single, partitioned read.
:type batch: mapping
:param batch:
one of the mappings returned from an earlier call to
:meth:`generate_read_batches`.
:type retry: :class:`~google.api_core.retry.Retry`
:param retry: (Optional) The retry settings for this request.
:type timeout: float
:param timeout: (Optional) The timeout for this request.
:rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet`
:returns: a result set instance which can be used to consume rows.
"""
kwargs = copy.deepcopy(batch["read"])
keyset_dict = kwargs.pop("keyset")
kwargs["keyset"] = KeySet._from_dict(keyset_dict)
return self._get_snapshot().read(
partition=batch["partition"], **kwargs, retry=retry, timeout=timeout
)
def generate_query_batches(
self,
sql,
params=None,
param_types=None,
partition_size_bytes=None,
max_partitions=None,
query_options=None,
*,
retry=gapic_v1.method.DEFAULT,
timeout=gapic_v1.method.DEFAULT,
):
"""Start a partitioned query operation.
Uses the ``PartitionQuery`` API request to start a partitioned
query operation. Returns a list of batch information needed to
perform the actual queries.
:type sql: str
:param sql: SQL query statement
:type params: dict, {str -> column value}
:param params: values for parameter replacement. Keys must match
the names used in ``sql``.
:type param_types: dict[str -> Union[dict, .types.Type]]
:param param_types:
(Optional) maps explicit types for one or more param values;
required if parameters are passed.
:type partition_size_bytes: int
:param partition_size_bytes:
(Optional) desired size for each partition generated. The service
uses this as a hint, the actual partition size may differ.
:type partition_size_bytes: int
:param partition_size_bytes:
(Optional) desired size for each partition generated. The service
uses this as a hint, the actual partition size may differ.
:type max_partitions: int
:param max_partitions:
(Optional) desired maximum number of partitions generated. The
service uses this as a hint, the actual number of partitions may
differ.
:type query_options:
:class:`~google.cloud.spanner_v1.types.ExecuteSqlRequest.QueryOptions`
or :class:`dict`
:param query_options:
(Optional) Query optimizer configuration to use for the given query.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_v1.types.QueryOptions`
:type retry: :class:`~google.api_core.retry.Retry`
:param retry: (Optional) The retry settings for this request.
:type timeout: float
:param timeout: (Optional) The timeout for this request.
:rtype: iterable of dict
:returns:
mappings of information used perform actual partitioned reads via
:meth:`process_read_batch`.
"""
partitions = self._get_snapshot().partition_query(
sql=sql,
params=params,
param_types=param_types,
partition_size_bytes=partition_size_bytes,
max_partitions=max_partitions,
retry=retry,
timeout=timeout,
)
query_info = {"sql": sql}
if params:
query_info["params"] = params
query_info["param_types"] = param_types
# Query-level options have higher precedence than client-level and
# environment-level options
default_query_options = self._database._instance._client._query_options
query_info["query_options"] = _merge_query_options(
default_query_options, query_options
)
for partition in partitions:
yield {"partition": partition, "query": query_info}
def process_query_batch(
self, batch, *, retry=gapic_v1.method.DEFAULT, timeout=gapic_v1.method.DEFAULT,
):
"""Process a single, partitioned query.
:type batch: mapping
:param batch:
one of the mappings returned from an earlier call to
:meth:`generate_query_batches`.
:type retry: :class:`~google.api_core.retry.Retry`
:param retry: (Optional) The retry settings for this request.
:type timeout: float
:param timeout: (Optional) The timeout for this request.
:rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet`
:returns: a result set instance which can be used to consume rows.
"""
return self._get_snapshot().execute_sql(
partition=batch["partition"], **batch["query"], retry=retry, timeout=timeout
)
def process(self, batch):
"""Process a single, partitioned query or read.
:type batch: mapping
:param batch:
one of the mappings returned from an earlier call to
:meth:`generate_query_batches`.
:rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet`
:returns: a result set instance which can be used to consume rows.
:raises ValueError: if batch does not contain either 'read' or 'query'
"""
if "query" in batch:
return self.process_query_batch(batch)
if "read" in batch:
return self.process_read_batch(batch)
raise ValueError("Invalid batch")
def close(self):
"""Clean up underlying session.
.. note::
If the transaction has been shared across multiple machines,
calling this on any machine would invalidate the transaction
everywhere. Ideally this would be called when data has been read
from all the partitions.
"""
if self._session is not None:
self._session.delete()
def _check_ddl_statements(value):
"""Validate DDL Statements used to define database schema.
See
https://cloud.google.com/spanner/docs/data-definition-language
:type value: list of string
:param value: DDL statements, excluding the 'CREATE DATABASE' statement
:rtype: tuple
:returns: tuple of validated DDL statement strings.
:raises ValueError:
if elements in ``value`` are not strings, or if ``value`` contains
a ``CREATE DATABASE`` statement.
"""
if not all(isinstance(line, str) for line in value):
raise ValueError("Pass a list of strings")
if any("create database" in line.lower() for line in value):
raise ValueError("Do not pass a 'CREATE DATABASE' statement")
return tuple(value)
def _retry_on_aborted(func, retry_config):
"""Helper for :meth:`Database.execute_partitioned_dml`.
Wrap function in a Retry that will retry on Aborted exceptions
with the retry config specified.
:type func: callable
:param func: the function to be retried on Aborted exceptions
:type retry_config: Retry
:param retry_config: retry object with the settings to be used
"""
retry = retry_config.with_predicate(if_exception_type(Aborted))
return retry(func)
|
googleapis/python-spanner
|
google/cloud/spanner_v1/database.py
|
Python
|
apache-2.0
| 45,568 | 0.000812 |
# urllib3/contrib/ntlmpool.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
"""
Implements an NTLM authentication version of an urllib3 connection pool
"""
scheme = 'https'
def __init__(self, user, pw, authurl, *args, **kwargs):
"""
authurl is a random URL on the server that is protected by NTLM.
user is the Windows user, probably in the DOMAIN\username format.
pw is the password for the user.
"""
super(NTLMConnectionPool, self).__init__(*args, **kwargs)
self.authurl = authurl
self.rawuser = user
user_parts = user.split('\\', 1)
self.domain = user_parts[0].upper()
self.user = user_parts[1]
self.pw = pw
def _new_conn(self):
# Performs the NTLM handshake that secures the connection. The socket
# must be kept open while requests are performed.
self.num_connections += 1
log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' %
(self.num_connections, self.host, self.authurl))
headers = {}
headers['Connection'] = 'Keep-Alive'
req_header = 'Authorization'
resp_header = 'www-authenticate'
conn = HTTPSConnection(host=self.host, port=self.port)
# Send negotiation message
headers[req_header] = (
'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
reshdr = dict(res.getheaders())
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % reshdr)
log.debug('Response data: %s [...]' % res.read(100))
# Remove the reference to the socket, so that it can not be closed by
# the response object (we want to keep the socket open)
res.fp = None
# Server should respond with a challenge message
auth_header_values = reshdr[resp_header].split(', ')
auth_header_value = None
for s in auth_header_values:
if s[:5] == 'NTLM ':
auth_header_value = s[5:]
if auth_header_value is None:
raise Exception('Unexpected %s response header: %s' %
(resp_header, reshdr[resp_header]))
# Send authentication message
ServerChallenge, NegotiateFlags = \
ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
self.user,
self.domain,
self.pw,
NegotiateFlags)
headers[req_header] = 'NTLM %s' % auth_msg
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % dict(res.getheaders()))
log.debug('Response data: %s [...]' % res.read()[:100])
if res.status != 200:
if res.status == 401:
raise Exception('Server rejected request: wrong '
'username or password')
raise Exception('Wrong server response: %s %s' %
(res.status, res.reason))
res.fp = None
log.debug('Connection established')
return conn
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True):
if headers is None:
headers = {}
headers['Connection'] = 'Keep-Alive'
return super(NTLMConnectionPool, self).urlopen(method, url, body,
headers, retries,
redirect,
assert_same_host)
|
kristerhedfors/xnet
|
xnet/packages/urllib3/contrib/ntlmpool.py
|
Python
|
bsd-3-clause
| 4,740 | 0 |
"""
This config file runs the simplest dev environment"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0614
from .common import *
from logsettings import get_logger_config
DEBUG = True
TEMPLATE_DEBUG = DEBUG
LOGGING = get_logger_config(ENV_ROOT / "log",
logging_env="dev",
tracking_filename="tracking.log",
dev_env=True,
debug=True)
modulestore_options = {
'default_class': 'xmodule.raw_module.RawDescriptor',
'host': 'localhost',
'db': 'xmodule',
'collection': 'modulestore',
'fs_root': GITHUB_REPO_ROOT,
'render_template': 'mitxmako.shortcuts.render_to_string',
}
MODULESTORE = {
'default': {
'ENGINE': 'xmodule.modulestore.draft.DraftModuleStore',
'OPTIONS': modulestore_options
},
'direct': {
'ENGINE': 'xmodule.modulestore.mongo.MongoModuleStore',
'OPTIONS': modulestore_options
}
}
# cdodge: This is the specifier for the MongoDB (using GridFS) backed static content store
# This is for static content for courseware, not system static content (e.g. javascript, css, edX branding, etc)
CONTENTSTORE = {
'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore',
'OPTIONS': {
'host': 'localhost',
'db': 'xcontent',
},
# allow for additional options that can be keyed on a name, e.g. 'trashcan'
'ADDITIONAL_OPTIONS': {
'trashcan': {
'bucket': 'trash_fs'
}
}
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ENV_ROOT / "db" / "mitx.db",
}
}
LMS_BASE = "10.129.50.13:8000"
MITX_FEATURES['PREVIEW_LMS_BASE'] = "10.129.50.13:8000"
REPOS = {
'edx4edx': {
'branch': 'master',
'origin': 'git@github.com:MITx/edx4edx.git',
},
'content-mit-6002x': {
'branch': 'master',
# 'origin': 'git@github.com:MITx/6002x-fall-2012.git',
'origin': 'git@github.com:MITx/content-mit-6002x.git',
},
'6.00x': {
'branch': 'master',
'origin': 'git@github.com:MITx/6.00x.git',
},
'7.00x': {
'branch': 'master',
'origin': 'git@github.com:MITx/7.00x.git',
},
'3.091x': {
'branch': 'master',
'origin': 'git@github.com:MITx/3.091x.git',
},
}
CACHES = {
# This is the cache used for most things. Askbot will not work without a
# functioning cache -- it relies on caching to load its settings in places.
# In staging/prod envs, the sessions also live here.
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'mitx_loc_mem_cache',
'KEY_FUNCTION': 'util.memcache.safe_key',
},
# The general cache is what you get if you use our util.cache. It's used for
# things like caching the course.xml file for different A/B test groups.
# We set it to be a DummyCache to force reloading of course.xml in dev.
# In staging environments, we would grab VERSION from data uploaded by the
# push process.
'general': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
'KEY_PREFIX': 'general',
'VERSION': 4,
'KEY_FUNCTION': 'util.memcache.safe_key',
},
'mongo_metadata_inheritance': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': '/var/tmp/mongo_metadata_inheritance',
'TIMEOUT': 300,
'KEY_FUNCTION': 'util.memcache.safe_key',
}
}
# Make the keyedcache startup warnings go away
CACHE_TIMEOUT = 0
# Dummy secret key for dev
SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
################################ PIPELINE #################################
PIPELINE_SASS_ARGUMENTS = '--debug-info --require {proj_dir}/static/sass/bourbon/lib/bourbon.rb'.format(proj_dir=PROJECT_ROOT)
################################# CELERY ######################################
# By default don't use a worker, execute tasks as if they were local functions
CELERY_ALWAYS_EAGER = True
################################ DEBUG TOOLBAR #################################
INSTALLED_APPS += ('debug_toolbar', 'debug_toolbar_mongo')
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.version.VersionDebugPanel',
'debug_toolbar.panels.timer.TimerDebugPanel',
'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
'debug_toolbar.panels.sql.SQLDebugPanel',
'debug_toolbar.panels.signals.SignalDebugPanel',
'debug_toolbar.panels.logger.LoggingPanel',
'debug_toolbar_mongo.panel.MongoDebugPanel',
# Enabling the profiler has a weird bug as of django-debug-toolbar==0.9.4 and
# Django=1.3.1/1.4 where requests to views get duplicated (your method gets
# hit twice). So you can uncomment when you need to diagnose performance
# problems, but you shouldn't leave it on.
# 'debug_toolbar.panels.profiling.ProfilingDebugPanel',
)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False
}
# To see stacktraces for MongoDB queries, set this to True.
# Stacktraces slow down page loads drastically (for pages with lots of queries).
DEBUG_TOOLBAR_MONGO_STACKTRACES = True
# disable NPS survey in dev mode
MITX_FEATURES['STUDIO_NPS_SURVEY'] = False
# Enable URL that shows information about the status of variuous services
MITX_FEATURES['ENABLE_SERVICE_STATUS'] = True
############################# SEGMENT-IO ##################################
# If there's an environment variable set, grab it and turn on Segment.io
# Note that this is the Studio key. There is a separate key for the LMS.
import os
SEGMENT_IO_KEY = os.environ.get('SEGMENT_IO_KEY')
if SEGMENT_IO_KEY:
MITX_FEATURES['SEGMENT_IO'] = True
#####################################################################
# Lastly, see if the developer has any local overrides.
try:
from .private import * # pylint: disable=F0401
except ImportError:
pass
|
IITBinterns13/edx-platform-dev
|
cms/envs/dev.py
|
Python
|
agpl-3.0
| 6,301 | 0.002063 |
"""
Polygon path.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.geometry.creation import lineation
from fabmetheus_utilities.geometry.geometry_utilities import evaluate
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import euclidean
import math
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__credits__ = 'Art of Illusion <http://www.artofillusion.org/>'
__date__ = "$Date: 2008/02/05 $"
__license__ = 'GPL 3.0'
def getGeometryOutput(xmlElement):
"Get vector3 vertexes from attribute dictionary."
radius = lineation.getRadiusComplex(complex(1.0, 1.0), xmlElement)
sides = evaluate.getSidesMinimumThreeBasedOnPrecisionSides(max(radius.real, radius.imag), xmlElement)
loop = []
start = evaluate.getEvaluatedFloatZero('start', xmlElement)
start = getWrappedFloat(start, 360.0)
extent = evaluate.getEvaluatedFloatDefault(360.0 - start, 'extent', xmlElement)
end = evaluate.getEvaluatedFloatDefault(start + extent, 'end', xmlElement)
end = getWrappedFloat(end, 360.0)
revolutions = evaluate.getEvaluatedFloatOne('revolutions', xmlElement)
if revolutions > 1:
end += 360.0 * (revolutions - 1)
angleTotal = math.radians(start)
extent = end - start
sidesCeiling = int(math.ceil(abs(sides) * extent / 360.0))
sideAngle = math.radians(extent) / sidesCeiling
spiral = lineation.Spiral(0.5 * sideAngle / math.pi, xmlElement)
for side in xrange(sidesCeiling + (extent != 360.0)):
unitPolar = euclidean.getWiddershinsUnitPolar(angleTotal)
vertex = spiral.getSpiralPoint(unitPolar, Vector3(unitPolar.real * radius.real, unitPolar.imag * radius.imag))
angleTotal += sideAngle
loop.append(vertex)
sideLength = sideAngle * lineation.getAverageRadius(radius)
lineation.setClosedAttribute(revolutions, xmlElement)
return lineation.getGeometryOutputByLoop(lineation.SideLoop(loop, sideAngle, sideLength), xmlElement)
def getGeometryOutputByArguments(arguments, xmlElement):
"Get vector3 vertexes from attribute dictionary by arguments."
evaluate.setAttributeDictionaryByArguments(['radius', 'start', 'end', 'revolutions'], arguments, xmlElement)
return getGeometryOutput(xmlElement)
def getWrappedFloat(floatValue, modulo):
"Get wrapped float."
if floatValue >= modulo:
return modulo
if floatValue >= 0:
return floatValue
return floatValue % modulo
def processXMLElement(xmlElement):
"Process the xml element."
lineation.processXMLElementByGeometry(getGeometryOutput(xmlElement), xmlElement)
|
natetrue/ReplicatorG
|
skein_engines/skeinforge-31/fabmetheus_utilities/geometry/creation/circle.py
|
Python
|
gpl-2.0
| 2,658 | 0.017682 |
import operator
import numpy as np
import sys
'''
Description:
Author: Mikko Auvinen
mikko.auvinen@fmi.fi
Finnish Meteorological Institute
'''
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def readAsciiGridHeader( filename, nHeaderEntries, idx=0 ):
fl = open( filename , 'r')
name = filename.strip('.asc') # Extract the tile name.
hdict = {'id':idx,'name': name }
# 'ncols': None
# 'nrows': None
# 'xllcorner': None
# 'yllcorner': None
# 'cellsize' : None
# 'NODATA_value' : None
for i in range(nHeaderEntries):
try:
s = fl.readline().lower().split()
print(' Header line {}: {} '.format(i,s))
hdict[s[0]] = float( s[1] )
except:
print('Unexpected ascii grid header format. Exiting.')
sys.exit(1)
hdict = asciiCenterToCorner( hdict )
idx += 1
fl.close()
return hdict, idx
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def asciiCenterToCorner( xdict ):
# Resolve the issue involving xllcorner vs xllcenter. Both are allowed by ESRI ASCII standard
if( 'xllcenter' in xdict.keys() ):
xdict['xllcorner'] = xdict['xllcenter'] - xdict['cellsize']/2.
if( 'yllcenter' in xdict.keys() ):
xdict['yllcorner'] = xdict['yllcenter'] - xdict['cellsize']/2.
return xdict
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def readAsciiGrid( filename, nHeaderEntries ):
try:
rx = np.loadtxt( filename, skiprows=nHeaderEntries ) # Note! skiprows=6.
print(' File {} read successfully.'.format(filename))
except:
print(' Could not read ascii grid file: {}. Exiting.'.format(filename))
sys.exit(1)
return rx
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def asciiTileToNumpyZ(filename, nHeaderEntries, idx=0):
Rdict, idx = readAsciiGridHeader( filename, nHeaderEntries, idx )
R = readAsciiGrid( filename, nHeaderEntries )
Rdict['id'] = idx
Rdict['ytlcorner'] = Rdict['yllcorner'] + Rdict['cellsize']* Rdict['nrows']
Rdict['xtlcorner'] = Rdict['xllcorner']
# These are ofter used later.
Rdict['GlobOrig'] = np.array([ Rdict['ytlcorner'], Rdict['xtlcorner']]) # [N,E]
Rdict['dPx'] = np.array([ Rdict['cellsize'], Rdict['cellsize'] ])
Rdict['R'] = R
saveTileAsNumpyZ( filename.strip('.asc'), Rdict )
return Rdict
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def readNumpyZGridData( filename, idx=0 ):
Rdict = readNumpyZTile(filename, dataOnly=True)
Rxdims=np.array(np.shape(Rdict['R']))
Rdict['R'] = []
RxOrig=Rdict['GlobOrig']
dPx=Rdict['dPx']
name = filename.strip('.npz') # Extract the tile name.
hdict = {'id':idx,'name': name, 'ncols':Rxdims[1],'nrows':Rxdims[0],\
'xtlcorner':RxOrig[1],'ytlcorner':RxOrig[0],\
'cellsize':int(dPx[0]),'nodata_value':None}
idx += 1
return hdict, idx
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def resolutionFromDicts( dictList ):
d1 = dictList[0]
dPxRef = d1['cellsize']
for d in dictList:
dPx = d['cellsize']
if( dPx != dPxRef ):
print('The tile resolutions do not match. Exiting.')
sys.exit(1)
return np.array([dPxRef,dPxRef])
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def arrangeTileGrid( dictList, fileTypes ):
coordList = []
ascii = fileTypes[0]; npz = fileTypes[1]
XO_TL = np.zeros(2) # Initialize the Top Left Origin.
for d in dictList:
# The last two indecies are for row / col addresses.
if( ascii ):
coordList.append( [d['id'], d['xllcorner'], d['yllcorner'], 0, 0] )
else: # .npz
coordList.append( [d['id'], d['xtlcorner'], d['ytlcorner'], 0, 0] )
# Sort the list according to y-values
coordListSorted = sorted( coordList, key=operator.itemgetter(2) )
#print(' CoordList y-sorted : {} '.format( coordListSorted ))
# Determine the Top Left Origin (y-value).
ltmp = coordListSorted[-1] # Take the last entry.
dtmp = dictList[ltmp[0]] # Fetch the desired dict. ltmp[0] == id.
if( ascii ):
XO_TL[0]= dtmp['yllcorner']+dtmp['nrows']*dtmp['cellsize']
else:
XO_TL[0]= dtmp['ytlcorner']
irow = 0; maxVal = 0.
for t in reversed(coordListSorted):
if( t[2] >= maxVal ): # t[2] := y-cord.
pass
else:
irow+=1 # Change row
t[3] = irow; maxVal = t[2]
imax = irow+1 # Store the number of rows.
# Sort the list according to x-values
coordListSorted = sorted( coordList, key=operator.itemgetter(1) )
#print(' x-sorted : {} '.format( coordListSorted ))
# Determine the Top Left Origin (x-value).
# This is the same for xllcorner and xtlcorner.
ltmp = coordListSorted[0]
dtmp = dictList[ltmp[0]]
if( ascii ):
XO_TL[1]= dtmp['xllcorner']
else:
XO_TL[1]= dtmp['xtlcorner']
jcol = 0; minVal = 1.e12
for t in coordListSorted:
if( t[1] <= minVal ): # t[1] := x-cord.
pass
else:
jcol+=1 # Change column
t[4] = jcol; minVal = t[1]
jmax = jcol+1 # Store the number of columns
ijList = []
for t in coordListSorted:
ijList.append( [ t[0], t[3], t[4] ] )# id, irow, jcol
return ijList, XO_TL, imax, jmax
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def minMaxCoords( xdict , fileTypes ):
asc = fileTypes[0]; npz = fileTypes[1]
if( asc ):
xn = xdict['xllcorner']
yx = xdict['yllcorner']
xx = xdict['xllcorner']+xdict['ncols']*xdict['cellsize']
yn = xdict['yllcorner']-xdict['nrows']*xdict['cellsize']
else:
xn = xdict['xtlcorner']
yx = xdict['ytlcorner']
xx = xdict['xtlcorner']
yn = xdict['ytlcorner']
return xn, xx, yn, yx
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def compileTileGrid( dictList, ijList, Mrows, Mcols, fileTypes, nHeaderEntries ):
M = [] # An empty array to start with.
ascii = fileTypes[0]; npz = fileTypes[1]
for i in range(Mrows):
for j in range(Mcols):
for idTile, irow, jcol in ijList:
if(irow == i and jcol == j ):
d = dictList[idTile]
if( ascii ):
r = readAsciiGrid( d['name']+'.asc', nHeaderEntries )
elif( npz ):
Rdict = readNumpyZTile(d['name']+'.npz')
r=Rdict['R']
Rdict = None # Throw the rest away.
M.append(r); r = None
print(' M.shape = {}'.format(np.shape(M)))
T = None
for i in range(Mrows):
c1 = i*Mcols; c2 = (i+1)*Mcols
print('c1={}, c2={}'.format(c1,c2))
if( T is None ):
T = np.hstack(M[c1:c2])
else:
T = np.vstack( (T,np.hstack(M[c1:c2])) )
print(' np.shape(T) = {}'.format(np.shape(T)))
M = None
Rdict = {'R' : T}
return Rdict
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def saveTileAsNumpyZ( filename, Rdict):
'''
The saved npz file doesn't contain Rdict, but separate numpy arrays matching key names.
Therefore np.load(filename) is equal to the saved Rdict.
'''
try:
np.savez_compressed(filename, **Rdict)
print(' {} saved successfully!'.format(filename))
except:
print(' Error in saving {}.npz in saveTileAsNumpyZ().'.format(filename.strip('.npz')))
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def initRdict(Rdict, R=None, dPx=None ):
if( R is not None ): Rdict['R'] = R
if( dPx is not None ): Rdict['dPx'] = dPx
if( 'GlobOrig' not in Rdict ):
Rdict['GlobOrig'] = np.array( [0.,0.] )
if('gridRot' not in Rdict ):
Rdict['gridRot'] = 0.0
if( ('Rdims' not in Rdict) and (R is not None) ):
Rdict['Rdims'] = np.array( R.shape )
return Rdict
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def checkDictFormat( Rdict ):
# Backwards compatibility for variable name change.
if(not('gridRot' in Rdict)): Rdict['gridRot'] = 0.0
if ('XOrig' in Rdict and not('GlobOrig' in Rdict)):
Rdict['GlobOrig']=Rdict['XOrig']
# For some reason dPx arrays were saved as 'dpx' in the past hardcoded versions of saveTileAsNumpyZ.
if ('dpx' in Rdict and not('dPx' in Rdict)):
Rdict['dPx']=Rdict['dpx']
# Add bottom left origin only if the transformation is trivial (i.e. no rotation required).
# Otherwise the extractDomainFromTile.py script ought to be used.
if(not('GlobOrigBL' in Rdict) and Rdict['gridRot']==0.0):
# Check also we're dealing with raster format numpy array.
if(('GlobOrig' in Rdict) and ('dPx' in Rdict) and ('R' in Rdict)):
BLN = Rdict['GlobOrig'][0] + Rdict['dPx'][0]*np.shape(Rdict['R'])[0]
BLE = Rdict['GlobOrig'][1]
Rdict['GlobOrigBL'] = np.array([ BLN , BLE ])
return Rdict
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def readNumpyZTile( filename, dataOnly=False, verbose=True):
if (verbose):
print(' Read filename {} '.format(filename))
# dat must be closed to avoid leaking file descriptors.
try:
dat = np.load(filename)
Rdict = dict(dat)
dat.close()
except IOError as e:
print('Error reading file {0}: {1}'.format(filename, e.strerror))
sys.exit(e.errno)
#if(dataOnly):
#Rdict['R'] = []
Rdict = checkDictFormat( Rdict )
return Rdict
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def readNumpyZTileForMesh( filename ):
Rdict = readNumpyZTile( filename )
Rx = Rdict['R']
Rxdims = np.array(np.shape(Rx))
RxOrig = Rdict['GlobOrig']
dPx = Rdict['dPx']
try:
gridRot = Rdict['gridRot']
except:
gridRot = Rdict['gridRot'] = 0
# N,E - coords, start from top left.
# Sometimes the dPx[0] is correctly negative, but sometimes not.
# Here, we need to make sure it's own sign is irrelevant
dPN = np.abs(dPx[0]); dPE = np.abs(dPx[1])
Rdict['rowCoords'] = np.arange(RxOrig[0],(RxOrig[0]-Rxdims[0]*dPN),-dPN) # N
Rdict['colCoords'] = np.arange(RxOrig[1],(RxOrig[1]+Rxdims[1]*dPE), dPE) # E
#Nx, Ex = np.meshgrid(ni,ej)
return Rdict
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def rotateGridAroundPivot( X, Y, xp, yp, theta, deg=True ):
if( deg ):
theta = theta * (np.pi/180.)
CtX = np.array([ np.cos(theta), -np.sin(theta) ])
CtY = np.array([ np.sin(theta) , np.cos(theta) ])
#print(' CtX = {} , CtY = {} '.format(CtX, CtY))
Mdims = np.shape(X)
XR = np.zeros( Mdims, float )
YR = np.zeros( Mdims, float )
for i in range( Mdims[0] ):
XR[i,:] = xp + (X[i,:]-xp)*CtX[0] + (Y[i,:]-yp)*CtX[1] # E-X
YR[i,:] = yp + (X[i,:]-xp)*CtY[0] + (Y[i,:]-yp)*CtY[1] # N-Y
return XR, YR
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def rotatePoint(pivot, point, angle):
# Simple 2D rotation matrix
rotatedPoint = np.zeros(2)
rotatedPoint[1] = pivot[1] + np.cos(angle) * (point[1] - pivot[1]) - np.sin(angle) * (point[0] - pivot[0])
rotatedPoint[0] = pivot[0] + np.sin(angle) * (point[1] - pivot[1]) + np.cos(angle) * (point[0] - pivot[0])
return rotatedPoint
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def entry2Int( ax ):
try:
ax = np.mean(np.abs(ax))
except:
pass
return int(ax)
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def marginIds( Rxdims, Mw ):
Li = np.zeros(2, int); Ri = Li.copy(); Bi = Li.copy(); Ti = Li.copy()
Li[0]= 0
Li[1]= max( int( np.ceil(Mw[0]*Rxdims[1]-1) ), 1 ) # These can never be -1.
Ri[0]= min( int((1.-Mw[1])*Rxdims[1]+1), Rxdims[1]-1 )
Ri[1]= Rxdims[1]
Bi[0]= min( int((1.-Mw[2])*Rxdims[0]+1), Rxdims[0]-1 )
Bi[1]= Rxdims[0]
Ti[0]= 0
Ti[1]= max( int( np.ceil(Mw[3]*Rxdims[0]-1) ), 1 ) # These can never be -1.
return Li, Ri, Bi, Ti
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def applyMargins( Rx, Mw, Mr, Mh ):
Rxdims = np.shape(Rx)
if( Mw.count(None) == 0 ):
print(' Zero (or non-zero) margins: L={}, R={}, B={}, T={}'.format(Mw[0],Mw[1],Mw[2],Mw[3]))
L12, R12, B12, T12 = marginIds( Rxdims, Mw )
L1 = L12[0]; L2 = L12[1]
R1 = R12[0]; R2 = R12[1]
B1 = B12[0]; B2 = B12[1]
T1 = T12[0]; T2 = T12[1]
#print('Margin\nL:{},{},R:{},{},T:{},{},B:{},{}'.format(L1,L2,R1,R2,T1,T2,B1,B2))
if( not all( L12 == 0 ) ): Rx[:,L1:L2] = Mh[0]
if( not all( R12 == 0 ) ): Rx[:,R1:R2] = Mh[1]
if( not all( T12 == 0 ) ): Rx[T1:T2,:] = Mh[3]
if( not all( B12 == 0 ) ): Rx[B1:B2,:] = Mh[2]
else:
L1=0; L2=1
R1=Rxdims[1]-1; R2=Rxdims[1]
B1=Rxdims[0]-1; B2=Rxdims[0]
T1=0; T2=1
if( Mr.count(None) == 0 ):
print(' Ramp margins: L={}, R={}, B={}, T={}'.format(Mr[0],Mr[1],Mr[2],Mr[3]))
dL = int(Mr[0]*Rxdims[1]); dR = int(Mr[1]*Rxdims[1])
dB = int(Mr[2]*Rxdims[0]); dT = int(Mr[3]*Rxdims[0])
L11 = max(L2-1,0) ; L22 = L2+dL
R11 = R1-dR ; R22 = min(R1+1, Rxdims[1])
B11 = B1-dB ; B22 = min(B1+1, Rxdims[0])
T11 = max(T2-1,0) ; T22 = T2+dT
#print('Ramp\nL:{},{},R:{},{},T:{},{},B:{},{}'.format(L11,L22,R11,R22,T11,T22,B11,B22))
if( dL != 0 ):
if( (Mw[0] is None) or (Mw[0] ==0.) ):
Rx = applyRamp( Rx, L11, L22, 1, 0, Mh )
else:
Rx = applyRamp( Rx, L11, L22, 1, 0 )
if( dR != 0 ):
if( (Mw[1] is None) or (Mw[1] ==0.) ):
Rx = applyRamp( Rx, R11, R22, 1, 1, Mh )
else:
Rx = applyRamp( Rx, R11, R22, 1, 1 )
if( dB != 0 ):
if( (Mw[2] is None) or (Mw[2] ==0.) ):
Rx = applyRamp( Rx, B11, B22, 0, 1, Mh )
else:
Rx = applyRamp( Rx, B11, B22, 0, 1 )
if( dT != 0 ):
if( (Mw[3] is None) or (Mw[3] ==0.) ):
Rx = applyRamp( Rx, T11, T22, 0, 0, Mh )
else:
Rx = applyRamp( Rx, T11, T22, 0, 0 )
return Rx
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def applyRamp( Rz, L1, L2, LeftRight, End, Mh=None ):
dL = (L2-L1)
w = np.arange( L1, L2 ).astype(float)
w -= np.min(w); w /= np.max(w)
w *= np.pi ; w -= (np.pi/2.)
w = np.sin(w)/2. + 0.5
if ( LeftRight and not End ): # Left
if( Mh is None ): Rm = Rz[:,L1]
else: Rm = Mh[0]
#
elif( LeftRight and End ): # Right
if( Mh is None ): Rm = Rz[:,L2]
else: Rm = Mh[1]
elif( not LeftRight and End ): # Bottom
if( Mh is None ): Rm = Rz[L2,:]
else: Rm = Mh[2]
else: # Top
if( Mh is None ): Rm = Rz[L1,:]
else: Rm = Mh[3]
if( End ):
w = (1.-w)
#print(' w = {}, len(w) = {}, len(dL) = {}'.format(w,len(w),dL))
if( LeftRight ):
for i in range(dL):
Rz[:,L1+i] = w[i]*Rz[:,L1+i] + (1.-w[i])*Rm
else: # TopBottom
for i in range(dL):
Rz[L1+i,:] = w[i]*Rz[L1+i,:] + (1.-w[i])*Rm
return Rz
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def filterAndScale(Rxo, Rx, filterInfo, sx=1.0, ix=None, jx=None):
# Check if the indecies are explicitly given.
inxOn = True
if( ix is None or jx is None ):
inxOn = False
if( filterInfo.count(None) == 0):
if( 'user' in filterInfo[0] ):
nI = int(filterInfo[1])
for i in range(nI):
ftmp = raw_input(' Enter <method>, <num> = ').split(',')
if( i == 0 and inxOn ): Rxf = applyFilter(Rx[ix,jx], ftmp)
else: Rxf = applyFilter(Rx, ftmp)
Rx = Rxf.copy()
Rx = None
else:
if( inxOn ): Rxf = applyFilter(Rx[ix,jx], filterInfo)
else: Rxf = applyFilter(Rx, filterInfo)
Rx = None
Rxo += sx*Rxf
else:
if( inxOn ):
Rxo += sx*Rx[ix,jx]
else:
Rxo += sx*Rx
return Rxo
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def applyFilter(Rx, filterInfo ):
import scipy.ndimage as sn # contains the filters
if( 'gauss' in filterInfo[0] ):
try:
Nf = float(filterInfo[1])
except:
print(' Failed to obtain <sigma> for the Gaussian filter. Exiting.')
sys.exit(1)
else:
try:
Nf = int(filterInfo[1])
except:
print(' Failed to obtain <size> for the filters. Exiting.')
sys.exit(1)
if( 'median' in filterInfo[0] ):
print(' Median {0}x{0} filter applied. '.format(Nf))
Rf = sn.median_filter(Rx, size=Nf)
elif( 'perc' in filterInfo[0] ):
print(' Percentile 60 {0}x{0} filter applied. '.format(Nf))
Rf = sn.percentile_filter(Rx, 60, size=Nf)
elif( 'rank' in filterInfo[0] ):
print(' Rank 5 {0}x{0} filter applied. '.format(Nf))
Rf = sn.rank_filter(Rx, 5, size=Nf)
elif( 'gauss' in filterInfo[0] ):
print(' Gaussian sigma={} filter applied. '.format(Nf))
Rf = sn.gaussian_filter(Rx, sigma=Nf)
elif( 'local' in filterInfo[0] ):
print(' Local mean {0}x{0} filter applied. '.format(Nf))
Rf = sn.uniform_filter(Rx, size=Nf)
elif( 'max' in filterInfo[0] ):
print('Max {0}x{0} filter applied. '.format(Nf))
Rf = sn.maximum_filter(Rx, size=Nf)
else:
print(' No filter applied. ')
Rf = Rx
return Rf
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def labelRaster(R, maskId=None):
import scipy.ndimage.measurements as snms
Rm = np.zeros( R.shape, type(R[0,0]) )
if( maskId is not None ):
mIds = list()
if( isinstance( maskId, list) ):
mIds.extend(maskId)
elif( isinstance( maskId, int ) ):
mIds.append(maskId)
else:
sys.exit(' Error in labelRaster: maskId is not a list or int. It is {}'.format(type(maskId)))
idx = np.zeros( R.shape , bool )
for im in mIds:
idx = np.maximum( idx , (R == im ) )
Rm[idx] = R[idx] # Set desired mask values
Rl, shapeCount = snms.label(Rm) # this might be slow for unfiltered data
Rm = None
print(' Found {} shapes from the data.'.format(shapeCount))
return Rl, shapeCount
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def splitLabels(R, shapeCount, labelsize):
import scipy.ndimage as sn
for i in range(1,shapeCount+1):
nlabels = np.count_nonzero(R==i)
if (nlabels > labelsize):
print(' Label no. '+str(i)+' will be split. Original size: '+str(nlabels)+' px.')
nleft = nlabels
while (nleft > labelsize):
L = R==i
LU = np.zeros(L.shape,dtype=bool)
Lnz = np.nonzero(L)
LU[(Lnz[0][0],Lnz[1][0])] = True
nnonzero = np.count_nonzero(LU)
while ( nnonzero < labelsize):
VLU = LU
vnnonzero = nnonzero
LU = sn.binary_dilation(VLU)*L
if (VLU==LU).all():
break
nnonzero = np.count_nonzero(LU)
shapeCount=shapeCount+1
R[VLU]=shapeCount
print(' Created new label no. '+str(shapeCount)+' at size '+str(vnnonzero)+' px.')
nleft = nleft - vnnonzero
print(' Label no. '+str(i)+' is now at size '+str(nleft)+' px.')
return R, shapeCount
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def openTifAsNumpy(tifFile):
from PIL import Image
Image.MAX_IMAGE_PIXELS = 1800000000
im = Image.open(tifFile)
#im.show()
a = np.array(im)
return a
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def numpyArray2Tif( arr ):
from PIL import Image
return Image.fromarray( arr )
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def farFieldIds( xc, exclx ):
# Exclude the given percentile of the nearest upstream field.
exclx = max( 1. , exclx )
exclx = min( 99., exclx )
clw = exclx / 100.
xth = (clw)*np.max(xc) + (1.-clw)*np.min(xc)
idx = (xc < xth )
return idx
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def farFieldMean( dat, Xcoord, excludeNearest ):
if( len(dat) != len(Xcoord) ):
sys.exit(' Error! The data and coord arrays are different length. Exiting ...')
idx = farFieldIds( Xcoord, excludeNearest )
return np.mean( dat[idx] )
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def canopyBetaFunction(height,dpz,alpha,beta,lai):
'''
Calculate lead area index using beta probability density function
(Markkanen et al., 2003, BLM 106, 437-459).
'''
from scipy.stats import beta as betadist
z_col=np.arange(0.,height+dpz[2],dpz[2]) # BUG HERE: np.arange(0.,height/dpz[2],dpz[2])
z_col=np.divide(z_col,height) # z/H
#print(' z_col (2) = {}'.format(z_col))
lad_d = betadist.pdf(z_col,alpha,beta)/height
#print(' lad_d = {}'.format(lad_d))
lad=np.multiply(lad_d,lai)
return lad
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def totalArea( Rdims, dx ):
#Calculate total area of the domain
Npx = np.prod( Rdims ) # Number of pixels
At = Npx*np.abs(np.prod(dx))
return At
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def frontalAreas( Ri, hclip=1. ):
# Calculate frontal areas of the domain
Ae = 0.
for i in range( Ri.shape[0] ):
ce = (Ri[i,1:] > Ri[i,:-1]).astype(float)
he = (Ri[i,1:]-Ri[i,:-1]); he[(he<hclip)] = 0. # Height, clip out non-buildings
Ae += np.sum( ce * he )
An = 0.
for j in range( Ri.shape[1] ):
cn = (Ri[1:,j] > Ri[:-1,j]).astype(float)
hn = (Ri[1:,j]-Ri[:-1,j]); hn[(hn<hclip)] = 0.
An += np.sum( cn* hn )
return Ae, An
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def maskMeanValues(Rm, Ri, mlist):
dims = np.shape(mlist)
m_mean = np.zeros(dims)
m_var = np.zeros(dims)
m_std = np.zeros(dims)
j = 0
for im in mlist:
idm = (Rm == im)
m_mean[j] = np.mean( Ri[idm] )
m_var[j] = np.var( Ri[idm] )
m_std[j] = np.std( Ri[idm] )
print(' Mask {} mean, var, std = {:.2f}, {:.2f}, {:.2f} '.format(im, m_mean[j], m_var[j], m_std[j]))
j += 1
return m_mean, m_var, m_std
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def planAreaFractions( Ri, mlist ):
Npx = np.prod( np.array(Ri.shape) )
r = np.zeros( np.shape(mlist) )
j = 0
for im in mlist:
r[j] = np.count_nonzero( Ri == im )/float( Npx )
print(' Mask {} plan area fraction = {:.2f} '.format(im, r[j]))
j += 1
return r
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def replaceByNans( Rt, a, b ):
if( a is not None ):
idr = (Rt > a )
Rt[idr] = np.nan
if( b is not None ):
idr = (Rt < b )
Rt[idr] = np.nan
return Rt
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def interpolateOverNans( R ):
idOk = ~np.isnan(R)
xp = idOk.ravel().nonzero()[0] # An increasing list of indices
fp = R[idOk] # Corresponding list of values
x = np.isnan(R).ravel().nonzero()[0] # Indices for which values are missing
R[~idOk] = np.interp(x, xp, fp) # Fill in values where missing.
return R
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def slowCoarsen(R1,R2dims,s,n1,n2,e1,e2,Rtype):
'''Function to coarsen an integer field using the most common value. This is relatively slow for strong coarsenings.'''
from scipy import stats
print(' Coarsening using mode as value.')
maxDims = np.array(np.shape(R1))
RT = np.zeros( np.append(R2dims,int(np.round(1/s))), Rtype )
i = np.zeros(R2dims,int)
for k in range(maxDims[0]):
#print(' k={}'.format(k))
for l in range(maxDims[1]):
RT[ n2[k], e2[l], i[n2[k], e2[l]]] = R1[ n1[k] ,e1[l] ]
#try:
# RT[ n2[k], e2[l], i[n2[k], e2[l]]] = R[ n1[k] ,e1[l] ]
#except IndexError:
# sys.exit("ERROR: Incorrect index in RT or R array. Exiting. (slowCoarsen)")
i[n2[k], e2[l]] += 1
print(' Computing mode ... ')
RTT,_ = stats.mode(RT,axis=2)
print(' ... done!')
return RTT[:,:,0]
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def fastCoarsen(R,Rdims,s,n1,n2,e1,e2,Rtype):
'''Function to coarsen a field using the most common value. This is relatively fast for strong coarsenings.'''
print(' Coarsening with mean value.')
maxDims = np.array(np.shape(R))
R2 = np.zeros( Rdims, Rtype ) # Create the output array.
for k in range(maxDims[0]):
for l in range(maxDims[1]):
R2[ n2[k], e2[l] ] += R[ n1[k] ,e1[l] ]
R2 *= s
return R2
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
|
mjsauvinen/P4UL
|
pyLib/mapTools.py
|
Python
|
mit
| 23,790 | 0.042917 |
#!/usr/bin/env python
from __future__ import print_function
import sys
import re
from utils import CDNEngine
from utils import request
if sys.version_info >= (3, 0):
import subprocess as commands
import urllib.parse as urlparse
else:
import commands
import urlparse
def detect(hostname):
"""
Performs CDN detection thanks to information disclosure from server error.
Parameters
----------
hostname : str
Hostname to assess
"""
print('[+] Error server detection\n')
hostname = urlparse.urlparse(hostname).netloc
regexp = re.compile('\\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\\b')
out = commands.getoutput("host " + hostname)
addresses = regexp.finditer(out)
for addr in addresses:
res = request.do('http://' + addr.group())
if res is not None and res.status_code == 500:
CDNEngine.find(res.text.lower())
|
Nitr4x/whichCDN
|
plugins/ErrorServerDetection/behaviors.py
|
Python
|
mit
| 907 | 0.00882 |
# BurnMan - a lower mantle toolkit
# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.
"""
example_user_input_material
---------------------------
Shows user how to input a mineral of his/her choice without usint the library and which physical values
need to be input for BurnMan to calculate :math:`V_P, V_\Phi, V_S` and density at depth.
*Specifically uses:*
* :class:`burnman.mineral.Mineral`
*Demonstrates:*
* how to create your own minerals
"""
import os, sys, numpy as np, matplotlib.pyplot as plt
#hack to allow scripts to be placed in subdirectories next to burnman:
if not os.path.exists('burnman') and os.path.exists('../burnman'):
sys.path.insert(1,os.path.abspath('..'))
import burnman
# A note about units: all the material parameters are expected to be in plain SI units.
# This means that the elastic moduli should be in Pascals and NOT Gigapascals,
# and the Debye temperature should be in K not C. Additionally, the reference volume
# should be in m^3/(mol molecule) and not in unit cell volume and 'n' should be
# the number of atoms per molecule. Frequently in the literature the reference volume
# is given in Angstrom^3 per unit cell. To convert this to m^3/(mol of molecule)
#you should multiply by 10^(-30) * N_a / Z, where N_a is Avogadro's number and Z is the number of
# atoms per unit cell. You can look up Z in many places, including www.mindat.org
if __name__ == "__main__":
### input variables ###
#######################
#INPUT for method
""" choose 'slb2' (finite-strain 2nd order shear modulus,
stixrude and lithgow-bertelloni, 2005)
or 'slb3 (finite-strain 3rd order shear modulus,
stixrude and lithgow-bertelloni, 2005)
or 'mgd3' (mie-gruneisen-debeye 3rd order shear modulus,
matas et al. 2007)
or 'mgd2' (mie-gruneisen-debeye 2nd order shear modulus,
matas et al. 2007)
or 'bm2' (birch-murnaghan 2nd order, if you choose to ignore temperature
(your choice in geotherm will not matter in this case))
or 'bm3' (birch-murnaghan 3rd order, if you choose to ignore temperature
(your choice in geotherm will not matter in this case))"""
method = 'slb3'
#in form name_of_mineral (burnman.mineral <- creates list with parameters)
class own_material (burnman.Mineral):
def __init__(self):
self.params = {
'name': 'myownmineral',
'equation_of_state': method,
'V_0': 10.844e-6, #Molar volume [m^3/(mole molecules)]
#at room pressure/temperature
'K_0': 135.19e9, #Reference bulk modulus [Pa]
#at room pressure/temperature
'Kprime_0': 6.04, #pressure derivative of bulk modulus
'G_0': 175.0e9, #reference shear modulus
#at room pressure/temperature
'Gprime_0': 1.7, #pressure derivative of shear modulus
'molar_mass': .055845, #molar mass in units of [kg/mol]
'n': 1, #number of atoms per formula unit
'Debye_0': 998.85, #Debye temperature for material.
#See Stixrude & Lithgow-Bertelloni, 2005 for values
'grueneisen_0': 1.368, #Gruneisen parameter for material.
#See Stixrude & Lithgow-Bertelloni, 2005 for values
'q_0': 0.917, #isotropic strain derivative of gruneisen
#parameter. Values in Stixrude & Lithgow-Bertelloni, 2005
'eta_s_0': 3.0 #full strain derivative of gruneisen parameter
#parameter. Values in Stixrude & Lithgow-Bertelloni, 2005
}
burnman.Mineral.__init__(self)
rock = own_material()
#seismic model for comparison: (see burnman/seismic.py)
seismic_model = burnman.seismic.PREM() # pick from .prem() .slow() .fast()
number_of_points = 20 #set on how many depth slices the computations should be done
depths = np.linspace(700e3,2800e3, number_of_points)
#depths = seismic_model.internal_depth_list()
seis_p, seis_rho, seis_vp, seis_vs, seis_vphi = seismic_model.evaluate_all_at(depths)
temperature = burnman.geotherm.brown_shankland(seis_p)
# The next line is not required here, because the method is set automatically by defining 'equation_of_state' in mineral.params. This shows an alternative way to set the method later, or reset the method to a different one.
rock.set_method(method)
print "Calculations are done for:"
rock.debug_print()
mat_rho, mat_vp, mat_vs, mat_vphi, mat_K, mat_G = \
burnman.velocities_from_rock(rock, seis_p, temperature, \
burnman.averaging_schemes.VoigtReussHill())
[vs_err, vphi_err, rho_err]= \
burnman.compare_chifactor([mat_vs,mat_vphi,mat_rho], [seis_vs,seis_vphi,seis_rho])
print vs_err, vphi_err, rho_err
|
QuLogic/burnman
|
examples/example_user_input_material.py
|
Python
|
gpl-2.0
| 4,958 | 0.013312 |
import sys, os, json
from pathlib import Path
from acmacs_py.mapi_utils import MapiSettings
# ======================================================================
class CladeData:
sSubtypeToCladePrefix = {"h1pdm": "clades-A(H1N1)2009pdm", "h3": "clades-A(H3N2)", "bvic": "clades-B/Vic", "byam": "clades-B/Yam"}
def __init__(self):
self.mapi_settings = MapiSettings("clades.mapi")
def entry_names_for_subtype(self, subtype):
subtype_prefix = self.sSubtypeToCladePrefix[subtype]
names = sorted(name for name in self.mapi_settings.names() if name.startswith(subtype_prefix))
return names
def chart_draw_modify(self, *args, **kw):
self.mapi_settings.chart_draw_modify(*args, **kw)
def chart_draw_reset(self, *args, **kw):
self.mapi_settings.chart_draw_reset(*args, **kw)
# ======================================================================
def load(app):
app["clade_data"] = CladeData()
# ======================================================================
|
acorg/acmacs-whocc
|
web/chains-202105/py/directories.py
|
Python
|
mit
| 1,044 | 0.004789 |
from __future__ import division
from itertools import chain
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
import pandas as pd
from fisher import pvalue
import re
import collections
from nltk.stem.porter import PorterStemmer
import math
from percept.tasks.base import Task
from percept.fields.base import Complex, List, Dict, Float
from inputs.inputs import SimpsonsFormats
from percept.utils.models import RegistryCategories, get_namespace
from percept.conf.base import settings
import os
from percept.tasks.train import Train
from sklearn.ensemble import RandomForestClassifier
import pickle
import random
import logging
log = logging.getLogger(__name__)
MAX_FEATURES = 500
DISTANCE_MIN=1
CHARACTER_DISTANCE_MIN = .2
RESET_SCENE_EVERY = 5
def make_df(datalist, labels, name_prefix=""):
df = pd.DataFrame(datalist).T
if name_prefix!="":
labels = [name_prefix + "_" + l for l in labels]
labels = [l.replace(" ", "_").lower() for l in labels]
df.columns = labels
df.index = range(df.shape[0])
return df
def return_one():
return 1
class SpellCorrector(object):
"""
Taken and slightly adapted from peter norvig's post at http://norvig.com/spell-correct.html
"""
alphabet = 'abcdefghijklmnopqrstuvwxyz'
punctuation = [".", "!", "?", ","]
def __init__(self):
self.NWORDS = self.train(self.words(file(os.path.join(settings.PROJECT_PATH,'data/big.txt')).read()))
self.cache = {}
def words(self, text):
return re.findall('[a-z]+', text.lower())
def train(self, features):
model = collections.defaultdict(return_one)
for f in features:
model[f] += 1
return model
def edits1(self, word):
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [a + b[1:] for a, b in splits if b]
transposes = [a + b[1] + b[0] + b[2:] for a, b in splits if len(b)>1]
replaces = [a + c + b[1:] for a, b in splits for c in self.alphabet if b]
inserts = [a + c + b for a, b in splits for c in self.alphabet]
return set(deletes + transposes + replaces + inserts)
def known_edits2(self, word):
return set(e2 for e1 in self.edits1(word) for e2 in self.edits1(e1) if e2 in self.NWORDS)
def known(self, words): return set(w for w in words if w in self.NWORDS)
def correct(self, word):
if word in self.cache:
return self.cache[word]
suffix = ""
for p in self.punctuation:
if word.endswith(p):
suffix = p
word = word[:-1]
candidates = self.known([word]) or self.known(self.edits1(word)) or self.known_edits2(word) or [word]
newword = max(candidates, key=self.NWORDS.get) + suffix
self.cache.update({word : newword})
return newword
class Vectorizer(object):
def __init__(self):
self.fit_done = False
def fit(self, input_text, input_scores, max_features=100, min_features=3):
self.spell_corrector = SpellCorrector()
self.stemmer = PorterStemmer()
new_text = self.batch_generate_new_text(input_text)
input_text = [input_text[i] + new_text[i] for i in xrange(0,len(input_text))]
self.vectorizer1 = CountVectorizer(ngram_range=(1,2), min_df = min_features/len(input_text), max_df=.4, stop_words="english")
self.vectorizer1.fit(input_text)
self.vocab = self.get_vocab(input_text, input_scores, max_features)
self.vectorizer = CountVectorizer(ngram_range=(1,2), vocabulary=self.vocab)
self.fit_done = True
self.input_text = input_text
def spell_correct_text(self, text):
text = text.lower()
split = text.split(" ")
corrected = [self.spell_corrector.correct(w) for w in split]
return corrected
def batch_apply(self, all_tokens, applied_func):
for key in all_tokens:
cor = applied_func(all_tokens[key])
all_tokens[key] = cor
return all_tokens
def batch_generate_new_text(self, text):
text = [re.sub("[^A-Za-z0-9]", " ", t.lower()) for t in text]
text = [re.sub("\s+", " ", t) for t in text]
t_tokens = [t.split(" ") for t in text]
all_token_list = list(set(chain.from_iterable(t_tokens)))
all_token_dict = {}
for t in all_token_list:
all_token_dict.update({t : t})
all_token_dict = self.batch_apply(all_token_dict, self.stemmer.stem)
all_token_dict = self.batch_apply(all_token_dict, self.stemmer.stem)
for i in xrange(0,len(t_tokens)):
for j in xrange(0,len(t_tokens[i])):
t_tokens[i][j] = all_token_dict.get(t_tokens[i][j], t_tokens[i][j])
new_text = [" ".join(t) for t in t_tokens]
return new_text
def generate_new_text(self, text):
no_punctuation = re.sub("[^A-Za-z0-9]", " ", text.lower())
no_punctuation = re.sub("\s+", " ", no_punctuation)
corrected = self.spell_correct_text(no_punctuation)
corrected = [self.stemmer.stem(w) for w in corrected]
new = " ".join(corrected)
return new
def get_vocab(self, input_text, input_scores, max_features):
train_mat = self.vectorizer1.transform(input_text)
input_score_med = np.median(input_scores)
new_scores = [0 if i<=input_score_med else 1 for i in input_scores]
ind_max_features = math.floor(max_features/max(input_scores))
all_vocab = []
all_cols = [np.asarray(train_mat.getcol(i).todense().transpose())[0] for i in xrange(0,train_mat.shape[1])]
for s in xrange(0,max(input_scores)):
sel_inds = [i for i in xrange(0,len(input_scores)) if input_scores[i]==s]
out_inds = [i for i in xrange(0,len(input_scores)) if input_scores[i]!=s]
pvalues = []
for i in xrange(0,len(all_cols)):
lcol = all_cols[i]
good_lcol = lcol[sel_inds]
bad_lcol = lcol[out_inds]
good_lcol_present = len(good_lcol[good_lcol > 0])
good_lcol_missing = len(good_lcol[good_lcol == 0])
bad_lcol_present = len(bad_lcol[bad_lcol > 0])
bad_lcol_missing = len(bad_lcol[bad_lcol == 0])
pval = pvalue(good_lcol_present, bad_lcol_present, good_lcol_missing, bad_lcol_missing)
pvalues.append(pval.two_tail)
col_inds = list(xrange(0,train_mat.shape[1]))
p_frame = pd.DataFrame(np.array([col_inds, pvalues]).transpose(), columns=["inds", "pvalues"])
p_frame = p_frame.sort(['pvalues'], ascending=True)
getVar = lambda searchList, ind: [searchList[int(i)] for i in ind]
vocab = getVar(self.vectorizer1.get_feature_names(), p_frame['inds'][:ind_max_features+2])
all_vocab.append(vocab)
return list(set(list(chain.from_iterable(all_vocab))))
def batch_get_features(self, text):
if not self.fit_done:
raise Exception("Vectorizer has not been created.")
new_text = self.batch_generate_new_text(text)
text = [text[i] + new_text[i] for i in xrange(0,len(text))]
return (self.vectorizer.transform(text).todense())
def get_features(self, text):
if not self.fit_done:
raise Exception("Vectorizer has not been created.")
itext=text
if isinstance(text, list):
itext = text[0]
new_text = self.generate_new_text(itext)
if isinstance(text, list):
text = [text[0] + new_text]
else:
text = [text + new_text]
return (self.vectorizer.transform(text).todense())
class FeatureExtractor(Task):
data = Complex()
row_data = List()
speaker_code_dict = Dict()
speaker_codes = List()
vectorizer = Complex()
data_format = SimpsonsFormats.dataframe
category = RegistryCategories.preprocessors
namespace = get_namespace(__module__)
help_text = "Cleanup simpsons scripts."
args = {'scriptfile' : os.path.abspath(os.path.join(settings.DATA_PATH, "script_tasks"))}
def train(self, data, target, **kwargs):
"""
Used in the training phase. Override.
"""
self.data = self.predict(data, **kwargs)
def predict(self, data, **kwargs):
"""
Used in the predict phase, after training. Override
"""
scriptfile = kwargs.get('scriptfile')
script_data = pickle.load(open(scriptfile))
script = script_data.tasks[2].voice_lines.value
speakers = []
lines = []
for s in script:
for (i,l) in enumerate(s):
if i>0:
previous_line = s[i-1]['line']
previous_speaker = s[i-1]['speaker']
else:
previous_line = ""
previous_speaker = ""
if i>1:
two_back_speaker = s[i-2]['speaker']
else:
two_back_speaker = ""
if len(s)>i+1:
next_line = s[i+1]['line']
else:
next_line = ""
current_line = s[i]['line']
current_speaker = s[i]['speaker']
lines.append(current_line)
speakers.append(current_speaker)
row_data = {
'previous_line' : previous_line,
'previous_speaker' : previous_speaker,
'next_line' : next_line,
'current_line' : current_line,
'current_speaker' : current_speaker,
'two_back_speaker' : two_back_speaker
}
self.row_data.append(row_data)
self.speaker_code_dict = {k:i for (i,k) in enumerate(list(set(speakers)))}
self.speaker_codes = [self.speaker_code_dict[s] for s in speakers]
self.max_features = math.floor(MAX_FEATURES)/3
self.vectorizer = Vectorizer()
self.vectorizer.fit(lines, self.speaker_codes, self.max_features)
prev_features = self.vectorizer.batch_get_features([rd['previous_line'] for rd in self.row_data])
cur_features = self.vectorizer.batch_get_features([rd['current_line'] for rd in self.row_data])
next_features = self.vectorizer.batch_get_features([rd['next_line'] for rd in self.row_data])
self.speaker_code_dict.update({'' : -1})
meta_features = make_df([[self.speaker_code_dict[s['two_back_speaker']] for s in self.row_data], [self.speaker_code_dict[s['previous_speaker']] for s in self.row_data], self.speaker_codes],["two_back_speaker", "previous_speaker", "current_speaker"])
#meta_features = make_df([[self.speaker_code_dict[s['two_back_speaker']] for s in self.row_data], self.speaker_codes],["two_back_speaker", "current_speaker"])
train_frame = pd.concat([pd.DataFrame(prev_features),pd.DataFrame(cur_features),pd.DataFrame(next_features),meta_features],axis=1)
train_frame.index = range(train_frame.shape[0])
data = {
'vectorizer' : self.vectorizer,
'speaker_code_dict' : self.speaker_code_dict,
'train_frame' : train_frame,
'speakers' : make_df([speakers,self.speaker_codes, lines], ["speaker", "speaker_code", "line"]),
'data' : data,
'current_features' : cur_features,
}
return data
class RandomForestTrain(Train):
"""
A class to train a random forest
"""
colnames = List()
clf = Complex()
category = RegistryCategories.algorithms
namespace = get_namespace(__module__)
algorithm = RandomForestClassifier
args = {'n_estimators' : 300, 'min_samples_leaf' : 4, 'compute_importances' : True}
help_text = "Train and predict with Random Forest."
class KNNRF(Task):
data = Complex()
predictions = Complex()
importances = Complex()
data_format = SimpsonsFormats.dataframe
category = RegistryCategories.preprocessors
namespace = get_namespace(__module__)
args = {'algo' : RandomForestTrain}
help_text = "Cleanup simpsons scripts."
def train(self, data, target, **kwargs):
"""
Used in the training phase. Override.
"""
self.data = self.predict(data, **kwargs)
def predict(self, data, **kwargs):
"""
Used in the predict phase, after training. Override
"""
from preprocess import CHARACTERS
vec_length = math.floor(MAX_FEATURES/3)
algo = kwargs.get('algo')
alg = algo()
train_data = data['train_frame'].iloc[:,:-1]
target = data['train_frame']['current_speaker']
clf = alg.train(train_data,target, **algo.args)
self.importances=clf.feature_importances_
test_data = data['data']
match_data = data['current_features']
reverse_speaker_code_dict = {data['speaker_code_dict'][k] : k for k in data['speaker_code_dict']}
speaker_list = []
speaker_codes = reverse_speaker_code_dict.keys()
for i in xrange(0,len(speaker_codes)):
s_text = "\n".join(list(data['speakers'][data['speakers']['speaker']==reverse_speaker_code_dict[speaker_codes[i]]]['line']))
speaker_list.append(s_text)
speaker_features = data['vectorizer'].batch_get_features(speaker_list)
self.predictions = []
counter = 0
for script in test_data['voice_script']:
counter+=1
log.info("On script {0} out of {1}".format(counter,len(test_data['voice_script'])))
lines = script.split("\n")
speaker_code = [-1 for i in xrange(0,len(lines))]
for (i,line) in enumerate(lines):
if i>0 and i%RESET_SCENE_EVERY!=0:
previous_line = lines[i-1]
previous_speaker = speaker_code[i-1]
else:
previous_line = ""
previous_speaker= -1
if i>1 and i%RESET_SCENE_EVERY!=0:
two_back_speaker = speaker_code[i-2]
else:
two_back_speaker = -1
if i<(len(lines)-1):
next_line = lines[i+1]
else:
next_line = ""
prev_features = data['vectorizer'].get_features(previous_line)
cur_features = data['vectorizer'].get_features(line)
next_features = data['vectorizer'].get_features(next_line)
meta_features = make_df([[two_back_speaker], [previous_speaker]],["two_back_speaker", "previous_speaker"])
#meta_features = make_df([[two_back_speaker]],["two_back_speaker"])
train_frame = pd.concat([pd.DataFrame(prev_features),pd.DataFrame(cur_features),pd.DataFrame(next_features), meta_features],axis=1)
speaker_code[i] = alg.predict(train_frame)[0]
nearest_match, distance = self.find_nearest_match(cur_features, speaker_features)
if distance<CHARACTER_DISTANCE_MIN:
sc = speaker_codes[nearest_match]
speaker_code[i] = sc
continue
for k in CHARACTERS:
for c in CHARACTERS[k]:
if c in previous_line:
speaker_code[i] = data['speaker_code_dict'][k]
nearest_match, distance = self.find_nearest_match(cur_features,match_data)
if distance<DISTANCE_MIN:
sc = data['speakers']['speaker_code'][nearest_match]
speaker_code[i] = sc
continue
df = make_df([lines,speaker_code,[reverse_speaker_code_dict[round(s)] for s in speaker_code]],["line","speaker_code","speaker"])
self.predictions.append(df)
return data
def find_nearest_match(self, features, matrix):
features = np.asarray(features)
distances = [self.euclidean(u, features) for u in matrix]
nearest_match = distances.index(min(distances))
return nearest_match, min(distances)
def euclidean(self, v1, v2):
return np.sqrt(np.sum(np.square(np.subtract(v1,v2))))
"""
p = tasks[3].predictions.value
speakers = []
lines = []
for pr in p:
speakers.append(list(pr['speaker']))
lines.append(list(pr['line']))
from itertools import chain
speakers = list(chain.from_iterable(speakers))
lines = list(chain.from_iterable(lines))
rows = []
for (s,l) in zip(speakers, lines):
rows.append({
'speaker' : s,
'line': l,
})
import json
json.dump(rows,open("/home/vik/vikparuchuri/simpsons-scripts/data/final_voice.json","w+"))
"""
|
VikParuchuri/simpsons-scripts
|
tasks/train.py
|
Python
|
apache-2.0
| 16,847 | 0.008429 |
# Encoding: UTF-8
"""Czech conjugation
"""
from spline.i18n.formatter import Formatter, BaseWord, parse_bool
class Word(BaseWord):
@classmethod
def guess_type(cls, word, **props):
if word.endswith(u'í'):
return SoftAdjective
elif word.endswith(u'ý'):
return HardAdjective
else:
return Word
class Adjective(Word):
def __init__(self, word):
self.root = word
_interesting_categories = 'gender number case'.split()
gender = 'm'
case = 1
number = 'sg'
def inflect(self, **props):
gender = props.get('gender', self.gender)
case = int(props.get('case', self.case))
number = props.get('number', self.number)
case_no = (case - 1) + (7 if (number == 'pl') else 0)
if gender == 'm':
if parse_bool(props.get('animate', True)):
return self.root + self.endings_ma[case_no]
else:
return self.root + self.endings_mi[case_no]
elif gender == 'f':
return self.root + self.endings_f[case_no]
else:
return self.root + self.endings_n[case_no]
class SoftAdjective(Adjective):
def __init__(self, word):
if word.endswith(u'í'):
self.root = word[:-1]
else:
self.root = word
endings_ma = u'í,ího,ímu,ího,í,ím,ím,í,ích,ím,í,í,ích,ími'.split(',')
endings_mi = u'í,ího,ímu,í,í,ím,ím,í,ích,ím,í,í,ích,ími'.split(',')
endings_f = u'í,í,í,í,í,í,í,í,ích,ím,í,í,ích,ími'.split(',')
endings_n = u'í,ího,ímu,í,í,ím,ím,í,ích,ím,í,í,ích,ími'.split(',')
class HardAdjective(Adjective):
def __init__(self, word):
if any(word.endswith(x) for x in u'ýáé'):
self.root = word[:-1]
else:
self.root = word
endings_ma = u'ý,ého,ému,ého,ý,ém,ým,í,ých,ým,é,í,ých,ými'.split(',')
endings_mi = u'ý,ého,ému,ý,ý,ém,ým,é,ých,ým,é,é,ých,ými'.split(',')
endings_f = u'á,é,é,ou,á,é,ou,é,ých,ým,é,é,ých,ými'.split(',')
endings_n = u'é,ého,ému,é,é,ém,ým,á,ých,ým,á,á,ých,ými'.split(',')
formatter = Formatter('cs', Word)
class Template(unicode):
def format(self, *args, **kwargs):
return formatter.format(self, *args, **kwargs)
|
veekun/spline
|
spline/i18n/cs/__init__.py
|
Python
|
mit
| 2,365 | 0.002668 |
#!/usr/bin/env python
"""
client module for memcached (memory cache daemon)
Overview
========
See U{the MemCached homepage<http://www.danga.com/memcached>} for more about memcached.
Usage summary
=============
This should give you a feel for how this module operates::
import memcache
mc = memcache.Client(['127.0.0.1:11211'], debug=0)
mc.set("some_key", "Some value")
value = mc.get("some_key")
mc.set("another_key", 3)
mc.delete("another_key")
mc.set("key", "1") # note that the key used for incr/decr must be a string.
mc.incr("key")
mc.decr("key")
The standard way to use memcache with a database is like this::
key = derive_key(obj)
obj = mc.get(key)
if not obj:
obj = backend_api.get(...)
mc.set(key, obj)
# we now have obj, and future passes through this code
# will use the object from the cache.
Detailed Documentation
======================
More detailed documentation is available in the L{Client} class.
"""
import sys
import socket
import time
import os
import re
try:
import cPickle as pickle
except ImportError:
import pickle
from binascii import crc32 # zlib version is not cross-platform
def cmemcache_hash(key):
return((((crc32(key) & 0xffffffff) >> 16) & 0x7fff) or 1)
serverHashFunction = cmemcache_hash
def useOldServerHashFunction():
"""Use the old python-memcache server hash function."""
global serverHashFunction
serverHashFunction = crc32
try:
from zlib import compress, decompress
_supports_compress = True
except ImportError:
_supports_compress = False
# quickly define a decompress just in case we recv compressed data.
def decompress(val):
raise _Error("received compressed data but I don't support compression (import error)")
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Original author: Evan Martin of Danga Interactive
__author__ = "Sean Reifschneider <jafo-memcached@tummy.com>"
__version__ = "1.48"
__copyright__ = "Copyright (C) 2003 Danga Interactive"
# http://en.wikipedia.org/wiki/Python_Software_Foundation_License
__license__ = "Python Software Foundation License"
SERVER_MAX_KEY_LENGTH = 250
# Storing values larger than 1MB requires recompiling memcached. If you do,
# this value can be changed by doing "memcache.SERVER_MAX_VALUE_LENGTH = N"
# after importing this module.
SERVER_MAX_VALUE_LENGTH = 1024*1024
class _Error(Exception):
pass
class _ConnectionDeadError(Exception):
pass
try:
# Only exists in Python 2.4+
from threading import local
except ImportError:
# TODO: add the pure-python local implementation
class local(object):
pass
_DEAD_RETRY = 30 # number of seconds before retrying a dead server.
_SOCKET_TIMEOUT = 3 # number of seconds before sockets timeout.
class Client(local):
"""
Object representing a pool of memcache servers.
See L{memcache} for an overview.
In all cases where a key is used, the key can be either:
1. A simple hashable type (string, integer, etc.).
2. A tuple of C{(hashvalue, key)}. This is useful if you want to avoid
making this module calculate a hash value. You may prefer, for
example, to keep all of a given user's objects on the same memcache
server, so you could use the user's unique id as the hash value.
@group Setup: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog
@group Insertion: set, add, replace, set_multi
@group Retrieval: get, get_multi
@group Integers: incr, decr
@group Removal: delete, delete_multi
@sort: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog,\
set, set_multi, add, replace, get, get_multi, incr, decr, delete, delete_multi
"""
_FLAG_PICKLE = 1<<0
_FLAG_INTEGER = 1<<1
_FLAG_LONG = 1<<2
_FLAG_COMPRESSED = 1<<3
_SERVER_RETRIES = 10 # how many times to try finding a free server.
# exceptions for Client
class MemcachedKeyError(Exception):
pass
class MemcachedKeyLengthError(MemcachedKeyError):
pass
class MemcachedKeyCharacterError(MemcachedKeyError):
pass
class MemcachedKeyNoneError(MemcachedKeyError):
pass
class MemcachedKeyTypeError(MemcachedKeyError):
pass
class MemcachedStringEncodingError(Exception):
pass
def __init__(self, servers, debug=0, pickleProtocol=0,
pickler=pickle.Pickler, unpickler=pickle.Unpickler,
pload=None, pid=None,
server_max_key_length=SERVER_MAX_KEY_LENGTH,
server_max_value_length=SERVER_MAX_VALUE_LENGTH,
dead_retry=_DEAD_RETRY, socket_timeout=_SOCKET_TIMEOUT,
cache_cas = False):
"""
Create a new Client object with the given list of servers.
@param servers: C{servers} is passed to L{set_servers}.
@param debug: whether to display error messages when a server can't be
contacted.
@param pickleProtocol: number to mandate protocol used by (c)Pickle.
@param pickler: optional override of default Pickler to allow subclassing.
@param unpickler: optional override of default Unpickler to allow subclassing.
@param pload: optional persistent_load function to call on pickle loading.
Useful for cPickle since subclassing isn't allowed.
@param pid: optional persistent_id function to call on pickle storing.
Useful for cPickle since subclassing isn't allowed.
@param dead_retry: number of seconds before retrying a blacklisted
server. Default to 30 s.
@param socket_timeout: timeout in seconds for all calls to a server. Defaults
to 3 seconds.
@param cache_cas: (default False) If true, cas operations will be
cached. WARNING: This cache is not expired internally, if you have
a long-running process you will need to expire it manually via
"client.reset_cas(), or the cache can grow unlimited.
@param server_max_key_length: (default SERVER_MAX_KEY_LENGTH)
Data that is larger than this will not be sent to the server.
@param server_max_value_length: (default SERVER_MAX_VALUE_LENGTH)
Data that is larger than this will not be sent to the server.
"""
local.__init__(self)
self.debug = debug
self.dead_retry = dead_retry
self.socket_timeout = socket_timeout
self.set_servers(servers)
self.stats = {}
self.cache_cas = cache_cas
self.reset_cas()
# Allow users to modify pickling/unpickling behavior
self.pickleProtocol = pickleProtocol
self.pickler = pickler
self.unpickler = unpickler
self.persistent_load = pload
self.persistent_id = pid
self.server_max_key_length = server_max_key_length
self.server_max_value_length = server_max_value_length
# figure out the pickler style
file = StringIO()
try:
pickler = self.pickler(file, protocol = self.pickleProtocol)
self.picklerIsKeyword = True
except TypeError:
self.picklerIsKeyword = False
def reset_cas(self):
"""
Reset the cas cache. This is only used if the Client() object
was created with "cache_cas=True". If used, this cache does not
expire internally, so it can grow unbounded if you do not clear it
yourself.
"""
self.cas_ids = {}
def set_servers(self, servers):
"""
Set the pool of servers used by this client.
@param servers: an array of servers.
Servers can be passed in two forms:
1. Strings of the form C{"host:port"}, which implies a default weight of 1.
2. Tuples of the form C{("host:port", weight)}, where C{weight} is
an integer weight value.
"""
self.servers = [_Host(s, self.debug, dead_retry=self.dead_retry,
socket_timeout=self.socket_timeout)
for s in servers]
self._init_buckets()
def get_stats(self, stat_args = None):
'''Get statistics from each of the servers.
@param stat_args: Additional arguments to pass to the memcache
"stats" command.
@return: A list of tuples ( server_identifier, stats_dictionary ).
The dictionary contains a number of name/value pairs specifying
the name of the status field and the string value associated with
it. The values are not converted from strings.
'''
data = []
for s in self.servers:
if not s.connect(): continue
if s.family == socket.AF_INET:
name = '%s:%s (%s)' % ( s.ip, s.port, s.weight )
else:
name = 'unix:%s (%s)' % ( s.address, s.weight )
if not stat_args:
s.send_cmd('stats')
else:
s.send_cmd('stats ' + stat_args)
serverData = {}
data.append(( name, serverData ))
readline = s.readline
while 1:
line = readline()
if not line or line.strip() == 'END': break
stats = line.split(' ', 2)
serverData[stats[1]] = stats[2]
return(data)
def get_slabs(self):
data = []
for s in self.servers:
if not s.connect(): continue
if s.family == socket.AF_INET:
name = '%s:%s (%s)' % ( s.ip, s.port, s.weight )
else:
name = 'unix:%s (%s)' % ( s.address, s.weight )
serverData = {}
data.append(( name, serverData ))
s.send_cmd('stats items')
readline = s.readline
while 1:
line = readline()
if not line or line.strip() == 'END': break
item = line.split(' ', 2)
#0 = STAT, 1 = ITEM, 2 = Value
slab = item[1].split(':', 2)
#0 = items, 1 = Slab #, 2 = Name
if slab[1] not in serverData:
serverData[slab[1]] = {}
serverData[slab[1]][slab[2]] = item[2]
return data
def flush_all(self):
'Expire all data currently in the memcache servers.'
for s in self.servers:
if not s.connect(): continue
s.send_cmd('flush_all')
s.expect("OK")
def debuglog(self, str):
if self.debug:
sys.stderr.write("MemCached: %s\n" % str)
def _statlog(self, func):
if func not in self.stats:
self.stats[func] = 1
else:
self.stats[func] += 1
def forget_dead_hosts(self):
"""
Reset every host in the pool to an "alive" state.
"""
for s in self.servers:
s.deaduntil = 0
def _init_buckets(self):
self.buckets = []
for server in self.servers:
for i in range(server.weight):
self.buckets.append(server)
def _get_server(self, key):
if isinstance(key, tuple):
serverhash, key = key
else:
serverhash = serverHashFunction(key)
for i in range(Client._SERVER_RETRIES):
server = self.buckets[serverhash % len(self.buckets)]
if server.connect():
#print "(using server %s)" % server,
return server, key
serverhash = serverHashFunction(str(serverhash) + str(i))
return None, None
def disconnect_all(self):
for s in self.servers:
s.close_socket()
def delete_multi(self, keys, time=0, key_prefix=''):
'''
Delete multiple keys in the memcache doing just one query.
>>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
>>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'}
1
>>> mc.delete_multi(['key1', 'key2'])
1
>>> mc.get_multi(['key1', 'key2']) == {}
1
This method is recommended over iterated regular L{delete}s as it reduces total latency, since
your app doesn't have to wait for each round-trip of L{delete} before sending
the next one.
@param keys: An iterable of keys to clear
@param time: number of seconds any subsequent set / update commands should fail. Defaults to 0 for no delay.
@param key_prefix: Optional string to prepend to each key when sending to memcache.
See docs for L{get_multi} and L{set_multi}.
@return: 1 if no failure in communication with any memcacheds.
@rtype: int
'''
self._statlog('delete_multi')
server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix)
# send out all requests on each server before reading anything
dead_servers = []
rc = 1
for server in server_keys.iterkeys():
bigcmd = []
write = bigcmd.append
if time != None:
for key in server_keys[server]: # These are mangled keys
write("delete %s %d\r\n" % (key, time))
else:
for key in server_keys[server]: # These are mangled keys
write("delete %s\r\n" % key)
try:
server.send_cmds(''.join(bigcmd))
except socket.error, msg:
rc = 0
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
dead_servers.append(server)
# if any servers died on the way, don't expect them to respond.
for server in dead_servers:
del server_keys[server]
for server, keys in server_keys.iteritems():
try:
for key in keys:
server.expect("DELETED")
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
rc = 0
return rc
def delete(self, key, time=0):
'''Deletes a key from the memcache.
@return: Nonzero on success.
@param time: number of seconds any subsequent set / update commands
should fail. Defaults to None for no delay.
@rtype: int
'''
self.check_key(key)
server, key = self._get_server(key)
if not server:
return 0
self._statlog('delete')
if time != None and time != 0:
cmd = "delete %s %d" % (key, time)
else:
cmd = "delete %s" % key
try:
server.send_cmd(cmd)
line = server.readline()
if line and line.strip() in ['DELETED', 'NOT_FOUND']: return 1
self.debuglog('Delete expected DELETED or NOT_FOUND, got: %s'
% repr(line))
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return 0
def incr(self, key, delta=1):
"""
Sends a command to the server to atomically increment the value
for C{key} by C{delta}, or by 1 if C{delta} is unspecified.
Returns None if C{key} doesn't exist on server, otherwise it
returns the new value after incrementing.
Note that the value for C{key} must already exist in the memcache,
and it must be the string representation of an integer.
>>> mc.set("counter", "20") # returns 1, indicating success
1
>>> mc.incr("counter")
21
>>> mc.incr("counter")
22
Overflow on server is not checked. Be aware of values approaching
2**32. See L{decr}.
@param delta: Integer amount to increment by (should be zero or greater).
@return: New value after incrementing.
@rtype: int
"""
return self._incrdecr("incr", key, delta)
def decr(self, key, delta=1):
"""
Like L{incr}, but decrements. Unlike L{incr}, underflow is checked and
new values are capped at 0. If server value is 1, a decrement of 2
returns 0, not -1.
@param delta: Integer amount to decrement by (should be zero or greater).
@return: New value after decrementing.
@rtype: int
"""
return self._incrdecr("decr", key, delta)
def _incrdecr(self, cmd, key, delta):
self.check_key(key)
server, key = self._get_server(key)
if not server:
return 0
self._statlog(cmd)
cmd = "%s %s %d" % (cmd, key, delta)
try:
server.send_cmd(cmd)
line = server.readline()
if line == None or line.strip() =='NOT_FOUND': return None
return int(line)
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return None
def add(self, key, val, time = 0, min_compress_len = 0):
'''
Add new key with value.
Like L{set}, but only stores in memcache if the key doesn't already exist.
@return: Nonzero on success.
@rtype: int
'''
return self._set("add", key, val, time, min_compress_len)
def append(self, key, val, time=0, min_compress_len=0):
'''Append the value to the end of the existing key's value.
Only stores in memcache if key already exists.
Also see L{prepend}.
@return: Nonzero on success.
@rtype: int
'''
return self._set("append", key, val, time, min_compress_len)
def prepend(self, key, val, time=0, min_compress_len=0):
'''Prepend the value to the beginning of the existing key's value.
Only stores in memcache if key already exists.
Also see L{append}.
@return: Nonzero on success.
@rtype: int
'''
return self._set("prepend", key, val, time, min_compress_len)
def replace(self, key, val, time=0, min_compress_len=0):
'''Replace existing key with value.
Like L{set}, but only stores in memcache if the key already exists.
The opposite of L{add}.
@return: Nonzero on success.
@rtype: int
'''
return self._set("replace", key, val, time, min_compress_len)
def set(self, key, val, time=0, min_compress_len=0):
'''Unconditionally sets a key to a given value in the memcache.
The C{key} can optionally be an tuple, with the first element
being the server hash value and the second being the key.
If you want to avoid making this module calculate a hash value.
You may prefer, for example, to keep all of a given user's objects
on the same memcache server, so you could use the user's unique
id as the hash value.
@return: Nonzero on success.
@rtype: int
@param time: Tells memcached the time which this value should expire, either
as a delta number of seconds, or an absolute unix time-since-the-epoch
value. See the memcached protocol docs section "Storage Commands"
for more info on <exptime>. We default to 0 == cache forever.
@param min_compress_len: The threshold length to kick in auto-compression
of the value using the zlib.compress() routine. If the value being cached is
a string, then the length of the string is measured, else if the value is an
object, then the length of the pickle result is measured. If the resulting
attempt at compression yeilds a larger string than the input, then it is
discarded. For backwards compatability, this parameter defaults to 0,
indicating don't ever try to compress.
'''
return self._set("set", key, val, time, min_compress_len)
def cas(self, key, val, time=0, min_compress_len=0):
'''Sets a key to a given value in the memcache if it hasn't been
altered since last fetched. (See L{gets}).
The C{key} can optionally be an tuple, with the first element
being the server hash value and the second being the key.
If you want to avoid making this module calculate a hash value.
You may prefer, for example, to keep all of a given user's objects
on the same memcache server, so you could use the user's unique
id as the hash value.
@return: Nonzero on success.
@rtype: int
@param time: Tells memcached the time which this value should expire,
either as a delta number of seconds, or an absolute unix
time-since-the-epoch value. See the memcached protocol docs section
"Storage Commands" for more info on <exptime>. We default to
0 == cache forever.
@param min_compress_len: The threshold length to kick in
auto-compression of the value using the zlib.compress() routine. If
the value being cached is a string, then the length of the string is
measured, else if the value is an object, then the length of the
pickle result is measured. If the resulting attempt at compression
yeilds a larger string than the input, then it is discarded. For
backwards compatability, this parameter defaults to 0, indicating
don't ever try to compress.
'''
return self._set("cas", key, val, time, min_compress_len)
def _map_and_prefix_keys(self, key_iterable, key_prefix):
"""Compute the mapping of server (_Host instance) -> list of keys to stuff onto that server, as well as the mapping of
prefixed key -> original key.
"""
# Check it just once ...
key_extra_len=len(key_prefix)
if key_prefix:
self.check_key(key_prefix)
# server (_Host) -> list of unprefixed server keys in mapping
server_keys = {}
prefixed_to_orig_key = {}
# build up a list for each server of all the keys we want.
for orig_key in key_iterable:
if isinstance(orig_key, tuple):
# Tuple of hashvalue, key ala _get_server(). Caller is essentially telling us what server to stuff this on.
# Ensure call to _get_server gets a Tuple as well.
str_orig_key = str(orig_key[1])
server, key = self._get_server((orig_key[0], key_prefix + str_orig_key)) # Gotta pre-mangle key before hashing to a server. Returns the mangled key.
else:
str_orig_key = str(orig_key) # set_multi supports int / long keys.
server, key = self._get_server(key_prefix + str_orig_key)
# Now check to make sure key length is proper ...
self.check_key(str_orig_key, key_extra_len=key_extra_len)
if not server:
continue
if server not in server_keys:
server_keys[server] = []
server_keys[server].append(key)
prefixed_to_orig_key[key] = orig_key
return (server_keys, prefixed_to_orig_key)
def set_multi(self, mapping, time=0, key_prefix='', min_compress_len=0):
'''
Sets multiple keys in the memcache doing just one query.
>>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
>>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'}
1
This method is recommended over regular L{set} as it lowers the number of
total packets flying around your network, reducing total latency, since
your app doesn't have to wait for each round-trip of L{set} before sending
the next one.
@param mapping: A dict of key/value pairs to set.
@param time: Tells memcached the time which this value should expire, either
as a delta number of seconds, or an absolute unix time-since-the-epoch
value. See the memcached protocol docs section "Storage Commands"
for more info on <exptime>. We default to 0 == cache forever.
@param key_prefix: Optional string to prepend to each key when sending to memcache. Allows you to efficiently stuff these keys into a pseudo-namespace in memcache:
>>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'}, key_prefix='subspace_')
>>> len(notset_keys) == 0
True
>>> mc.get_multi(['subspace_key1', 'subspace_key2']) == {'subspace_key1' : 'val1', 'subspace_key2' : 'val2'}
True
Causes key 'subspace_key1' and 'subspace_key2' to be set. Useful in conjunction with a higher-level layer which applies namespaces to data in memcache.
In this case, the return result would be the list of notset original keys, prefix not applied.
@param min_compress_len: The threshold length to kick in auto-compression
of the value using the zlib.compress() routine. If the value being cached is
a string, then the length of the string is measured, else if the value is an
object, then the length of the pickle result is measured. If the resulting
attempt at compression yeilds a larger string than the input, then it is
discarded. For backwards compatability, this parameter defaults to 0,
indicating don't ever try to compress.
@return: List of keys which failed to be stored [ memcache out of memory, etc. ].
@rtype: list
'''
self._statlog('set_multi')
server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(mapping.iterkeys(), key_prefix)
# send out all requests on each server before reading anything
dead_servers = []
notstored = [] # original keys.
for server in server_keys.iterkeys():
bigcmd = []
write = bigcmd.append
try:
for key in server_keys[server]: # These are mangled keys
store_info = self._val_to_store_info(
mapping[prefixed_to_orig_key[key]],
min_compress_len)
if store_info:
write("set %s %d %d %d\r\n%s\r\n" % (key, store_info[0],
time, store_info[1], store_info[2]))
else:
notstored.append(prefixed_to_orig_key[key])
server.send_cmds(''.join(bigcmd))
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
dead_servers.append(server)
# if any servers died on the way, don't expect them to respond.
for server in dead_servers:
del server_keys[server]
# short-circuit if there are no servers, just return all keys
if not server_keys: return(mapping.keys())
for server, keys in server_keys.iteritems():
try:
for key in keys:
line = server.readline()
if line == 'STORED':
continue
else:
notstored.append(prefixed_to_orig_key[key]) #un-mangle.
except (_Error, socket.error), msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return notstored
def _val_to_store_info(self, val, min_compress_len):
"""
Transform val to a storable representation, returning a tuple of the flags, the length of the new value, and the new value itself.
"""
flags = 0
if isinstance(val, str):
pass
elif isinstance(val, int):
flags |= Client._FLAG_INTEGER
val = "%d" % val
# force no attempt to compress this silly string.
min_compress_len = 0
elif isinstance(val, long):
flags |= Client._FLAG_LONG
val = "%d" % val
# force no attempt to compress this silly string.
min_compress_len = 0
else:
flags |= Client._FLAG_PICKLE
file = StringIO()
if self.picklerIsKeyword:
pickler = self.pickler(file, protocol = self.pickleProtocol)
else:
pickler = self.pickler(file, self.pickleProtocol)
if self.persistent_id:
pickler.persistent_id = self.persistent_id
pickler.dump(val)
val = file.getvalue()
lv = len(val)
# We should try to compress if min_compress_len > 0 and we could
# import zlib and this string is longer than our min threshold.
if min_compress_len and _supports_compress and lv > min_compress_len:
comp_val = compress(val)
# Only retain the result if the compression result is smaller
# than the original.
if len(comp_val) < lv:
flags |= Client._FLAG_COMPRESSED
val = comp_val
# silently do not store if value length exceeds maximum
if self.server_max_value_length != 0 and \
len(val) > self.server_max_value_length: return(0)
return (flags, len(val), val)
def _set(self, cmd, key, val, time, min_compress_len = 0):
self.check_key(key)
server, key = self._get_server(key)
if not server:
return 0
def _unsafe_set():
self._statlog(cmd)
store_info = self._val_to_store_info(val, min_compress_len)
if not store_info: return(0)
if cmd == 'cas':
if key not in self.cas_ids:
return self._set('set', key, val, time, min_compress_len)
fullcmd = "%s %s %d %d %d %d\r\n%s" % (
cmd, key, store_info[0], time, store_info[1],
self.cas_ids[key], store_info[2])
else:
fullcmd = "%s %s %d %d %d\r\n%s" % (
cmd, key, store_info[0], time, store_info[1], store_info[2])
try:
server.send_cmd(fullcmd)
return(server.expect("STORED") == "STORED")
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return 0
try:
return _unsafe_set()
except _ConnectionDeadError:
# retry once
try:
server._get_socket()
return _unsafe_set()
except (_ConnectionDeadError, socket.error), msg:
server.mark_dead(msg)
return 0
def _get(self, cmd, key):
self.check_key(key)
server, key = self._get_server(key)
if not server:
return None
def _unsafe_get():
self._statlog(cmd)
try:
server.send_cmd("%s %s" % (cmd, key))
rkey = flags = rlen = cas_id = None
if cmd == 'gets':
rkey, flags, rlen, cas_id, = self._expect_cas_value(server)
if rkey and self.cache_cas:
self.cas_ids[rkey] = cas_id
else:
rkey, flags, rlen, = self._expectvalue(server)
if not rkey:
return None
try:
value = self._recv_value(server, flags, rlen)
finally:
server.expect("END")
except (_Error, socket.error), msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return None
return value
try:
return _unsafe_get()
except _ConnectionDeadError:
# retry once
try:
if server.connect():
return _unsafe_get()
return None
except (_ConnectionDeadError, socket.error), msg:
server.mark_dead(msg)
return None
def get(self, key):
'''Retrieves a key from the memcache.
@return: The value or None.
'''
return self._get('get', key)
def gets(self, key):
'''Retrieves a key from the memcache. Used in conjunction with 'cas'.
@return: The value or None.
'''
return self._get('gets', key)
def get_multi(self, keys, key_prefix=''):
'''
Retrieves multiple keys from the memcache doing just one query.
>>> success = mc.set("foo", "bar")
>>> success = mc.set("baz", 42)
>>> mc.get_multi(["foo", "baz", "foobar"]) == {"foo": "bar", "baz": 42}
1
>>> mc.set_multi({'k1' : 1, 'k2' : 2}, key_prefix='pfx_') == []
1
This looks up keys 'pfx_k1', 'pfx_k2', ... . Returned dict will just have unprefixed keys 'k1', 'k2'.
>>> mc.get_multi(['k1', 'k2', 'nonexist'], key_prefix='pfx_') == {'k1' : 1, 'k2' : 2}
1
get_mult [ and L{set_multi} ] can take str()-ables like ints / longs as keys too. Such as your db pri key fields.
They're rotored through str() before being passed off to memcache, with or without the use of a key_prefix.
In this mode, the key_prefix could be a table name, and the key itself a db primary key number.
>>> mc.set_multi({42: 'douglass adams', 46 : 'and 2 just ahead of me'}, key_prefix='numkeys_') == []
1
>>> mc.get_multi([46, 42], key_prefix='numkeys_') == {42: 'douglass adams', 46 : 'and 2 just ahead of me'}
1
This method is recommended over regular L{get} as it lowers the number of
total packets flying around your network, reducing total latency, since
your app doesn't have to wait for each round-trip of L{get} before sending
the next one.
See also L{set_multi}.
@param keys: An array of keys.
@param key_prefix: A string to prefix each key when we communicate with memcache.
Facilitates pseudo-namespaces within memcache. Returned dictionary keys will not have this prefix.
@return: A dictionary of key/value pairs that were available. If key_prefix was provided, the keys in the retured dictionary will not have it present.
'''
self._statlog('get_multi')
server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix)
# send out all requests on each server before reading anything
dead_servers = []
for server in server_keys.iterkeys():
try:
server.send_cmd("get %s" % " ".join(server_keys[server]))
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
dead_servers.append(server)
# if any servers died on the way, don't expect them to respond.
for server in dead_servers:
del server_keys[server]
retvals = {}
for server in server_keys.iterkeys():
try:
line = server.readline()
while line and line != 'END':
rkey, flags, rlen = self._expectvalue(server, line)
# Bo Yang reports that this can sometimes be None
if rkey is not None:
val = self._recv_value(server, flags, rlen)
retvals[prefixed_to_orig_key[rkey]] = val # un-prefix returned key.
line = server.readline()
except (_Error, socket.error), msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return retvals
def _expect_cas_value(self, server, line=None):
if not line:
line = server.readline()
if line and line[:5] == 'VALUE':
resp, rkey, flags, len, cas_id = line.split()
return (rkey, int(flags), int(len), int(cas_id))
else:
return (None, None, None, None)
def _expectvalue(self, server, line=None):
if not line:
line = server.readline()
if line and line[:5] == 'VALUE':
resp, rkey, flags, len = line.split()
flags = int(flags)
rlen = int(len)
return (rkey, flags, rlen)
else:
return (None, None, None)
def _recv_value(self, server, flags, rlen):
rlen += 2 # include \r\n
buf = server.recv(rlen)
if len(buf) != rlen:
raise _Error("received %d bytes when expecting %d"
% (len(buf), rlen))
if len(buf) == rlen:
buf = buf[:-2] # strip \r\n
if flags & Client._FLAG_COMPRESSED:
buf = decompress(buf)
if flags == 0 or flags == Client._FLAG_COMPRESSED:
# Either a bare string or a compressed string now decompressed...
val = buf
elif flags & Client._FLAG_INTEGER:
val = int(buf)
elif flags & Client._FLAG_LONG:
val = long(buf)
elif flags & Client._FLAG_PICKLE:
try:
file = StringIO(buf)
unpickler = self.unpickler(file)
if self.persistent_load:
unpickler.persistent_load = self.persistent_load
val = unpickler.load()
except Exception, e:
self.debuglog('Pickle error: %s\n' % e)
return None
else:
self.debuglog("unknown flags on get: %x\n" % flags)
return val
def check_key(self, key, key_extra_len=0):
"""Checks sanity of key. Fails if:
Key length is > SERVER_MAX_KEY_LENGTH (Raises MemcachedKeyLength).
Contains control characters (Raises MemcachedKeyCharacterError).
Is not a string (Raises MemcachedStringEncodingError)
Is an unicode string (Raises MemcachedStringEncodingError)
Is not a string (Raises MemcachedKeyError)
Is None (Raises MemcachedKeyError)
"""
if isinstance(key, tuple): key = key[1]
if not key:
raise Client.MemcachedKeyNoneError("Key is None")
if isinstance(key, unicode):
raise Client.MemcachedStringEncodingError(
"Keys must be str()'s, not unicode. Convert your unicode "
"strings using mystring.encode(charset)!")
if not isinstance(key, str):
raise Client.MemcachedKeyTypeError("Key must be str()'s")
if isinstance(key, basestring):
if self.server_max_key_length != 0 and \
len(key) + key_extra_len > self.server_max_key_length:
raise Client.MemcachedKeyLengthError("Key length is > %s"
% self.server_max_key_length)
for char in key:
if ord(char) < 33 or ord(char) == 127:
raise Client.MemcachedKeyCharacterError(
"Control characters not allowed")
class _Host(object):
def __init__(self, host, debug=0, dead_retry=_DEAD_RETRY,
socket_timeout=_SOCKET_TIMEOUT):
self.dead_retry = dead_retry
self.socket_timeout = socket_timeout
self.debug = debug
if isinstance(host, tuple):
host, self.weight = host
else:
self.weight = 1
# parse the connection string
m = re.match(r'^(?P<proto>unix):(?P<path>.*)$', host)
if not m:
m = re.match(r'^(?P<proto>inet):'
r'(?P<host>[^:]+)(:(?P<port>[0-9]+))?$', host)
if not m: m = re.match(r'^(?P<host>[^:]+)(:(?P<port>[0-9]+))?$', host)
if not m:
raise ValueError('Unable to parse connection string: "%s"' % host)
hostData = m.groupdict()
if hostData.get('proto') == 'unix':
self.family = socket.AF_UNIX
self.address = hostData['path']
else:
self.family = socket.AF_INET
self.ip = hostData['host']
self.port = int(hostData.get('port', 11211))
self.address = ( self.ip, self.port )
self.deaduntil = 0
self.socket = None
self.buffer = ''
def debuglog(self, str):
if self.debug:
sys.stderr.write("MemCached: %s\n" % str)
def _check_dead(self):
if self.deaduntil and self.deaduntil > time.time():
return 1
self.deaduntil = 0
return 0
def connect(self):
if self._get_socket():
return 1
return 0
def mark_dead(self, reason):
self.debuglog("MemCache: %s: %s. Marking dead." % (self, reason))
self.deaduntil = time.time() + self.dead_retry
self.close_socket()
def _get_socket(self):
if self._check_dead():
return None
if self.socket:
return self.socket
s = socket.socket(self.family, socket.SOCK_STREAM)
if hasattr(s, 'settimeout'): s.settimeout(self.socket_timeout)
try:
s.connect(self.address)
except socket.timeout, msg:
self.mark_dead("connect: %s" % msg)
return None
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
self.mark_dead("connect: %s" % msg[1])
return None
self.socket = s
self.buffer = ''
return s
def close_socket(self):
if self.socket:
self.socket.close()
self.socket = None
def send_cmd(self, cmd):
self.socket.sendall(cmd + '\r\n')
def send_cmds(self, cmds):
""" cmds already has trailing \r\n's applied """
self.socket.sendall(cmds)
def readline(self):
buf = self.buffer
recv = self.socket.recv
while True:
index = buf.find('\r\n')
if index >= 0:
break
data = recv(4096)
if not data:
# connection close, let's kill it and raise
self.close_socket()
raise _ConnectionDeadError()
buf += data
self.buffer = buf[index+2:]
return buf[:index]
def expect(self, text):
line = self.readline()
if line != text:
self.debuglog("while expecting '%s', got unexpected response '%s'"
% (text, line))
return line
def recv(self, rlen):
self_socket_recv = self.socket.recv
buf = self.buffer
while len(buf) < rlen:
foo = self_socket_recv(max(rlen - len(buf), 4096))
buf += foo
if not foo:
raise _Error( 'Read %d bytes, expecting %d, '
'read returned 0 length bytes' % ( len(buf), rlen ))
self.buffer = buf[rlen:]
return buf[:rlen]
def __str__(self):
d = ''
if self.deaduntil:
d = " (dead until %d)" % self.deaduntil
if self.family == socket.AF_INET:
return "inet:%s:%d%s" % (self.address[0], self.address[1], d)
else:
return "unix:%s%s" % (self.address, d)
def _doctest():
import doctest, memcache
servers = ["127.0.0.1:11211"]
mc = Client(servers, debug=1)
globs = {"mc": mc}
return doctest.testmod(memcache, globs=globs)
if __name__ == "__main__":
failures = 0
print "Testing docstrings..."
_doctest()
print "Running tests:"
print
serverList = [["127.0.0.1:11211"]]
if '--do-unix' in sys.argv:
serverList.append([os.path.join(os.getcwd(), 'memcached.socket')])
for servers in serverList:
mc = Client(servers, debug=1)
def to_s(val):
if not isinstance(val, basestring):
return "%s (%s)" % (val, type(val))
return "%s" % val
def test_setget(key, val):
global failures
print "Testing set/get {'%s': %s} ..." % (to_s(key), to_s(val)),
mc.set(key, val)
newval = mc.get(key)
if newval == val:
print "OK"
return 1
else:
print "FAIL"; failures = failures + 1
return 0
class FooStruct(object):
def __init__(self):
self.bar = "baz"
def __str__(self):
return "A FooStruct"
def __eq__(self, other):
if isinstance(other, FooStruct):
return self.bar == other.bar
return 0
test_setget("a_string", "some random string")
test_setget("an_integer", 42)
if test_setget("long", long(1<<30)):
print "Testing delete ...",
if mc.delete("long"):
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Checking results of delete ..."
if mc.get("long") == None:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing get_multi ...",
print mc.get_multi(["a_string", "an_integer"])
# removed from the protocol
#if test_setget("timed_delete", 'foo'):
# print "Testing timed delete ...",
# if mc.delete("timed_delete", 1):
# print "OK"
# else:
# print "FAIL"; failures = failures + 1
# print "Checking results of timed delete ..."
# if mc.get("timed_delete") == None:
# print "OK"
# else:
# print "FAIL"; failures = failures + 1
print "Testing get(unknown value) ...",
print to_s(mc.get("unknown_value"))
f = FooStruct()
test_setget("foostruct", f)
print "Testing incr ...",
x = mc.incr("an_integer", 1)
if x == 43:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing decr ...",
x = mc.decr("an_integer", 1)
if x == 42:
print "OK"
else:
print "FAIL"; failures = failures + 1
sys.stdout.flush()
# sanity tests
print "Testing sending spaces...",
sys.stdout.flush()
try:
x = mc.set("this has spaces", 1)
except Client.MemcachedKeyCharacterError, msg:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing sending control characters...",
try:
x = mc.set("this\x10has\x11control characters\x02", 1)
except Client.MemcachedKeyCharacterError, msg:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing using insanely long key...",
try:
x = mc.set('a'*SERVER_MAX_KEY_LENGTH, 1)
except Client.MemcachedKeyLengthError, msg:
print "FAIL"; failures = failures + 1
else:
print "OK"
try:
x = mc.set('a'*SERVER_MAX_KEY_LENGTH + 'a', 1)
except Client.MemcachedKeyLengthError, msg:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing sending a unicode-string key...",
try:
x = mc.set(u'keyhere', 1)
except Client.MemcachedStringEncodingError, msg:
print "OK",
else:
print "FAIL",; failures = failures + 1
try:
x = mc.set((u'a'*SERVER_MAX_KEY_LENGTH).encode('utf-8'), 1)
except:
print "FAIL",; failures = failures + 1
else:
print "OK",
import pickle
s = pickle.loads('V\\u4f1a\np0\n.')
try:
x = mc.set((s*SERVER_MAX_KEY_LENGTH).encode('utf-8'), 1)
except Client.MemcachedKeyLengthError:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing using a value larger than the memcached value limit...",
x = mc.set('keyhere', 'a'*SERVER_MAX_VALUE_LENGTH)
if mc.get('keyhere') == None:
print "OK",
else:
print "FAIL",; failures = failures + 1
x = mc.set('keyhere', 'a'*SERVER_MAX_VALUE_LENGTH + 'aaa')
if mc.get('keyhere') == None:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing set_multi() with no memcacheds running",
mc.disconnect_all()
errors = mc.set_multi({'keyhere' : 'a', 'keythere' : 'b'})
if errors != []:
print "FAIL"; failures = failures + 1
else:
print "OK"
print "Testing delete_multi() with no memcacheds running",
mc.disconnect_all()
ret = mc.delete_multi({'keyhere' : 'a', 'keythere' : 'b'})
if ret != 1:
print "FAIL"; failures = failures + 1
else:
print "OK"
if failures > 0:
print '*** THERE WERE FAILED TESTS'
sys.exit(1)
sys.exit(0)
# vim: ts=4 sw=4 et :
|
pouyana/teireader
|
webui/gluon/contrib/memcache/memcache.py
|
Python
|
mit
| 49,300 | 0.004138 |
#!/usr/bin/env python
'''
Copyright (C) 2011 Karlisson Bezerra <contact@hacktoon.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
'''
import inkex
import simplestyle
class Canvas:
"""Canvas API helper class"""
def __init__(self, parent, width, height, context = "ctx"):
self.obj = context
self.code = [] #stores the code
self.style = {}
self.styleCache = {} #stores the previous style applied
self.parent = parent
self.width = width
self.height = height
def write(self, text):
self.code.append("\t" + text.replace("ctx", self.obj) + "\n")
def output(self):
from textwrap import dedent
html = """
<!DOCTYPE html>
<html>
<head>
<title>Inkscape Output</title>
</head>
<body>
<canvas id='canvas' width='%d' height='%d'></canvas>
<script>
var %s = document.getElementById("canvas").getContext("2d");
%s
</script>
</body>
</html>
"""
return dedent(html) % (self.width, self.height, self.obj, "".join(self.code))
def equalStyle(self, style, key):
"""Checks if the last style used is the same or there's no style yet"""
if key in self.styleCache:
return True
if key not in style:
return True
return style[key] == self.styleCache[key]
def beginPath(self):
self.write("ctx.beginPath();")
def createLinearGradient(self, href, x1, y1, x2, y2):
data = (href, x1, y1, x2, y2)
self.write("var %s = \
ctx.createLinearGradient(%f,%f,%f,%f);" % data)
def createRadialGradient(self, href, cx1, cy1, rx, cx2, cy2, ry):
data = (href, cx1, cy1, rx, cx2, cy2, ry)
self.write("var %s = ctx.createRadialGradient\
(%f,%f,%f,%f,%f,%f);" % data)
def addColorStop(self, href, pos, color):
self.write("%s.addColorStop(%f, %s);" % (href, pos, color))
def getColor(self, rgb, a):
r, g, b = simplestyle.parseColor(rgb)
a = float(a)
if a < 1:
return "'rgba(%d, %d, %d, %.1f)'" % (r, g, b, a)
else:
return "'rgb(%d, %d, %d)'" % (r, g, b)
def setGradient(self, href):
"""
for stop in gstops:
style = simplestyle.parseStyle(stop.get("style"))
stop_color = style["stop-color"]
opacity = style["stop-opacity"]
color = self.getColor(stop_color, opacity)
pos = float(stop.get("offset"))
self.addColorStop(href, pos, color)
"""
return None #href
def setOpacity(self, value):
self.write("ctx.globalAlpha = %.1f;" % float(value))
def setFill(self, value):
try:
alpha = self.style["fill-opacity"]
except:
alpha = 1
if not value.startswith("url("):
fill = self.getColor(value, alpha)
self.write("ctx.fillStyle = %s;" % fill)
def setStroke(self, value):
try:
alpha = self.style["stroke-opacity"]
except:
alpha = 1
self.write("ctx.strokeStyle = %s;" % self.getColor(value, alpha))
def setStrokeWidth(self, value):
self.write("ctx.lineWidth = %f;" % self.parent.unittouu(value))
def setStrokeLinecap(self, value):
self.write("ctx.lineCap = '%s';" % value)
def setStrokeLinejoin(self, value):
self.write("ctx.lineJoin = '%s';" % value)
def setStrokeMiterlimit(self, value):
self.write("ctx.miterLimit = %s;" % value)
def setFont(self, value):
self.write("ctx.font = \"%s\";" % value)
def moveTo(self, x, y):
self.write("ctx.moveTo(%f, %f);" % (x, y))
def lineTo(self, x, y):
self.write("ctx.lineTo(%f, %f);" % (x, y))
def quadraticCurveTo(self, cpx, cpy, x, y):
data = (cpx, cpy, x, y)
self.write("ctx.quadraticCurveTo(%f, %f, %f, %f);" % data)
def bezierCurveTo(self, x1, y1, x2, y2, x, y):
data = (x1, y1, x2, y2, x, y)
self.write("ctx.bezierCurveTo(%f, %f, %f, %f, %f, %f);" % data)
def rect(self, x, y, w, h, rx = 0, ry = 0):
if rx or ry:
#rounded rectangle, starts top-left anticlockwise
self.moveTo(x, y + ry)
self.lineTo(x, y+h-ry)
self.quadraticCurveTo(x, y+h, x+rx, y+h)
self.lineTo(x+w-rx, y+h)
self.quadraticCurveTo(x+w, y+h, x+w, y+h-ry)
self.lineTo(x+w, y+ry)
self.quadraticCurveTo(x+w, y, x+w-rx, y)
self.lineTo(x+rx, y)
self.quadraticCurveTo(x, y, x, y+ry)
else:
self.write("ctx.rect(%f, %f, %f, %f);" % (x, y, w, h))
def arc(self, x, y, r, a1, a2, flag):
data = (x, y, r, a1, a2, flag)
self.write("ctx.arc(%f, %f, %f, %f, %.8f, %d);" % data)
def fillText(self, text, x, y):
self.write("ctx.fillText(\"%s\", %f, %f);" % (text, x, y))
def translate(self, cx, cy):
self.write("ctx.translate(%f, %f);" % (cx, cy))
def rotate(self, angle):
self.write("ctx.rotate(%f);" % angle)
def scale(self, rx, ry):
self.write("ctx.scale(%f, %f);" % (rx, ry))
def transform(self, m11, m12, m21, m22, dx, dy):
data = (m11, m12, m21, m22, dx, dy)
self.write("ctx.transform(%f, %f, %f, %f, %f, %f);" % data)
def save(self):
self.write("ctx.save();")
def restore(self):
self.write("ctx.restore();")
def closePath(self):
if "fill" in self.style and self.style["fill"] != "none":
self.write("ctx.fill();")
if "stroke" in self.style and self.style["stroke"] != "none":
self.write("ctx.stroke();")
#self.write("%s.closePath();" % self.obj)
|
yaii/yai
|
share/extensions/ink2canvas/canvas.py
|
Python
|
gpl-2.0
| 6,499 | 0.002462 |
from codecs import open
from os import path
from setuptools import setup, Extension
from Cython.Distutils import build_ext
import numpy
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
# Requirements
install_requires=['cython>=0.24.1',
'numpy>=1.6.1',
'scipy>=0.16',
'matplotlib>=1.5.1',
'scikit-learn>=0.17.1',
'nibabel>=2.0.2',
'nilearn>=0.2.4',
'GPy>=1.0.7']
setup(
name='connectopic_mapping',
version='0.3.0',
description='Connectopic mapping',
long_description=long_description,
author='Michele Damian',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Medical Science Apps.',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='neuroscience connectopic mapping research',
packages=['connectopic_mapping'],
install_requires=install_requires,
cmdclass={'build_ext': build_ext},
ext_modules=[Extension("connectopic_mapping.haak", ["connectopic_mapping/haak.pyx"], include_dirs=[numpy.get_include()])],
)
|
MicheleDamian/ConnectopicMapping
|
setup.py
|
Python
|
apache-2.0
| 1,540 | 0.001299 |
import time
from datetime import datetime, timedelta
from StringIO import StringIO
from django.core.handlers.modpython import ModPythonRequest
from django.core.handlers.wsgi import WSGIRequest, LimitedStream
from django.http import HttpRequest, HttpResponse, parse_cookie, build_request_repr
from django.utils import unittest
from django.utils.http import cookie_date
class RequestsTests(unittest.TestCase):
def test_httprequest(self):
request = HttpRequest()
self.assertEqual(request.GET.keys(), [])
self.assertEqual(request.POST.keys(), [])
self.assertEqual(request.COOKIES.keys(), [])
self.assertEqual(request.META.keys(), [])
def test_httprequest_repr(self):
request = HttpRequest()
request.path = u'/somepath/'
request.GET = {u'get-key': u'get-value'}
request.POST = {u'post-key': u'post-value'}
request.COOKIES = {u'post-key': u'post-value'}
request.META = {u'post-key': u'post-value'}
self.assertEqual(repr(request), u"<HttpRequest\npath:/somepath/,\nGET:{u'get-key': u'get-value'},\nPOST:{u'post-key': u'post-value'},\nCOOKIES:{u'post-key': u'post-value'},\nMETA:{u'post-key': u'post-value'}>")
self.assertEqual(build_request_repr(request), repr(request))
self.assertEqual(build_request_repr(request, path_override='/otherpath/', GET_override={u'a': u'b'}, POST_override={u'c': u'd'}, COOKIES_override={u'e': u'f'}, META_override={u'g': u'h'}),
u"<HttpRequest\npath:/otherpath/,\nGET:{u'a': u'b'},\nPOST:{u'c': u'd'},\nCOOKIES:{u'e': u'f'},\nMETA:{u'g': u'h'}>")
def test_wsgirequest(self):
request = WSGIRequest({'PATH_INFO': 'bogus', 'REQUEST_METHOD': 'bogus', 'wsgi.input': StringIO('')})
self.assertEqual(request.GET.keys(), [])
self.assertEqual(request.POST.keys(), [])
self.assertEqual(request.COOKIES.keys(), [])
self.assertEqual(set(request.META.keys()), set(['PATH_INFO', 'REQUEST_METHOD', 'SCRIPT_NAME', 'wsgi.input']))
self.assertEqual(request.META['PATH_INFO'], 'bogus')
self.assertEqual(request.META['REQUEST_METHOD'], 'bogus')
self.assertEqual(request.META['SCRIPT_NAME'], '')
def test_wsgirequest_repr(self):
request = WSGIRequest({'PATH_INFO': '/somepath/', 'REQUEST_METHOD': 'get', 'wsgi.input': StringIO('')})
request.GET = {u'get-key': u'get-value'}
request.POST = {u'post-key': u'post-value'}
request.COOKIES = {u'post-key': u'post-value'}
request.META = {u'post-key': u'post-value'}
self.assertEqual(repr(request), u"<WSGIRequest\npath:/somepath/,\nGET:{u'get-key': u'get-value'},\nPOST:{u'post-key': u'post-value'},\nCOOKIES:{u'post-key': u'post-value'},\nMETA:{u'post-key': u'post-value'}>")
self.assertEqual(build_request_repr(request), repr(request))
self.assertEqual(build_request_repr(request, path_override='/otherpath/', GET_override={u'a': u'b'}, POST_override={u'c': u'd'}, COOKIES_override={u'e': u'f'}, META_override={u'g': u'h'}),
u"<WSGIRequest\npath:/otherpath/,\nGET:{u'a': u'b'},\nPOST:{u'c': u'd'},\nCOOKIES:{u'e': u'f'},\nMETA:{u'g': u'h'}>")
def test_modpythonrequest(self):
class FakeModPythonRequest(ModPythonRequest):
def __init__(self, *args, **kwargs):
super(FakeModPythonRequest, self).__init__(*args, **kwargs)
self._get = self._post = self._meta = self._cookies = {}
class Dummy:
def get_options(self):
return {}
req = Dummy()
req.uri = 'bogus'
request = FakeModPythonRequest(req)
self.assertEqual(request.path, 'bogus')
self.assertEqual(request.GET.keys(), [])
self.assertEqual(request.POST.keys(), [])
self.assertEqual(request.COOKIES.keys(), [])
self.assertEqual(request.META.keys(), [])
def test_modpythonrequest_repr(self):
class Dummy:
def get_options(self):
return {}
req = Dummy()
req.uri = '/somepath/'
request = ModPythonRequest(req)
request._get = {u'get-key': u'get-value'}
request._post = {u'post-key': u'post-value'}
request._cookies = {u'post-key': u'post-value'}
request._meta = {u'post-key': u'post-value'}
self.assertEqual(repr(request), u"<ModPythonRequest\npath:/somepath/,\nGET:{u'get-key': u'get-value'},\nPOST:{u'post-key': u'post-value'},\nCOOKIES:{u'post-key': u'post-value'},\nMETA:{u'post-key': u'post-value'}>")
self.assertEqual(build_request_repr(request), repr(request))
self.assertEqual(build_request_repr(request, path_override='/otherpath/', GET_override={u'a': u'b'}, POST_override={u'c': u'd'}, COOKIES_override={u'e': u'f'}, META_override={u'g': u'h'}),
u"<ModPythonRequest\npath:/otherpath/,\nGET:{u'a': u'b'},\nPOST:{u'c': u'd'},\nCOOKIES:{u'e': u'f'},\nMETA:{u'g': u'h'}>")
def test_parse_cookie(self):
self.assertEqual(parse_cookie('invalid:key=true'), {})
def test_httprequest_location(self):
request = HttpRequest()
self.assertEqual(request.build_absolute_uri(location="https://www.example.com/asdf"),
'https://www.example.com/asdf')
request.get_host = lambda: 'www.example.com'
request.path = ''
self.assertEqual(request.build_absolute_uri(location="/path/with:colons"),
'http://www.example.com/path/with:colons')
def test_near_expiration(self):
"Cookie will expire when an near expiration time is provided"
response = HttpResponse()
# There is a timing weakness in this test; The
# expected result for max-age requires that there be
# a very slight difference between the evaluated expiration
# time, and the time evaluated in set_cookie(). If this
# difference doesn't exist, the cookie time will be
# 1 second larger. To avoid the problem, put in a quick sleep,
# which guarantees that there will be a time difference.
expires = datetime.utcnow() + timedelta(seconds=10)
time.sleep(0.001)
response.set_cookie('datetime', expires=expires)
datetime_cookie = response.cookies['datetime']
self.assertEqual(datetime_cookie['max-age'], 10)
def test_far_expiration(self):
"Cookie will expire when an distant expiration time is provided"
response = HttpResponse()
response.set_cookie('datetime', expires=datetime(2028, 1, 1, 4, 5, 6))
datetime_cookie = response.cookies['datetime']
self.assertEqual(datetime_cookie['expires'], 'Sat, 01-Jan-2028 04:05:06 GMT')
def test_max_age_expiration(self):
"Cookie will expire if max_age is provided"
response = HttpResponse()
response.set_cookie('max_age', max_age=10)
max_age_cookie = response.cookies['max_age']
self.assertEqual(max_age_cookie['max-age'], 10)
self.assertEqual(max_age_cookie['expires'], cookie_date(time.time()+10))
def test_httponly_cookie(self):
response = HttpResponse()
response.set_cookie('example', httponly=True)
example_cookie = response.cookies['example']
# A compat cookie may be in use -- check that it has worked
# both as an output string, and using the cookie attributes
self.assertTrue('; httponly' in str(example_cookie))
self.assertTrue(example_cookie['httponly'])
def test_limited_stream(self):
# Read all of a limited stream
stream = LimitedStream(StringIO('test'), 2)
self.assertEqual(stream.read(), 'te')
# Reading again returns nothing.
self.assertEqual(stream.read(), '')
# Read a number of characters greater than the stream has to offer
stream = LimitedStream(StringIO('test'), 2)
self.assertEqual(stream.read(5), 'te')
# Reading again returns nothing.
self.assertEqual(stream.readline(5), '')
# Read sequentially from a stream
stream = LimitedStream(StringIO('12345678'), 8)
self.assertEqual(stream.read(5), '12345')
self.assertEqual(stream.read(5), '678')
# Reading again returns nothing.
self.assertEqual(stream.readline(5), '')
# Read lines from a stream
stream = LimitedStream(StringIO('1234\n5678\nabcd\nefgh\nijkl'), 24)
# Read a full line, unconditionally
self.assertEqual(stream.readline(), '1234\n')
# Read a number of characters less than a line
self.assertEqual(stream.readline(2), '56')
# Read the rest of the partial line
self.assertEqual(stream.readline(), '78\n')
# Read a full line, with a character limit greater than the line length
self.assertEqual(stream.readline(6), 'abcd\n')
# Read the next line, deliberately terminated at the line end
self.assertEqual(stream.readline(4), 'efgh')
# Read the next line... just the line end
self.assertEqual(stream.readline(), '\n')
# Read everything else.
self.assertEqual(stream.readline(), 'ijkl')
# Regression for #15018
# If a stream contains a newline, but the provided length
# is less than the number of provided characters, the newline
# doesn't reset the available character count
stream = LimitedStream(StringIO('1234\nabcdef'), 9)
self.assertEqual(stream.readline(10), '1234\n')
self.assertEqual(stream.readline(3), 'abc')
# Now expire the available characters
self.assertEqual(stream.readline(3), 'd')
# Reading again returns nothing.
self.assertEqual(stream.readline(2), '')
# Same test, but with read, not readline.
stream = LimitedStream(StringIO('1234\nabcdef'), 9)
self.assertEqual(stream.read(6), '1234\na')
self.assertEqual(stream.read(2), 'bc')
self.assertEqual(stream.read(2), 'd')
self.assertEqual(stream.read(2), '')
self.assertEqual(stream.read(), '')
def test_stream(self):
request = WSGIRequest({'REQUEST_METHOD': 'POST', 'wsgi.input': StringIO('name=value')})
self.assertEqual(request.read(), 'name=value')
def test_read_after_value(self):
"""
Reading from request is allowed after accessing request contents as
POST or raw_post_data.
"""
request = WSGIRequest({'REQUEST_METHOD': 'POST', 'wsgi.input': StringIO('name=value')})
self.assertEqual(request.POST, {u'name': [u'value']})
self.assertEqual(request.raw_post_data, 'name=value')
self.assertEqual(request.read(), 'name=value')
def test_value_after_read(self):
"""
Construction of POST or raw_post_data is not allowed after reading
from request.
"""
request = WSGIRequest({'REQUEST_METHOD': 'POST', 'wsgi.input': StringIO('name=value')})
self.assertEqual(request.read(2), 'na')
self.assertRaises(Exception, lambda: request.raw_post_data)
self.assertEqual(request.POST, {})
def test_raw_post_data_after_POST_multipart(self):
"""
Reading raw_post_data after parsing multipart is not allowed
"""
# Because multipart is used for large amounts fo data i.e. file uploads,
# we don't want the data held in memory twice, and we don't want to
# silence the error by setting raw_post_data = '' either.
payload = "\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--'
''])
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': StringIO(payload)})
self.assertEqual(request.POST, {u'name': [u'value']})
self.assertRaises(Exception, lambda: request.raw_post_data)
def test_POST_multipart_with_content_length_zero(self):
"""
Multipart POST requests with Content-Length >= 0 are valid and need to be handled.
"""
# According to:
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13
# Every request.POST with Content-Length >= 0 is a valid request,
# this test ensures that we handle Content-Length == 0.
payload = "\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--'
''])
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': 0,
'wsgi.input': StringIO(payload)})
self.assertEqual(request.POST, {})
def test_read_by_lines(self):
request = WSGIRequest({'REQUEST_METHOD': 'POST', 'wsgi.input': StringIO('name=value')})
self.assertEqual(list(request), ['name=value'])
def test_POST_after_raw_post_data_read(self):
"""
POST should be populated even if raw_post_data is read first
"""
request = WSGIRequest({'REQUEST_METHOD': 'POST', 'wsgi.input': StringIO('name=value')})
raw_data = request.raw_post_data
self.assertEqual(request.POST, {u'name': [u'value']})
def test_POST_after_raw_post_data_read_and_stream_read(self):
"""
POST should be populated even if raw_post_data is read first, and then
the stream is read second.
"""
request = WSGIRequest({'REQUEST_METHOD': 'POST', 'wsgi.input': StringIO('name=value')})
raw_data = request.raw_post_data
self.assertEqual(request.read(1), u'n')
self.assertEqual(request.POST, {u'name': [u'value']})
def test_POST_after_raw_post_data_read_and_stream_read_multipart(self):
"""
POST should be populated even if raw_post_data is read first, and then
the stream is read second. Using multipart/form-data instead of urlencoded.
"""
payload = "\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--'
''])
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': StringIO(payload)})
raw_data = request.raw_post_data
# Consume enough data to mess up the parsing:
self.assertEqual(request.read(13), u'--boundary\r\nC')
self.assertEqual(request.POST, {u'name': [u'value']})
|
mitsuhiko/django
|
tests/regressiontests/requests/tests.py
|
Python
|
bsd-3-clause
| 15,031 | 0.002329 |
# -*- coding: utf-8 -*-
"""
(c) 2014 - Copyright Red Hat Inc
Authors:
Pierre-Yves Chibon <pingou@pingoured.fr>
"""
from anitya.lib.backends import (
BaseBackend, get_versions_by_regex_for_text, REGEX)
from anitya.lib.exceptions import AnityaPluginException
import six
DEFAULT_REGEX = 'href="([0-9][0-9.]*)/"'
class FolderBackend(BaseBackend):
''' The custom class for project having a special hosting.
This backend allows to specify a version_url and a regex that will
be used to retrieve the version information.
'''
name = 'folder'
examples = [
'http://ftp.gnu.org/pub/gnu/gnash/',
'http://subsurface.hohndel.org/downloads/',
]
@classmethod
def get_version(cls, project):
''' Method called to retrieve the latest version of the projects
provided, project that relies on the backend of this plugin.
:arg Project project: a :class:`model.Project` object whose backend
corresponds to the current plugin.
:return: the latest version found upstream
:return type: str
:raise AnityaPluginException: a
:class:`anitya.lib.exceptions.AnityaPluginException` exception
when the version cannot be retrieved correctly
'''
return cls.get_ordered_versions(project)[-1]
@classmethod
def get_versions(cls, project):
''' Method called to retrieve all the versions (that can be found)
of the projects provided, project that relies on the backend of
this plugin.
:arg Project project: a :class:`model.Project` object whose backend
corresponds to the current plugin.
:return: a list of all the possible releases found
:return type: list
:raise AnityaPluginException: a
:class:`anitya.lib.exceptions.AnityaPluginException` exception
when the versions cannot be retrieved correctly
'''
url = project.version_url
try:
req = cls.call_url(url, insecure=project.insecure)
except Exception as err:
raise AnityaPluginException(
'Could not call : "%s" of "%s", with error: %s' % (
url, project.name, str(err)))
versions = None
if not isinstance(req, six.string_types):
req = req.text
try:
regex = REGEX % {'name': project.name.replace('+', '\+')}
versions = get_versions_by_regex_for_text(
req, url, regex, project)
except AnityaPluginException:
versions = get_versions_by_regex_for_text(
req, url, DEFAULT_REGEX, project)
return versions
|
pombredanne/anitya
|
anitya/lib/backends/folder.py
|
Python
|
gpl-2.0
| 2,702 | 0.00037 |
#!/usr/bin/env python3
import re
from enum import Enum
diags = []
with open('input.txt', 'r') as f:
diags = f.read().splitlines()
#--- challenge 1
gamma = ""
for i in range(0, len(diags[0])):
zeros = len([x for x in diags if x[i] == "0"])
ones = len([x for x in diags if x[i] == "1"])
gamma += "0" if zeros > ones else "1"
gamma = int(gamma, 2)
epsilon = gamma ^ 0b111111111111
print("Solution to challenge 1: {}".format(gamma * epsilon))
#--- challenge 2
class Rating(Enum):
OXYGEN = 0
CO2 = 1
def get_val(diags, rating):
for i in range(0, len(diags[0])):
zeros = len([x for x in diags if x[i] == "0"])
ones = len(diags) - zeros
if rating == Rating.OXYGEN:
check_val = "0" if zeros > ones else "1"
else:
check_val = "0" if zeros <= ones else "1"
diags = [x for x in diags if x[i] != check_val]
if len(diags) == 1:
return int(diags[0], 2)
oxygen = get_val(diags, Rating.OXYGEN)
co2 = get_val(diags, Rating.CO2)
print("Solution to challenge 2: {}".format(oxygen * co2))
|
jekhokie/scriptbox
|
python--advent-of-code/2021/3/solve.py
|
Python
|
mit
| 1,039 | 0.014437 |
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy import testing
from sqlalchemy.testing.util import function_named
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.schema import Table, Column
class BaseObject(object):
def __init__(self, *args, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
class Publication(BaseObject):
pass
class Issue(BaseObject):
pass
class Location(BaseObject):
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__, str(getattr(self, 'issue_id', None)), repr(str(self._name.name)))
def _get_name(self):
return self._name
def _set_name(self, name):
session = create_session()
s = session.query(LocationName).filter(LocationName.name==name).first()
session.expunge_all()
if s is not None:
self._name = s
return
found = False
for i in session.new:
if isinstance(i, LocationName) and i.name == name:
self._name = i
found = True
break
if found == False:
self._name = LocationName(name=name)
name = property(_get_name, _set_name)
class LocationName(BaseObject):
def __repr__(self):
return "%s()" % (self.__class__.__name__)
class PageSize(BaseObject):
def __repr__(self):
return "%s(%sx%s, %s)" % (self.__class__.__name__, self.width, self.height, self.name)
class Magazine(BaseObject):
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__, repr(self.location), repr(self.size))
class Page(BaseObject):
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, str(self.page_no))
class MagazinePage(Page):
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__, str(self.page_no), repr(self.magazine))
class ClassifiedPage(MagazinePage):
pass
class MagazineTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
global publication_table, issue_table, location_table, location_name_table, magazine_table, \
page_table, magazine_page_table, classified_page_table, page_size_table
publication_table = Table('publication', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('name', String(45), default=''),
)
issue_table = Table('issue', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('publication_id', Integer, ForeignKey('publication.id')),
Column('issue', Integer),
)
location_table = Table('location', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('issue_id', Integer, ForeignKey('issue.id')),
Column('ref', CHAR(3), default=''),
Column('location_name_id', Integer, ForeignKey('location_name.id')),
)
location_name_table = Table('location_name', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('name', String(45), default=''),
)
magazine_table = Table('magazine', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('location_id', Integer, ForeignKey('location.id')),
Column('page_size_id', Integer, ForeignKey('page_size.id')),
)
page_table = Table('page', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('page_no', Integer),
Column('type', CHAR(1), default='p'),
)
magazine_page_table = Table('magazine_page', metadata,
Column('page_id', Integer, ForeignKey('page.id'), primary_key=True),
Column('magazine_id', Integer, ForeignKey('magazine.id')),
Column('orders', Text, default=''),
)
classified_page_table = Table('classified_page', metadata,
Column('magazine_page_id', Integer, ForeignKey('magazine_page.page_id'), primary_key=True),
Column('titles', String(45), default=''),
)
page_size_table = Table('page_size', metadata,
Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
Column('width', Integer),
Column('height', Integer),
Column('name', String(45), default=''),
)
def _generate_round_trip_test(use_unions=False, use_joins=False):
def test_roundtrip(self):
publication_mapper = mapper(Publication, publication_table)
issue_mapper = mapper(Issue, issue_table, properties = {
'publication': relationship(Publication, backref=backref('issues', cascade="all, delete-orphan")),
})
location_name_mapper = mapper(LocationName, location_name_table)
location_mapper = mapper(Location, location_table, properties = {
'issue': relationship(Issue, backref=backref('locations', lazy='joined', cascade="all, delete-orphan")),
'_name': relationship(LocationName),
})
page_size_mapper = mapper(PageSize, page_size_table)
magazine_mapper = mapper(Magazine, magazine_table, properties = {
'location': relationship(Location, backref=backref('magazine', uselist=False)),
'size': relationship(PageSize),
})
if use_unions:
page_join = polymorphic_union(
{
'm': page_table.join(magazine_page_table),
'c': page_table.join(magazine_page_table).join(classified_page_table),
'p': page_table.select(page_table.c.type=='p'),
}, None, 'page_join')
page_mapper = mapper(Page, page_table, with_polymorphic=('*', page_join), polymorphic_on=page_join.c.type, polymorphic_identity='p')
elif use_joins:
page_join = page_table.outerjoin(magazine_page_table).outerjoin(classified_page_table)
page_mapper = mapper(Page, page_table, with_polymorphic=('*', page_join), polymorphic_on=page_table.c.type, polymorphic_identity='p')
else:
page_mapper = mapper(Page, page_table, polymorphic_on=page_table.c.type, polymorphic_identity='p')
if use_unions:
magazine_join = polymorphic_union(
{
'm': page_table.join(magazine_page_table),
'c': page_table.join(magazine_page_table).join(classified_page_table),
}, None, 'page_join')
magazine_page_mapper = mapper(MagazinePage, magazine_page_table, with_polymorphic=('*', magazine_join), inherits=page_mapper, polymorphic_identity='m', properties={
'magazine': relationship(Magazine, backref=backref('pages', order_by=magazine_join.c.page_no))
})
elif use_joins:
magazine_join = page_table.join(magazine_page_table).outerjoin(classified_page_table)
magazine_page_mapper = mapper(MagazinePage, magazine_page_table, with_polymorphic=('*', magazine_join), inherits=page_mapper, polymorphic_identity='m', properties={
'magazine': relationship(Magazine, backref=backref('pages', order_by=page_table.c.page_no))
})
else:
magazine_page_mapper = mapper(MagazinePage, magazine_page_table, inherits=page_mapper, polymorphic_identity='m', properties={
'magazine': relationship(Magazine, backref=backref('pages', order_by=page_table.c.page_no))
})
classified_page_mapper = mapper(ClassifiedPage,
classified_page_table,
inherits=magazine_page_mapper,
polymorphic_identity='c',
primary_key=[page_table.c.id])
session = create_session()
pub = Publication(name='Test')
issue = Issue(issue=46,publication=pub)
location = Location(ref='ABC',name='London',issue=issue)
page_size = PageSize(name='A4',width=210,height=297)
magazine = Magazine(location=location,size=page_size)
page = ClassifiedPage(magazine=magazine,page_no=1)
page2 = MagazinePage(magazine=magazine,page_no=2)
page3 = ClassifiedPage(magazine=magazine,page_no=3)
session.add(pub)
session.flush()
print([x for x in session])
session.expunge_all()
session.flush()
session.expunge_all()
p = session.query(Publication).filter(Publication.name=="Test").one()
print(p.issues[0].locations[0].magazine.pages)
print([page, page2, page3])
assert repr(p.issues[0].locations[0].magazine.pages) == repr([page, page2, page3]), repr(p.issues[0].locations[0].magazine.pages)
test_roundtrip = function_named(
test_roundtrip, "test_%s" % (not use_union and (use_joins and "joins" or "select") or "unions"))
setattr(MagazineTest, test_roundtrip.__name__, test_roundtrip)
for (use_union, use_join) in [(True, False), (False, True), (False, False)]:
_generate_round_trip_test(use_union, use_join)
|
itkovian/sqlalchemy
|
test/orm/inheritance/test_magazine.py
|
Python
|
mit
| 9,316 | 0.008695 |
import os
import sys
import signal
import subprocess
import tempfile
import curses
import visidata
visidata.vd.tstp_signal = None
class SuspendCurses:
'Context manager to leave windowed mode on enter and restore it on exit.'
def __enter__(self):
curses.endwin()
if visidata.vd.tstp_signal:
signal.signal(signal.SIGTSTP, visidata.vd.tstp_signal)
def __exit__(self, exc_type, exc_val, tb):
curses.reset_prog_mode()
visidata.vd.scrFull.refresh()
curses.doupdate()
@visidata.VisiData.api
def launchEditor(vd, *args):
'Launch $EDITOR with *args* as arguments.'
editor = os.environ.get('EDITOR') or vd.fail('$EDITOR not set')
args = editor.split() + list(args)
with SuspendCurses():
return subprocess.call(args)
@visidata.VisiData.api
def launchBrowser(vd, *args):
'Launch $BROWSER with *args* as arguments.'
browser = os.environ.get('BROWSER') or vd.fail('(no $BROWSER) for %s' % args[0])
args = [browser] + list(args)
subprocess.call(args)
@visidata.VisiData.api
def launchExternalEditor(vd, v, linenum=0):
'Launch $EDITOR to edit string *v* starting on line *linenum*.'
import tempfile
with tempfile.NamedTemporaryFile() as temp:
with open(temp.name, 'w') as fp:
fp.write(v)
return launchExternalEditorPath(visidata.Path(temp.name), linenum)
def launchExternalEditorPath(path, linenum=0):
'Launch $EDITOR to edit *path* starting on line *linenum*.'
if linenum:
visidata.vd.launchEditor(path, '+%s' % linenum)
else:
visidata.vd.launchEditor(path)
with open(path, 'r') as fp:
try:
return fp.read().rstrip('\n') # trim inevitable trailing newlines
except Exception as e:
visidata.vd.exceptionCaught(e)
return ''
def suspend():
import signal
with SuspendCurses():
os.kill(os.getpid(), signal.SIGSTOP)
def _breakpoint(*args, **kwargs):
import pdb
class VisiDataPdb(pdb.Pdb):
def precmd(self, line):
r = super().precmd(line)
if not r:
SuspendCurses.__exit__(None, None, None, None)
return r
def postcmd(self, stop, line):
if stop:
SuspendCurses.__enter__(None)
return super().postcmd(stop, line)
SuspendCurses.__enter__(None)
VisiDataPdb(nosigint=True).set_trace()
sys.breakpointhook = _breakpoint
visidata.BaseSheet.addCommand('^Z', 'suspend', 'suspend()', 'suspend VisiData process')
visidata.BaseSheet.addCommand('', 'breakpoint', 'breakpoint()', 'drop into pdb REPL')
|
saulpw/visidata
|
visidata/editor.py
|
Python
|
gpl-3.0
| 2,699 | 0.002594 |
import _plotly_utils.basevalidators
class TickvalsValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(
self,
plotly_name="tickvals",
parent_name="scatterternary.marker.colorbar",
**kwargs
):
super(TickvalsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
role=kwargs.pop("role", "data"),
**kwargs
)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/scatterternary/marker/colorbar/_tickvals.py
|
Python
|
mit
| 513 | 0 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2015 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import copy
from openerp import models
from openerp.addons.account.report.account_financial_report import\
report_account_common
class report_account_common_horizontal(report_account_common):
def __init__(self, cr, uid, name, context=None):
super(report_account_common_horizontal, self).__init__(
cr, uid, name, context=context)
self.localcontext.update({
'get_left_lines': self.get_left_lines,
'get_right_lines': self.get_right_lines,
})
def get_lines(self, data, side=None):
data = copy.deepcopy(data)
if data['form']['used_context'] is None:
data['form']['used_context'] = {}
data['form']['used_context'].update(
account_financial_report_horizontal_side=side)
return super(report_account_common_horizontal, self).get_lines(
data)
def get_left_lines(self, data):
return self.get_lines(data, side='left')
def get_right_lines(self, data):
return self.get_lines(data, side='right')
class ReportFinancial(models.AbstractModel):
_inherit = 'report.account.report_financial'
_wrapped_report_class = report_account_common_horizontal
|
Ehtaga/account-financial-reporting
|
account_financial_report_horizontal/report/report_financial.py
|
Python
|
agpl-3.0
| 2,192 | 0 |
# coding:utf-8
import MySQLdb as mysql
from flask import Flask, request, render_template
import json
app = Flask(__name__)
con = mysql.connect(user="root", passwd="redhat", db="jiangkun")
con.autocommit(True)
cur = con.cursor()
@app.route('/')
def index():
return render_template("index.html")
@app.route('/list')
def list():
sql = "select * from user"
cur.execute(sql)
res_json = json.dumps(cur.fetchall())
print res_json
return res_json
@app.route('/add')
def add():
name = request.args.get('name')
passwd = request.args.get('passwd')
sql = "insert into user (name, passwd) values (%s, %s)" % (name, passwd)
cur.execute(sql)
return "ok"
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=True, port=9092)
|
seerjk/reboot06
|
09/homework08/flask_web.py
|
Python
|
mit
| 768 | 0.002604 |
"""Utility functions for hc-api-python"""
from datetime import datetime
def get_readable_time_string(seconds):
"""Returns human readable string from number of seconds"""
seconds = int(seconds)
minutes = seconds // 60
seconds = seconds % 60
hours = minutes // 60
minutes = minutes % 60
days = hours // 24
hours = hours % 24
result = ""
if days > 0:
result += "%d %s " % (days, "Day" if (days == 1) else "Days")
if hours > 0:
result += "%d %s " % (hours, "Hour" if (hours == 1) else "Hours")
if minutes > 0:
result += "%d %s " % (minutes, "Minute" if (minutes == 1) else "Minutes")
if seconds > 0:
result += "%d %s " % (seconds, "Second" if (seconds == 1) else "Seconds")
return result.strip()
def get_datetime_from_timestamp(timestamp):
"""Return datetime from unix timestamp"""
try:
return datetime.fromtimestamp(int(timestamp))
except:
return None
def get_rate_limits(response):
"""Returns a list of rate limit information from a given response's headers."""
periods = response.headers['X-RateLimit-Period']
if not periods:
return []
rate_limits = []
periods = periods.split(',')
limits = response.headers['X-RateLimit-Limit'].split(',')
remaining = response.headers['X-RateLimit-Remaining'].split(',')
reset = response.headers['X-RateLimit-Reset'].split(',')
for idx, period in enumerate(periods):
rate_limit = {}
limit_period = get_readable_time_string(period)
rate_limit["period"] = limit_period
rate_limit["period_seconds"] = period
rate_limit["request_limit"] = limits[idx]
rate_limit["requests_remaining"] = remaining[idx]
reset_datetime = get_datetime_from_timestamp(reset[idx])
rate_limit["reset"] = reset_datetime
right_now = datetime.now()
if (reset_datetime is not None) and (right_now < reset_datetime):
# add 1 second because of rounding
seconds_remaining = (reset_datetime - right_now).seconds + 1
else:
seconds_remaining = 0
rate_limit["reset_in_seconds"] = seconds_remaining
rate_limit["time_to_reset"] = get_readable_time_string(seconds_remaining)
rate_limits.append(rate_limit)
return rate_limits
|
housecanary/hc-api-python
|
housecanary/utilities.py
|
Python
|
mit
| 2,345 | 0.002132 |
from .manager import Manager
__version__ = '0.2.4'
|
chendx79/Python3HandlerSocket
|
pyhs/__init__.py
|
Python
|
mit
| 51 | 0 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
MsSQL to GCS operator.
"""
import decimal
from airflow.providers.google.cloud.operators.sql_to_gcs import BaseSQLToGCSOperator
from airflow.providers.microsoft.mssql.hooks.mssql import MsSqlHook
from airflow.utils.decorators import apply_defaults
class MSSQLToGCSOperator(BaseSQLToGCSOperator):
"""Copy data from Microsoft SQL Server to Google Cloud Storage
in JSON or CSV format.
:param mssql_conn_id: Reference to a specific MSSQL hook.
:type mssql_conn_id: str
**Example**:
The following operator will export data from the Customers table
within the given MSSQL Database and then upload it to the
'mssql-export' GCS bucket (along with a schema file). ::
export_customers = MsSqlToGoogleCloudStorageOperator(
task_id='export_customers',
sql='SELECT * FROM dbo.Customers;',
bucket='mssql-export',
filename='data/customers/export.json',
schema_filename='schemas/export.json',
mssql_conn_id='mssql_default',
google_cloud_storage_conn_id='google_cloud_default',
dag=dag
)
"""
ui_color = '#e0a98c'
type_map = {
3: 'INTEGER',
4: 'TIMESTAMP',
5: 'NUMERIC'
}
@apply_defaults
def __init__(self,
mssql_conn_id='mssql_default',
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.mssql_conn_id = mssql_conn_id
def query(self):
"""
Queries MSSQL and returns a cursor of results.
:return: mssql cursor
"""
mssql = MsSqlHook(mssql_conn_id=self.mssql_conn_id)
conn = mssql.get_conn()
cursor = conn.cursor()
cursor.execute(self.sql)
return cursor
def field_to_bigquery(self, field):
return {
'name': field[0].replace(" ", "_"),
'type': self.type_map.get(field[1], "STRING"),
'mode': "NULLABLE",
}
@classmethod
def convert_type(cls, value, schema_type):
"""
Takes a value from MSSQL, and converts it to a value that's safe for
JSON/Google Cloud Storage/BigQuery.
"""
if isinstance(value, decimal.Decimal):
return float(value)
return value
|
mtagle/airflow
|
airflow/providers/google/cloud/operators/mssql_to_gcs.py
|
Python
|
apache-2.0
| 3,144 | 0.000318 |
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
from stacks.utils.RMFTestCase import *
from mock.mock import patch
from only_for_platform import not_for_platform, PLATFORM_WINDOWS
@not_for_platform(PLATFORM_WINDOWS)
class TestKafkaBroker(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "KAFKA/0.8.1/package"
STACK_VERSION = "2.2"
def test_configure_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/kafka_broker.py",
classname = "KafkaBroker",
command = "configure",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Directory', '/var/log/kafka',
owner = 'kafka',
group = 'hadoop',
create_parents = True,
mode = 0755,
cd_access = 'a',
recursive_ownership = True,
)
self.assertResourceCalled('Directory', '/var/run/kafka',
owner = 'kafka',
group = 'hadoop',
create_parents = True,
mode = 0755,
cd_access = 'a',
recursive_ownership = True,
)
self.assertResourceCalled('Directory', '/usr/hdp/current/kafka-broker/config',
owner = 'kafka',
group = 'hadoop',
create_parents = True,
mode = 0755,
cd_access = 'a',
recursive_ownership = True,
)
self.assertResourceCalled('Directory', '/tmp/log/dir',
owner = 'kafka',
create_parents = True,
group = 'hadoop',
mode = 0755,
cd_access = 'a',
recursive_ownership = True,
)
@patch("os.path.islink")
@patch("os.path.realpath")
def test_configure_custom_paths_default(self, realpath_mock, islink_mock):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/kafka_broker.py",
classname = "KafkaBroker",
command = "configure",
config_file="default_custom_path_config.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Directory', '/customdisk/var/log/kafka',
owner = 'kafka',
group = 'hadoop',
create_parents = True,
mode = 0755,
cd_access = 'a',
recursive_ownership = True,
)
self.assertResourceCalled('Directory', '/customdisk/var/run/kafka',
owner = 'kafka',
group = 'hadoop',
create_parents = True,
mode = 0755,
cd_access = 'a',
recursive_ownership = True,
)
self.assertResourceCalled('Directory', '/usr/hdp/current/kafka-broker/config',
owner = 'kafka',
group = 'hadoop',
create_parents = True,
mode = 0755,
cd_access = 'a',
recursive_ownership = True,
)
self.assertResourceCalled('Directory', '/tmp/log/dir',
owner = 'kafka',
create_parents = True,
group = 'hadoop',
mode = 0755,
cd_access = 'a',
recursive_ownership = True,
)
self.assertTrue(islink_mock.called)
self.assertTrue(realpath_mock.called)
def test_pre_upgrade_restart(self):
config_file = self.get_src_folder()+"/test/python/stacks/2.2/configs/default.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = '2.2.1.0-3242'
json_content['commandParams']['version'] = version
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/kafka_broker.py",
classname = "KafkaBroker",
command = "pre_upgrade_restart",
config_dict = json_content,
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
self.assertResourceCalled('Execute',
('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'kafka-broker', version), sudo=True,)
self.assertNoMoreResources()
@patch("resource_management.core.shell.call")
def test_pre_upgrade_restart_23(self, call_mock):
config_file = self.get_src_folder()+"/test/python/stacks/2.2/configs/default.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = '2.3.0.0-1234'
json_content['commandParams']['version'] = version
mocks_dict = {}
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/kafka_broker.py",
classname = "KafkaBroker",
command = "pre_upgrade_restart",
config_dict = json_content,
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
call_mocks = [(0, None, ''), (0, None)],
mocks_dict = mocks_dict)
self.assertResourceCalledIgnoreEarlier('Execute',
('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'kafka-broker', version), sudo=True,)
self.assertResourceCalled("Link", "/etc/kafka/conf", to="/usr/hdp/current/kafka-broker/conf")
self.assertNoMoreResources()
self.assertEquals(1, mocks_dict['call'].call_count)
self.assertEquals(1, mocks_dict['checked_call'].call_count)
self.assertEquals(
('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'kafka', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
mocks_dict['checked_call'].call_args_list[0][0][0])
self.assertEquals(
('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'kafka', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
mocks_dict['call'].call_args_list[0][0][0])
|
alexryndin/ambari
|
ambari-server/src/test/python/stacks/2.2/KAFKA/test_kafka_broker.py
|
Python
|
apache-2.0
| 7,652 | 0.023523 |
"""
Gauged
https://github.com/chriso/gauged (MIT Licensed)
Copyright 2014 (c) Chris O'Hara <cohara87@gmail.com>
"""
from urlparse import urlparse, parse_qsl
from urllib import unquote
from .mysql import MySQLDriver
from .sqlite import SQLiteDriver
from .postgresql import PostgreSQLDriver
def parse_dsn(dsn_string):
"""Parse a connection string and return the associated driver"""
dsn = urlparse(dsn_string)
scheme = dsn.scheme.split('+')[0]
username = password = host = port = None
host = dsn.netloc
if '@' in host:
username, host = host.split('@')
if ':' in username:
username, password = username.split(':')
password = unquote(password)
username = unquote(username)
if ':' in host:
host, port = host.split(':')
port = int(port)
database = dsn.path.split('?')[0][1:]
query = dsn.path.split('?')[1] if '?' in dsn.path else dsn.query
kwargs = dict(parse_qsl(query, True))
if scheme == 'sqlite':
return SQLiteDriver, [dsn.path], {}
elif scheme == 'mysql':
kwargs['user'] = username or 'root'
kwargs['db'] = database
if port:
kwargs['port'] = port
if host:
kwargs['host'] = host
if password:
kwargs['passwd'] = password
return MySQLDriver, [], kwargs
elif scheme == 'postgresql':
kwargs['user'] = username or 'postgres'
kwargs['database'] = database
if port:
kwargs['port'] = port
if 'unix_socket' in kwargs:
kwargs['host'] = kwargs.pop('unix_socket')
elif host:
kwargs['host'] = host
if password:
kwargs['password'] = password
return PostgreSQLDriver, [], kwargs
else:
raise ValueError('Unknown driver %s' % dsn_string)
def get_driver(dsn_string):
driver, args, kwargs = parse_dsn(dsn_string)
return driver(*args, **kwargs)
|
chriso/gauged
|
gauged/drivers/__init__.py
|
Python
|
mit
| 1,960 | 0 |
#!/usr/bin/python2.5
import sys
import time
import os
import nuke
def launchSubmit():
print("nukeStub(): launch submitter dialog")
submitCmd = "/drd/software/int/bin/launcher.sh -p %s -d %s --launchBlocking farm -o EPA_CMDLINE python2.5 --arg '$ABSUBMIT/nukesubmit/nuke2AB.py'" % (os.environ['DRD_JOB'], os.environ['DRD_DEPT'])
# root.name holds the path to the nuke script
submitCmd += " %s" % nuke.value("root.name")
submitCmd += " %s" % nuke.Root.firstFrame(nuke.root())
submitCmd += " %s" % nuke.Root.lastFrame(nuke.root())
writeNodes = [i for i in nuke.allNodes() if i.Class() == "Write"]
for i in writeNodes:
submitCmd += " %s %s" % (i['name'].value(), nuke.filename(i))
print( "nukeStub(): %s" % submitCmd )
os.system(submitCmd)
menubar = nuke.menu("Nuke")
m = menubar.addMenu("&Render")
m.addCommand("Submit to Farm", "nukeStub.launchSubmit()", "Up")
|
kurikaesu/arsenalsuite
|
cpp/apps/absubmit/nukesubmit/nukeStub.py
|
Python
|
gpl-2.0
| 877 | 0.019384 |
from extractors.extract_website import ExtractWebsite
from datawakestreams.extractors.extractor_bolt import ExtractorBolt
class WebsiteBolt(ExtractorBolt):
name ='website_extractor'
def __init__(self):
ExtractorBolt.__init__(self)
self.extractor = ExtractWebsite()
|
Sotera/Datawake-Legacy
|
memex-datawake-stream/src/datawakestreams/extractors/website_bolt.py
|
Python
|
apache-2.0
| 293 | 0.010239 |
import pycountry
from marshmallow import Schema, fields, ValidationError
def validate_currency_symbol(val):
if val not in [x.letter for x in pycountry.currencies.objects]:
raise ValidationError('Symbol is not valid')
class CategoryTypeField(fields.Field):
def _serialize(self, value, attr, obj):
return {'value': value, 'title': dict(obj.CATEGORY_TYPES).get(value)}
class RecordTypeField(fields.Field):
def _serialize(self, value, attr, obj):
return {'value': value, 'title': dict(obj.RECORD_TYPES).get(value)}
class PaymentMethodField(fields.Field):
def _serialize(self, value, attr, obj):
return {'value': value, 'title': dict(obj.PAYMENT_METHODS).get(value)}
class GroupSchema(Schema):
id = fields.Int(dump_only=True)
name = fields.Str(required=True)
class UserSchema(Schema):
id = fields.Int(dump_only=True)
email = fields.Email(required=True)
first_name = fields.Str(required=True)
last_name = fields.Str()
password = fields.Str(load_only=True, required=True)
active = fields.Bool()
group = fields.Nested(GroupSchema, dump_only=True)
invite_hash = fields.Str()
date_created = fields.DateTime(dump_only=True)
date_modified = fields.DateTime(dump_only=True)
class CategorySchema(Schema):
id = fields.Int(dump_only=True)
name = fields.Str(required=True)
category_type = CategoryTypeField()
parent = fields.Nested('self', dump_only=True, exclude=('parent', ))
parent_id = fields.Int(load_only=True, load_from='parent')
colour = fields.Str(required=True)
logo = fields.Str(required=True)
class GroupCategorySchema(Schema):
id = fields.Int(dump_only=True)
name = fields.Str(required=True)
category_type = CategoryTypeField()
group = fields.Nested(GroupSchema, dump_only=True)
parent = fields.Nested('self', dump_only=True, exclude=('parent', ))
parent_id = fields.Int(load_only=True, load_from='parent')
colour = fields.Str(required=True)
logo = fields.Str(required=True)
class GroupCurrencySchema(Schema):
id = fields.Int(dump_only=True)
name = fields.Str(required=True)
symbol = fields.Str(
required=True,
validate=validate_currency_symbol
)
date_modified = fields.DateTime()
group = fields.Nested(GroupSchema, dump_only=True)
class AccountSchema(Schema):
id = fields.Int(dump_only=True)
name = fields.Str(required=True)
currency = fields.Nested(GroupCurrencySchema, dump_only=True)
currency_id = fields.Int(required=True, load_only=True, load_from='currency')
user = fields.Nested(UserSchema, dump_only=True)
class TransactionSchema(Schema):
id = fields.Int(dump_only=True)
amount = fields.Float(required=True)
source_account = fields.Nested(AccountSchema, dump_only=True, only=('id', 'name'))
source_account_id = fields.Int(required=True, load_only=True, load_from='source_account')
target_account = fields.Nested(AccountSchema, dump_only=True, only=('id', 'name'))
target_account_id = fields.Int(required=True, load_only=True, load_from='target_account')
user = fields.Nested(
UserSchema, dump_only=True, only=('id', 'first_name', 'last_name', 'email')
)
currency = fields.Nested(GroupCurrencySchema, dump_only=True, only=('id', 'name'))
currency_id = fields.Int(required=True, load_only=True, load_from='currency')
description = fields.Str()
date = fields.DateTime()
class RecordSchema(Schema):
id = fields.Int(dump_only=True)
amount = fields.Float(required=True)
description = fields.Str()
record_type = RecordTypeField(required=True)
payment_method = PaymentMethodField()
date = fields.DateTime()
user = fields.Nested(
UserSchema, dump_only=True, only=('id', 'first_name', 'last_name', 'email')
)
account = fields.Nested(AccountSchema, dump_only=True, only=('id', 'name'))
account_id = fields.Int(required=True, load_only=True, load_from='account')
currency = fields.Nested(GroupCurrencySchema, dump_only=True, only=('id', 'name'))
currency_id = fields.Int(required=True, load_only=True, load_from='currency')
transaction = fields.Nested(
TransactionSchema,
dump_only=True,
only=('id', 'source_account', 'target_account', 'amount', 'currency')
)
category = fields.Nested(
GroupCategorySchema, dump_only=True, only=('id', 'name', 'logo', 'colour')
)
category_id = fields.Int(required=True, load_only=True, load_from='category')
class AppSchema(Schema):
id = fields.Int(dump_only=True)
name = fields.Str(required=True)
secret = fields.Str(required=True, dump_only=True)
user = fields.Nested(UserSchema, dump_only=True)
user_id = fields.Int(required=True, load_only=True, load_from='user')
class TokenSchema(Schema):
email = fields.Email(required=True)
password = fields.Str(load_only=True, required=True)
secret = fields.Str(required=True)
class BalanceSchema(Schema):
cash_flow = fields.Float(required=True)
start_balance = fields.Float()
end_balance = fields.Float()
expense = fields.Float()
income = fields.Float()
date = fields.Date()
record_type = fields.Int()
class DateRangeFilterSchema(Schema):
date_from = fields.Date()
date_to = fields.Date()
class CashFlowSchema(Schema):
cash_flow = fields.Float(required=True)
expense = fields.Float()
income = fields.Float()
date = fields.Date()
class ExpenseSchema(Schema):
amount = fields.Float(required=True)
category_id = fields.Int()
class IncomeSchema(Schema):
amount = fields.Float(required=True)
category_id = fields.Int()
|
monetario/core
|
monetario/serializers.py
|
Python
|
bsd-3-clause
| 5,724 | 0.002271 |
from decimal import Decimal
from electrum.util import (format_satoshis, format_fee_satoshis, parse_URI,
is_hash256_str, chunks)
from . import SequentialTestCase
class TestUtil(SequentialTestCase):
def test_format_satoshis(self):
self.assertEqual("0.00001234", format_satoshis(1234))
def test_format_satoshis_negative(self):
self.assertEqual("-0.00001234", format_satoshis(-1234))
def test_format_fee_float(self):
self.assertEqual("1.7", format_fee_satoshis(1700/1000))
def test_format_fee_decimal(self):
self.assertEqual("1.7", format_fee_satoshis(Decimal("1.7")))
def test_format_fee_precision(self):
self.assertEqual("1.666",
format_fee_satoshis(1666/1000, precision=6))
self.assertEqual("1.7",
format_fee_satoshis(1666/1000, precision=1))
def test_format_satoshis_whitespaces(self):
self.assertEqual(" 0.0001234 ",
format_satoshis(12340, whitespaces=True))
self.assertEqual(" 0.00001234",
format_satoshis(1234, whitespaces=True))
def test_format_satoshis_whitespaces_negative(self):
self.assertEqual(" -0.0001234 ",
format_satoshis(-12340, whitespaces=True))
self.assertEqual(" -0.00001234",
format_satoshis(-1234, whitespaces=True))
def test_format_satoshis_diff_positive(self):
self.assertEqual("+0.00001234",
format_satoshis(1234, is_diff=True))
def test_format_satoshis_diff_negative(self):
self.assertEqual("-0.00001234", format_satoshis(-1234, is_diff=True))
def _do_test_parse_URI(self, uri, expected):
result = parse_URI(uri)
self.assertEqual(expected, result)
def test_parse_URI_address(self):
self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma',
{'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma'})
def test_parse_URI_only_address(self):
self._do_test_parse_URI('15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma',
{'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma'})
def test_parse_URI_address_label(self):
self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?label=electrum%20test',
{'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', 'label': 'electrum test'})
def test_parse_URI_address_message(self):
self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?message=electrum%20test',
{'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', 'message': 'electrum test', 'memo': 'electrum test'})
def test_parse_URI_address_amount(self):
self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?amount=0.0003',
{'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', 'amount': 30000})
def test_parse_URI_address_request_url(self):
self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?r=http://domain.tld/page?h%3D2a8628fc2fbe',
{'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', 'r': 'http://domain.tld/page?h=2a8628fc2fbe'})
def test_parse_URI_ignore_args(self):
self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?test=test',
{'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', 'test': 'test'})
def test_parse_URI_multiple_args(self):
self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?amount=0.00004&label=electrum-test&message=electrum%20test&test=none&r=http://domain.tld/page',
{'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', 'amount': 4000, 'label': 'electrum-test', 'message': u'electrum test', 'memo': u'electrum test', 'r': 'http://domain.tld/page', 'test': 'none'})
def test_parse_URI_no_address_request_url(self):
self._do_test_parse_URI('bitcoin:?r=http://domain.tld/page?h%3D2a8628fc2fbe',
{'r': 'http://domain.tld/page?h=2a8628fc2fbe'})
def test_parse_URI_invalid_address(self):
self.assertRaises(BaseException, parse_URI, 'bitcoin:invalidaddress')
def test_parse_URI_invalid(self):
self.assertRaises(BaseException, parse_URI, 'notbitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma')
def test_parse_URI_parameter_polution(self):
self.assertRaises(Exception, parse_URI, 'bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?amount=0.0003&label=test&amount=30.0')
def test_is_hash256_str(self):
self.assertTrue(is_hash256_str('09a4c03e3bdf83bbe3955f907ee52da4fc12f4813d459bc75228b64ad08617c7'))
self.assertTrue(is_hash256_str('2A5C3F4062E4F2FCCE7A1C7B4310CB647B327409F580F4ED72CB8FC0B1804DFA'))
self.assertTrue(is_hash256_str('00' * 32))
self.assertFalse(is_hash256_str('00' * 33))
self.assertFalse(is_hash256_str('qweqwe'))
self.assertFalse(is_hash256_str(None))
self.assertFalse(is_hash256_str(7))
def test_chunks(self):
self.assertEqual([[1, 2], [3, 4], [5]],
list(chunks([1, 2, 3, 4, 5], 2)))
with self.assertRaises(ValueError):
list(chunks([1, 2, 3], 0))
|
fujicoin/electrum-fjc
|
electrum/tests/test_util.py
|
Python
|
mit
| 5,385 | 0.003714 |
# -*- coding: utf-8 -*-
"""
Core client, used for all API requests.
"""
import os
import platform
from collections import namedtuple
from plivo.base import ResponseObject
from plivo.exceptions import (AuthenticationError, InvalidRequestError,
PlivoRestError, PlivoServerError,
ResourceNotFoundError, ValidationError)
from plivo.resources import (Accounts, Addresses, Applications, Calls,
Conferences, Endpoints, Identities,
Messages, Powerpacks, Media, Lookup, Brand,Campaign,
Numbers, Pricings, Recordings, Subaccounts, CallFeedback, MultiPartyCalls)
from plivo.resources.live_calls import LiveCalls
from plivo.resources.queued_calls import QueuedCalls
from plivo.resources.regulatory_compliance import EndUsers, ComplianceDocumentTypes, ComplianceDocuments, \
ComplianceRequirements, ComplianceApplications
from plivo.utils import is_valid_mainaccount, is_valid_subaccount
from plivo.version import __version__
from requests import Request, Session
AuthenticationCredentials = namedtuple('AuthenticationCredentials',
'auth_id auth_token')
PLIVO_API = 'https://api.plivo.com'
PLIVO_API_BASE_URI = '/'.join([PLIVO_API, 'v1/Account'])
# Will change these urls before putting this change in production
API_VOICE = 'https://api.plivo.com'
API_VOICE_BASE_URI = '/'.join([API_VOICE, 'v1/Account'])
API_VOICE_FALLBACK_1 = 'https://api.plivo.com'
API_VOICE_FALLBACK_2 = 'https://api.plivo.com'
API_VOICE_BASE_URI_FALLBACK_1 = '/'.join([API_VOICE_FALLBACK_1, 'v1/Account'])
API_VOICE_BASE_URI_FALLBACK_2 = '/'.join([API_VOICE_FALLBACK_2, 'v1/Account'])
CALLINSIGHTS_BASE_URL = 'https://stats.plivo.com'
def get_user_agent():
return 'plivo-python/%s (Python: %s)' % (__version__,
platform.python_version())
def fetch_credentials(auth_id, auth_token):
"""Fetches the right credentials either from params or from environment"""
if not (auth_id and auth_token):
try:
auth_id = os.environ['PLIVO_AUTH_ID']
auth_token = os.environ['PLIVO_AUTH_TOKEN']
except KeyError:
raise AuthenticationError('The Plivo Python SDK '
'could not find your auth credentials.')
if not (is_valid_mainaccount(auth_id) or is_valid_subaccount(auth_id)):
raise AuthenticationError('Invalid auth_id supplied: %s' % auth_id)
return AuthenticationCredentials(auth_id=auth_id, auth_token=auth_token)
class Client(object):
def __init__(self, auth_id=None, auth_token=None, proxies=None, timeout=5):
"""
The Plivo API client.
Deals with all the API requests to be made.
"""
self.base_uri = PLIVO_API_BASE_URI
self.session = Session()
self.session.headers.update({
'User-Agent': get_user_agent(),
'Content-Type': 'application/json',
'Accept': 'application/json',
})
self.session.auth = fetch_credentials(auth_id, auth_token)
self.multipart_session = Session()
self.multipart_session.headers.update({
'User-Agent': get_user_agent(),
'Cache-Control': 'no-cache',
})
self.multipart_session.auth = fetch_credentials(auth_id, auth_token)
self.proxies = proxies
self.timeout = timeout
self.account = Accounts(self)
self.subaccounts = Subaccounts(self)
self.applications = Applications(self)
self.calls = Calls(self)
self.live_calls = LiveCalls(self)
self.queued_calls = QueuedCalls(self)
self.conferences = Conferences(self)
self.endpoints = Endpoints(self)
self.messages = Messages(self)
self.lookup = Lookup(self)
self.numbers = Numbers(self)
self.powerpacks = Powerpacks(self)
self.brand = Brand(self)
self.campaign = Campaign(self)
self.media = Media(self)
self.pricing = Pricings(self)
self.recordings = Recordings(self)
self.addresses = Addresses(self)
self.identities = Identities(self)
self.call_feedback = CallFeedback(self)
self.end_users = EndUsers(self)
self.compliance_document_types = ComplianceDocumentTypes(self)
self.compliance_documents = ComplianceDocuments(self)
self.compliance_requirements = ComplianceRequirements(self)
self.compliance_applications = ComplianceApplications(self)
self.multi_party_calls = MultiPartyCalls(self)
self.voice_retry_count = 0
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.session.close()
self.multipart_session.close()
def process_response(self,
method,
response,
response_type=None,
objects_type=None):
"""Processes the API response based on the status codes and method used
to access the API
"""
try:
response_json = response.json(
object_hook=lambda x: ResponseObject(x) if isinstance(x, dict) else x)
if response_type:
r = response_type(self, response_json.__dict__)
response_json = r
if 'objects' in response_json and objects_type:
response_json.objects = [
objects_type(self, obj.__dict__)
for obj in response_json.objects
]
except ValueError:
response_json = None
if response.status_code == 400:
if response_json is not None and 'error' in response_json:
raise ValidationError(response_json.error)
raise ValidationError(
'A parameter is missing or is invalid while accessing resource'
'at: {url}'.format(url=response.url))
if response.status_code == 401:
if response_json and 'error' in response_json:
raise AuthenticationError(response_json.error)
raise AuthenticationError(
'Failed to authenticate while accessing resource at: '
'{url}'.format(url=response.url))
if response.status_code == 404:
if response_json and 'error' in response_json:
raise ResourceNotFoundError(response_json.error)
raise ResourceNotFoundError(
'Resource not found at: {url}'.format(url=response.url))
if response.status_code == 405:
if response_json and 'error' in response_json:
raise InvalidRequestError(response_json.error)
raise InvalidRequestError(
'HTTP method "{method}" not allowed to access resource at: '
'{url}'.format(method=method, url=response.url))
if response.status_code == 409:
if response_json and 'error' in response_json:
raise InvalidRequestError(response_json.error)
raise InvalidRequestError(
'Conflict: '
'{url}'.format(url=response.url))
if response.status_code == 422:
if response_json and 'error' in response_json:
raise InvalidRequestError(response_json.error)
raise InvalidRequestError(
'Unprocessable Entity: '
'{url}'.format(url=response.url))
if response.status_code == 500:
if response_json and 'error' in response_json:
raise PlivoServerError(response_json.error)
raise PlivoServerError(
'A server error occurred while accessing resource at: '
'{url}'.format(url=response.url))
if method == 'DELETE':
if response.status_code not in [200, 204]:
raise PlivoRestError('Resource at {url} could not be '
'deleted'.format(url=response.url))
elif response.status_code not in [200, 201, 202, 204, 207]:
raise PlivoRestError(
'Received status code {status_code} for the HTTP method '
'"{method}"'.format(
status_code=response.status_code, method=method))
self.voice_retry_count = 0
return response_json
def create_request(self, method, path=None, data=None, **kwargs):
# The abstraction created by request() and create_request() is moot
# now since several product-specific handling have been aded.
# Requires a refactor.
if 'is_callinsights_request' in kwargs:
url = '/'.join([CALLINSIGHTS_BASE_URL, kwargs['callinsights_request_path']])
req = Request(method, url, **({'params': data} if method == 'GET' else {'json': data}))
elif kwargs.get('is_lookup_request', False):
path = path or []
url = '/'.join(list([str(p) for p in path]))
req = Request(method, url, **({'params': data} if method == 'GET' else {'json': data}))
else:
path = path or []
req = Request(method, '/'.join([self.base_uri, self.session.auth[0]] +
list([str(p) for p in path])) + '/',
**({
'params': data
} if method == 'GET' else {
'json': data
}))
return self.session.prepare_request(req)
def create_multipart_request(self,
method,
path=None,
data=None,
files=None):
path = path or []
data_args = {}
if method == 'GET':
data_args['params'] = data
else:
data_args['data'] = data
try:
if files:
data_args['files'] = files
except Exception as e:
print(e)
url = '/'.join([self.base_uri, self.multipart_session.auth[0]] + list([str(p) for p in path])) + '/'
req = Request(method, url, **data_args)
return self.multipart_session.prepare_request(req)
def send_request(self, request, **kwargs):
if 'session' in kwargs:
session = kwargs['session']
del kwargs['session']
else:
session = self.session
return session.send(
request, proxies=self.proxies, timeout=self.timeout, **kwargs)
def request(self,
method,
path=None,
data=None,
response_type=None,
objects_type=None,
files=None,
**kwargs):
if files is not None:
req = self.create_multipart_request(method, path, data, files)
session = self.multipart_session
else:
if not kwargs.get("is_voice_request", False):
self.base_uri = PLIVO_API_BASE_URI
if data and 'is_callinsights_request' in data:
params_dict = {}
if 'callinsights_request_path' in data:
params_dict['is_callinsights_request'] = data['is_callinsights_request']
params_dict['callinsights_request_path'] = data['callinsights_request_path']
del data['is_callinsights_request']
del data['callinsights_request_path']
req = self.create_request(method, path, data, **params_dict)
elif kwargs.get("is_voice_request", False):
del kwargs["is_voice_request"]
if self.voice_retry_count == 0:
self.base_uri = API_VOICE_BASE_URI
req = self.create_request(method, path, data)
session = self.session
kwargs['session'] = session
response = self.send_request(req, **kwargs)
if response.status_code >= 500:
print('Fallback for URL: {}. Retry {}'.format(response.url, self.voice_retry_count))
self.voice_retry_count += 1
if self.voice_retry_count == 1:
self.base_uri = API_VOICE_BASE_URI_FALLBACK_1
elif self.voice_retry_count == 2:
self.base_uri = API_VOICE_BASE_URI_FALLBACK_2
else:
return self.process_response(method, response, response_type, objects_type)
kwargs["is_voice_request"] = True
return self.request(method, path, data, **kwargs)
return self.process_response(method, response, response_type, objects_type)
elif kwargs.get('is_lookup_request', False):
req = self.create_request(method, path, data, is_lookup_request=True)
del kwargs['is_lookup_request']
else:
req = self.create_request(method, path, data)
session = self.session
kwargs['session'] = session
res = self.send_request(req, **kwargs)
return self.process_response(method, res, response_type, objects_type)
|
plivo/plivo-python
|
plivo/rest/client.py
|
Python
|
mit
| 13,434 | 0.001265 |
import time
import RPi.GPIO as GPIO
# Constants
PULSE_LEN = 0.03 # length of clock motor pulse
A_PIN = 18 # one motor drive pin
B_PIN = 23 # second motor drive pin
# Configure the GPIO pins
GPIO.setmode(GPIO.BCM)
GPIO.setup(A_PIN, GPIO.OUT)
GPIO.setup(B_PIN, GPIO.OUT)
# Glogal variables
positive_polarity = True
period = 2.0 # 2 second tick
last_tick_time = 0 # the time at which last tick occured
def tick():
# Alternate positive and negative pulses
global positive_polarity
if positive_polarity:
pulse(A_PIN, B_PIN)
else:
pulse(B_PIN, A_PIN)
# Flip the polarity ready for the next tick
positive_polarity = not positive_polarity
def pulse(pos_pin, neg_pin):
# Turn on the pulse
GPIO.output(pos_pin, True)
GPIO.output(neg_pin, False)
time.sleep(PULSE_LEN)
# Turn the power off until the next tick
GPIO.output(pos_pin, False)
try:
while True:
t = time.time()
if t > last_tick_time + period:
# its time for the next tick
tick()
last_tick_time = t
finally:
print('Cleaning up GPIO')
GPIO.cleanup()
|
simonmonk/pi_magazine
|
04_analog_clock/analog_clock_24.py
|
Python
|
mit
| 1,066 | 0.025328 |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "area"
_path_str = "area.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
}
# align
# -----
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
more two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
# alignsrc
# --------
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for align .
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bgcolorsrc
# ----------
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for bgcolor .
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# bordercolorsrc
# --------------
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
bordercolor .
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
# font
# ----
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.area.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for family .
size
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
Returns
-------
plotly.graph_objs.area.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# namelength
# ----------
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
# namelengthsrc
# -------------
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for namelength
.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
namelength .
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
**kwargs
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.area.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
namelength .
Returns
-------
Hoverlabel
"""
super(Hoverlabel, self).__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.area.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.area.Hoverlabel`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("align", None)
_v = align if align is not None else _v
if _v is not None:
self["align"] = _v
_v = arg.pop("alignsrc", None)
_v = alignsrc if alignsrc is not None else _v
if _v is not None:
self["alignsrc"] = _v
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bgcolorsrc", None)
_v = bgcolorsrc if bgcolorsrc is not None else _v
if _v is not None:
self["bgcolorsrc"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("bordercolorsrc", None)
_v = bordercolorsrc if bordercolorsrc is not None else _v
if _v is not None:
self["bordercolorsrc"] = _v
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("namelength", None)
_v = namelength if namelength is not None else _v
if _v is not None:
self["namelength"] = _v
_v = arg.pop("namelengthsrc", None)
_v = namelengthsrc if namelengthsrc is not None else _v
if _v is not None:
self["namelengthsrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
plotly/python-api
|
packages/python/plotly/plotly/graph_objs/area/_hoverlabel.py
|
Python
|
mit
| 17,818 | 0.000954 |
# SecuML
# Copyright (C) 2016-2017 ANSSI
#
# SecuML is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# SecuML is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with SecuML. If not, see <http://www.gnu.org/licenses/>.
import abc
import numpy as np
import os.path as path
import pandas as pd
import time
from .AnnotationQuery import AnnotationQuery
class AnnotationQueries(object):
def __init__(self, iteration, label):
self.iteration = iteration
self.label = label
self.annotation_queries = []
def run(self):
self.predictions = self.getPredictedProbabilities()
self.runModels()
start_time = time.time()
self.generateAnnotationQueries()
self.generate_queries_time = time.time() - start_time
self.exportAnnotationQueries()
@abc.abstractmethod
def runModels(self):
return
@abc.abstractmethod
def generateAnnotationQueries(self):
return
def generateAnnotationQuery(self, instance_id, predicted_proba,
suggested_label, suggested_family, confidence=None):
return AnnotationQuery(instance_id, predicted_proba,
suggested_label, suggested_family, confidence=confidence)
def getPredictedProbabilities(self):
models_conf = self.iteration.conf.models_conf
if 'binary' in models_conf:
classifier = self.iteration.update_model.models['binary']
predictions = classifier.testing_monitoring.predictions_monitoring.predictions
else:
test_instances = self.iteration.datasets.getTestInstances()
num_instances = test_instances.numInstances()
predictions = pd.DataFrame(
np.zeros((num_instances, 4)),
index=test_instances.ids.getIds(),
columns=['predicted_proba', 'predicted_labels', 'ground_truth', 'scores'])
predictions['predicted_proba'] = [0.5] * num_instances
predictions['predicted_labels'] = [False] * num_instances
predictions['ground_truth'] = test_instances.ground_truth.getLabels()
predictions['scores'] = [0.5] * num_instances
return predictions
def exportAnnotationQueries(self):
iteration_dir = self.iteration.iteration_dir
if iteration_dir is None:
return
filename = path.join(iteration_dir,
'toannotate_' + self.label + '.csv')
with open(filename, 'w') as f:
for i, annotation_query in enumerate(self.annotation_queries):
if i == 0:
annotation_query.displayHeader(f)
annotation_query.export(f)
def annotateAuto(self):
for annotation_query in self.annotation_queries:
annotation_query.annotateAuto(self.iteration, self.label)
def getManualAnnotations(self):
for annotation_query in self.annotation_queries:
annotation_query.getManualAnnotation(self.iteration)
def checkAnnotationQueriesAnswered(self):
for annotation_query in self.annotation_queries:
if not annotation_query.checkAnswered(self.iteration):
return False
return True
def getInstanceIds(self):
return [annotation_query.instance_id
for annotation_query in self.annotation_queries]
|
ah-anssi/SecuML
|
SecuML/core/ActiveLearning/QueryStrategies/AnnotationQueries/AnnotationQueries.py
|
Python
|
gpl-2.0
| 3,837 | 0.001303 |
"""Probit regression class and diagnostics."""
__author__ = "Luc Anselin luc.anselin@asu.edu, Pedro V. Amaral pedro.amaral@asu.edu"
import numpy as np
import numpy.linalg as la
import scipy.optimize as op
from scipy.stats import norm, chisqprob
import scipy.sparse as SP
import user_output as USER
import summary_output as SUMMARY
from utils import spdot, spbroadcast
__all__ = ["Probit"]
class BaseProbit(object):
"""
Probit class to do all the computations
Parameters
----------
x : array
nxk array of independent variables (assumed to be aligned with y)
y : array
nx1 array of dependent binary variable
w : W
PySAL weights instance or spatial weights sparse matrix
aligned with y
optim : string
Optimization method.
Default: 'newton' (Newton-Raphson).
Alternatives: 'ncg' (Newton-CG), 'bfgs' (BFGS algorithm)
scalem : string
Method to calculate the scale of the marginal effects.
Default: 'phimean' (Mean of individual marginal effects)
Alternative: 'xmean' (Marginal effects at variables mean)
maxiter : int
Maximum number of iterations until optimizer stops
Attributes
----------
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
y : array
nx1 array of dependent variable
betas : array
kx1 array with estimated coefficients
predy : array
nx1 array of predicted y values
n : int
Number of observations
k : int
Number of variables
vm : array
Variance-covariance matrix (kxk)
z_stat : list of tuples
z statistic; each tuple contains the pair (statistic,
p-value), where each is a float
xmean : array
Mean of the independent variables (kx1)
predpc : float
Percent of y correctly predicted
logl : float
Log-Likelihhod of the estimation
scalem : string
Method to calculate the scale of the marginal effects.
scale : float
Scale of the marginal effects.
slopes : array
Marginal effects of the independent variables (k-1x1)
Note: Disregards the presence of dummies.
slopes_vm : array
Variance-covariance matrix of the slopes (k-1xk-1)
LR : tuple
Likelihood Ratio test of all coefficients = 0
(test statistics, p-value)
Pinkse_error: float
Lagrange Multiplier test against spatial error correlation.
Implemented as presented in [Pinkse2004]_
KP_error : float
Moran's I type test against spatial error correlation.
Implemented as presented in [Kelejian2001]_
PS_error : float
Lagrange Multiplier test against spatial error correlation.
Implemented as presented in [Pinkse1998]_
warning : boolean
if True Maximum number of iterations exceeded or gradient
and/or function calls not changing.
Examples
--------
>>> import numpy as np
>>> import pysal
>>> dbf = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
>>> y = np.array([dbf.by_col('CRIME')]).T
>>> x = np.array([dbf.by_col('INC'), dbf.by_col('HOVAL')]).T
>>> x = np.hstack((np.ones(y.shape),x))
>>> w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()
>>> w.transform='r'
>>> model = BaseProbit((y>40).astype(float), x, w=w)
>>> np.around(model.betas, decimals=6)
array([[ 3.353811],
[-0.199653],
[-0.029514]])
>>> np.around(model.vm, decimals=6)
array([[ 0.852814, -0.043627, -0.008052],
[-0.043627, 0.004114, -0.000193],
[-0.008052, -0.000193, 0.00031 ]])
>>> tests = np.array([['Pinkse_error','KP_error','PS_error']])
>>> stats = np.array([[model.Pinkse_error[0],model.KP_error[0],model.PS_error[0]]])
>>> pvalue = np.array([[model.Pinkse_error[1],model.KP_error[1],model.PS_error[1]]])
>>> print np.hstack((tests.T,np.around(np.hstack((stats.T,pvalue.T)),6)))
[['Pinkse_error' '3.131719' '0.076783']
['KP_error' '1.721312' '0.085194']
['PS_error' '2.558166' '0.109726']]
"""
def __init__(self, y, x, w=None, optim='newton', scalem='phimean', maxiter=100):
self.y = y
self.x = x
self.n, self.k = x.shape
self.optim = optim
self.scalem = scalem
self.w = w
self.maxiter = maxiter
par_est, self.warning = self.par_est()
self.betas = np.reshape(par_est[0], (self.k, 1))
self.logl = -float(par_est[1])
@property
def vm(self):
try:
return self._cache['vm']
except AttributeError:
self._cache = {}
H = self.hessian(self.betas)
self._cache['vm'] = -la.inv(H)
except KeyError:
H = self.hessian(self.betas)
self._cache['vm'] = -la.inv(H)
return self._cache['vm']
@vm.setter
def vm(self, val):
try:
self._cache['vm'] = val
except AttributeError:
self._cache = {}
self._cache['vm'] = val
@property #could this get packaged into a separate function or something? It feels weird to duplicate this.
def z_stat(self):
try:
return self._cache['z_stat']
except AttributeError:
self._cache = {}
variance = self.vm.diagonal()
zStat = self.betas.reshape(len(self.betas),) / np.sqrt(variance)
rs = {}
for i in range(len(self.betas)):
rs[i] = (zStat[i], norm.sf(abs(zStat[i])) * 2)
self._cache['z_stat'] = rs.values()
except KeyError:
variance = self.vm.diagonal()
zStat = self.betas.reshape(len(self.betas),) / np.sqrt(variance)
rs = {}
for i in range(len(self.betas)):
rs[i] = (zStat[i], norm.sf(abs(zStat[i])) * 2)
self._cache['z_stat'] = rs.values()
return self._cache['z_stat']
@z_stat.setter
def z_stat(self, val):
try:
self._cache['z_stat'] = val
except AttributeError:
self._cache = {}
self._cache['z_stat'] = val
@property
def slopes_std_err(self):
try:
return self._cache['slopes_std_err']
except AttributeError:
self._cache = {}
self._cache['slopes_std_err'] = np.sqrt(self.slopes_vm.diagonal())
except KeyError:
self._cache['slopes_std_err'] = np.sqrt(self.slopes_vm.diagonal())
return self._cache['slopes_std_err']
@slopes_std_err.setter
def slopes_std_err(self, val):
try:
self._cache['slopes_std_err'] = val
except AttributeError:
self._cache = {}
self._cache['slopes_std_err'] = val
@property
def slopes_z_stat(self):
try:
return self._cache['slopes_z_stat']
except AttributeError:
self._cache = {}
zStat = self.slopes.reshape(
len(self.slopes),) / self.slopes_std_err
rs = {}
for i in range(len(self.slopes)):
rs[i] = (zStat[i], norm.sf(abs(zStat[i])) * 2)
self._cache['slopes_z_stat'] = rs.values()
except KeyError:
zStat = self.slopes.reshape(
len(self.slopes),) / self.slopes_std_err
rs = {}
for i in range(len(self.slopes)):
rs[i] = (zStat[i], norm.sf(abs(zStat[i])) * 2)
self._cache['slopes_z_stat'] = rs.values()
return self._cache['slopes_z_stat']
@slopes_z_stat.setter
def slopes_z_stat(self, val):
try:
self._cache['slopes_z_stat'] = val
except AttributeError:
self._cache = {}
self._cache['slopes_z_stat'] = val
@property
def xmean(self):
try:
return self._cache['xmean']
except AttributeError:
self._cache = {}
try: #why is this try-accept? can x be a list??
self._cache['xmean'] = np.reshape(sum(self.x) / self.n, (self.k, 1))
except:
self._cache['xmean'] = np.reshape(sum(self.x).toarray() / self.n, (self.k, 1))
except KeyError:
try:
self._cache['xmean'] = np.reshape(sum(self.x) / self.n, (self.k, 1))
except:
self._cache['xmean'] = np.reshape(sum(self.x).toarray() / self.n, (self.k, 1))
return self._cache['xmean']
@xmean.setter
def xmean(self, val):
try:
self._cache['xmean'] = val
except AttributeError:
self._cache = {}
self._cache['xmean'] = val
@property
def xb(self):
try:
return self._cache['xb']
except AttributeError:
self._cache = {}
self._cache['xb'] = spdot(self.x, self.betas)
except KeyError:
self._cache['xb'] = spdot(self.x, self.betas)
return self._cache['xb']
@xb.setter
def xb(self, val):
try:
self._cache['xb'] = val
except AttributeError:
self._cache = {}
self._cache['xb'] = val
@property
def predy(self):
try:
return self._cache['predy']
except AttributeError:
self._cache = {}
self._cache['predy'] = norm.cdf(self.xb)
except KeyError:
self._cache['predy'] = norm.cdf(self.xb)
return self._cache['predy']
@predy.setter
def predy(self, val):
try:
self._cache['predy'] = val
except AttributeError:
self._cache = {}
self._cache['predy'] = val
@property
def predpc(self):
try:
return self._cache['predpc']
except AttributeError:
self._cache = {}
predpc = abs(self.y - self.predy)
for i in range(len(predpc)):
if predpc[i] > 0.5:
predpc[i] = 0
else:
predpc[i] = 1
self._cache['predpc'] = float(100.0 * np.sum(predpc) / self.n)
except KeyError:
predpc = abs(self.y - self.predy)
for i in range(len(predpc)):
if predpc[i] > 0.5:
predpc[i] = 0
else:
predpc[i] = 1
self._cache['predpc'] = float(100.0 * np.sum(predpc) / self.n)
return self._cache['predpc']
@predpc.setter
def predpc(self, val):
try:
self._cache['predpc'] = val
except AttributeError:
self._cache = {}
self._cache['predpc'] = val
@property
def phiy(self):
try:
return self._cache['phiy']
except AttributeError:
self._cache = {}
self._cache['phiy'] = norm.pdf(self.xb)
except KeyError:
self._cache['phiy'] = norm.pdf(self.xb)
return self._cache['phiy']
@phiy.setter
def phiy(self, val):
try:
self._cache['phiy'] = val
except AttributeError:
self._cache = {}
self._cache['phiy'] = val
@property
def scale(self):
try:
return self._cache['scale']
except AttributeError:
self._cache = {}
if self.scalem == 'phimean':
self._cache['scale'] = float(1.0 * np.sum(self.phiy) / self.n)
elif self.scalem == 'xmean':
self._cache['scale'] = float(norm.pdf(np.dot(self.xmean.T, self.betas)))
except KeyError:
if self.scalem == 'phimean':
self._cache['scale'] = float(1.0 * np.sum(self.phiy) / self.n)
if self.scalem == 'xmean':
self._cache['scale'] = float(norm.pdf(np.dot(self.xmean.T, self.betas)))
return self._cache['scale']
@scale.setter
def scale(self, val):
try:
self._cache['scale'] = val
except AttributeError:
self._cache = {}
self._cache['scale'] = val
@property
def slopes(self):
try:
return self._cache['slopes']
except AttributeError:
self._cache = {}
self._cache['slopes'] = self.betas[1:] * self.scale
except KeyError:
self._cache['slopes'] = self.betas[1:] * self.scale
return self._cache['slopes']
@slopes.setter
def slopes(self, val):
try:
self._cache['slopes'] = val
except AttributeError:
self._cache = {}
self._cache['slopes'] = val
@property
def slopes_vm(self):
try:
return self._cache['slopes_vm']
except AttributeError:
self._cache = {}
x = self.xmean
b = self.betas
dfdb = np.eye(self.k) - spdot(b.T, x) * spdot(b, x.T)
slopes_vm = (self.scale ** 2) * \
np.dot(np.dot(dfdb, self.vm), dfdb.T)
self._cache['slopes_vm'] = slopes_vm[1:, 1:]
except KeyError:
x = self.xmean
b = self.betas
dfdb = np.eye(self.k) - spdot(b.T, x) * spdot(b, x.T)
slopes_vm = (self.scale ** 2) * \
np.dot(np.dot(dfdb, self.vm), dfdb.T)
self._cache['slopes_vm'] = slopes_vm[1:, 1:]
return self._cache['slopes_vm']
@slopes_vm.setter
def slopes_vm(self, val):
try:
self._cache['slopes_vm'] = val
except AttributeError:
self._cache = {}
self._cache['slopes_vm'] = val
@property
def LR(self):
try:
return self._cache['LR']
except AttributeError:
self._cache = {}
P = 1.0 * np.sum(self.y) / self.n
LR = float(
-2 * (self.n * (P * np.log(P) + (1 - P) * np.log(1 - P)) - self.logl))
self._cache['LR'] = (LR, chisqprob(LR, self.k))
except KeyError:
P = 1.0 * np.sum(self.y) / self.n
LR = float(
-2 * (self.n * (P * np.log(P) + (1 - P) * np.log(1 - P)) - self.logl))
self._cache['LR'] = (LR, chisqprob(LR, self.k))
return self._cache['LR']
@LR.setter
def LR(self, val):
try:
self._cache['LR'] = val
except AttributeError:
self._cache = {}
self._cache['LR'] = val
@property
def u_naive(self):
try:
return self._cache['u_naive']
except AttributeError:
self._cache = {}
self._cache['u_naive'] = self.y - self.predy
except KeyError:
u_naive = self.y - self.predy
self._cache['u_naive'] = u_naive
return self._cache['u_naive']
@u_naive.setter
def u_naive(self, val):
try:
self._cache['u_naive'] = val
except AttributeError:
self._cache = {}
self._cache['u_naive'] = val
@property
def u_gen(self):
try:
return self._cache['u_gen']
except AttributeError:
self._cache = {}
Phi_prod = self.predy * (1 - self.predy)
u_gen = self.phiy * (self.u_naive / Phi_prod)
self._cache['u_gen'] = u_gen
except KeyError:
Phi_prod = self.predy * (1 - self.predy)
u_gen = self.phiy * (self.u_naive / Phi_prod)
self._cache['u_gen'] = u_gen
return self._cache['u_gen']
@u_gen.setter
def u_gen(self, val):
try:
self._cache['u_gen'] = val
except AttributeError:
self._cache = {}
self._cache['u_gen'] = val
@property
def Pinkse_error(self):
try:
return self._cache['Pinkse_error']
except AttributeError:
self._cache = {}
self._cache['Pinkse_error'], self._cache[
'KP_error'], self._cache['PS_error'] = sp_tests(self)
except KeyError:
self._cache['Pinkse_error'], self._cache[
'KP_error'], self._cache['PS_error'] = sp_tests(self)
return self._cache['Pinkse_error']
@Pinkse_error.setter
def Pinkse_error(self, val):
try:
self._cache['Pinkse_error'] = val
except AttributeError:
self._cache = {}
self._cache['Pinkse_error'] = val
@property
def KP_error(self):
try:
return self._cache['KP_error']
except AttributeError:
self._cache = {}
self._cache['Pinkse_error'], self._cache[
'KP_error'], self._cache['PS_error'] = sp_tests(self)
except KeyError:
self._cache['Pinkse_error'], self._cache[
'KP_error'], self._cache['PS_error'] = sp_tests(self)
return self._cache['KP_error']
@KP_error.setter
def KP_error(self, val):
try:
self._cache['KP_error'] = val
except AttributeError:
self._cache = {}
self._cache['KP_error'] = val
@property
def PS_error(self):
try:
return self._cache['PS_error']
except AttributeError:
self._cache = {}
self._cache['Pinkse_error'], self._cache[
'KP_error'], self._cache['PS_error'] = sp_tests(self)
except KeyError:
self._cache['Pinkse_error'], self._cache[
'KP_error'], self._cache['PS_error'] = sp_tests(self)
return self._cache['PS_error']
@PS_error.setter
def PS_error(self, val):
try:
self._cache['PS_error'] = val
except AttributeError:
self._cache = {}
self._cache['PS_error'] = val
def par_est(self):
start = np.dot(la.inv(spdot(self.x.T, self.x)),
spdot(self.x.T, self.y))
flogl = lambda par: -self.ll(par)
if self.optim == 'newton':
fgrad = lambda par: self.gradient(par)
fhess = lambda par: self.hessian(par)
par_hat = newton(flogl, start, fgrad, fhess, self.maxiter)
warn = par_hat[2]
else:
fgrad = lambda par: -self.gradient(par)
if self.optim == 'bfgs':
par_hat = op.fmin_bfgs(
flogl, start, fgrad, full_output=1, disp=0)
warn = par_hat[6]
if self.optim == 'ncg':
fhess = lambda par: -self.hessian(par)
par_hat = op.fmin_ncg(
flogl, start, fgrad, fhess=fhess, full_output=1, disp=0)
warn = par_hat[5]
if warn > 0:
warn = True
else:
warn = False
return par_hat, warn
def ll(self, par):
beta = np.reshape(np.array(par), (self.k, 1))
q = 2 * self.y - 1
qxb = q * spdot(self.x, beta)
ll = sum(np.log(norm.cdf(qxb)))
return ll
def gradient(self, par):
beta = np.reshape(np.array(par), (self.k, 1))
q = 2 * self.y - 1
qxb = q * spdot(self.x, beta)
lamb = q * norm.pdf(qxb) / norm.cdf(qxb)
gradient = spdot(lamb.T, self.x)[0]
return gradient
def hessian(self, par):
beta = np.reshape(np.array(par), (self.k, 1))
q = 2 * self.y - 1
xb = spdot(self.x, beta)
qxb = q * xb
lamb = q * norm.pdf(qxb) / norm.cdf(qxb)
hessian = spdot(self.x.T, spbroadcast(self.x,-lamb * (lamb + xb)))
return hessian
class Probit(BaseProbit):
"""
Classic non-spatial Probit and spatial diagnostics. The class includes a
printout that formats all the results and tests in a nice format.
The diagnostics for spatial dependence currently implemented are:
* Pinkse Error [Pinkse2004]_
* Kelejian and Prucha Moran's I [Kelejian2001]_
* Pinkse & Slade Error [Pinkse1998]_
Parameters
----------
x : array
nxk array of independent variables (assumed to be aligned with y)
y : array
nx1 array of dependent binary variable
w : W
PySAL weights instance aligned with y
optim : string
Optimization method.
Default: 'newton' (Newton-Raphson).
Alternatives: 'ncg' (Newton-CG), 'bfgs' (BFGS algorithm)
scalem : string
Method to calculate the scale of the marginal effects.
Default: 'phimean' (Mean of individual marginal effects)
Alternative: 'xmean' (Marginal effects at variables mean)
maxiter : int
Maximum number of iterations until optimizer stops
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
Attributes
----------
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
y : array
nx1 array of dependent variable
betas : array
kx1 array with estimated coefficients
predy : array
nx1 array of predicted y values
n : int
Number of observations
k : int
Number of variables
vm : array
Variance-covariance matrix (kxk)
z_stat : list of tuples
z statistic; each tuple contains the pair (statistic,
p-value), where each is a float
xmean : array
Mean of the independent variables (kx1)
predpc : float
Percent of y correctly predicted
logl : float
Log-Likelihhod of the estimation
scalem : string
Method to calculate the scale of the marginal effects.
scale : float
Scale of the marginal effects.
slopes : array
Marginal effects of the independent variables (k-1x1)
slopes_vm : array
Variance-covariance matrix of the slopes (k-1xk-1)
LR : tuple
Likelihood Ratio test of all coefficients = 0
(test statistics, p-value)
Pinkse_error: float
Lagrange Multiplier test against spatial error correlation.
Implemented as presented in [Pinkse2004]_
KP_error : float
Moran's I type test against spatial error correlation.
Implemented as presented in [Kelejian2001]_
PS_error : float
Lagrange Multiplier test against spatial error correlation.
Implemented as presented in [Pinkse1998]_
warning : boolean
if True Maximum number of iterations exceeded or gradient
and/or function calls not changing.
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
title : string
Name of the regression method used
Examples
--------
We first need to import the needed modules, namely numpy to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis.
>>> import numpy as np
>>> import pysal
Open data on Columbus neighborhood crime (49 areas) using pysal.open().
This is the DBF associated with the Columbus shapefile. Note that
pysal.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> dbf = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
Extract the CRIME column (crime) from the DBF file and make it the
dependent variable for the regression. Note that PySAL requires this to be
an numpy array of shape (n, 1) as opposed to the also common shape of (n, )
that other packages accept. Since we want to run a probit model and for this
example we use the Columbus data, we also need to transform the continuous
CRIME variable into a binary variable. As in [McMillen1992]_, we define
y = 1 if CRIME > 40.
>>> y = np.array([dbf.by_col('CRIME')]).T
>>> y = (y>40).astype(float)
Extract HOVAL (home values) and INC (income) vectors from the DBF to be used as
independent variables in the regression. Note that PySAL requires this to
be an nxj numpy array, where j is the number of independent variables (not
including a constant). By default this class adds a vector of ones to the
independent variables passed in.
>>> names_to_extract = ['INC', 'HOVAL']
>>> x = np.array([dbf.by_col(name) for name in names_to_extract]).T
Since we want to the test the probit model for spatial dependence, we need to
specify the spatial weights matrix that includes the spatial configuration of
the observations into the error component of the model. To do that, we can open
an already existing gal file or create a new one. In this case, we will use
``columbus.gal``, which contains contiguity relationships between the
observations in the Columbus dataset we are using throughout this example.
Note that, in order to read the file, not only to open it, we need to
append '.read()' at the end of the command.
>>> w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()
Unless there is a good reason not to do it, the weights have to be
row-standardized so every row of the matrix sums to one. In PySAL, this
can be easily performed in the following way:
>>> w.transform='r'
We are all set with the preliminaries, we are good to run the model. In this
case, we will need the variables and the weights matrix. If we want to
have the names of the variables printed in the output summary, we will
have to pass them in as well, although this is optional.
>>> model = Probit(y, x, w=w, name_y='crime', name_x=['income','home value'], name_ds='columbus', name_w='columbus.gal')
Once we have run the model, we can explore a little bit the output. The
regression object we have created has many attributes so take your time to
discover them.
>>> np.around(model.betas, decimals=6)
array([[ 3.353811],
[-0.199653],
[-0.029514]])
>>> np.around(model.vm, decimals=6)
array([[ 0.852814, -0.043627, -0.008052],
[-0.043627, 0.004114, -0.000193],
[-0.008052, -0.000193, 0.00031 ]])
Since we have provided a spatial weigths matrix, the diagnostics for
spatial dependence have also been computed. We can access them and their
p-values individually:
>>> tests = np.array([['Pinkse_error','KP_error','PS_error']])
>>> stats = np.array([[model.Pinkse_error[0],model.KP_error[0],model.PS_error[0]]])
>>> pvalue = np.array([[model.Pinkse_error[1],model.KP_error[1],model.PS_error[1]]])
>>> print np.hstack((tests.T,np.around(np.hstack((stats.T,pvalue.T)),6)))
[['Pinkse_error' '3.131719' '0.076783']
['KP_error' '1.721312' '0.085194']
['PS_error' '2.558166' '0.109726']]
Or we can easily obtain a full summary of all the results nicely formatted and
ready to be printed simply by typing 'print model.summary'
"""
def __init__(
self, y, x, w=None, optim='newton', scalem='phimean', maxiter=100,
vm=False, name_y=None, name_x=None, name_w=None, name_ds=None,
spat_diag=False):
n = USER.check_arrays(y, x)
USER.check_y(y, n)
if w != None:
USER.check_weights(w, y)
spat_diag = True
ws = w.sparse
else:
ws = None
x_constant = USER.check_constant(x)
BaseProbit.__init__(self, y=y, x=x_constant, w=ws,
optim=optim, scalem=scalem, maxiter=maxiter)
self.title = "CLASSIC PROBIT ESTIMATOR"
self.name_ds = USER.set_name_ds(name_ds)
self.name_y = USER.set_name_y(name_y)
self.name_x = USER.set_name_x(name_x, x)
self.name_w = USER.set_name_w(name_w, w)
SUMMARY.Probit(reg=self, w=w, vm=vm, spat_diag=spat_diag)
def newton(flogl, start, fgrad, fhess, maxiter):
"""
Calculates the Newton-Raphson method
Parameters
----------
flogl : lambda
Function to calculate the log-likelihood
start : array
kx1 array of starting values
fgrad : lambda
Function to calculate the gradient
fhess : lambda
Function to calculate the hessian
maxiter : int
Maximum number of iterations until optimizer stops
"""
warn = 0
iteration = 0
par_hat0 = start
m = 1
while (iteration < maxiter and m >= 1e-04):
H = -la.inv(fhess(par_hat0))
g = fgrad(par_hat0).reshape(start.shape)
Hg = np.dot(H, g)
par_hat0 = par_hat0 + Hg
iteration += 1
m = np.dot(g.T, Hg)
if iteration == maxiter:
warn = 1
logl = flogl(par_hat0)
return (par_hat0, logl, warn)
def sp_tests(reg):
"""
Calculates tests for spatial dependence in Probit models
Parameters
----------
reg : regression object
output instance from a probit model
"""
if reg.w != None:
try:
w = reg.w.sparse
except:
w = reg.w
Phi = reg.predy
phi = reg.phiy
# Pinkse_error:
Phi_prod = Phi * (1 - Phi)
u_naive = reg.u_naive
u_gen = reg.u_gen
sig2 = np.sum((phi * phi) / Phi_prod) / reg.n
LM_err_num = np.dot(u_gen.T, (w * u_gen)) ** 2
trWW = np.sum((w * w).diagonal())
trWWWWp = trWW + np.sum((w * w.T).diagonal())
LM_err = float(1.0 * LM_err_num / (sig2 ** 2 * trWWWWp))
LM_err = np.array([LM_err, chisqprob(LM_err, 1)])
# KP_error:
moran = moran_KP(reg.w, u_naive, Phi_prod)
# Pinkse-Slade_error:
u_std = u_naive / np.sqrt(Phi_prod)
ps_num = np.dot(u_std.T, (w * u_std)) ** 2
trWpW = np.sum((w.T * w).diagonal())
ps = float(ps_num / (trWW + trWpW))
# chi-square instead of bootstrap.
ps = np.array([ps, chisqprob(ps, 1)])
else:
raise Exception, "W matrix must be provided to calculate spatial tests."
return LM_err, moran, ps
def moran_KP(w, u, sig2i):
"""
Calculates Moran-flavoured tests
Parameters
----------
w : W
PySAL weights instance aligned with y
u : array
nx1 array of naive residuals
sig2i : array
nx1 array of individual variance
"""
try:
w = w.sparse
except:
pass
moran_num = np.dot(u.T, (w * u))
E = SP.lil_matrix(w.get_shape())
E.setdiag(sig2i.flat)
E = E.asformat('csr')
WE = w * E
moran_den = np.sqrt(np.sum((WE * WE + (w.T * E) * WE).diagonal()))
moran = float(1.0 * moran_num / moran_den)
moran = np.array([moran, norm.sf(abs(moran)) * 2.])
return moran
def _test():
import doctest
start_suppress = np.get_printoptions()['suppress']
np.set_printoptions(suppress=True)
doctest.testmod()
np.set_printoptions(suppress=start_suppress)
if __name__ == '__main__':
_test()
import numpy as np
import pysal
dbf = pysal.open(pysal.examples.get_path('columbus.dbf'), 'r')
y = np.array([dbf.by_col('CRIME')]).T
var_x = ['INC', 'HOVAL']
x = np.array([dbf.by_col(name) for name in var_x]).T
w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()
w.transform = 'r'
probit1 = Probit(
(y > 40).astype(float), x, w=w, name_x=var_x, name_y="CRIME",
name_ds="Columbus", name_w="columbus.dbf")
print probit1.summary
|
pastephens/pysal
|
pysal/spreg/probit.py
|
Python
|
bsd-3-clause
| 34,383 | 0.002501 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DdosCustomPoliciesOperations:
"""DdosCustomPoliciesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified DDoS custom policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
ddos_custom_policy_name=ddos_custom_policy_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def get(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
**kwargs: Any
) -> "_models.DdosCustomPolicy":
"""Gets information about the specified DDoS custom policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DdosCustomPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_07_01.models.DdosCustomPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosCustomPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
parameters: "_models.DdosCustomPolicy",
**kwargs: Any
) -> "_models.DdosCustomPolicy":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosCustomPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DdosCustomPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
parameters: "_models.DdosCustomPolicy",
**kwargs: Any
) -> AsyncLROPoller["_models.DdosCustomPolicy"]:
"""Creates or updates a DDoS custom policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:param parameters: Parameters supplied to the create or update operation.
:type parameters: ~azure.mgmt.network.v2020_07_01.models.DdosCustomPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DdosCustomPolicy or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_07_01.models.DdosCustomPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosCustomPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
ddos_custom_policy_name=ddos_custom_policy_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def update_tags(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.DdosCustomPolicy":
"""Update a DDoS custom policy tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:param parameters: Parameters supplied to update DDoS custom policy resource tags.
:type parameters: ~azure.mgmt.network.v2020_07_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DdosCustomPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_07_01.models.DdosCustomPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosCustomPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_07_01/aio/operations/_ddos_custom_policies_operations.py
|
Python
|
mit
| 20,544 | 0.004965 |
from .MidiOutFile import MidiOutFile
from .MidiInFile import MidiInFile
"""
This is an example of the smallest possible type 0 midi file, where
all the midi events are in the same track.
"""
class Transposer(MidiOutFile):
"Transposes all notes by 1 octave"
def _transp(self, ch, note):
if ch != 9: # not the drums!
note += 12
if note > 127:
note = 127
return note
def note_on(self, channel=0, note=0x40, velocity=0x40):
note = self._transp(channel, note)
MidiOutFile.note_on(self, channel, note, velocity)
def note_off(self, channel=0, note=0x40, velocity=0x40):
note = self._transp(channel, note)
MidiOutFile.note_off(self, channel, note, velocity)
out_file = 'midiout/transposed.mid'
midi_out = Transposer(out_file)
#in_file = 'midiout/minimal_type0.mid'
#in_file = 'test/midifiles/Lola.mid'
in_file = 'test/midifiles/tennessee_waltz.mid'
midi_in = MidiInFile(midi_out, in_file)
midi_in.read()
|
JonathanRaiman/Dali
|
data/score_informed_transcription/midi/example_transpose_octave.py
|
Python
|
mit
| 1,038 | 0.010597 |
# region Description
"""
nmap_scanner.py: Scan local network with NMAP
Author: Vladimir Ivanov
License: MIT
Copyright 2020, Raw-packet Project
"""
# endregion
# region Import
from raw_packet.Utils.base import Base
import xml.etree.ElementTree as ET
import subprocess as sub
from tempfile import gettempdir
from os.path import isfile, join
from os import remove
from typing import Union, List, Dict, NamedTuple
from collections import namedtuple
# endregion
# region Authorship information
__author__ = 'Vladimir Ivanov'
__copyright__ = 'Copyright 2020, Raw-packet Project'
__credits__ = ['']
__license__ = 'MIT'
__version__ = '0.2.1'
__maintainer__ = 'Vladimir Ivanov'
__email__ = 'ivanov.vladimir.mail@gmail.com'
__status__ = 'Development'
# endregion
# region Main class - NmapScanner
class NmapScanner:
# region Variables
_base: Base = Base(admin_only=True, available_platforms=['Linux', 'Darwin', 'Windows'])
try:
Info = namedtuple(typename='Info', field_names='vendor, os, mac_address, ipv4_address, ports',
defaults=('', '', '', '', []))
except TypeError:
Info = namedtuple(typename='Info', field_names='vendor, os, mac_address, ipv4_address, ports')
# endregion
# region Init
def __init__(self, network_interface: str):
self._your: Dict[str, Union[None, str]] = \
self._base.get_interface_settings(interface_name=network_interface,
required_parameters=['mac-address', 'ipv4-address',
'first-ipv4-address', 'last-ipv4-address'])
self.local_network: str = \
self._your['first-ipv4-address'] + '-' + \
self._your['last-ipv4-address'].split('.')[3]
if self._base.get_platform().startswith('Darwin'):
self._nmap_scan_result: str = '/tmp/nmap_scan.xml'
else:
self._nmap_scan_result: str = join(gettempdir(), 'nmap_scan.xml')
# endregion
# region Find devices in local network with nmap
def scan(self,
exit_on_failure: bool = True,
quiet: bool = False) -> Union[None, List[NamedTuple]]:
try:
# region Variables
network_devices: List[NamedTuple] = list()
ipv4_address: str = ''
mac_address: str = ''
vendor: str = ''
os: str = ''
ports: List[int] = list()
# endregion
nmap_command: str = 'nmap ' + self.local_network + \
' --open -n -O --osscan-guess -T5 -oX ' + self._nmap_scan_result
if not quiet:
self._base.print_info('Start nmap scan: ', nmap_command)
if self._base.get_platform().startswith('Windows'):
nmap_process = sub.Popen(nmap_command, shell=True, stdout=sub.PIPE, stderr=sub.STDOUT)
else:
nmap_process = sub.Popen([nmap_command], shell=True, stdout=sub.PIPE, stderr=sub.STDOUT)
nmap_process.wait()
assert isfile(self._nmap_scan_result), \
'Not found nmap scan result file: ' + self._base.error_text(self._nmap_scan_result)
nmap_report = ET.parse(self._nmap_scan_result)
root_tree = nmap_report.getroot()
for element in root_tree:
try:
assert element.tag == 'host'
state = element.find('status').attrib['state']
assert state == 'up'
# region Address
for address in element.findall('address'):
if address.attrib['addrtype'] == 'ipv4':
ipv4_address = address.attrib['addr']
if address.attrib['addrtype'] == 'mac':
mac_address = address.attrib['addr'].lower()
try:
vendor = address.attrib['vendor']
except KeyError:
pass
# endregion
# region Open TCP ports
for ports_info in element.find('ports'):
if ports_info.tag == 'port':
ports.append(ports_info.attrib['portid'])
# endregion
# region OS
for os_info in element.find('os'):
if os_info.tag == 'osmatch':
try:
os = os_info.attrib['name']
except TypeError:
pass
break
# endregion
network_devices.append(self.Info(vendor=vendor, os=os, mac_address=mac_address,
ipv4_address=ipv4_address, ports=ports))
except AssertionError:
pass
remove(self._nmap_scan_result)
assert len(network_devices) != 0, \
'Could not find any devices on interface: ' + self._base.error_text(self._your['network-interface'])
return network_devices
except OSError:
self._base.print_error('Something went wrong while trying to run ', 'nmap')
if exit_on_failure:
exit(2)
except KeyboardInterrupt:
self._base.print_info('Exit')
exit(0)
except AssertionError as Error:
self._base.print_error(Error.args[0])
if exit_on_failure:
exit(1)
return None
# endregion
# endregion
|
Vladimir-Ivanov-Git/raw-packet
|
raw_packet/Scanners/nmap_scanner.py
|
Python
|
mit
| 5,743 | 0.002438 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v10.services.types import remarketing_action_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class RemarketingActionServiceTransport(abc.ABC):
"""Abstract transport class for RemarketingActionService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
DEFAULT_HOST: str = "googleads.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file,
**scopes_kwargs,
quota_project_id=quota_project_id,
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(
service_account.Credentials, "with_always_use_jwt_access"
)
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.mutate_remarketing_actions: gapic_v1.method.wrap_method(
self.mutate_remarketing_actions,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def mutate_remarketing_actions(
self,
) -> Callable[
[remarketing_action_service.MutateRemarketingActionsRequest],
Union[
remarketing_action_service.MutateRemarketingActionsResponse,
Awaitable[
remarketing_action_service.MutateRemarketingActionsResponse
],
],
]:
raise NotImplementedError()
__all__ = ("RemarketingActionServiceTransport",)
|
googleads/google-ads-python
|
google/ads/googleads/v10/services/services/remarketing_action_service/transports/base.py
|
Python
|
apache-2.0
| 6,058 | 0.000495 |
import math
import string
from Conundrum.utils import sanitize
letter_to_value = dict(zip('z' + string.ascii_lowercase, range(0, 27)))
value_to_letter = dict(zip(range(0, 27), 'z' + string.ascii_lowercase))
def encrypt(msg: str, key: str) -> str:
msg = sanitize(msg)
key = sanitize(key)
repeat = int(math.ceil(len(msg) / len(key)))
key = key * repeat
return ''.join([value_to_letter[(letter_to_value[msg_letter] +
letter_to_value[key_letter]) % 26]
for msg_letter, key_letter in zip(msg, key)])
def decrypt(msg: str, key: str) -> str:
msg = sanitize(msg)
key = sanitize(key)
repeat = int(math.ceil(len(msg) / len(key)))
key = key * repeat
return ''.join([value_to_letter[(letter_to_value[msg_letter] -
letter_to_value[key_letter]) % 26]
for msg_letter, key_letter in zip(msg, key)])
if __name__ == '__main__':
# Used in Movies 1
encrypted_msg = 'oape dhzoawx cz hny'
guessed_key = 'plum scarlett green mustard'
print(decrypt(encrypted_msg, guessed_key))
# Used in Movies 3
# decrypted_msg = 'metropolis'
# film_key = 'Close Encounters Of The Third Kind'
# print(encrypt(decrypted_msg, film_key))
|
physicalattraction/kerstpuzzel
|
src/Conundrum/key_cipher.py
|
Python
|
mit
| 1,290 | 0.000775 |
#!/usr/bin/env python
# pylint: disable=invalid-name
"""
2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.
What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?
"""
import sys
from problembaseclass import ProblemBaseClass
class Problem5(ProblemBaseClass):
"""
@class Solution for Problem 5
@brief
"""
def __init__(self, range):
self.result = None
self.range = range
def compute(self):
notfound=True
val=0
while(notfound):
notfound = False
val = val + 1
for n in range(1, self.range):
if (val % n):
notfound = True
self.result = val
if __name__ == '__main__':
problem = Problem5(10)
problem.compute()
print problem.result
del problem
problem = Problem5(20)
problem.compute()
print problem.result #232792560
del problem
|
jakubczaplicki/projecteuler
|
problem005.py
|
Python
|
mit
| 1,000 | 0.008 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008-2010 Zuza Software Foundation
#
# This file is part of Virtaal.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Contains the AutoCompletor class."""
import gobject
import re
try:
from collections import defaultdict
except ImportError:
class defaultdict(dict):
def __init__(self, default_factory=lambda: None):
self.__factory = default_factory
def __getitem__(self, key):
if key in self:
return super(defaultdict, self).__getitem__(key)
else:
return self.__factory()
from virtaal.controllers.baseplugin import BasePlugin
from virtaal.views.widgets.textbox import TextBox
class AutoCompletor(object):
"""
Does auto-completion of registered words in registered widgets.
"""
wordsep_re = re.compile(r'\W+', re.UNICODE)
MAX_WORDS = 10000
DEFAULT_COMPLETION_LENGTH = 4 # The default minimum length of a word that may
# be auto-completed.
def __init__(self, main_controller, word_list=[], comp_len=DEFAULT_COMPLETION_LENGTH):
"""Constructor.
@type word_list: iterable
@param word_list: A list of words that should be auto-completed."""
self.main_controller = main_controller
assert isinstance(word_list, list)
self.comp_len = comp_len
self._word_list = []
self._word_freq = defaultdict(lambda: 0)
self.add_words(word_list)
self.widgets = set()
def add_widget(self, widget):
"""Add a widget to the list of widgets to do auto-completion for."""
if widget in self.widgets:
return # Widget already added
if isinstance(widget, TextBox):
self._add_text_box(widget)
return
raise ValueError("Widget type %s not supported." % (type(widget)))
def add_words(self, words, update=True):
"""Add a word or words to the list of words to auto-complete."""
for word in words:
if self.isusable(word):
self._word_freq[word] += 1
if update:
self._update_word_list()
def add_words_from_units(self, units):
"""Collect all words from the given translation units to use for
auto-completion.
@type units: list
@param units: The translation units to collect words from.
"""
for unit in units:
target = unit.target
if not target:
continue
self.add_words(self.wordsep_re.split(target), update=False)
if len(self._word_freq) > self.MAX_WORDS:
break
self._update_word_list()
def autocomplete(self, word):
for w in self._word_list:
if w.startswith(word):
return w, w[len(word):]
return None, u''
def clear_widgets(self):
"""Release all registered widgets from the spell of auto-completion."""
for w in set(self.widgets):
self.remove_widget(w)
def clear_words(self):
"""Remove all registered words; effectively turns off auto-completion."""
self._word_freq = []
self._word_list = defaultdict(lambda: 0)
def isusable(self, word):
"""Returns a value indicating if the given word should be kept as a
suggestion for autocomplete."""
return len(word) > self.comp_len + 2
def remove_widget(self, widget):
"""Remove a widget (currently only L{TextBox}s are accepted) from
the list of widgets to do auto-correction for.
"""
if isinstance(widget, TextBox) and widget in self.widgets:
self._remove_textbox(widget)
def remove_words(self, words):
"""Remove a word or words from the list of words to auto-complete."""
if isinstance(words, basestring):
del self._word_freq[words]
self._word_list.remove(words)
else:
for w in words:
try:
del self._word_freq[w]
self._word_list.remove(w)
except KeyError:
pass
def _add_text_box(self, textbox):
"""Add the given L{TextBox} to the list of widgets to do auto-
correction on."""
if not hasattr(self, '_textbox_insert_ids'):
self._textbox_insert_ids = {}
handler_id = textbox.connect('text-inserted', self._on_insert_text)
self._textbox_insert_ids[textbox] = handler_id
self.widgets.add(textbox)
def _on_insert_text(self, textbox, text, offset, elem):
if not isinstance(text, basestring) or self.wordsep_re.match(text):
return
# We are only interested in single character insertions, otherwise we
# react similarly for paste and similar events
if len(text.decode('utf-8')) > 1:
return
prefix = unicode(textbox.get_text(0, offset) + text)
postfix = unicode(textbox.get_text(offset))
buffer = textbox.buffer
# Quick fix to check that we don't autocomplete in the middle of a word.
right_lim = len(postfix) > 0 and postfix[0] or ' '
if not self.wordsep_re.match(right_lim):
return
lastword = self.wordsep_re.split(prefix)[-1]
if len(lastword) >= self.comp_len:
completed_word, word_postfix = self.autocomplete(lastword)
if completed_word == lastword:
return
if completed_word:
# Updating of the buffer is deferred until after this signal
# and its side effects are taken care of. We abuse
# gobject.idle_add for that.
insert_offset = offset + len(text)
def suggest_completion():
textbox.handler_block(self._textbox_insert_ids[textbox])
#logging.debug("textbox.suggestion = {'text': u'%s', 'offset': %d}" % (word_postfix, insert_offset))
textbox.suggestion = {'text': word_postfix, 'offset': insert_offset}
textbox.handler_unblock(self._textbox_insert_ids[textbox])
sel_iter_start = buffer.get_iter_at_offset(insert_offset)
sel_iter_end = buffer.get_iter_at_offset(insert_offset + len(word_postfix))
buffer.select_range(sel_iter_start, sel_iter_end)
return False
gobject.idle_add(suggest_completion, priority=gobject.PRIORITY_HIGH)
def _remove_textbox(self, textbox):
"""Remove the given L{TextBox} from the list of widgets to do
auto-correction on.
"""
if not hasattr(self, '_textbox_insert_ids'):
return
# Disconnect the "insert-text" event handler
textbox.disconnect(self._textbox_insert_ids[textbox])
self.widgets.remove(textbox)
def _update_word_list(self):
"""Update and sort found words according to frequency."""
wordlist = self._word_freq.items()
wordlist.sort(key=lambda x:x[1], reverse=True)
self._word_list = [items[0] for items in wordlist]
class Plugin(BasePlugin):
description = _('Automatically complete long words while you type')
display_name = _('AutoCompletor')
version = 0.1
# INITIALIZERS #
def __init__(self, internal_name, main_controller):
self.internal_name = internal_name
self.main_controller = main_controller
self._init_plugin()
def _init_plugin(self):
from virtaal.common import pan_app
self.autocomp = AutoCompletor(self.main_controller)
self._store_loaded_id = self.main_controller.store_controller.connect('store-loaded', self._on_store_loaded)
if self.main_controller.store_controller.get_store():
# Connect to already loaded store. This happens when the plug-in is enabled after loading a store.
self._on_store_loaded(self.main_controller.store_controller)
self._unitview_id = None
unitview = self.main_controller.unit_controller.view
if unitview.targets:
self._connect_to_textboxes(unitview, unitview.targets)
else:
self._unitview_id = unitview.connect('targets-created', self._connect_to_textboxes)
def _connect_to_textboxes(self, unitview, textboxes):
for target in textboxes:
self.autocomp.add_widget(target)
# METHDOS #
def destroy(self):
"""Remove all signal-connections."""
self.autocomp.clear_words()
self.autocomp.clear_widgets()
self.main_controller.store_controller.disconnect(self._store_loaded_id)
if getattr(self, '_cursor_changed_id', None):
self.store_cursor.disconnect(self._cursor_changed_id)
if self._unitview_id:
self.main_controller.unit_controller.view.disconnect(self._unitview_id)
# EVENT HANDLERS #
def _on_cursor_change(self, cursor):
def add_widgets():
if hasattr(self, 'lastunit'):
if self.lastunit.hasplural():
for target in self.lastunit.target:
if target:
#logging.debug('Adding words: %s' % (self.autocomp.wordsep_re.split(unicode(target))))
self.autocomp.add_words(self.autocomp.wordsep_re.split(unicode(target)))
else:
if self.lastunit.target:
#logging.debug('Adding words: %s' % (self.autocomp.wordsep_re.split(unicode(self.lastunit.target))))
self.autocomp.add_words(self.autocomp.wordsep_re.split(unicode(self.lastunit.target)))
self.lastunit = cursor.deref()
gobject.idle_add(add_widgets)
def _on_store_loaded(self, storecontroller):
self.autocomp.add_words_from_units(storecontroller.get_store().get_units())
if hasattr(self, '_cursor_changed_id'):
self.store_cursor.disconnect(self._cursor_changed_id)
self.store_cursor = storecontroller.cursor
self._cursor_changed_id = self.store_cursor.connect('cursor-changed', self._on_cursor_change)
self._on_cursor_change(self.store_cursor)
|
elric/virtaal-debian-snapshots
|
virtaal/plugins/autocompletor.py
|
Python
|
gpl-2.0
| 10,937 | 0.002834 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.