repo_name (stringlengths 6–100) | path (stringlengths 4–294) | copies (stringlengths 1–5) | size (stringlengths 4–6) | content (stringlengths 606–896k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
flyingbanana1024102/transmission-line-simulator
|
src/views/materialwidget.py
|
1
|
2230
|
#
# Transmission Line Simulator
#
# Author(s): Jiacong Xu
# Created: Jun-28-2017
#
from kivy.uix.widget import Widget
from kivy.properties import *
from kivy.clock import Clock
from kivy.graphics.texture import Texture
from kivy.graphics import *
from PIL import Image, ImageDraw, ImageFilter
class MaterialWidget(Widget):
"""
The basic UI element; automatically draws and updates its shadows.

raised: whether this widget has an edge and shadow.
"""
keyShadowTexture = ObjectProperty(None)
ambientShadowTexture = ObjectProperty(None)
raised = BooleanProperty(True)
clipSubviews = BooleanProperty(False)
elevation = NumericProperty(2.0)
backgroundColor = ListProperty([1, 1, 1, 1])
def __init__(self, **kwargs):
super(MaterialWidget, self).__init__(**kwargs)
def on_size(self, *args, **kwargs):
self._updateShadow()
def on_pos(self, *args, **kwargs):
self._updateShadow()
def on_elevation(self, *args, **kwargs):
self._updateShadow()
def _updateShadow(self):
# Key shadow: the sharper, darker shadow cast directly below the widget
offset_y = self.elevation # intended vertical offset of the key shadow (computed but not applied here)
radius = self.elevation / 2.0
t1 = self._genShadow(self.size[0], self.size[1], radius, 0.26)
self.keyShadowTexture = t1
# Ambient shadow: a wider, much fainter shadow around the widget
radius = self.elevation
t2 = self._genShadow(self.size[0], self.size[1], radius, 0.05)
self.ambientShadowTexture = t2
def _genShadow(self, ow, oh, radius, alpha):
# We need a bigger texture to correctly blur the edges
w = ow + radius * 6.0
h = oh + radius * 6.0
w = int(w)
h = int(h)
texture = Texture.create(size=(w, h), colorfmt='rgba')
im = Image.new('RGBA', (w, h), color=(1, 1, 1, 0))
draw = ImageDraw.Draw(im)
# the rectangle to be rendered needs to be centered on the texture
x0, y0 = (w - ow) / 2., (h - oh) / 2.
x1, y1 = x0 + ow - 1, y0 + oh - 1
draw.rectangle((x0, y0, x1, y1), fill=(0, 0, 0, int(255 * alpha)))
im = im.filter(ImageFilter.GaussianBlur(radius))
texture.blit_buffer(im.tobytes(), colorfmt='rgba', bufferfmt='ubyte')
return texture
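# Usage sketch (hedged): one way this widget might be mounted in a Kivy app.
# The DemoApp class and the property values below are illustrative
# assumptions, not part of this module.
#
#   from kivy.app import App
#
#   class DemoApp(App):
#       def build(self):
#           return MaterialWidget(size_hint=(None, None), size=(240, 120),
#                                 pos=(40, 40), elevation=4.0)
#
#   if __name__ == '__main__':
#       DemoApp().run()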
|
mit
|
wskplho/sl4a
|
python/src/Lib/lib-tk/ScrolledText.py
|
51
|
1701
|
"""A ScrolledText widget feels like a text widget but also has a
vertical scroll bar on its right. (Later, options may be added to
add a horizontal bar as well, to make the bars disappear
automatically when not needed, to move them to the other side of the
window, etc.)
Configuration options are passed to the Text widget.
A Frame widget is inserted between the master and the text, to hold
the Scrollbar widget.
Most method calls are inherited from the Text widget; Pack, Grid and
Place methods are redirected to the Frame widget, however.
"""
__all__ = ['ScrolledText']
from Tkinter import Frame, Text, Scrollbar, Pack, Grid, Place
from Tkconstants import RIGHT, LEFT, Y, BOTH
class ScrolledText(Text):
def __init__(self, master=None, **kw):
self.frame = Frame(master)
self.vbar = Scrollbar(self.frame)
self.vbar.pack(side=RIGHT, fill=Y)
kw.update({'yscrollcommand': self.vbar.set})
Text.__init__(self, self.frame, **kw)
self.pack(side=LEFT, fill=BOTH, expand=True)
self.vbar['command'] = self.yview
# Copy geometry methods of self.frame -- hack!
methods = vars(Pack).keys() + vars(Grid).keys() + vars(Place).keys()
for m in methods:
if m[0] != '_' and m != 'config' and m != 'configure':
setattr(self, m, getattr(self.frame, m))
def __str__(self):
return str(self.frame)
def example():
import __main__
from Tkconstants import END
stext = ScrolledText(bg='white', height=10)
stext.insert(END, __main__.__doc__)
stext.pack(fill=BOTH, side=LEFT, expand=True)
stext.focus_set()
stext.mainloop()
if __name__ == "__main__":
example()
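# Delegation note: after the geometry-method loop in __init__, calls such as
# stext.pack(...) actually operate on self.frame, so the Text and its
# Scrollbar move as one unit.  A hedged, self-contained sketch of the same
# composition-with-delegation pattern (made-up classes, not Tkinter):
#
#   class Inner(object):
#       def pack(self, **kw): return ('inner packed', kw)
#
#   class Outer(object):
#       def __init__(self):
#           self.inner = Inner()
#           setattr(self, 'pack', self.inner.pack)
#
#   assert Outer().pack(side='left') == ('inner packed', {'side': 'left'})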
|
apache-2.0
|
popazerty/SDG-e2
|
lib/python/Components/Converter/SdgServName.py
|
4
|
12811
|
# -*- coding: utf-8 -*-
from Components.Converter.Converter import Converter
from enigma import iServiceInformation, iPlayableService, iPlayableServicePtr, eServiceReference, eServiceCenter, eTimer
from Components.Element import cached
from Components.config import config
class SdgServName(Converter, object):
NAME = 0
NUMBER = 1
BOUQUET = 2
PROVIDER = 3
REFERENCE = 4
ORBPOS = 5
TPRDATA = 6
SATELLITE = 7
FORMAT = 8
def __init__(self, type):
Converter.__init__(self, type)
if type == "Name" or not len(str(type)):
self.type = self.NAME
elif type == "Number":
self.type = self.NUMBER
elif type == "Bouquet":
self.type = self.BOUQUET
elif type == "Provider":
self.type = self.PROVIDER
elif type == "Reference":
self.type = self.REFERENCE
elif type == "OrbitalPos":
self.type = self.ORBPOS
elif type == "TpansponderInfo":
self.type = self.TPRDATA
elif type == "Satellite":
self.type = self.SATELLITE
else:
self.type = self.FORMAT
self.sfmt = type[:]
self.what = self.tpdata = None
self.Timer = eTimer()
self.Timer.callback.append(self.neededChange)
def getServiceNumber(self, ref):
def searchHelper(serviceHandler, num, bouquet):
servicelist = serviceHandler.list(bouquet)
if not servicelist is None:
while True:
s = servicelist.getNext()
if not s.valid(): break
if not (s.flags & (eServiceReference.isMarker|eServiceReference.isDirectory)):
num += 1
if s == ref: return s, num
return None, num
if isinstance(ref, eServiceReference):
isRadioService = ref.getData(0) in (2,10)
lastpath = isRadioService and config.radio.lastroot.value or config.tv.lastroot.value
if lastpath.find('FROM BOUQUET') == -1:
if 'FROM PROVIDERS' in lastpath:
return 'P', 'Provider'
if 'FROM SATELLITES' in lastpath:
return 'S', 'Satellites'
if ') ORDER BY name' in lastpath:
return 'A', 'All Services'
return 0, 'N/A'
try:
acount = config.plugins.NumberZapExt.enable.value and config.plugins.NumberZapExt.acount.value
except:
acount = False
rootstr = ''
for x in lastpath.split(';'):
if x != '': rootstr = x
serviceHandler = eServiceCenter.getInstance()
if acount is True or not config.usage.multibouquet.value:
bouquet = eServiceReference(rootstr)
service, number = searchHelper(serviceHandler, 0, bouquet)
else:
if isRadioService:
bqrootstr = '1:7:2:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.radio" ORDER BY bouquet'
else:
bqrootstr = '1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.tv" ORDER BY bouquet'
number = 0
cur = eServiceReference(rootstr)
bouquet = eServiceReference(bqrootstr)
bouquetlist = serviceHandler.list(bouquet)
if not bouquetlist is None:
while True:
bouquet = bouquetlist.getNext()
if not bouquet.valid(): break
if bouquet.flags & eServiceReference.isDirectory:
service, number = searchHelper(serviceHandler, number, bouquet)
if not service is None and cur == bouquet: break
if not service is None:
info = serviceHandler.info(bouquet)
name = info and info.getName(bouquet) or ''
return number, name
return 0, ''
def getProviderName(self, ref):
if isinstance(ref, eServiceReference):
from Screens.ChannelSelection import service_types_radio, service_types_tv
typestr = ref.getData(0) in (2,10) and service_types_radio or service_types_tv
pos = typestr.rfind(':')
rootstr = '%s (channelID == %08x%04x%04x) && %s FROM PROVIDERS ORDER BY name' %(typestr[:pos+1],ref.getUnsignedData(4),ref.getUnsignedData(2),ref.getUnsignedData(3),typestr[pos+1:])
provider_root = eServiceReference(rootstr)
serviceHandler = eServiceCenter.getInstance()
providerlist = serviceHandler.list(provider_root)
if not providerlist is None:
while True:
provider = providerlist.getNext()
if not provider.valid(): break
if provider.flags & eServiceReference.isDirectory:
servicelist = serviceHandler.list(provider)
if not servicelist is None:
while True:
service = servicelist.getNext()
if not service.valid(): break
if service == ref:
info = serviceHandler.info(provider)
return info and info.getName(provider) or "Unknown"
return 'N/A'
def getTransponderInfo(self, info, ref, fmt):
result = ''
if self.tpdata is None:
self.tpdata = ref and (info.getInfoObject(ref, iServiceInformation.sTransponderData) or -1) or info.getInfoObject(iServiceInformation.sTransponderData)
if not isinstance(self.tpdata, dict):
self.tpdata = None
return result
type = self.tpdata.get('tuner_type', '')
if fmt == '' or fmt == '%T':
if type == 'DVB-C':
fmt = '%t %F %Y %i %f %M' #(type frequency symbol_rate inversion fec modulation)
elif type == 'DVB-T':
fmt = '%t %F %h %m %g %c' #(type frequency code_rate_hp transmission_mode guard_interval constellation)
else:
fmt = '%O %F%p %Y %f' #(orbital_position frequency polarization symbol_rate fec)
while True:
pos = fmt.find('%')
if pos == -1:
result += fmt
break
result += fmt[:pos]
pos += 1
l = len(fmt)
f = pos < l and fmt[pos] or '%'
if f == 't': # %t - tuner_type (dvb-s/s2/c/t)
if type == 'DVB-S':
result += _("Satellite")
elif type == 'DVB-C':
result += _("Cable")
elif type == 'DVB-T':
result += _("Terrestrial")
else:
result += 'N/A'
elif f == 's': # %s - system (dvb-s/s2/c/t)
if type == 'DVB-S':
x = self.tpdata.get('system', 0)
result += x in range(2) and {0:'DVB-S',1:'DVB-S2'}[x] or ''
else:
result += type
elif f == 'F': # %F - frequency (dvb-s/s2/c/t) in KHz
result += '%d'%(self.tpdata.get('frequency', 0) / 1000)
elif f == 'f': # %f - fec_inner (dvb-s/s2/c/t)
if type in ('DVB-S','DVB-C'):
x = self.tpdata.get('fec_inner', 15)
result += x in range(10)+[15] and {0:'Auto',1:'1/2',2:'2/3',3:'3/4',4:'5/6',5:'7/8',6:'8/9',7:'3/5',8:'4/5',9:'9/10',15:'None'}[x] or ''
elif type == 'DVB-T':
x = self.tpdata.get('code_rate_lp', 5)
result += x in range(6) and {0:'1/2',1:'2/3',2:'3/4',3:'5/6',4:'7/8',5:'Auto'}[x] or ''
elif f == 'i': # %i - inversion (dvb-s/s2/c/t)
x = self.tpdata.get('inversion', 2)
result += x in range(3) and {0:'On',1:'Off',2:'Auto'}[x] or ''
elif f == 'O': # %O - orbital_position (dvb-s/s2)
if type == 'DVB-S':
x = self.tpdata.get('orbital_position', 0)
result += x > 1800 and "%d.%d°W"%((3600-x)/10, (3600-x)%10) or "%d.%d°E"%(x/10, x%10)
elif f == 'M': # %M - modulation (dvb-s/s2/c)
x = self.tpdata.get('modulation', 1)
if type == 'DVB-S':
result += x in range(4) and {0:'Auto',1:'QPSK',2:'8PSK',3:'QAM16'}[x] or ''
elif type == 'DVB-C':
result += x in range(6) and {0:'Auto',1:'QAM16',2:'QAM32',3:'QAM64',4:'QAM128',5:'QAM256'}[x] or ''
elif f == 'p': # %p - polarization (dvb-s/s2)
if type == 'DVB-S':
x = self.tpdata.get('polarization', 0)
result += x in range(4) and {0:'H',1:'V',2:'L',3:'R'}[x] or '?'
elif f == 'Y': # %Y - symbol_rate (dvb-s/s2/c)
if type in ('DVB-S','DVB-C'):
result += '%d'%(self.tpdata.get('symbol_rate', 0) / 1000)
elif f == 'r': # %r - rolloff (dvb-s2)
x = self.tpdata.get('rolloff')
if not x is None:
result += x in range(3) and {0:'0.35',1:'0.25',2:'0.20'}[x] or ''
elif f == 'o': # %o - pilot (dvb-s2)
x = self.tpdata.get('pilot')
if not x is None:
result += x in range(3) and {0:'Off',1:'On',2:'Auto'}[x] or ''
elif f == 'c': # %c - constellation (dvb-t)
if type == 'DVB-T':
x = self.tpdata.get('constellation', 3)
result += x in range(4) and {0:'QPSK',1:'QAM16',2:'QAM64',3:'Auto'}[x] or ''
elif f == 'l': # %l - code_rate_lp (dvb-t)
if type == 'DVB-T':
x = self.tpdata.get('code_rate_lp', 5)
result += x in range(6) and {0:'1/2',1:'2/3',2:'3/4',3:'5/6',4:'7/8',5:'Auto'}[x] or ''
elif f == 'h': # %h - code_rate_hp (dvb-t)
if type == 'DVB-T':
x = self.tpdata.get('code_rate_hp', 5)
result += x in range(6) and {0:'1/2',1:'2/3',2:'3/4',3:'5/6',4:'7/8',5:'Auto'}[x] or ''
elif f == 'm': # %m - transmission_mode (dvb-t)
if type == 'DVB-T':
x = self.tpdata.get('transmission_mode', 2)
result += x in range(3) and {0:'2k',1:'8k',2:'Auto'}[x] or ''
elif f == 'g': # %g - guard_interval (dvb-t)
if type == 'DVB-T':
x = self.tpdata.get('guard_interval', 4)
result += x in range(5) and {0:'1/32',1:'1/16',2:'1/8',3:'1/4',4:'Auto'}[x] or ''
elif f == 'b': # %b - bandwidth (dvb-t)
if type == 'DVB-T':
x = self.tpdata.get('bandwidth', 1)
result += x in range(4) and {0:'8 MHz',1:'7 MHz',2:'6 MHz',3:'Auto'}[x] or ''
elif f == 'e': # %e - hierarchy_information (dvb-t)
if type == 'DVB-T':
x = self.tpdata.get('hierarchy_information', 4)
result += x in range(5) and {0:'None',1:'1',2:'2',3:'4',4:'Auto'}[x] or ''
else:
result += f
if pos+1 >= l: break
fmt = fmt[pos+1:]
return result
def getSatelliteName(self, ref):
name = 'N/A'
if isinstance(ref, eServiceReference):
orbpos = ref.getUnsignedData(4) >> 16
if orbpos == 0xFFFF: #Cable
name = _("Cable")
elif orbpos == 0xEEEE: #Terrestrial
name = _("Terrestrial")
else: #Satellite
orbpos = ref.getData(4) >> 16
if orbpos < 0: orbpos += 3600
try:
from Components.NimManager import nimmanager
name = str(nimmanager.getSatDescription(orbpos))
except:
name = orbpos > 1800 and "%d.%d°W"%((3600-orbpos)/10, (3600-orbpos)%10) or "%d.%d°E"%(orbpos/10, orbpos%10)
return name
@cached
def getText(self):
service = self.source.service
if isinstance(service, iPlayableServicePtr):
info = service and service.info()
ref = None
else: # reference
info = service and self.source.info
ref = service
if info is None: return ""
if self.type == self.NAME:
name = ref and (info.getName(ref) or 'N/A') or (info.getName() or 'N/A')
return name.replace('\xc2\x86', '').replace('\xc2\x87', '')
elif self.type == self.NUMBER:
num, bouq = self.getServiceNumber(ref or eServiceReference(info.getInfoString(iServiceInformation.sServiceref)))
return num and str(num) or ''
elif self.type == self.BOUQUET:
num, bouq = self.getServiceNumber(ref or eServiceReference(info.getInfoString(iServiceInformation.sServiceref)))
return bouq
elif self.type == self.PROVIDER:
return ref and self.getProviderName(ref) or info.getInfoString(iServiceInformation.sProvider)
elif self.type == self.REFERENCE:
return ref and ref.toString() or info.getInfoString(iServiceInformation.sServiceref)
elif self.type == self.ORBPOS:
return self.getTransponderInfo(info, ref, '%O')
elif self.type == self.TPRDATA:
return self.getTransponderInfo(info, ref, '%T')
elif self.type == self.SATELLITE:
return self.getSatelliteName(ref or eServiceReference(info.getInfoString(iServiceInformation.sServiceref)))
elif self.type == self.FORMAT:
ret = num = bouq = ''
if '%n' in self.sfmt or '%B' in self.sfmt:
num, bouq = self.getServiceNumber(ref or eServiceReference(info.getInfoString(iServiceInformation.sServiceref)))
tmp = self.sfmt[:]
while True:
pos = tmp.find('%')
if pos == -1:
ret += tmp
break
ret += tmp[:pos]
pos += 1
l = len(tmp)
f = pos < l and tmp[pos] or '%'
if f == 'N': # %N - Name
name = ref and (info.getName(ref) or 'N/A') or (info.getName() or 'N/A')
ret += name.replace('\xc2\x86', '').replace('\xc2\x87', '')
elif f == 'n': # %n - Number
ret += num and str(num) or ''
elif f == 'B': # %B - Bouquet
ret += bouq
elif f == 'P': # %P - Provider
ret += ref and self.getProviderName(ref) or info.getInfoString(iServiceInformation.sProvider)
elif f == 'R': # %R - Reference
ret += ref and ref.toString() or info.getInfoString(iServiceInformation.sServiceref)
elif f == 'S': # %S - Satellite
ret += self.getSatelliteName(ref or eServiceReference(info.getInfoString(iServiceInformation.sServiceref)))
elif f in 'TtsFfiOMpYroclhmgbe':
ret += self.getTransponderInfo(info, ref, '%'+f)
else:
ret += f
if pos+1 >= l: break
tmp = tmp[pos+1:]
return '%s'%(ret.replace('N/A', ''))
text = property(getText)
def neededChange(self):
if self.what:
Converter.changed(self, self.what)
self.what = None
def changed(self, what):
if what[0] != self.CHANGED_SPECIFIC or what[1] in (iPlayableService.evStart,):
self.tpdata = None
if self.type in (self.NUMBER,self.BOUQUET) or \
(self.type == self.FORMAT and ('%n' in self.sfmt or '%B' in self.sfmt)):
self.what = what
self.Timer.start(200, True)
else:
Converter.changed(self, what)
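# Format-string note (hedged): getTransponderInfo() and the FORMAT branch of
# getText() share the same '%<letter>' token-scanning loop, copying literal
# text between tokens.  A minimal standalone sketch of that loop with a
# made-up token table (not this converter's real data):
#
#   def expand(fmt, table):
#       out = ''
#       while True:
#           pos = fmt.find('%')
#           if pos == -1:
#               return out + fmt
#           out += fmt[:pos]
#           pos += 1
#           f = pos < len(fmt) and fmt[pos] or '%'
#           out += table.get(f, f)
#           if pos + 1 >= len(fmt):
#               return out
#           fmt = fmt[pos+1:]
#
#   assert expand('%F kHz %p', {'F': '11778000', 'p': 'H'}) == '11778000 kHz H'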
|
gpl-2.0
|
davy39/eric
|
Plugins/VcsPlugins/vcsMercurial/HgImportDialog.py
|
1
|
3032
|
# -*- coding: utf-8 -*-
# Copyright (c) 2011 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing a dialog to enter data for the Mercurial import command.
"""
from __future__ import unicode_literals
from PyQt5.QtCore import pyqtSlot, QDateTime
from PyQt5.QtWidgets import QDialog, QDialogButtonBox
from E5Gui import E5FileDialog
from E5Gui.E5Completers import E5FileCompleter
from .Ui_HgImportDialog import Ui_HgImportDialog
import Utilities
import UI.PixmapCache
class HgImportDialog(QDialog, Ui_HgImportDialog):
"""
Class implementing a dialog to enter data for the Mercurial import command.
"""
def __init__(self, parent=None):
"""
Constructor
@param parent reference to the parent widget (QWidget)
"""
super(HgImportDialog, self).__init__(parent)
self.setupUi(self)
self.patchFileButton.setIcon(UI.PixmapCache.getIcon("open.png"))
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
self.__patchFileCompleter = E5FileCompleter(self.patchFileEdit)
self.__initDateTime = QDateTime.currentDateTime()
self.dateEdit.setDateTime(self.__initDateTime)
def __updateOK(self):
"""
Private slot to update the OK button.
"""
enabled = True
if self.patchFileEdit.text() == "":
enabled = False
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(enabled)
@pyqtSlot(str)
def on_patchFileEdit_textChanged(self, txt):
"""
Private slot to react on changes of the patch file edit.
@param txt contents of the line edit (string)
"""
self.__updateOK()
@pyqtSlot()
def on_patchFileButton_clicked(self):
"""
Private slot called by pressing the file selection button.
"""
fn = E5FileDialog.getOpenFileName(
self,
self.tr("Select patch file"),
self.patchFileEdit.text(),
self.tr("Patch Files (*.diff *.patch);;All Files (*)"))
if fn:
self.patchFileEdit.setText(Utilities.toNativeSeparators(fn))
def getParameters(self):
"""
Public method to retrieve the import data.
@return tuple naming the patch file, a flag indicating to not commit,
a commit message, a commit date, a commit user, a strip count and
a flag indicating to enforce the import
(string, boolean, string, string, string, integer, boolean)
"""
if self.dateEdit.dateTime() != self.__initDateTime:
date = self.dateEdit.dateTime().toString("yyyy-MM-dd hh:mm")
else:
date = ""
return (self.patchFileEdit.text(), self.noCommitCheckBox.isChecked(),
self.messageEdit.toPlainText(), date, self.userEdit.text(),
self.stripSpinBox.value(), self.forceCheckBox.isChecked())
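# Usage sketch (hedged): typical modal use of this dialog; the handling of
# the returned tuple below is illustrative, not eric's actual Mercurial
# plumbing.
#
#   dlg = HgImportDialog()
#   if dlg.exec_() == QDialog.Accepted:
#       (patchFile, noCommit, message, date,
#        user, strip, force) = dlg.getParameters()
#       # assemble the 'hg import' command line from these values ...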
|
gpl-3.0
|
plotly/python-api
|
packages/python/plotly/plotly/validators/sunburst/hoverlabel/_font.py
|
2
|
1860
|
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="font", parent_name="sunburst.hoverlabel", **kwargs):
super(FontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Font"),
data_docs=kwargs.pop(
"data_docs",
"""
color
colorsrc
Sets the source reference on Chart Studio Cloud
for color.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for family.
size
sizesrc
Sets the source reference on Chart Studio Cloud
for size.
""",
),
**kwargs
)
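# Usage sketch (hedged): end users normally reach this validator indirectly
# through the graph_objects API; the labels/parents below are made-up sample
# data.
#
#   import plotly.graph_objects as go
#
#   fig = go.Figure(go.Sunburst(
#       labels=['root', 'a', 'b'],
#       parents=['', 'root', 'root'],
#       hoverlabel=dict(font=dict(family='Courier New', size=14)),
#   ))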
|
mit
|
michalliu/OpenWrt-Firefly-Libraries
|
staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/test/test_inspect.py
|
8
|
119488
|
import collections
import datetime
import functools
import importlib
import inspect
import io
import linecache
import os
from os.path import normcase
import _pickle
import re
import shutil
import sys
import types
import textwrap
import unicodedata
import unittest
import unittest.mock
try:
from concurrent.futures import ThreadPoolExecutor
except ImportError:
ThreadPoolExecutor = None
from test.support import run_unittest, TESTFN, DirsOnSysPath, cpython_only
from test.support import MISSING_C_DOCSTRINGS
from test.script_helper import assert_python_ok, assert_python_failure
from test import inspect_fodder as mod
from test import inspect_fodder2 as mod2
from test.test_import import _ready_to_import
# Functions tested in this suite:
# ismodule, isclass, ismethod, isfunction, istraceback, isframe, iscode,
# isbuiltin, isroutine, isgenerator, isgeneratorfunction, getmembers,
# getdoc, getfile, getmodule, getsourcefile, getcomments, getsource,
# getclasstree, getargspec, getargvalues, formatargspec, formatargvalues,
# currentframe, stack, trace, isdatadescriptor
# NOTE: There are some additional tests relating to interaction with
# zipimport in the test_zipimport_support test module.
modfile = mod.__file__
if modfile.endswith(('c', 'o')):
modfile = modfile[:-1]
# Normalize file names: on Windows, the case of file names of compiled
# modules depends on the path used to start the python executable.
modfile = normcase(modfile)
def revise(filename, *args):
return (normcase(filename),) + args
import builtins
git = mod.StupidGit()
class IsTestBase(unittest.TestCase):
predicates = set([inspect.isbuiltin, inspect.isclass, inspect.iscode,
inspect.isframe, inspect.isfunction, inspect.ismethod,
inspect.ismodule, inspect.istraceback,
inspect.isgenerator, inspect.isgeneratorfunction])
def istest(self, predicate, exp):
obj = eval(exp)
self.assertTrue(predicate(obj), '%s(%s)' % (predicate.__name__, exp))
for other in self.predicates - set([predicate]):
if predicate == inspect.isgeneratorfunction and\
other == inspect.isfunction:
continue
self.assertFalse(other(obj), 'not %s(%s)' % (other.__name__, exp))
def generator_function_example(self):
for i in range(2):
yield i
class TestPredicates(IsTestBase):
def test_sixteen(self):
count = len([x for x in dir(inspect) if x.startswith('is')])
# This test is here to remind you to update Doc/library/inspect.rst,
# which claims there are 16 such functions.
expected = 16
err_msg = "There are %d (not %d) is* functions" % (count, expected)
self.assertEqual(count, expected, err_msg)
def test_excluding_predicates(self):
global tb
self.istest(inspect.isbuiltin, 'sys.exit')
self.istest(inspect.isbuiltin, '[].append')
self.istest(inspect.iscode, 'mod.spam.__code__')
try:
1/0
except:
tb = sys.exc_info()[2]
self.istest(inspect.isframe, 'tb.tb_frame')
self.istest(inspect.istraceback, 'tb')
if hasattr(types, 'GetSetDescriptorType'):
self.istest(inspect.isgetsetdescriptor,
'type(tb.tb_frame).f_locals')
else:
self.assertFalse(inspect.isgetsetdescriptor(type(tb.tb_frame).f_locals))
finally:
# Clear traceback and all the frames and local variables hanging to it.
tb = None
self.istest(inspect.isfunction, 'mod.spam')
self.istest(inspect.isfunction, 'mod.StupidGit.abuse')
self.istest(inspect.ismethod, 'git.argue')
self.istest(inspect.ismodule, 'mod')
self.istest(inspect.isdatadescriptor, 'collections.defaultdict.default_factory')
self.istest(inspect.isgenerator, '(x for x in range(2))')
self.istest(inspect.isgeneratorfunction, 'generator_function_example')
if hasattr(types, 'MemberDescriptorType'):
self.istest(inspect.ismemberdescriptor, 'datetime.timedelta.days')
else:
self.assertFalse(inspect.ismemberdescriptor(datetime.timedelta.days))
def test_isroutine(self):
self.assertTrue(inspect.isroutine(mod.spam))
self.assertTrue(inspect.isroutine([].count))
def test_isclass(self):
self.istest(inspect.isclass, 'mod.StupidGit')
self.assertTrue(inspect.isclass(list))
class CustomGetattr(object):
def __getattr__(self, attr):
return None
self.assertFalse(inspect.isclass(CustomGetattr()))
def test_get_slot_members(self):
class C(object):
__slots__ = ("a", "b")
x = C()
x.a = 42
members = dict(inspect.getmembers(x))
self.assertIn('a', members)
self.assertNotIn('b', members)
def test_isabstract(self):
from abc import ABCMeta, abstractmethod
class AbstractClassExample(metaclass=ABCMeta):
@abstractmethod
def foo(self):
pass
class ClassExample(AbstractClassExample):
def foo(self):
pass
a = ClassExample()
# Test general behaviour.
self.assertTrue(inspect.isabstract(AbstractClassExample))
self.assertFalse(inspect.isabstract(ClassExample))
self.assertFalse(inspect.isabstract(a))
self.assertFalse(inspect.isabstract(int))
self.assertFalse(inspect.isabstract(5))
class TestInterpreterStack(IsTestBase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
git.abuse(7, 8, 9)
def test_abuse_done(self):
self.istest(inspect.istraceback, 'git.ex[2]')
self.istest(inspect.isframe, 'mod.fr')
def test_stack(self):
self.assertTrue(len(mod.st) >= 5)
self.assertEqual(revise(*mod.st[0][1:]),
(modfile, 16, 'eggs', [' st = inspect.stack()\n'], 0))
self.assertEqual(revise(*mod.st[1][1:]),
(modfile, 9, 'spam', [' eggs(b + d, c + f)\n'], 0))
self.assertEqual(revise(*mod.st[2][1:]),
(modfile, 43, 'argue', [' spam(a, b, c)\n'], 0))
self.assertEqual(revise(*mod.st[3][1:]),
(modfile, 39, 'abuse', [' self.argue(a, b, c)\n'], 0))
def test_trace(self):
self.assertEqual(len(git.tr), 3)
self.assertEqual(revise(*git.tr[0][1:]),
(modfile, 43, 'argue', [' spam(a, b, c)\n'], 0))
self.assertEqual(revise(*git.tr[1][1:]),
(modfile, 9, 'spam', [' eggs(b + d, c + f)\n'], 0))
self.assertEqual(revise(*git.tr[2][1:]),
(modfile, 18, 'eggs', [' q = y / 0\n'], 0))
def test_frame(self):
args, varargs, varkw, locals = inspect.getargvalues(mod.fr)
self.assertEqual(args, ['x', 'y'])
self.assertEqual(varargs, None)
self.assertEqual(varkw, None)
self.assertEqual(locals, {'x': 11, 'p': 11, 'y': 14})
self.assertEqual(inspect.formatargvalues(args, varargs, varkw, locals),
'(x=11, y=14)')
def test_previous_frame(self):
args, varargs, varkw, locals = inspect.getargvalues(mod.fr.f_back)
self.assertEqual(args, ['a', 'b', 'c', 'd', 'e', 'f'])
self.assertEqual(varargs, 'g')
self.assertEqual(varkw, 'h')
self.assertEqual(inspect.formatargvalues(args, varargs, varkw, locals),
'(a=7, b=8, c=9, d=3, e=4, f=5, *g=(), **h={})')
class GetSourceBase(unittest.TestCase):
# Subclasses must override.
fodderModule = None
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
with open(inspect.getsourcefile(self.fodderModule)) as fp:
self.source = fp.read()
def sourcerange(self, top, bottom):
lines = self.source.split("\n")
return "\n".join(lines[top-1:bottom]) + "\n"
def assertSourceEqual(self, obj, top, bottom):
self.assertEqual(inspect.getsource(obj),
self.sourcerange(top, bottom))
class TestRetrievingSourceCode(GetSourceBase):
fodderModule = mod
def test_getclasses(self):
classes = inspect.getmembers(mod, inspect.isclass)
self.assertEqual(classes,
[('FesteringGob', mod.FesteringGob),
('MalodorousPervert', mod.MalodorousPervert),
('ParrotDroppings', mod.ParrotDroppings),
('StupidGit', mod.StupidGit),
('Tit', mod.MalodorousPervert),
])
tree = inspect.getclasstree([cls[1] for cls in classes])
self.assertEqual(tree,
[(object, ()),
[(mod.ParrotDroppings, (object,)),
[(mod.FesteringGob, (mod.MalodorousPervert,
mod.ParrotDroppings))
],
(mod.StupidGit, (object,)),
[(mod.MalodorousPervert, (mod.StupidGit,)),
[(mod.FesteringGob, (mod.MalodorousPervert,
mod.ParrotDroppings))
]
]
]
])
tree = inspect.getclasstree([cls[1] for cls in classes], True)
self.assertEqual(tree,
[(object, ()),
[(mod.ParrotDroppings, (object,)),
(mod.StupidGit, (object,)),
[(mod.MalodorousPervert, (mod.StupidGit,)),
[(mod.FesteringGob, (mod.MalodorousPervert,
mod.ParrotDroppings))
]
]
]
])
def test_getfunctions(self):
functions = inspect.getmembers(mod, inspect.isfunction)
self.assertEqual(functions, [('eggs', mod.eggs),
('spam', mod.spam)])
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_getdoc(self):
self.assertEqual(inspect.getdoc(mod), 'A module docstring.')
self.assertEqual(inspect.getdoc(mod.StupidGit),
'A longer,\n\nindented\n\ndocstring.')
self.assertEqual(inspect.getdoc(git.abuse),
'Another\n\ndocstring\n\ncontaining\n\ntabs')
def test_cleandoc(self):
self.assertEqual(inspect.cleandoc('An\n indented\n docstring.'),
'An\nindented\ndocstring.')
def test_getcomments(self):
self.assertEqual(inspect.getcomments(mod), '# line 1\n')
self.assertEqual(inspect.getcomments(mod.StupidGit), '# line 20\n')
def test_getmodule(self):
# Check actual module
self.assertEqual(inspect.getmodule(mod), mod)
# Check class (uses __module__ attribute)
self.assertEqual(inspect.getmodule(mod.StupidGit), mod)
# Check a method (no __module__ attribute, falls back to filename)
self.assertEqual(inspect.getmodule(mod.StupidGit.abuse), mod)
# Do it again (check the caching isn't broken)
self.assertEqual(inspect.getmodule(mod.StupidGit.abuse), mod)
# Check a builtin
self.assertEqual(inspect.getmodule(str), sys.modules["builtins"])
# Check filename override
self.assertEqual(inspect.getmodule(None, modfile), mod)
def test_getsource(self):
self.assertSourceEqual(git.abuse, 29, 39)
self.assertSourceEqual(mod.StupidGit, 21, 46)
def test_getsourcefile(self):
self.assertEqual(normcase(inspect.getsourcefile(mod.spam)), modfile)
self.assertEqual(normcase(inspect.getsourcefile(git.abuse)), modfile)
fn = "_non_existing_filename_used_for_sourcefile_test.py"
co = compile("None", fn, "exec")
self.assertEqual(inspect.getsourcefile(co), None)
linecache.cache[co.co_filename] = (1, None, "None", co.co_filename)
try:
self.assertEqual(normcase(inspect.getsourcefile(co)), fn)
finally:
del linecache.cache[co.co_filename]
def test_getfile(self):
self.assertEqual(inspect.getfile(mod.StupidGit), mod.__file__)
def test_getfile_class_without_module(self):
class CM(type):
@property
def __module__(cls):
raise AttributeError
class C(metaclass=CM):
pass
with self.assertRaises(TypeError):
inspect.getfile(C)
def test_getmodule_recursion(self):
from types import ModuleType
name = '__inspect_dummy'
m = sys.modules[name] = ModuleType(name)
m.__file__ = "<string>" # hopefully not a real filename...
m.__loader__ = "dummy" # pretend the filename is understood by a loader
exec("def x(): pass", m.__dict__)
self.assertEqual(inspect.getsourcefile(m.x.__code__), '<string>')
del sys.modules[name]
inspect.getmodule(compile('a=10','','single'))
def test_proceed_with_fake_filename(self):
'''doctest monkeypatches linecache to enable inspection'''
fn, source = '<test>', 'def x(): pass\n'
getlines = linecache.getlines
def monkey(filename, module_globals=None):
if filename == fn:
return source.splitlines(keepends=True)
else:
return getlines(filename, module_globals)
linecache.getlines = monkey
try:
ns = {}
exec(compile(source, fn, 'single'), ns)
inspect.getsource(ns["x"])
finally:
linecache.getlines = getlines
class TestDecorators(GetSourceBase):
fodderModule = mod2
def test_wrapped_decorator(self):
self.assertSourceEqual(mod2.wrapped, 14, 17)
def test_replacing_decorator(self):
self.assertSourceEqual(mod2.gone, 9, 10)
class TestOneliners(GetSourceBase):
fodderModule = mod2
def test_oneline_lambda(self):
# Test inspect.getsource with a one-line lambda function.
self.assertSourceEqual(mod2.oll, 25, 25)
def test_threeline_lambda(self):
# Test inspect.getsource with a three-line lambda function,
# where the second and third lines are _not_ indented.
self.assertSourceEqual(mod2.tll, 28, 30)
def test_twoline_indented_lambda(self):
# Test inspect.getsource with a two-line lambda function,
# where the second line _is_ indented.
self.assertSourceEqual(mod2.tlli, 33, 34)
def test_onelinefunc(self):
# Test inspect.getsource with a regular one-line function.
self.assertSourceEqual(mod2.onelinefunc, 37, 37)
def test_manyargs(self):
# Test inspect.getsource with a regular function where
# the arguments are on two lines and _not_ indented and
# the body on the second line with the last arguments.
self.assertSourceEqual(mod2.manyargs, 40, 41)
def test_twolinefunc(self):
# Test inspect.getsource with a regular function where
# the body is on two lines, following the argument list and
# continued on the next line by a \\.
self.assertSourceEqual(mod2.twolinefunc, 44, 45)
def test_lambda_in_list(self):
# Test inspect.getsource with a one-line lambda function
# defined in a list, indented.
self.assertSourceEqual(mod2.a[1], 49, 49)
def test_anonymous(self):
# Test inspect.getsource with a lambda function defined
# as argument to another function.
self.assertSourceEqual(mod2.anonymous, 55, 55)
class TestBuggyCases(GetSourceBase):
fodderModule = mod2
def test_with_comment(self):
self.assertSourceEqual(mod2.with_comment, 58, 59)
def test_multiline_sig(self):
self.assertSourceEqual(mod2.multiline_sig[0], 63, 64)
def test_nested_class(self):
self.assertSourceEqual(mod2.func69().func71, 71, 72)
def test_one_liner_followed_by_non_name(self):
self.assertSourceEqual(mod2.func77, 77, 77)
def test_one_liner_dedent_non_name(self):
self.assertSourceEqual(mod2.cls82.func83, 83, 83)
def test_with_comment_instead_of_docstring(self):
self.assertSourceEqual(mod2.func88, 88, 90)
def test_method_in_dynamic_class(self):
self.assertSourceEqual(mod2.method_in_dynamic_class, 95, 97)
# This should not skip for CPython, but might on a repackaged python where
# unicodedata is not an external module, or on pypy.
@unittest.skipIf(not hasattr(unicodedata, '__file__') or
unicodedata.__file__.endswith('.py'),
"unicodedata is not an external binary module")
def test_findsource_binary(self):
self.assertRaises(OSError, inspect.getsource, unicodedata)
self.assertRaises(OSError, inspect.findsource, unicodedata)
def test_findsource_code_in_linecache(self):
lines = ["x=1"]
co = compile(lines[0], "_dynamically_created_file", "exec")
self.assertRaises(OSError, inspect.findsource, co)
self.assertRaises(OSError, inspect.getsource, co)
linecache.cache[co.co_filename] = (1, None, lines, co.co_filename)
try:
self.assertEqual(inspect.findsource(co), (lines,0))
self.assertEqual(inspect.getsource(co), lines[0])
finally:
del linecache.cache[co.co_filename]
def test_findsource_without_filename(self):
for fname in ['', '<string>']:
co = compile('x=1', fname, "exec")
self.assertRaises(IOError, inspect.findsource, co)
self.assertRaises(IOError, inspect.getsource, co)
class TestNoEOL(GetSourceBase):
def __init__(self, *args, **kwargs):
self.tempdir = TESTFN + '_dir'
os.mkdir(self.tempdir)
with open(os.path.join(self.tempdir,
'inspect_fodder3%spy' % os.extsep), 'w') as f:
f.write("class X:\n pass # No EOL")
with DirsOnSysPath(self.tempdir):
import inspect_fodder3 as mod3
self.fodderModule = mod3
GetSourceBase.__init__(self, *args, **kwargs)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_class(self):
self.assertSourceEqual(self.fodderModule.X, 1, 2)
class _BrokenDataDescriptor(object):
"""
A broken data descriptor. See bug #1785.
"""
def __get__(*args):
raise AttributeError("broken data descriptor")
def __set__(*args):
raise RuntimeError
def __getattr__(*args):
raise AttributeError("broken data descriptor")
class _BrokenMethodDescriptor(object):
"""
A broken method descriptor. See bug #1785.
"""
def __get__(*args):
raise AttributeError("broken method descriptor")
def __getattr__(*args):
raise AttributeError("broken method descriptor")
# Helper for testing classify_class_attrs.
def attrs_wo_objs(cls):
return [t[:3] for t in inspect.classify_class_attrs(cls)]
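# Hedged mini-demo of inspect.classify_class_attrs(), which the helper above
# wraps; Sample is a made-up class, not one of the test fodder modules.
#
#   class Sample(object):
#       x = 1
#       def m(self): pass
#
#   kinds = {a.name: a.kind for a in inspect.classify_class_attrs(Sample)}
#   assert kinds['x'] == 'data' and kinds['m'] == 'method'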
class TestClassesAndFunctions(unittest.TestCase):
def test_newstyle_mro(self):
# The same w/ new-class MRO.
class A(object): pass
class B(A): pass
class C(A): pass
class D(B, C): pass
expected = (D, B, C, A, object)
got = inspect.getmro(D)
self.assertEqual(expected, got)
def assertArgSpecEquals(self, routine, args_e, varargs_e=None,
varkw_e=None, defaults_e=None, formatted=None):
args, varargs, varkw, defaults = inspect.getargspec(routine)
self.assertEqual(args, args_e)
self.assertEqual(varargs, varargs_e)
self.assertEqual(varkw, varkw_e)
self.assertEqual(defaults, defaults_e)
if formatted is not None:
self.assertEqual(inspect.formatargspec(args, varargs, varkw, defaults),
formatted)
def assertFullArgSpecEquals(self, routine, args_e, varargs_e=None,
varkw_e=None, defaults_e=None,
kwonlyargs_e=[], kwonlydefaults_e=None,
ann_e={}, formatted=None):
args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = \
inspect.getfullargspec(routine)
self.assertEqual(args, args_e)
self.assertEqual(varargs, varargs_e)
self.assertEqual(varkw, varkw_e)
self.assertEqual(defaults, defaults_e)
self.assertEqual(kwonlyargs, kwonlyargs_e)
self.assertEqual(kwonlydefaults, kwonlydefaults_e)
self.assertEqual(ann, ann_e)
if formatted is not None:
self.assertEqual(inspect.formatargspec(args, varargs, varkw, defaults,
kwonlyargs, kwonlydefaults, ann),
formatted)
def test_getargspec(self):
self.assertArgSpecEquals(mod.eggs, ['x', 'y'], formatted='(x, y)')
self.assertArgSpecEquals(mod.spam,
['a', 'b', 'c', 'd', 'e', 'f'],
'g', 'h', (3, 4, 5),
'(a, b, c, d=3, e=4, f=5, *g, **h)')
self.assertRaises(ValueError, self.assertArgSpecEquals,
mod2.keyworded, [])
self.assertRaises(ValueError, self.assertArgSpecEquals,
mod2.annotated, [])
self.assertRaises(ValueError, self.assertArgSpecEquals,
mod2.keyword_only_arg, [])
def test_getfullargspec(self):
self.assertFullArgSpecEquals(mod2.keyworded, [], varargs_e='arg1',
kwonlyargs_e=['arg2'],
kwonlydefaults_e={'arg2':1},
formatted='(*arg1, arg2=1)')
self.assertFullArgSpecEquals(mod2.annotated, ['arg1'],
ann_e={'arg1' : list},
formatted='(arg1: list)')
self.assertFullArgSpecEquals(mod2.keyword_only_arg, [],
kwonlyargs_e=['arg'],
formatted='(*, arg)')
def test_argspec_api_ignores_wrapped(self):
# Issue 20684: low level introspection API must ignore __wrapped__
@functools.wraps(mod.spam)
def ham(x, y):
pass
# Basic check
self.assertArgSpecEquals(ham, ['x', 'y'], formatted='(x, y)')
self.assertFullArgSpecEquals(ham, ['x', 'y'], formatted='(x, y)')
self.assertFullArgSpecEquals(functools.partial(ham),
['x', 'y'], formatted='(x, y)')
# Other variants
def check_method(f):
self.assertArgSpecEquals(f, ['self', 'x', 'y'],
formatted='(self, x, y)')
class C:
@functools.wraps(mod.spam)
def ham(self, x, y):
pass
pham = functools.partialmethod(ham)
@functools.wraps(mod.spam)
def __call__(self, x, y):
pass
check_method(C())
check_method(C.ham)
check_method(C().ham)
check_method(C.pham)
check_method(C().pham)
class C_new:
@functools.wraps(mod.spam)
def __new__(self, x, y):
pass
check_method(C_new)
class C_init:
@functools.wraps(mod.spam)
def __init__(self, x, y):
pass
check_method(C_init)
def test_getfullargspec_signature_attr(self):
def test():
pass
spam_param = inspect.Parameter('spam', inspect.Parameter.POSITIONAL_ONLY)
test.__signature__ = inspect.Signature(parameters=(spam_param,))
self.assertFullArgSpecEquals(test, args_e=['spam'], formatted='(spam)')
def test_getfullargspec_signature_annos(self):
def test(a:'spam') -> 'ham': pass
spec = inspect.getfullargspec(test)
self.assertEqual(test.__annotations__, spec.annotations)
def test(): pass
spec = inspect.getfullargspec(test)
self.assertEqual(test.__annotations__, spec.annotations)
@unittest.skipIf(MISSING_C_DOCSTRINGS,
"Signature information for builtins requires docstrings")
def test_getfullargspec_builtin_methods(self):
self.assertFullArgSpecEquals(_pickle.Pickler.dump,
args_e=['self', 'obj'], formatted='(self, obj)')
self.assertFullArgSpecEquals(_pickle.Pickler(io.BytesIO()).dump,
args_e=['self', 'obj'], formatted='(self, obj)')
self.assertFullArgSpecEquals(
os.stat,
args_e=['path'],
kwonlyargs_e=['dir_fd', 'follow_symlinks'],
kwonlydefaults_e={'dir_fd': None, 'follow_symlinks': True},
formatted='(path, *, dir_fd=None, follow_symlinks=True)')
@cpython_only
@unittest.skipIf(MISSING_C_DOCSTRINGS,
"Signature information for builtins requires docstrings")
def test_getfullargspec_builtin_func(self):
import _testcapi
builtin = _testcapi.docstring_with_signature_with_defaults
spec = inspect.getfullargspec(builtin)
self.assertEqual(spec.defaults[0], 'avocado')
@cpython_only
@unittest.skipIf(MISSING_C_DOCSTRINGS,
"Signature information for builtins requires docstrings")
def test_getfullargspec_builtin_func_no_signature(self):
import _testcapi
builtin = _testcapi.docstring_no_signature
with self.assertRaises(TypeError):
inspect.getfullargspec(builtin)
def test_getargspec_method(self):
class A(object):
def m(self):
pass
self.assertArgSpecEquals(A.m, ['self'])
def test_classify_newstyle(self):
class A(object):
def s(): pass
s = staticmethod(s)
def c(cls): pass
c = classmethod(c)
def getp(self): pass
p = property(getp)
def m(self): pass
def m1(self): pass
datablob = '1'
dd = _BrokenDataDescriptor()
md = _BrokenMethodDescriptor()
attrs = attrs_wo_objs(A)
self.assertIn(('__new__', 'method', object), attrs, 'missing __new__')
self.assertIn(('__init__', 'method', object), attrs, 'missing __init__')
self.assertIn(('s', 'static method', A), attrs, 'missing static method')
self.assertIn(('c', 'class method', A), attrs, 'missing class method')
self.assertIn(('p', 'property', A), attrs, 'missing property')
self.assertIn(('m', 'method', A), attrs,
'missing plain method: %r' % attrs)
self.assertIn(('m1', 'method', A), attrs, 'missing plain method')
self.assertIn(('datablob', 'data', A), attrs, 'missing data')
self.assertIn(('md', 'method', A), attrs, 'missing method descriptor')
self.assertIn(('dd', 'data', A), attrs, 'missing data descriptor')
class B(A):
def m(self): pass
attrs = attrs_wo_objs(B)
self.assertIn(('s', 'static method', A), attrs, 'missing static method')
self.assertIn(('c', 'class method', A), attrs, 'missing class method')
self.assertIn(('p', 'property', A), attrs, 'missing property')
self.assertIn(('m', 'method', B), attrs, 'missing plain method')
self.assertIn(('m1', 'method', A), attrs, 'missing plain method')
self.assertIn(('datablob', 'data', A), attrs, 'missing data')
self.assertIn(('md', 'method', A), attrs, 'missing method descriptor')
self.assertIn(('dd', 'data', A), attrs, 'missing data descriptor')
class C(A):
def m(self): pass
def c(self): pass
attrs = attrs_wo_objs(C)
self.assertIn(('s', 'static method', A), attrs, 'missing static method')
self.assertIn(('c', 'method', C), attrs, 'missing plain method')
self.assertIn(('p', 'property', A), attrs, 'missing property')
self.assertIn(('m', 'method', C), attrs, 'missing plain method')
self.assertIn(('m1', 'method', A), attrs, 'missing plain method')
self.assertIn(('datablob', 'data', A), attrs, 'missing data')
self.assertIn(('md', 'method', A), attrs, 'missing method descriptor')
self.assertIn(('dd', 'data', A), attrs, 'missing data descriptor')
class D(B, C):
def m1(self): pass
attrs = attrs_wo_objs(D)
self.assertIn(('s', 'static method', A), attrs, 'missing static method')
self.assertIn(('c', 'method', C), attrs, 'missing plain method')
self.assertIn(('p', 'property', A), attrs, 'missing property')
self.assertIn(('m', 'method', B), attrs, 'missing plain method')
self.assertIn(('m1', 'method', D), attrs, 'missing plain method')
self.assertIn(('datablob', 'data', A), attrs, 'missing data')
self.assertIn(('md', 'method', A), attrs, 'missing method descriptor')
self.assertIn(('dd', 'data', A), attrs, 'missing data descriptor')
def test_classify_builtin_types(self):
# Simple sanity check that all built-in types can have their
# attributes classified.
for name in dir(__builtins__):
builtin = getattr(__builtins__, name)
if isinstance(builtin, type):
inspect.classify_class_attrs(builtin)
def test_classify_DynamicClassAttribute(self):
class Meta(type):
def __getattr__(self, name):
if name == 'ham':
return 'spam'
return super().__getattr__(name)
class VA(metaclass=Meta):
@types.DynamicClassAttribute
def ham(self):
return 'eggs'
should_find_dca = inspect.Attribute('ham', 'data', VA, VA.__dict__['ham'])
self.assertIn(should_find_dca, inspect.classify_class_attrs(VA))
should_find_ga = inspect.Attribute('ham', 'data', Meta, 'spam')
self.assertIn(should_find_ga, inspect.classify_class_attrs(VA))
def test_classify_metaclass_class_attribute(self):
class Meta(type):
fish = 'slap'
def __dir__(self):
return ['__class__', '__modules__', '__name__', 'fish']
class Class(metaclass=Meta):
pass
should_find = inspect.Attribute('fish', 'data', Meta, 'slap')
self.assertIn(should_find, inspect.classify_class_attrs(Class))
def test_classify_VirtualAttribute(self):
class Meta(type):
def __dir__(cls):
return ['__class__', '__module__', '__name__', 'BOOM']
def __getattr__(self, name):
if name =='BOOM':
return 42
return super().__getattr__(name)
class Class(metaclass=Meta):
pass
should_find = inspect.Attribute('BOOM', 'data', Meta, 42)
self.assertIn(should_find, inspect.classify_class_attrs(Class))
def test_classify_VirtualAttribute_multi_classes(self):
class Meta1(type):
def __dir__(cls):
return ['__class__', '__module__', '__name__', 'one']
def __getattr__(self, name):
if name =='one':
return 1
return super().__getattr__(name)
class Meta2(type):
def __dir__(cls):
return ['__class__', '__module__', '__name__', 'two']
def __getattr__(self, name):
if name =='two':
return 2
return super().__getattr__(name)
class Meta3(Meta1, Meta2):
def __dir__(cls):
return list(sorted(set(['__class__', '__module__', '__name__', 'three'] +
Meta1.__dir__(cls) + Meta2.__dir__(cls))))
def __getattr__(self, name):
if name =='three':
return 3
return super().__getattr__(name)
class Class1(metaclass=Meta1):
pass
class Class2(Class1, metaclass=Meta3):
pass
should_find1 = inspect.Attribute('one', 'data', Meta1, 1)
should_find2 = inspect.Attribute('two', 'data', Meta2, 2)
should_find3 = inspect.Attribute('three', 'data', Meta3, 3)
cca = inspect.classify_class_attrs(Class2)
for sf in (should_find1, should_find2, should_find3):
self.assertIn(sf, cca)
def test_classify_class_attrs_with_buggy_dir(self):
class M(type):
def __dir__(cls):
return ['__class__', '__name__', 'missing']
class C(metaclass=M):
pass
attrs = [a[0] for a in inspect.classify_class_attrs(C)]
self.assertNotIn('missing', attrs)
def test_getmembers_descriptors(self):
class A(object):
dd = _BrokenDataDescriptor()
md = _BrokenMethodDescriptor()
def pred_wrapper(pred):
# A quick'n'dirty way to discard standard attributes of new-style
# classes.
class Empty(object):
pass
def wrapped(x):
if '__name__' in dir(x) and hasattr(Empty, x.__name__):
return False
return pred(x)
return wrapped
ismethoddescriptor = pred_wrapper(inspect.ismethoddescriptor)
isdatadescriptor = pred_wrapper(inspect.isdatadescriptor)
self.assertEqual(inspect.getmembers(A, ismethoddescriptor),
[('md', A.__dict__['md'])])
self.assertEqual(inspect.getmembers(A, isdatadescriptor),
[('dd', A.__dict__['dd'])])
class B(A):
pass
self.assertEqual(inspect.getmembers(B, ismethoddescriptor),
[('md', A.__dict__['md'])])
self.assertEqual(inspect.getmembers(B, isdatadescriptor),
[('dd', A.__dict__['dd'])])
def test_getmembers_method(self):
class B:
def f(self):
pass
self.assertIn(('f', B.f), inspect.getmembers(B))
self.assertNotIn(('f', B.f), inspect.getmembers(B, inspect.ismethod))
b = B()
self.assertIn(('f', b.f), inspect.getmembers(b))
self.assertIn(('f', b.f), inspect.getmembers(b, inspect.ismethod))
def test_getmembers_VirtualAttribute(self):
class M(type):
def __getattr__(cls, name):
if name == 'eggs':
return 'scrambled'
return super().__getattr__(name)
class A(metaclass=M):
@types.DynamicClassAttribute
def eggs(self):
return 'spam'
self.assertIn(('eggs', 'scrambled'), inspect.getmembers(A))
self.assertIn(('eggs', 'spam'), inspect.getmembers(A()))
def test_getmembers_with_buggy_dir(self):
class M(type):
def __dir__(cls):
return ['__class__', '__name__', 'missing']
class C(metaclass=M):
pass
attrs = [a[0] for a in inspect.getmembers(C)]
self.assertNotIn('missing', attrs)
_global_ref = object()
class TestGetClosureVars(unittest.TestCase):
def test_name_resolution(self):
# Basic test of the 4 different resolution mechanisms
def f(nonlocal_ref):
def g(local_ref):
print(local_ref, nonlocal_ref, _global_ref, unbound_ref)
return g
_arg = object()
nonlocal_vars = {"nonlocal_ref": _arg}
global_vars = {"_global_ref": _global_ref}
builtin_vars = {"print": print}
unbound_names = {"unbound_ref"}
expected = inspect.ClosureVars(nonlocal_vars, global_vars,
builtin_vars, unbound_names)
self.assertEqual(inspect.getclosurevars(f(_arg)), expected)
def test_generator_closure(self):
def f(nonlocal_ref):
def g(local_ref):
print(local_ref, nonlocal_ref, _global_ref, unbound_ref)
yield
return g
_arg = object()
nonlocal_vars = {"nonlocal_ref": _arg}
global_vars = {"_global_ref": _global_ref}
builtin_vars = {"print": print}
unbound_names = {"unbound_ref"}
expected = inspect.ClosureVars(nonlocal_vars, global_vars,
builtin_vars, unbound_names)
self.assertEqual(inspect.getclosurevars(f(_arg)), expected)
def test_method_closure(self):
class C:
def f(self, nonlocal_ref):
def g(local_ref):
print(local_ref, nonlocal_ref, _global_ref, unbound_ref)
return g
_arg = object()
nonlocal_vars = {"nonlocal_ref": _arg}
global_vars = {"_global_ref": _global_ref}
builtin_vars = {"print": print}
unbound_names = {"unbound_ref"}
expected = inspect.ClosureVars(nonlocal_vars, global_vars,
builtin_vars, unbound_names)
self.assertEqual(inspect.getclosurevars(C().f(_arg)), expected)
def test_nonlocal_vars(self):
# More complex tests of nonlocal resolution
def _nonlocal_vars(f):
return inspect.getclosurevars(f).nonlocals
def make_adder(x):
def add(y):
return x + y
return add
def curry(func, arg1):
return lambda arg2: func(arg1, arg2)
def less_than(a, b):
return a < b
# The infamous Y combinator.
def Y(le):
def g(f):
return le(lambda x: f(f)(x))
Y.g_ref = g
return g(g)
def check_y_combinator(func):
self.assertEqual(_nonlocal_vars(func), {'f': Y.g_ref})
inc = make_adder(1)
add_two = make_adder(2)
greater_than_five = curry(less_than, 5)
self.assertEqual(_nonlocal_vars(inc), {'x': 1})
self.assertEqual(_nonlocal_vars(add_two), {'x': 2})
self.assertEqual(_nonlocal_vars(greater_than_five),
{'arg1': 5, 'func': less_than})
self.assertEqual(_nonlocal_vars((lambda x: lambda y: x + y)(3)),
{'x': 3})
Y(check_y_combinator)
def test_getclosurevars_empty(self):
def foo(): pass
_empty = inspect.ClosureVars({}, {}, {}, set())
self.assertEqual(inspect.getclosurevars(lambda: True), _empty)
self.assertEqual(inspect.getclosurevars(foo), _empty)
def test_getclosurevars_error(self):
class T: pass
self.assertRaises(TypeError, inspect.getclosurevars, 1)
self.assertRaises(TypeError, inspect.getclosurevars, list)
self.assertRaises(TypeError, inspect.getclosurevars, {})
def _private_globals(self):
code = """def f(): print(path)"""
ns = {}
exec(code, ns)
return ns["f"], ns
def test_builtins_fallback(self):
f, ns = self._private_globals()
ns.pop("__builtins__", None)
expected = inspect.ClosureVars({}, {}, {"print":print}, {"path"})
self.assertEqual(inspect.getclosurevars(f), expected)
def test_builtins_as_dict(self):
f, ns = self._private_globals()
ns["__builtins__"] = {"path":1}
expected = inspect.ClosureVars({}, {}, {"path":1}, {"print"})
self.assertEqual(inspect.getclosurevars(f), expected)
def test_builtins_as_module(self):
f, ns = self._private_globals()
ns["__builtins__"] = os
expected = inspect.ClosureVars({}, {}, {"path":os.path}, {"print"})
self.assertEqual(inspect.getclosurevars(f), expected)
class TestGetcallargsFunctions(unittest.TestCase):
def assertEqualCallArgs(self, func, call_params_string, locs=None):
locs = dict(locs or {}, func=func)
r1 = eval('func(%s)' % call_params_string, None, locs)
r2 = eval('inspect.getcallargs(func, %s)' % call_params_string, None,
locs)
self.assertEqual(r1, r2)
def assertEqualException(self, func, call_param_string, locs=None):
locs = dict(locs or {}, func=func)
try:
eval('func(%s)' % call_param_string, None, locs)
except Exception as e:
ex1 = e
else:
self.fail('Exception not raised')
try:
eval('inspect.getcallargs(func, %s)' % call_param_string, None,
locs)
except Exception as e:
ex2 = e
else:
self.fail('Exception not raised')
self.assertIs(type(ex1), type(ex2))
self.assertEqual(str(ex1), str(ex2))
del ex1, ex2
def makeCallable(self, signature):
"""Create a function that returns its locals()"""
code = "lambda %s: locals()"
return eval(code % signature)
def test_plain(self):
f = self.makeCallable('a, b=1')
self.assertEqualCallArgs(f, '2')
self.assertEqualCallArgs(f, '2, 3')
self.assertEqualCallArgs(f, 'a=2')
self.assertEqualCallArgs(f, 'b=3, a=2')
self.assertEqualCallArgs(f, '2, b=3')
# expand *iterable / **mapping
self.assertEqualCallArgs(f, '*(2,)')
self.assertEqualCallArgs(f, '*[2]')
self.assertEqualCallArgs(f, '*(2, 3)')
self.assertEqualCallArgs(f, '*[2, 3]')
self.assertEqualCallArgs(f, '**{"a":2}')
self.assertEqualCallArgs(f, 'b=3, **{"a":2}')
self.assertEqualCallArgs(f, '2, **{"b":3}')
self.assertEqualCallArgs(f, '**{"b":3, "a":2}')
# expand UserList / UserDict
self.assertEqualCallArgs(f, '*collections.UserList([2])')
self.assertEqualCallArgs(f, '*collections.UserList([2, 3])')
self.assertEqualCallArgs(f, '**collections.UserDict(a=2)')
self.assertEqualCallArgs(f, '2, **collections.UserDict(b=3)')
self.assertEqualCallArgs(f, 'b=2, **collections.UserDict(a=3)')
def test_varargs(self):
f = self.makeCallable('a, b=1, *c')
self.assertEqualCallArgs(f, '2')
self.assertEqualCallArgs(f, '2, 3')
self.assertEqualCallArgs(f, '2, 3, 4')
self.assertEqualCallArgs(f, '*(2,3,4)')
self.assertEqualCallArgs(f, '2, *[3,4]')
self.assertEqualCallArgs(f, '2, 3, *collections.UserList([4])')
def test_varkw(self):
f = self.makeCallable('a, b=1, **c')
self.assertEqualCallArgs(f, 'a=2')
self.assertEqualCallArgs(f, '2, b=3, c=4')
self.assertEqualCallArgs(f, 'b=3, a=2, c=4')
self.assertEqualCallArgs(f, 'c=4, **{"a":2, "b":3}')
self.assertEqualCallArgs(f, '2, c=4, **{"b":3}')
self.assertEqualCallArgs(f, 'b=2, **{"a":3, "c":4}')
self.assertEqualCallArgs(f, '**collections.UserDict(a=2, b=3, c=4)')
self.assertEqualCallArgs(f, '2, c=4, **collections.UserDict(b=3)')
self.assertEqualCallArgs(f, 'b=2, **collections.UserDict(a=3, c=4)')
def test_varkw_only(self):
# issue11256:
f = self.makeCallable('**c')
self.assertEqualCallArgs(f, '')
self.assertEqualCallArgs(f, 'a=1')
self.assertEqualCallArgs(f, 'a=1, b=2')
self.assertEqualCallArgs(f, 'c=3, **{"a": 1, "b": 2}')
self.assertEqualCallArgs(f, '**collections.UserDict(a=1, b=2)')
self.assertEqualCallArgs(f, 'c=3, **collections.UserDict(a=1, b=2)')
def test_keyword_only(self):
f = self.makeCallable('a=3, *, c, d=2')
self.assertEqualCallArgs(f, 'c=3')
self.assertEqualCallArgs(f, 'c=3, a=3')
self.assertEqualCallArgs(f, 'a=2, c=4')
self.assertEqualCallArgs(f, '4, c=4')
self.assertEqualException(f, '')
self.assertEqualException(f, '3')
self.assertEqualException(f, 'a=3')
self.assertEqualException(f, 'd=4')
f = self.makeCallable('*, c, d=2')
self.assertEqualCallArgs(f, 'c=3')
self.assertEqualCallArgs(f, 'c=3, d=4')
self.assertEqualCallArgs(f, 'd=4, c=3')
def test_multiple_features(self):
f = self.makeCallable('a, b=2, *f, **g')
self.assertEqualCallArgs(f, '2, 3, 7')
self.assertEqualCallArgs(f, '2, 3, x=8')
self.assertEqualCallArgs(f, '2, 3, x=8, *[(4,[5,6]), 7]')
self.assertEqualCallArgs(f, '2, x=8, *[3, (4,[5,6]), 7], y=9')
self.assertEqualCallArgs(f, 'x=8, *[2, 3, (4,[5,6])], y=9')
self.assertEqualCallArgs(f, 'x=8, *collections.UserList('
'[2, 3, (4,[5,6])]), **{"y":9, "z":10}')
self.assertEqualCallArgs(f, '2, x=8, *collections.UserList([3, '
'(4,[5,6])]), **collections.UserDict('
'y=9, z=10)')
f = self.makeCallable('a, b=2, *f, x, y=99, **g')
self.assertEqualCallArgs(f, '2, 3, x=8')
self.assertEqualCallArgs(f, '2, 3, x=8, *[(4,[5,6]), 7]')
self.assertEqualCallArgs(f, '2, x=8, *[3, (4,[5,6]), 7], y=9, z=10')
self.assertEqualCallArgs(f, 'x=8, *[2, 3, (4,[5,6])], y=9, z=10')
self.assertEqualCallArgs(f, 'x=8, *collections.UserList('
'[2, 3, (4,[5,6])]), q=0, **{"y":9, "z":10}')
self.assertEqualCallArgs(f, '2, x=8, *collections.UserList([3, '
'(4,[5,6])]), q=0, **collections.UserDict('
'y=9, z=10)')
def test_errors(self):
f0 = self.makeCallable('')
f1 = self.makeCallable('a, b')
f2 = self.makeCallable('a, b=1')
# f0 takes no arguments
self.assertEqualException(f0, '1')
self.assertEqualException(f0, 'x=1')
self.assertEqualException(f0, '1,x=1')
# f1 takes exactly 2 arguments
self.assertEqualException(f1, '')
self.assertEqualException(f1, '1')
self.assertEqualException(f1, 'a=2')
self.assertEqualException(f1, 'b=3')
# f2 takes at least 1 argument
self.assertEqualException(f2, '')
self.assertEqualException(f2, 'b=3')
for f in f1, f2:
# f1/f2 takes exactly/at most 2 arguments
self.assertEqualException(f, '2, 3, 4')
self.assertEqualException(f, '1, 2, 3, a=1')
self.assertEqualException(f, '2, 3, 4, c=5')
# XXX: success of this one depends on dict order
## self.assertEqualException(f, '2, 3, 4, a=1, c=5')
# f got an unexpected keyword argument
self.assertEqualException(f, 'c=2')
self.assertEqualException(f, '2, c=3')
self.assertEqualException(f, '2, 3, c=4')
self.assertEqualException(f, '2, c=4, b=3')
self.assertEqualException(f, '**{u"\u03c0\u03b9": 4}')
# f got multiple values for keyword argument
self.assertEqualException(f, '1, a=2')
self.assertEqualException(f, '1, **{"a":2}')
self.assertEqualException(f, '1, 2, b=3')
# XXX: Python inconsistency
# - for functions and bound methods: unexpected keyword 'c'
# - for unbound methods: multiple values for keyword 'a'
#self.assertEqualException(f, '1, c=3, a=2')
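            # A sketch of the inconsistency (not asserted here): with f1 as
            # a plain function, f1(1, c=3, a=2) reports the unexpected
            # keyword 'c' first; routed through an unbound method, 'inst'
            # fills 'self' and the same arguments report multiple values
            # for 'a' instead, so no single regex fits both.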
# issue11256:
f3 = self.makeCallable('**c')
self.assertEqualException(f3, '1, 2')
self.assertEqualException(f3, '1, 2, a=1, b=2')
f4 = self.makeCallable('*, a, b=0')
        self.assertEqualException(f4, '1, 2')
        self.assertEqualException(f4, '1, 2, a=1, b=2')
# issue #20816: getcallargs() fails to iterate over non-existent
# kwonlydefaults and raises a wrong TypeError
def f5(*, a): pass
with self.assertRaisesRegex(TypeError,
'missing 1 required keyword-only'):
inspect.getcallargs(f5)
# issue20817:
def f6(a, b, c):
pass
with self.assertRaisesRegex(TypeError, "'a', 'b' and 'c'"):
inspect.getcallargs(f6)
class TestGetcallargsMethods(TestGetcallargsFunctions):
def setUp(self):
class Foo(object):
pass
self.cls = Foo
self.inst = Foo()
def makeCallable(self, signature):
assert 'self' not in signature
mk = super(TestGetcallargsMethods, self).makeCallable
self.cls.method = mk('self, ' + signature)
return self.inst.method
class TestGetcallargsUnboundMethods(TestGetcallargsMethods):
def makeCallable(self, signature):
super(TestGetcallargsUnboundMethods, self).makeCallable(signature)
return self.cls.method
def assertEqualCallArgs(self, func, call_params_string, locs=None):
return super(TestGetcallargsUnboundMethods, self).assertEqualCallArgs(
*self._getAssertEqualParams(func, call_params_string, locs))
def assertEqualException(self, func, call_params_string, locs=None):
return super(TestGetcallargsUnboundMethods, self).assertEqualException(
*self._getAssertEqualParams(func, call_params_string, locs))
def _getAssertEqualParams(self, func, call_params_string, locs=None):
assert 'inst' not in call_params_string
locs = dict(locs or {}, inst=self.inst)
return (func, 'inst,' + call_params_string, locs)
class TestGetattrStatic(unittest.TestCase):
def test_basic(self):
class Thing(object):
x = object()
thing = Thing()
self.assertEqual(inspect.getattr_static(thing, 'x'), Thing.x)
self.assertEqual(inspect.getattr_static(thing, 'x', None), Thing.x)
with self.assertRaises(AttributeError):
inspect.getattr_static(thing, 'y')
self.assertEqual(inspect.getattr_static(thing, 'y', 3), 3)
def test_inherited(self):
class Thing(object):
x = object()
class OtherThing(Thing):
pass
something = OtherThing()
self.assertEqual(inspect.getattr_static(something, 'x'), Thing.x)
def test_instance_attr(self):
class Thing(object):
x = 2
def __init__(self, x):
self.x = x
thing = Thing(3)
self.assertEqual(inspect.getattr_static(thing, 'x'), 3)
del thing.x
self.assertEqual(inspect.getattr_static(thing, 'x'), 2)
def test_property(self):
class Thing(object):
@property
def x(self):
raise AttributeError("I'm pretending not to exist")
thing = Thing()
self.assertEqual(inspect.getattr_static(thing, 'x'), Thing.x)
def test_descriptor_raises_AttributeError(self):
class descriptor(object):
def __get__(*_):
raise AttributeError("I'm pretending not to exist")
desc = descriptor()
class Thing(object):
x = desc
thing = Thing()
self.assertEqual(inspect.getattr_static(thing, 'x'), desc)
def test_classAttribute(self):
class Thing(object):
x = object()
self.assertEqual(inspect.getattr_static(Thing, 'x'), Thing.x)
def test_classVirtualAttribute(self):
class Thing(object):
@types.DynamicClassAttribute
def x(self):
return self._x
_x = object()
self.assertEqual(inspect.getattr_static(Thing, 'x'), Thing.__dict__['x'])
def test_inherited_classattribute(self):
class Thing(object):
x = object()
class OtherThing(Thing):
pass
self.assertEqual(inspect.getattr_static(OtherThing, 'x'), Thing.x)
def test_slots(self):
class Thing(object):
y = 'bar'
__slots__ = ['x']
def __init__(self):
self.x = 'foo'
thing = Thing()
self.assertEqual(inspect.getattr_static(thing, 'x'), Thing.x)
self.assertEqual(inspect.getattr_static(thing, 'y'), 'bar')
del thing.x
self.assertEqual(inspect.getattr_static(thing, 'x'), Thing.x)
def test_metaclass(self):
class meta(type):
attr = 'foo'
class Thing(object, metaclass=meta):
pass
self.assertEqual(inspect.getattr_static(Thing, 'attr'), 'foo')
class sub(meta):
pass
class OtherThing(object, metaclass=sub):
x = 3
self.assertEqual(inspect.getattr_static(OtherThing, 'attr'), 'foo')
class OtherOtherThing(OtherThing):
pass
# this test is odd, but it was added as it exposed a bug
self.assertEqual(inspect.getattr_static(OtherOtherThing, 'x'), 3)
def test_no_dict_no_slots(self):
self.assertEqual(inspect.getattr_static(1, 'foo', None), None)
self.assertNotEqual(inspect.getattr_static('foo', 'lower'), None)
def test_no_dict_no_slots_instance_member(self):
# returns descriptor
with open(__file__) as handle:
self.assertEqual(inspect.getattr_static(handle, 'name'), type(handle).name)
def test_inherited_slots(self):
# returns descriptor
class Thing(object):
__slots__ = ['x']
def __init__(self):
self.x = 'foo'
class OtherThing(Thing):
pass
# it would be nice if this worked...
# we get the descriptor instead of the instance attribute
self.assertEqual(inspect.getattr_static(OtherThing(), 'x'), Thing.x)
def test_descriptor(self):
class descriptor(object):
def __get__(self, instance, owner):
return 3
class Foo(object):
d = descriptor()
foo = Foo()
# for a non data descriptor we return the instance attribute
foo.__dict__['d'] = 1
self.assertEqual(inspect.getattr_static(foo, 'd'), 1)
# if the descriptor is a data-desciptor we should return the
# descriptor
descriptor.__set__ = lambda s, i, v: None
self.assertEqual(inspect.getattr_static(foo, 'd'), Foo.__dict__['d'])
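        # Rule of thumb being exercised (a sketch of the descriptor
        # protocol, not of getattr_static internals): ordinary lookup lets
        # a data descriptor (one defining __set__ or __delete__) on the
        # type shadow the instance __dict__, while a non-data descriptor
        # is shadowed by it. getattr_static mirrors that precedence but
        # never invokes __get__, so it returns the raw descriptor object.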
def test_metaclass_with_descriptor(self):
class descriptor(object):
def __get__(self, instance, owner):
return 3
class meta(type):
d = descriptor()
class Thing(object, metaclass=meta):
pass
self.assertEqual(inspect.getattr_static(Thing, 'd'), meta.__dict__['d'])
def test_class_as_property(self):
class Base(object):
foo = 3
class Something(Base):
executed = False
@property
def __class__(self):
self.executed = True
return object
instance = Something()
self.assertEqual(inspect.getattr_static(instance, 'foo'), 3)
self.assertFalse(instance.executed)
self.assertEqual(inspect.getattr_static(Something, 'foo'), 3)
def test_mro_as_property(self):
class Meta(type):
@property
def __mro__(self):
return (object,)
class Base(object):
foo = 3
class Something(Base, metaclass=Meta):
pass
self.assertEqual(inspect.getattr_static(Something(), 'foo'), 3)
self.assertEqual(inspect.getattr_static(Something, 'foo'), 3)
def test_dict_as_property(self):
test = self
test.called = False
class Foo(dict):
a = 3
@property
def __dict__(self):
test.called = True
return {}
foo = Foo()
foo.a = 4
self.assertEqual(inspect.getattr_static(foo, 'a'), 3)
self.assertFalse(test.called)
def test_custom_object_dict(self):
test = self
test.called = False
class Custom(dict):
def get(self, key, default=None):
test.called = True
super().get(key, default)
class Foo(object):
a = 3
foo = Foo()
foo.__dict__ = Custom()
self.assertEqual(inspect.getattr_static(foo, 'a'), 3)
self.assertFalse(test.called)
def test_metaclass_dict_as_property(self):
class Meta(type):
@property
def __dict__(self):
self.executed = True
class Thing(metaclass=Meta):
executed = False
def __init__(self):
self.spam = 42
instance = Thing()
self.assertEqual(inspect.getattr_static(instance, "spam"), 42)
self.assertFalse(Thing.executed)
def test_module(self):
sentinel = object()
self.assertIsNot(inspect.getattr_static(sys, "version", sentinel),
sentinel)
def test_metaclass_with_metaclass_with_dict_as_property(self):
class MetaMeta(type):
@property
def __dict__(self):
self.executed = True
return dict(spam=42)
class Meta(type, metaclass=MetaMeta):
executed = False
class Thing(metaclass=Meta):
pass
with self.assertRaises(AttributeError):
inspect.getattr_static(Thing, "spam")
self.assertFalse(Thing.executed)
class TestGetGeneratorState(unittest.TestCase):
def setUp(self):
def number_generator():
for number in range(5):
yield number
self.generator = number_generator()
def _generatorstate(self):
return inspect.getgeneratorstate(self.generator)
def test_created(self):
self.assertEqual(self._generatorstate(), inspect.GEN_CREATED)
def test_suspended(self):
next(self.generator)
self.assertEqual(self._generatorstate(), inspect.GEN_SUSPENDED)
def test_closed_after_exhaustion(self):
for i in self.generator:
pass
self.assertEqual(self._generatorstate(), inspect.GEN_CLOSED)
def test_closed_after_immediate_exception(self):
with self.assertRaises(RuntimeError):
self.generator.throw(RuntimeError)
self.assertEqual(self._generatorstate(), inspect.GEN_CLOSED)
def test_running(self):
# As mentioned on issue #10220, checking for the RUNNING state only
# makes sense inside the generator itself.
# The following generator checks for this by using the closure's
# reference to self and the generator state checking helper method
def running_check_generator():
for number in range(5):
self.assertEqual(self._generatorstate(), inspect.GEN_RUNNING)
yield number
self.assertEqual(self._generatorstate(), inspect.GEN_RUNNING)
self.generator = running_check_generator()
# Running up to the first yield
next(self.generator)
# Running after the first yield
next(self.generator)
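        # Expected lifecycle, for reference: GEN_CREATED before the first
        # next(), GEN_RUNNING only while the frame is executing (hence the
        # checks inside the generator body above), GEN_SUSPENDED between
        # yields, and GEN_CLOSED after exhaustion or an unhandled throw().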
def test_easy_debugging(self):
# repr() and str() of a generator state should contain the state name
names = 'GEN_CREATED GEN_RUNNING GEN_SUSPENDED GEN_CLOSED'.split()
for name in names:
state = getattr(inspect, name)
self.assertIn(name, repr(state))
self.assertIn(name, str(state))
def test_getgeneratorlocals(self):
def each(lst, a=None):
            b = (1, 2, 3)
for v in lst:
if v == 3:
c = 12
yield v
numbers = each([1, 2, 3])
self.assertEqual(inspect.getgeneratorlocals(numbers),
{'a': None, 'lst': [1, 2, 3]})
next(numbers)
self.assertEqual(inspect.getgeneratorlocals(numbers),
{'a': None, 'lst': [1, 2, 3], 'v': 1,
'b': (1, 2, 3)})
next(numbers)
self.assertEqual(inspect.getgeneratorlocals(numbers),
{'a': None, 'lst': [1, 2, 3], 'v': 2,
'b': (1, 2, 3)})
next(numbers)
self.assertEqual(inspect.getgeneratorlocals(numbers),
{'a': None, 'lst': [1, 2, 3], 'v': 3,
'b': (1, 2, 3), 'c': 12})
try:
next(numbers)
except StopIteration:
pass
self.assertEqual(inspect.getgeneratorlocals(numbers), {})
def test_getgeneratorlocals_empty(self):
def yield_one():
yield 1
one = yield_one()
self.assertEqual(inspect.getgeneratorlocals(one), {})
try:
next(one)
except StopIteration:
pass
self.assertEqual(inspect.getgeneratorlocals(one), {})
def test_getgeneratorlocals_error(self):
self.assertRaises(TypeError, inspect.getgeneratorlocals, 1)
self.assertRaises(TypeError, inspect.getgeneratorlocals, lambda x: True)
self.assertRaises(TypeError, inspect.getgeneratorlocals, set)
self.assertRaises(TypeError, inspect.getgeneratorlocals, (2,3))
class TestSignatureObject(unittest.TestCase):
@staticmethod
def signature(func):
sig = inspect.signature(func)
return (tuple((param.name,
(... if param.default is param.empty else param.default),
(... if param.annotation is param.empty
else param.annotation),
str(param.kind).lower())
for param in sig.parameters.values()),
(... if sig.return_annotation is sig.empty
else sig.return_annotation))
def test_signature_object(self):
S = inspect.Signature
P = inspect.Parameter
self.assertEqual(str(S()), '()')
def test(po, pk, pod=42, pkd=100, *args, ko, **kwargs):
pass
sig = inspect.signature(test)
po = sig.parameters['po'].replace(kind=P.POSITIONAL_ONLY)
pod = sig.parameters['pod'].replace(kind=P.POSITIONAL_ONLY)
pk = sig.parameters['pk']
pkd = sig.parameters['pkd']
args = sig.parameters['args']
ko = sig.parameters['ko']
kwargs = sig.parameters['kwargs']
S((po, pk, args, ko, kwargs))
with self.assertRaisesRegex(ValueError, 'wrong parameter order'):
S((pk, po, args, ko, kwargs))
with self.assertRaisesRegex(ValueError, 'wrong parameter order'):
S((po, args, pk, ko, kwargs))
with self.assertRaisesRegex(ValueError, 'wrong parameter order'):
S((args, po, pk, ko, kwargs))
with self.assertRaisesRegex(ValueError, 'wrong parameter order'):
S((po, pk, args, kwargs, ko))
kwargs2 = kwargs.replace(name='args')
with self.assertRaisesRegex(ValueError, 'duplicate parameter name'):
S((po, pk, args, kwargs2, ko))
with self.assertRaisesRegex(ValueError, 'follows default argument'):
S((pod, po))
with self.assertRaisesRegex(ValueError, 'follows default argument'):
S((po, pkd, pk))
with self.assertRaisesRegex(ValueError, 'follows default argument'):
S((pkd, pk))
def test_signature_immutability(self):
def test(a):
pass
sig = inspect.signature(test)
with self.assertRaises(AttributeError):
sig.foo = 'bar'
with self.assertRaises(TypeError):
sig.parameters['a'] = None
def test_signature_on_noarg(self):
def test():
pass
self.assertEqual(self.signature(test), ((), ...))
def test_signature_on_wargs(self):
def test(a, b:'foo') -> 123:
pass
self.assertEqual(self.signature(test),
((('a', ..., ..., "positional_or_keyword"),
('b', ..., 'foo', "positional_or_keyword")),
123))
def test_signature_on_wkwonly(self):
def test(*, a:float, b:str) -> int:
pass
self.assertEqual(self.signature(test),
((('a', ..., float, "keyword_only"),
('b', ..., str, "keyword_only")),
int))
def test_signature_on_complex_args(self):
def test(a, b:'foo'=10, *args:'bar', spam:'baz', ham=123, **kwargs:int):
pass
self.assertEqual(self.signature(test),
((('a', ..., ..., "positional_or_keyword"),
('b', 10, 'foo', "positional_or_keyword"),
('args', ..., 'bar', "var_positional"),
('spam', ..., 'baz', "keyword_only"),
('ham', 123, ..., "keyword_only"),
('kwargs', ..., int, "var_keyword")),
...))
@cpython_only
@unittest.skipIf(MISSING_C_DOCSTRINGS,
"Signature information for builtins requires docstrings")
def test_signature_on_builtins(self):
import _testcapi
def test_unbound_method(o):
"""Use this to test unbound methods (things that should have a self)"""
signature = inspect.signature(o)
self.assertTrue(isinstance(signature, inspect.Signature))
self.assertEqual(list(signature.parameters.values())[0].name, 'self')
return signature
def test_callable(o):
"""Use this to test bound methods or normal callables (things that don't expect self)"""
signature = inspect.signature(o)
self.assertTrue(isinstance(signature, inspect.Signature))
if signature.parameters:
self.assertNotEqual(list(signature.parameters.values())[0].name, 'self')
return signature
signature = test_callable(_testcapi.docstring_with_signature_with_defaults)
def p(name): return signature.parameters[name].default
self.assertEqual(p('s'), 'avocado')
self.assertEqual(p('b'), b'bytes')
self.assertEqual(p('d'), 3.14)
self.assertEqual(p('i'), 35)
self.assertEqual(p('n'), None)
self.assertEqual(p('t'), True)
self.assertEqual(p('f'), False)
self.assertEqual(p('local'), 3)
self.assertEqual(p('sys'), sys.maxsize)
self.assertEqual(p('exp'), sys.maxsize - 1)
test_callable(object)
# normal method
# (PyMethodDescr_Type, "method_descriptor")
test_unbound_method(_pickle.Pickler.dump)
d = _pickle.Pickler(io.StringIO())
test_callable(d.dump)
# static method
test_callable(str.maketrans)
test_callable('abc'.maketrans)
# class method
test_callable(dict.fromkeys)
test_callable({}.fromkeys)
# wrapper around slot (PyWrapperDescr_Type, "wrapper_descriptor")
test_unbound_method(type.__call__)
test_unbound_method(int.__add__)
test_callable((3).__add__)
# _PyMethodWrapper_Type
# support for 'method-wrapper'
test_callable(min.__call__)
# This doesn't work now.
# (We don't have a valid signature for "type" in 3.4)
with self.assertRaisesRegex(ValueError, "no signature found"):
class ThisWorksNow:
__call__ = type
test_callable(ThisWorksNow())
# Regression test for issue #20786
test_unbound_method(dict.__delitem__)
test_unbound_method(property.__delete__)
@cpython_only
@unittest.skipIf(MISSING_C_DOCSTRINGS,
"Signature information for builtins requires docstrings")
def test_signature_on_decorated_builtins(self):
import _testcapi
func = _testcapi.docstring_with_signature_with_defaults
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs) -> int:
return func(*args, **kwargs)
return wrapper
decorated_func = decorator(func)
self.assertEqual(inspect.signature(func),
inspect.signature(decorated_func))
@cpython_only
def test_signature_on_builtins_no_signature(self):
import _testcapi
with self.assertRaisesRegex(ValueError, 'no signature found for builtin'):
inspect.signature(_testcapi.docstring_no_signature)
def test_signature_on_non_function(self):
with self.assertRaisesRegex(TypeError, 'is not a callable object'):
inspect.signature(42)
with self.assertRaisesRegex(TypeError, 'is not a Python function'):
inspect.Signature.from_function(42)
def test_signature_from_builtin_errors(self):
with self.assertRaisesRegex(TypeError, 'is not a Python builtin'):
inspect.Signature.from_builtin(42)
def test_signature_from_functionlike_object(self):
        def func(a, b, *args, kwonly=True, kwonlyreq, **kwargs):
pass
class funclike:
# Has to be callable, and have correct
# __code__, __annotations__, __defaults__, __name__,
# and __kwdefaults__ attributes
def __init__(self, func):
self.__name__ = func.__name__
self.__code__ = func.__code__
self.__annotations__ = func.__annotations__
self.__defaults__ = func.__defaults__
self.__kwdefaults__ = func.__kwdefaults__
self.func = func
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
sig_func = inspect.Signature.from_function(func)
sig_funclike = inspect.Signature.from_function(funclike(func))
self.assertEqual(sig_funclike, sig_func)
sig_funclike = inspect.signature(funclike(func))
self.assertEqual(sig_funclike, sig_func)
# If object is not a duck type of function, then
# signature will try to get a signature for its '__call__'
# method
fl = funclike(func)
del fl.__defaults__
self.assertEqual(self.signature(fl),
((('args', ..., ..., "var_positional"),
('kwargs', ..., ..., "var_keyword")),
...))
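        # i.e. once __defaults__ is gone the object no longer duck-types
        # as a function, so signature() presumably falls back to
        # fl.__call__(*args, **kwargs) -- which is exactly the
        # (*args, **kwargs) signature asserted above.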
# Test with cython-like builtins:
_orig_isdesc = inspect.ismethoddescriptor
def _isdesc(obj):
if hasattr(obj, '_builtinmock'):
return True
return _orig_isdesc(obj)
with unittest.mock.patch('inspect.ismethoddescriptor', _isdesc):
builtin_func = funclike(func)
# Make sure that our mock setup is working
self.assertFalse(inspect.ismethoddescriptor(builtin_func))
builtin_func._builtinmock = True
self.assertTrue(inspect.ismethoddescriptor(builtin_func))
self.assertEqual(inspect.signature(builtin_func), sig_func)
def test_signature_functionlike_class(self):
# We only want to duck type function-like objects,
# not classes.
        def func(a, b, *args, kwonly=True, kwonlyreq, **kwargs):
pass
class funclike:
def __init__(self, marker):
pass
__name__ = func.__name__
__code__ = func.__code__
__annotations__ = func.__annotations__
__defaults__ = func.__defaults__
__kwdefaults__ = func.__kwdefaults__
with self.assertRaisesRegex(TypeError, 'is not a Python function'):
inspect.Signature.from_function(funclike)
self.assertEqual(str(inspect.signature(funclike)), '(marker)')
def test_signature_on_method(self):
class Test:
def __init__(*args):
pass
def m1(self, arg1, arg2=1) -> int:
pass
def m2(*args):
pass
def __call__(*, a):
pass
self.assertEqual(self.signature(Test().m1),
((('arg1', ..., ..., "positional_or_keyword"),
('arg2', 1, ..., "positional_or_keyword")),
int))
self.assertEqual(self.signature(Test().m2),
((('args', ..., ..., "var_positional"),),
...))
self.assertEqual(self.signature(Test),
((('args', ..., ..., "var_positional"),),
...))
with self.assertRaisesRegex(ValueError, 'invalid method signature'):
self.signature(Test())
def test_signature_on_classmethod(self):
class Test:
@classmethod
def foo(cls, arg1, *, arg2=1):
pass
meth = Test().foo
self.assertEqual(self.signature(meth),
((('arg1', ..., ..., "positional_or_keyword"),
('arg2', 1, ..., "keyword_only")),
...))
meth = Test.foo
self.assertEqual(self.signature(meth),
((('arg1', ..., ..., "positional_or_keyword"),
('arg2', 1, ..., "keyword_only")),
...))
def test_signature_on_staticmethod(self):
class Test:
@staticmethod
def foo(cls, *, arg):
pass
meth = Test().foo
self.assertEqual(self.signature(meth),
((('cls', ..., ..., "positional_or_keyword"),
('arg', ..., ..., "keyword_only")),
...))
meth = Test.foo
self.assertEqual(self.signature(meth),
((('cls', ..., ..., "positional_or_keyword"),
('arg', ..., ..., "keyword_only")),
...))
def test_signature_on_partial(self):
from functools import partial
Parameter = inspect.Parameter
def test():
pass
self.assertEqual(self.signature(partial(test)), ((), ...))
with self.assertRaisesRegex(ValueError, "has incorrect arguments"):
inspect.signature(partial(test, 1))
with self.assertRaisesRegex(ValueError, "has incorrect arguments"):
inspect.signature(partial(test, a=1))
def test(a, b, *, c, d):
pass
self.assertEqual(self.signature(partial(test)),
((('a', ..., ..., "positional_or_keyword"),
('b', ..., ..., "positional_or_keyword"),
('c', ..., ..., "keyword_only"),
('d', ..., ..., "keyword_only")),
...))
self.assertEqual(self.signature(partial(test, 1)),
((('b', ..., ..., "positional_or_keyword"),
('c', ..., ..., "keyword_only"),
('d', ..., ..., "keyword_only")),
...))
self.assertEqual(self.signature(partial(test, 1, c=2)),
((('b', ..., ..., "positional_or_keyword"),
('c', 2, ..., "keyword_only"),
('d', ..., ..., "keyword_only")),
...))
self.assertEqual(self.signature(partial(test, b=1, c=2)),
((('a', ..., ..., "positional_or_keyword"),
('b', 1, ..., "keyword_only"),
('c', 2, ..., "keyword_only"),
('d', ..., ..., "keyword_only")),
...))
self.assertEqual(self.signature(partial(test, 0, b=1, c=2)),
((('b', 1, ..., "keyword_only"),
('c', 2, ..., "keyword_only"),
('d', ..., ..., "keyword_only")),
...))
self.assertEqual(self.signature(partial(test, a=1)),
((('a', 1, ..., "keyword_only"),
('b', ..., ..., "keyword_only"),
('c', ..., ..., "keyword_only"),
('d', ..., ..., "keyword_only")),
...))
def test(a, *args, b, **kwargs):
pass
self.assertEqual(self.signature(partial(test, 1)),
((('args', ..., ..., "var_positional"),
('b', ..., ..., "keyword_only"),
('kwargs', ..., ..., "var_keyword")),
...))
self.assertEqual(self.signature(partial(test, a=1)),
((('a', 1, ..., "keyword_only"),
('b', ..., ..., "keyword_only"),
('kwargs', ..., ..., "var_keyword")),
...))
self.assertEqual(self.signature(partial(test, 1, 2, 3)),
((('args', ..., ..., "var_positional"),
('b', ..., ..., "keyword_only"),
('kwargs', ..., ..., "var_keyword")),
...))
self.assertEqual(self.signature(partial(test, 1, 2, 3, test=True)),
((('args', ..., ..., "var_positional"),
('b', ..., ..., "keyword_only"),
('kwargs', ..., ..., "var_keyword")),
...))
self.assertEqual(self.signature(partial(test, 1, 2, 3, test=1, b=0)),
((('args', ..., ..., "var_positional"),
('b', 0, ..., "keyword_only"),
('kwargs', ..., ..., "var_keyword")),
...))
self.assertEqual(self.signature(partial(test, b=0)),
((('a', ..., ..., "positional_or_keyword"),
('args', ..., ..., "var_positional"),
('b', 0, ..., "keyword_only"),
('kwargs', ..., ..., "var_keyword")),
...))
self.assertEqual(self.signature(partial(test, b=0, test=1)),
((('a', ..., ..., "positional_or_keyword"),
('args', ..., ..., "var_positional"),
('b', 0, ..., "keyword_only"),
('kwargs', ..., ..., "var_keyword")),
...))
def test(a, b, c:int) -> 42:
pass
sig = test.__signature__ = inspect.signature(test)
self.assertEqual(self.signature(partial(partial(test, 1))),
((('b', ..., ..., "positional_or_keyword"),
('c', ..., int, "positional_or_keyword")),
42))
self.assertEqual(self.signature(partial(partial(test, 1), 2)),
((('c', ..., int, "positional_or_keyword"),),
42))
psig = inspect.signature(partial(partial(test, 1), 2))
def foo(a):
return a
_foo = partial(partial(foo, a=10), a=20)
self.assertEqual(self.signature(_foo),
((('a', 20, ..., "keyword_only"),),
...))
# check that we don't have any side-effects in signature(),
# and the partial object is still functioning
self.assertEqual(_foo(), 20)
def foo(a, b, c):
return a, b, c
_foo = partial(partial(foo, 1, b=20), b=30)
self.assertEqual(self.signature(_foo),
((('b', 30, ..., "keyword_only"),
('c', ..., ..., "keyword_only")),
...))
self.assertEqual(_foo(c=10), (1, 30, 10))
def foo(a, b, c, *, d):
return a, b, c, d
_foo = partial(partial(foo, d=20, c=20), b=10, d=30)
self.assertEqual(self.signature(_foo),
((('a', ..., ..., "positional_or_keyword"),
('b', 10, ..., "keyword_only"),
('c', 20, ..., "keyword_only"),
('d', 30, ..., "keyword_only"),
),
...))
ba = inspect.signature(_foo).bind(a=200, b=11)
self.assertEqual(_foo(*ba.args, **ba.kwargs), (200, 11, 20, 30))
def foo(a=1, b=2, c=3):
return a, b, c
_foo = partial(foo, c=13) # (a=1, b=2, *, c=13)
ba = inspect.signature(_foo).bind(a=11)
self.assertEqual(_foo(*ba.args, **ba.kwargs), (11, 2, 13))
ba = inspect.signature(_foo).bind(11, 12)
self.assertEqual(_foo(*ba.args, **ba.kwargs), (11, 12, 13))
ba = inspect.signature(_foo).bind(11, b=12)
self.assertEqual(_foo(*ba.args, **ba.kwargs), (11, 12, 13))
ba = inspect.signature(_foo).bind(b=12)
self.assertEqual(_foo(*ba.args, **ba.kwargs), (1, 12, 13))
_foo = partial(_foo, b=10, c=20)
ba = inspect.signature(_foo).bind(12)
self.assertEqual(_foo(*ba.args, **ba.kwargs), (12, 10, 20))
def foo(a, b, c, d, **kwargs):
pass
sig = inspect.signature(foo)
params = sig.parameters.copy()
params['a'] = params['a'].replace(kind=Parameter.POSITIONAL_ONLY)
params['b'] = params['b'].replace(kind=Parameter.POSITIONAL_ONLY)
foo.__signature__ = inspect.Signature(params.values())
sig = inspect.signature(foo)
self.assertEqual(str(sig), '(a, b, /, c, d, **kwargs)')
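        # Note: at the time of this test there is no 'def' syntax for
        # positional-only parameters, so they are synthesized by rewriting
        # Parameter.kind and attaching the result via __signature__;
        # str(sig) then renders the '/' marker asserted above.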
self.assertEqual(self.signature(partial(foo, 1)),
((('b', ..., ..., 'positional_only'),
('c', ..., ..., 'positional_or_keyword'),
('d', ..., ..., 'positional_or_keyword'),
('kwargs', ..., ..., 'var_keyword')),
...))
self.assertEqual(self.signature(partial(foo, 1, 2)),
((('c', ..., ..., 'positional_or_keyword'),
('d', ..., ..., 'positional_or_keyword'),
('kwargs', ..., ..., 'var_keyword')),
...))
self.assertEqual(self.signature(partial(foo, 1, 2, 3)),
((('d', ..., ..., 'positional_or_keyword'),
('kwargs', ..., ..., 'var_keyword')),
...))
self.assertEqual(self.signature(partial(foo, 1, 2, c=3)),
((('c', 3, ..., 'keyword_only'),
('d', ..., ..., 'keyword_only'),
('kwargs', ..., ..., 'var_keyword')),
...))
self.assertEqual(self.signature(partial(foo, 1, c=3)),
((('b', ..., ..., 'positional_only'),
('c', 3, ..., 'keyword_only'),
('d', ..., ..., 'keyword_only'),
('kwargs', ..., ..., 'var_keyword')),
...))
def test_signature_on_partialmethod(self):
from functools import partialmethod
class Spam:
def test():
pass
ham = partialmethod(test)
with self.assertRaisesRegex(ValueError, "has incorrect arguments"):
inspect.signature(Spam.ham)
class Spam:
def test(it, a, *, c) -> 'spam':
pass
ham = partialmethod(test, c=1)
self.assertEqual(self.signature(Spam.ham),
((('it', ..., ..., 'positional_or_keyword'),
('a', ..., ..., 'positional_or_keyword'),
('c', 1, ..., 'keyword_only')),
'spam'))
self.assertEqual(self.signature(Spam().ham),
((('a', ..., ..., 'positional_or_keyword'),
('c', 1, ..., 'keyword_only')),
'spam'))
def test_signature_on_fake_partialmethod(self):
def foo(a): pass
foo._partialmethod = 'spam'
self.assertEqual(str(inspect.signature(foo)), '(a)')
def test_signature_on_decorated(self):
import functools
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs) -> int:
return func(*args, **kwargs)
return wrapper
class Foo:
@decorator
def bar(self, a, b):
pass
self.assertEqual(self.signature(Foo.bar),
((('self', ..., ..., "positional_or_keyword"),
('a', ..., ..., "positional_or_keyword"),
('b', ..., ..., "positional_or_keyword")),
...))
self.assertEqual(self.signature(Foo().bar),
((('a', ..., ..., "positional_or_keyword"),
('b', ..., ..., "positional_or_keyword")),
...))
# Test that we handle method wrappers correctly
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs) -> int:
return func(42, *args, **kwargs)
sig = inspect.signature(func)
new_params = tuple(sig.parameters.values())[1:]
wrapper.__signature__ = sig.replace(parameters=new_params)
return wrapper
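        # The pattern above is the usual way to advertise that a wrapper
        # consumes the first positional argument: take the wrapped
        # function's Signature, drop parameters[0], and publish the rest
        # through wrapper.__signature__ so inspect.signature() reports it.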
class Foo:
@decorator
def __call__(self, a, b):
pass
self.assertEqual(self.signature(Foo.__call__),
((('a', ..., ..., "positional_or_keyword"),
('b', ..., ..., "positional_or_keyword")),
...))
self.assertEqual(self.signature(Foo().__call__),
((('b', ..., ..., "positional_or_keyword"),),
...))
# Test we handle __signature__ partway down the wrapper stack
def wrapped_foo_call():
pass
wrapped_foo_call.__wrapped__ = Foo.__call__
self.assertEqual(self.signature(wrapped_foo_call),
((('a', ..., ..., "positional_or_keyword"),
('b', ..., ..., "positional_or_keyword")),
...))
def test_signature_on_class(self):
class C:
def __init__(self, a):
pass
self.assertEqual(self.signature(C),
((('a', ..., ..., "positional_or_keyword"),),
...))
class CM(type):
def __call__(cls, a):
pass
class C(metaclass=CM):
def __init__(self, b):
pass
self.assertEqual(self.signature(C),
((('a', ..., ..., "positional_or_keyword"),),
...))
class CM(type):
def __new__(mcls, name, bases, dct, *, foo=1):
return super().__new__(mcls, name, bases, dct)
class C(metaclass=CM):
def __init__(self, b):
pass
self.assertEqual(self.signature(C),
((('b', ..., ..., "positional_or_keyword"),),
...))
self.assertEqual(self.signature(CM),
((('name', ..., ..., "positional_or_keyword"),
('bases', ..., ..., "positional_or_keyword"),
('dct', ..., ..., "positional_or_keyword"),
('foo', 1, ..., "keyword_only")),
...))
class CMM(type):
def __new__(mcls, name, bases, dct, *, foo=1):
return super().__new__(mcls, name, bases, dct)
def __call__(cls, nm, bs, dt):
return type(nm, bs, dt)
class CM(type, metaclass=CMM):
def __new__(mcls, name, bases, dct, *, bar=2):
return super().__new__(mcls, name, bases, dct)
class C(metaclass=CM):
def __init__(self, b):
pass
self.assertEqual(self.signature(CMM),
((('name', ..., ..., "positional_or_keyword"),
('bases', ..., ..., "positional_or_keyword"),
('dct', ..., ..., "positional_or_keyword"),
('foo', 1, ..., "keyword_only")),
...))
self.assertEqual(self.signature(CM),
((('nm', ..., ..., "positional_or_keyword"),
('bs', ..., ..., "positional_or_keyword"),
('dt', ..., ..., "positional_or_keyword")),
...))
self.assertEqual(self.signature(C),
((('b', ..., ..., "positional_or_keyword"),),
...))
class CM(type):
def __init__(cls, name, bases, dct, *, bar=2):
return super().__init__(name, bases, dct)
class C(metaclass=CM):
def __init__(self, b):
pass
self.assertEqual(self.signature(CM),
((('name', ..., ..., "positional_or_keyword"),
('bases', ..., ..., "positional_or_keyword"),
('dct', ..., ..., "positional_or_keyword"),
('bar', 2, ..., "keyword_only")),
...))
@unittest.skipIf(MISSING_C_DOCSTRINGS,
"Signature information for builtins requires docstrings")
def test_signature_on_class_without_init(self):
# Test classes without user-defined __init__ or __new__
class C: pass
self.assertEqual(str(inspect.signature(C)), '()')
class D(C): pass
self.assertEqual(str(inspect.signature(D)), '()')
# Test meta-classes without user-defined __init__ or __new__
class C(type): pass
class D(C): pass
with self.assertRaisesRegex(ValueError, "callable.*is not supported"):
self.assertEqual(inspect.signature(C), None)
with self.assertRaisesRegex(ValueError, "callable.*is not supported"):
self.assertEqual(inspect.signature(D), None)
@unittest.skipIf(MISSING_C_DOCSTRINGS,
"Signature information for builtins requires docstrings")
def test_signature_on_builtin_class(self):
self.assertEqual(str(inspect.signature(_pickle.Pickler)),
'(file, protocol=None, fix_imports=True)')
class P(_pickle.Pickler): pass
class EmptyTrait: pass
class P2(EmptyTrait, P): pass
self.assertEqual(str(inspect.signature(P)),
'(file, protocol=None, fix_imports=True)')
self.assertEqual(str(inspect.signature(P2)),
'(file, protocol=None, fix_imports=True)')
class P3(P2):
def __init__(self, spam):
pass
self.assertEqual(str(inspect.signature(P3)), '(spam)')
class MetaP(type):
def __call__(cls, foo, bar):
pass
class P4(P2, metaclass=MetaP):
pass
self.assertEqual(str(inspect.signature(P4)), '(foo, bar)')
def test_signature_on_callable_objects(self):
class Foo:
def __call__(self, a):
pass
self.assertEqual(self.signature(Foo()),
((('a', ..., ..., "positional_or_keyword"),),
...))
class Spam:
pass
with self.assertRaisesRegex(TypeError, "is not a callable object"):
inspect.signature(Spam())
class Bar(Spam, Foo):
pass
self.assertEqual(self.signature(Bar()),
((('a', ..., ..., "positional_or_keyword"),),
...))
class Wrapped:
pass
Wrapped.__wrapped__ = lambda a: None
self.assertEqual(self.signature(Wrapped),
((('a', ..., ..., "positional_or_keyword"),),
...))
# wrapper loop:
Wrapped.__wrapped__ = Wrapped
with self.assertRaisesRegex(ValueError, 'wrapper loop'):
self.signature(Wrapped)
def test_signature_on_lambdas(self):
self.assertEqual(self.signature((lambda a=10: a)),
((('a', 10, ..., "positional_or_keyword"),),
...))
def test_signature_equality(self):
def foo(a, *, b:int) -> float: pass
self.assertNotEqual(inspect.signature(foo), 42)
def bar(a, *, b:int) -> float: pass
self.assertEqual(inspect.signature(foo), inspect.signature(bar))
def bar(a, *, b:int) -> int: pass
self.assertNotEqual(inspect.signature(foo), inspect.signature(bar))
def bar(a, *, b:int): pass
self.assertNotEqual(inspect.signature(foo), inspect.signature(bar))
def bar(a, *, b:int=42) -> float: pass
self.assertNotEqual(inspect.signature(foo), inspect.signature(bar))
def bar(a, *, c) -> float: pass
self.assertNotEqual(inspect.signature(foo), inspect.signature(bar))
def bar(a, b:int) -> float: pass
self.assertNotEqual(inspect.signature(foo), inspect.signature(bar))
def spam(b:int, a) -> float: pass
self.assertNotEqual(inspect.signature(spam), inspect.signature(bar))
def foo(*, a, b, c): pass
def bar(*, c, b, a): pass
self.assertEqual(inspect.signature(foo), inspect.signature(bar))
def foo(*, a=1, b, c): pass
def bar(*, c, b, a=1): pass
self.assertEqual(inspect.signature(foo), inspect.signature(bar))
def foo(pos, *, a=1, b, c): pass
def bar(pos, *, c, b, a=1): pass
self.assertEqual(inspect.signature(foo), inspect.signature(bar))
def foo(pos, *, a, b, c): pass
def bar(pos, *, c, b, a=1): pass
self.assertNotEqual(inspect.signature(foo), inspect.signature(bar))
def foo(pos, *args, a=42, b, c, **kwargs:int): pass
def bar(pos, *args, c, b, a=42, **kwargs:int): pass
self.assertEqual(inspect.signature(foo), inspect.signature(bar))
def test_signature_unhashable(self):
def foo(a): pass
sig = inspect.signature(foo)
with self.assertRaisesRegex(TypeError, 'unhashable type'):
hash(sig)
def test_signature_str(self):
def foo(a:int=1, *, b, c=None, **kwargs) -> 42:
pass
self.assertEqual(str(inspect.signature(foo)),
'(a:int=1, *, b, c=None, **kwargs) -> 42')
def foo(a:int=1, *args, b, c=None, **kwargs) -> 42:
pass
self.assertEqual(str(inspect.signature(foo)),
'(a:int=1, *args, b, c=None, **kwargs) -> 42')
def foo():
pass
self.assertEqual(str(inspect.signature(foo)), '()')
def test_signature_str_positional_only(self):
P = inspect.Parameter
S = inspect.Signature
def test(a_po, *, b, **kwargs):
return a_po, kwargs
sig = inspect.signature(test)
new_params = list(sig.parameters.values())
new_params[0] = new_params[0].replace(kind=P.POSITIONAL_ONLY)
test.__signature__ = sig.replace(parameters=new_params)
self.assertEqual(str(inspect.signature(test)),
'(a_po, /, *, b, **kwargs)')
self.assertEqual(str(S(parameters=[P('foo', P.POSITIONAL_ONLY)])),
'(foo, /)')
self.assertEqual(str(S(parameters=[
P('foo', P.POSITIONAL_ONLY),
P('bar', P.VAR_KEYWORD)])),
'(foo, /, **bar)')
self.assertEqual(str(S(parameters=[
P('foo', P.POSITIONAL_ONLY),
P('bar', P.VAR_POSITIONAL)])),
'(foo, /, *bar)')
def test_signature_replace_anno(self):
def test() -> 42:
pass
sig = inspect.signature(test)
sig = sig.replace(return_annotation=None)
self.assertIs(sig.return_annotation, None)
sig = sig.replace(return_annotation=sig.empty)
self.assertIs(sig.return_annotation, sig.empty)
sig = sig.replace(return_annotation=42)
self.assertEqual(sig.return_annotation, 42)
self.assertEqual(sig, inspect.signature(test))
def test_signature_on_mangled_parameters(self):
class Spam:
def foo(self, __p1:1=2, *, __p2:2=3):
pass
class Ham(Spam):
pass
self.assertEqual(self.signature(Spam.foo),
((('self', ..., ..., "positional_or_keyword"),
('_Spam__p1', 2, 1, "positional_or_keyword"),
('_Spam__p2', 3, 2, "keyword_only")),
...))
self.assertEqual(self.signature(Spam.foo),
self.signature(Ham.foo))
class TestParameterObject(unittest.TestCase):
def test_signature_parameter_kinds(self):
P = inspect.Parameter
self.assertTrue(P.POSITIONAL_ONLY < P.POSITIONAL_OR_KEYWORD < \
P.VAR_POSITIONAL < P.KEYWORD_ONLY < P.VAR_KEYWORD)
self.assertEqual(str(P.POSITIONAL_ONLY), 'POSITIONAL_ONLY')
self.assertTrue('POSITIONAL_ONLY' in repr(P.POSITIONAL_ONLY))
def test_signature_parameter_object(self):
p = inspect.Parameter('foo', default=10,
kind=inspect.Parameter.POSITIONAL_ONLY)
self.assertEqual(p.name, 'foo')
self.assertEqual(p.default, 10)
self.assertIs(p.annotation, p.empty)
self.assertEqual(p.kind, inspect.Parameter.POSITIONAL_ONLY)
with self.assertRaisesRegex(ValueError, 'invalid value'):
inspect.Parameter('foo', default=10, kind='123')
with self.assertRaisesRegex(ValueError, 'not a valid parameter name'):
inspect.Parameter('1', kind=inspect.Parameter.VAR_KEYWORD)
with self.assertRaisesRegex(TypeError, 'name must be a str'):
inspect.Parameter(None, kind=inspect.Parameter.VAR_KEYWORD)
with self.assertRaisesRegex(ValueError,
'is not a valid parameter name'):
inspect.Parameter('$', kind=inspect.Parameter.VAR_KEYWORD)
with self.assertRaisesRegex(ValueError, 'cannot have default values'):
inspect.Parameter('a', default=42,
kind=inspect.Parameter.VAR_KEYWORD)
with self.assertRaisesRegex(ValueError, 'cannot have default values'):
inspect.Parameter('a', default=42,
kind=inspect.Parameter.VAR_POSITIONAL)
p = inspect.Parameter('a', default=42,
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD)
with self.assertRaisesRegex(ValueError, 'cannot have default values'):
p.replace(kind=inspect.Parameter.VAR_POSITIONAL)
self.assertTrue(repr(p).startswith('<Parameter'))
def test_signature_parameter_equality(self):
P = inspect.Parameter
p = P('foo', default=42, kind=inspect.Parameter.KEYWORD_ONLY)
self.assertEqual(p, p)
self.assertNotEqual(p, 42)
self.assertEqual(p, P('foo', default=42,
kind=inspect.Parameter.KEYWORD_ONLY))
def test_signature_parameter_unhashable(self):
p = inspect.Parameter('foo', default=42,
kind=inspect.Parameter.KEYWORD_ONLY)
with self.assertRaisesRegex(TypeError, 'unhashable type'):
hash(p)
def test_signature_parameter_replace(self):
p = inspect.Parameter('foo', default=42,
kind=inspect.Parameter.KEYWORD_ONLY)
self.assertIsNot(p, p.replace())
self.assertEqual(p, p.replace())
p2 = p.replace(annotation=1)
self.assertEqual(p2.annotation, 1)
p2 = p2.replace(annotation=p2.empty)
self.assertEqual(p, p2)
p2 = p2.replace(name='bar')
self.assertEqual(p2.name, 'bar')
self.assertNotEqual(p2, p)
with self.assertRaisesRegex(ValueError,
'name is a required attribute'):
p2 = p2.replace(name=p2.empty)
p2 = p2.replace(name='foo', default=None)
self.assertIs(p2.default, None)
self.assertNotEqual(p2, p)
p2 = p2.replace(name='foo', default=p2.empty)
self.assertIs(p2.default, p2.empty)
p2 = p2.replace(default=42, kind=p2.POSITIONAL_OR_KEYWORD)
self.assertEqual(p2.kind, p2.POSITIONAL_OR_KEYWORD)
self.assertNotEqual(p2, p)
with self.assertRaisesRegex(ValueError, 'invalid value for'):
p2 = p2.replace(kind=p2.empty)
p2 = p2.replace(kind=p2.KEYWORD_ONLY)
self.assertEqual(p2, p)
def test_signature_parameter_positional_only(self):
with self.assertRaisesRegex(TypeError, 'name must be a str'):
inspect.Parameter(None, kind=inspect.Parameter.POSITIONAL_ONLY)
def test_signature_parameter_immutability(self):
p = inspect.Parameter('spam', kind=inspect.Parameter.KEYWORD_ONLY)
with self.assertRaises(AttributeError):
p.foo = 'bar'
with self.assertRaises(AttributeError):
p.kind = 123
class TestSignatureBind(unittest.TestCase):
@staticmethod
def call(func, *args, **kwargs):
sig = inspect.signature(func)
ba = sig.bind(*args, **kwargs)
return func(*ba.args, **ba.kwargs)
def test_signature_bind_empty(self):
def test():
return 42
self.assertEqual(self.call(test), 42)
with self.assertRaisesRegex(TypeError, 'too many positional arguments'):
self.call(test, 1)
with self.assertRaisesRegex(TypeError, 'too many positional arguments'):
self.call(test, 1, spam=10)
with self.assertRaisesRegex(TypeError, 'too many keyword arguments'):
self.call(test, spam=1)
def test_signature_bind_var(self):
def test(*args, **kwargs):
return args, kwargs
self.assertEqual(self.call(test), ((), {}))
self.assertEqual(self.call(test, 1), ((1,), {}))
self.assertEqual(self.call(test, 1, 2), ((1, 2), {}))
self.assertEqual(self.call(test, foo='bar'), ((), {'foo': 'bar'}))
self.assertEqual(self.call(test, 1, foo='bar'), ((1,), {'foo': 'bar'}))
self.assertEqual(self.call(test, args=10), ((), {'args': 10}))
self.assertEqual(self.call(test, 1, 2, foo='bar'),
((1, 2), {'foo': 'bar'}))
def test_signature_bind_just_args(self):
def test(a, b, c):
return a, b, c
self.assertEqual(self.call(test, 1, 2, 3), (1, 2, 3))
with self.assertRaisesRegex(TypeError, 'too many positional arguments'):
self.call(test, 1, 2, 3, 4)
with self.assertRaisesRegex(TypeError, "'b' parameter lacking default"):
self.call(test, 1)
with self.assertRaisesRegex(TypeError, "'a' parameter lacking default"):
self.call(test)
def test(a, b, c=10):
return a, b, c
self.assertEqual(self.call(test, 1, 2, 3), (1, 2, 3))
self.assertEqual(self.call(test, 1, 2), (1, 2, 10))
def test(a=1, b=2, c=3):
return a, b, c
self.assertEqual(self.call(test, a=10, c=13), (10, 2, 13))
self.assertEqual(self.call(test, a=10), (10, 2, 3))
self.assertEqual(self.call(test, b=10), (1, 10, 3))
def test_signature_bind_varargs_order(self):
def test(*args):
return args
self.assertEqual(self.call(test), ())
self.assertEqual(self.call(test, 1, 2, 3), (1, 2, 3))
def test_signature_bind_args_and_varargs(self):
def test(a, b, c=3, *args):
return a, b, c, args
self.assertEqual(self.call(test, 1, 2, 3, 4, 5), (1, 2, 3, (4, 5)))
self.assertEqual(self.call(test, 1, 2), (1, 2, 3, ()))
self.assertEqual(self.call(test, b=1, a=2), (2, 1, 3, ()))
self.assertEqual(self.call(test, 1, b=2), (1, 2, 3, ()))
with self.assertRaisesRegex(TypeError,
"multiple values for argument 'c'"):
self.call(test, 1, 2, 3, c=4)
def test_signature_bind_just_kwargs(self):
def test(**kwargs):
return kwargs
self.assertEqual(self.call(test), {})
self.assertEqual(self.call(test, foo='bar', spam='ham'),
{'foo': 'bar', 'spam': 'ham'})
def test_signature_bind_args_and_kwargs(self):
def test(a, b, c=3, **kwargs):
return a, b, c, kwargs
self.assertEqual(self.call(test, 1, 2), (1, 2, 3, {}))
self.assertEqual(self.call(test, 1, 2, foo='bar', spam='ham'),
(1, 2, 3, {'foo': 'bar', 'spam': 'ham'}))
self.assertEqual(self.call(test, b=2, a=1, foo='bar', spam='ham'),
(1, 2, 3, {'foo': 'bar', 'spam': 'ham'}))
self.assertEqual(self.call(test, a=1, b=2, foo='bar', spam='ham'),
(1, 2, 3, {'foo': 'bar', 'spam': 'ham'}))
self.assertEqual(self.call(test, 1, b=2, foo='bar', spam='ham'),
(1, 2, 3, {'foo': 'bar', 'spam': 'ham'}))
self.assertEqual(self.call(test, 1, b=2, c=4, foo='bar', spam='ham'),
(1, 2, 4, {'foo': 'bar', 'spam': 'ham'}))
self.assertEqual(self.call(test, 1, 2, 4, foo='bar'),
(1, 2, 4, {'foo': 'bar'}))
self.assertEqual(self.call(test, c=5, a=4, b=3),
(4, 3, 5, {}))
def test_signature_bind_kwonly(self):
def test(*, foo):
return foo
with self.assertRaisesRegex(TypeError,
'too many positional arguments'):
self.call(test, 1)
self.assertEqual(self.call(test, foo=1), 1)
def test(a, *, foo=1, bar):
return foo
with self.assertRaisesRegex(TypeError,
"'bar' parameter lacking default value"):
self.call(test, 1)
def test(foo, *, bar):
return foo, bar
self.assertEqual(self.call(test, 1, bar=2), (1, 2))
self.assertEqual(self.call(test, bar=2, foo=1), (1, 2))
with self.assertRaisesRegex(TypeError,
'too many keyword arguments'):
self.call(test, bar=2, foo=1, spam=10)
with self.assertRaisesRegex(TypeError,
'too many positional arguments'):
self.call(test, 1, 2)
with self.assertRaisesRegex(TypeError,
'too many positional arguments'):
self.call(test, 1, 2, bar=2)
with self.assertRaisesRegex(TypeError,
'too many keyword arguments'):
self.call(test, 1, bar=2, spam='ham')
with self.assertRaisesRegex(TypeError,
"'bar' parameter lacking default value"):
self.call(test, 1)
def test(foo, *, bar, **bin):
return foo, bar, bin
self.assertEqual(self.call(test, 1, bar=2), (1, 2, {}))
self.assertEqual(self.call(test, foo=1, bar=2), (1, 2, {}))
self.assertEqual(self.call(test, 1, bar=2, spam='ham'),
(1, 2, {'spam': 'ham'}))
self.assertEqual(self.call(test, spam='ham', foo=1, bar=2),
(1, 2, {'spam': 'ham'}))
with self.assertRaisesRegex(TypeError,
"'foo' parameter lacking default value"):
self.call(test, spam='ham', bar=2)
self.assertEqual(self.call(test, 1, bar=2, bin=1, spam=10),
(1, 2, {'bin': 1, 'spam': 10}))
def test_signature_bind_arguments(self):
def test(a, *args, b, z=100, **kwargs):
pass
sig = inspect.signature(test)
ba = sig.bind(10, 20, b=30, c=40, args=50, kwargs=60)
        # we won't have a 'z' argument in the bound arguments object, as we
        # didn't pass it to 'bind'
self.assertEqual(tuple(ba.arguments.items()),
(('a', 10), ('args', (20,)), ('b', 30),
('kwargs', {'c': 40, 'args': 50, 'kwargs': 60})))
self.assertEqual(ba.kwargs,
{'b': 30, 'c': 40, 'args': 50, 'kwargs': 60})
self.assertEqual(ba.args, (10, 20))
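        # Note: ba.arguments holds only what was explicitly bound; defaults
        # such as z=100 are applied by the callable itself at call time
        # (later Python versions grew BoundArguments.apply_defaults() to
        # fill them in eagerly, but that is not relied on here).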
def test_signature_bind_positional_only(self):
P = inspect.Parameter
def test(a_po, b_po, c_po=3, foo=42, *, bar=50, **kwargs):
return a_po, b_po, c_po, foo, bar, kwargs
sig = inspect.signature(test)
new_params = collections.OrderedDict(tuple(sig.parameters.items()))
for name in ('a_po', 'b_po', 'c_po'):
new_params[name] = new_params[name].replace(kind=P.POSITIONAL_ONLY)
new_sig = sig.replace(parameters=new_params.values())
test.__signature__ = new_sig
self.assertEqual(self.call(test, 1, 2, 4, 5, bar=6),
(1, 2, 4, 5, 6, {}))
self.assertEqual(self.call(test, 1, 2),
(1, 2, 3, 42, 50, {}))
self.assertEqual(self.call(test, 1, 2, foo=4, bar=5),
(1, 2, 3, 4, 5, {}))
with self.assertRaisesRegex(TypeError, "but was passed as a keyword"):
self.call(test, 1, 2, foo=4, bar=5, c_po=10)
with self.assertRaisesRegex(TypeError, "parameter is positional only"):
self.call(test, 1, 2, c_po=4)
with self.assertRaisesRegex(TypeError, "parameter is positional only"):
self.call(test, a_po=1, b_po=2)
def test_signature_bind_with_self_arg(self):
# Issue #17071: one of the parameters is named "self
def test(a, self, b):
pass
sig = inspect.signature(test)
ba = sig.bind(1, 2, 3)
self.assertEqual(ba.args, (1, 2, 3))
ba = sig.bind(1, self=2, b=3)
self.assertEqual(ba.args, (1, 2, 3))
def test_signature_bind_vararg_name(self):
def test(a, *args):
return a, args
sig = inspect.signature(test)
with self.assertRaisesRegex(TypeError, "too many keyword arguments"):
sig.bind(a=0, args=1)
def test(*args, **kwargs):
return args, kwargs
self.assertEqual(self.call(test, args=1), ((), {'args': 1}))
sig = inspect.signature(test)
ba = sig.bind(args=1)
self.assertEqual(ba.arguments, {'kwargs': {'args': 1}})
class TestBoundArguments(unittest.TestCase):
def test_signature_bound_arguments_unhashable(self):
def foo(a): pass
ba = inspect.signature(foo).bind(1)
with self.assertRaisesRegex(TypeError, 'unhashable type'):
hash(ba)
def test_signature_bound_arguments_equality(self):
def foo(a): pass
ba = inspect.signature(foo).bind(1)
self.assertEqual(ba, ba)
ba2 = inspect.signature(foo).bind(1)
self.assertEqual(ba, ba2)
ba3 = inspect.signature(foo).bind(2)
self.assertNotEqual(ba, ba3)
ba3.arguments['a'] = 1
self.assertEqual(ba, ba3)
def bar(b): pass
ba4 = inspect.signature(bar).bind(1)
self.assertNotEqual(ba, ba4)
class TestSignaturePrivateHelpers(unittest.TestCase):
def test_signature_get_bound_param(self):
getter = inspect._signature_get_bound_param
self.assertEqual(getter('($self)'), 'self')
self.assertEqual(getter('($self, obj)'), 'self')
self.assertEqual(getter('($cls, /, obj)'), 'cls')
def _strip_non_python_syntax(self, input,
clean_signature, self_parameter, last_positional_only):
computed_clean_signature, \
computed_self_parameter, \
computed_last_positional_only = \
inspect._signature_strip_non_python_syntax(input)
self.assertEqual(computed_clean_signature, clean_signature)
self.assertEqual(computed_self_parameter, self_parameter)
self.assertEqual(computed_last_positional_only, last_positional_only)
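        # The helper under test parses Argument Clinic style signatures: a
        # leading '$' presumably marks the implicit bound parameter
        # (self/cls, reported by index), and a trailing '/' marks the index
        # of the last positional-only parameter; both markers are stripped
        # from the cleaned signature text.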
def test_signature_strip_non_python_syntax(self):
self._strip_non_python_syntax(
"($module, /, path, mode, *, dir_fd=None, " +
"effective_ids=False,\n follow_symlinks=True)",
"(module, path, mode, *, dir_fd=None, " +
"effective_ids=False, follow_symlinks=True)",
0,
0)
self._strip_non_python_syntax(
"($module, word, salt, /)",
"(module, word, salt)",
0,
2)
self._strip_non_python_syntax(
"(x, y=None, z=None, /)",
"(x, y=None, z=None)",
None,
2)
self._strip_non_python_syntax(
"(x, y=None, z=None)",
"(x, y=None, z=None)",
None,
None)
self._strip_non_python_syntax(
"(x,\n y=None,\n z = None )",
"(x, y=None, z=None)",
None,
None)
self._strip_non_python_syntax(
"",
"",
None,
None)
self._strip_non_python_syntax(
None,
None,
None,
None)
class TestUnwrap(unittest.TestCase):
def test_unwrap_one(self):
def func(a, b):
return a + b
wrapper = functools.lru_cache(maxsize=20)(func)
self.assertIs(inspect.unwrap(wrapper), func)
def test_unwrap_several(self):
def func(a, b):
return a + b
wrapper = func
for __ in range(10):
@functools.wraps(wrapper)
def wrapper():
pass
self.assertIsNot(wrapper.__wrapped__, func)
self.assertIs(inspect.unwrap(wrapper), func)
def test_stop(self):
def func1(a, b):
return a + b
@functools.wraps(func1)
def func2():
pass
@functools.wraps(func2)
def wrapper():
pass
func2.stop_here = 1
unwrapped = inspect.unwrap(wrapper,
stop=(lambda f: hasattr(f, "stop_here")))
self.assertIs(unwrapped, func2)
def test_cycle(self):
def func1(): pass
func1.__wrapped__ = func1
with self.assertRaisesRegex(ValueError, 'wrapper loop'):
inspect.unwrap(func1)
def func2(): pass
func2.__wrapped__ = func1
func1.__wrapped__ = func2
with self.assertRaisesRegex(ValueError, 'wrapper loop'):
inspect.unwrap(func1)
with self.assertRaisesRegex(ValueError, 'wrapper loop'):
inspect.unwrap(func2)
def test_unhashable(self):
def func(): pass
func.__wrapped__ = None
class C:
__hash__ = None
__wrapped__ = func
self.assertIsNone(inspect.unwrap(C()))
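        # unwrap() can cope with unhashable wrappers because it tracks the
        # chain by id() rather than by putting the objects themselves in a
        # set -- at least, that is the behaviour this test guards.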
class TestMain(unittest.TestCase):
def test_only_source(self):
module = importlib.import_module('unittest')
rc, out, err = assert_python_ok('-m', 'inspect',
'unittest')
lines = out.decode().splitlines()
# ignore the final newline
self.assertEqual(lines[:-1], inspect.getsource(module).splitlines())
self.assertEqual(err, b'')
def test_custom_getattr(self):
def foo():
pass
foo.__signature__ = 42
with self.assertRaises(TypeError):
inspect.signature(foo)
@unittest.skipIf(ThreadPoolExecutor is None,
'threads required to test __qualname__ for source files')
def test_qualname_source(self):
rc, out, err = assert_python_ok('-m', 'inspect',
'concurrent.futures:ThreadPoolExecutor')
lines = out.decode().splitlines()
# ignore the final newline
self.assertEqual(lines[:-1],
inspect.getsource(ThreadPoolExecutor).splitlines())
self.assertEqual(err, b'')
def test_builtins(self):
module = importlib.import_module('unittest')
_, out, err = assert_python_failure('-m', 'inspect',
'sys')
lines = err.decode().splitlines()
self.assertEqual(lines, ["Can't get info for builtin modules."])
def test_details(self):
module = importlib.import_module('unittest')
rc, out, err = assert_python_ok('-m', 'inspect',
'unittest', '--details')
output = out.decode()
# Just a quick sanity check on the output
self.assertIn(module.__name__, output)
self.assertIn(module.__file__, output)
if not sys.flags.optimize:
self.assertIn(module.__cached__, output)
self.assertEqual(err, b'')
class TestReload(unittest.TestCase):
src_before = textwrap.dedent("""\
def foo():
print("Bla")
""")
src_after = textwrap.dedent("""\
def foo():
print("Oh no!")
""")
def assertInspectEqual(self, path, source):
inspected_src = inspect.getsource(source)
with open(path) as src:
self.assertEqual(
src.read().splitlines(True),
inspected_src.splitlines(True)
)
def test_getsource_reload(self):
# see issue 1218234
with _ready_to_import('reload_bug', self.src_before) as (name, path):
module = importlib.import_module(name)
self.assertInspectEqual(path, module)
with open(path, 'w') as src:
src.write(self.src_after)
self.assertInspectEqual(path, module)
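        # The second assertion only holds because getsource() goes through
        # linecache, which checks the file's mtime/size and rereads it
        # after the rewrite -- the stale module object is never reloaded.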
def test_main():
run_unittest(
TestDecorators, TestRetrievingSourceCode, TestOneliners, TestBuggyCases,
TestInterpreterStack, TestClassesAndFunctions, TestPredicates,
TestGetcallargsFunctions, TestGetcallargsMethods,
TestGetcallargsUnboundMethods, TestGetattrStatic, TestGetGeneratorState,
TestNoEOL, TestSignatureObject, TestSignatureBind, TestParameterObject,
TestBoundArguments, TestSignaturePrivateHelpers, TestGetClosureVars,
TestUnwrap, TestMain, TestReload
)
if __name__ == "__main__":
test_main()
|
gpl-2.0
|
vladimir-ipatov/ganeti
|
test/py/cmdlib/backup_unittest.py
|
1
|
7854
|
#!/usr/bin/python
#
# Copyright (C) 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Tests for LUBackup*"""
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import query
from testsupport import *
import testutils
class TestLUBackupQuery(CmdlibTestCase):
def setUp(self):
super(TestLUBackupQuery, self).setUp()
self.fields = query._BuildExportFields().keys()
def testFailingExportList(self):
self.rpc.call_export_list.return_value = \
self.RpcResultsBuilder() \
.AddFailedNode(self.master) \
.Build()
op = opcodes.OpBackupQuery(nodes=[self.master.name])
ret = self.ExecOpCode(op)
self.assertEqual({self.master.name: False}, ret)
def testQueryOneNode(self):
self.rpc.call_export_list.return_value = \
self.RpcResultsBuilder() \
.AddSuccessfulNode(self.master,
["mock_export1", "mock_export2"]) \
.Build()
op = opcodes.OpBackupQuery(nodes=[self.master.name])
ret = self.ExecOpCode(op)
self.assertEqual({self.master.name: ["mock_export1", "mock_export2"]}, ret)
def testQueryAllNodes(self):
node = self.cfg.AddNewNode()
self.rpc.call_export_list.return_value = \
self.RpcResultsBuilder() \
.AddSuccessfulNode(self.master, ["mock_export1"]) \
.AddSuccessfulNode(node, ["mock_export2"]) \
.Build()
op = opcodes.OpBackupQuery()
ret = self.ExecOpCode(op)
self.assertEqual({
self.master.name: ["mock_export1"],
node.name: ["mock_export2"]
}, ret)
class TestLUBackupPrepare(CmdlibTestCase):
@patchUtils("instance_utils")
def testPrepareLocalExport(self, utils):
utils.ReadOneLineFile.return_value = "cluster_secret"
inst = self.cfg.AddNewInstance()
op = opcodes.OpBackupPrepare(instance_name=inst.name,
mode=constants.EXPORT_MODE_LOCAL)
self.ExecOpCode(op)
@patchUtils("instance_utils")
def testPrepareRemoteExport(self, utils):
utils.ReadOneLineFile.return_value = "cluster_secret"
inst = self.cfg.AddNewInstance()
self.rpc.call_x509_cert_create.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(inst.primary_node,
("key_name",
testutils.ReadTestData("cert1.pem")))
op = opcodes.OpBackupPrepare(instance_name=inst.name,
mode=constants.EXPORT_MODE_REMOTE)
self.ExecOpCode(op)
class TestLUBackupExportBase(CmdlibTestCase):
def setUp(self):
super(TestLUBackupExportBase, self).setUp()
self.rpc.call_instance_start.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.master, True)
self.rpc.call_blockdev_assemble.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.master, ("/dev/mock_path",
"/dev/mock_link_name"))
self.rpc.call_blockdev_shutdown.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.master, None)
self.rpc.call_blockdev_snapshot.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.master, ("mock_vg", "mock_id"))
self.rpc.call_blockdev_remove.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.master, None)
self.rpc.call_export_start.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.master, "export_daemon")
def ImpExpStatus(node_uuid, name):
return self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(node_uuid,
[objects.ImportExportStatus(
exit_status=0
)])
self.rpc.call_impexp_status.side_effect = ImpExpStatus
def ImpExpCleanup(node_uuid, name):
return self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(node_uuid)
self.rpc.call_impexp_cleanup.side_effect = ImpExpCleanup
self.rpc.call_finalize_export.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.master, None)
def testRemoveRunningInstanceWithoutShutdown(self):
inst = self.cfg.AddNewInstance(admin_state=constants.ADMINST_UP)
op = opcodes.OpBackupExport(instance_name=inst.name,
target_node=self.master.name,
shutdown=False,
remove_instance=True)
self.ExecOpCodeExpectOpPrereqError(
op, "Can not remove instance without shutting it down before")
def testUnsupportedDiskTemplate(self):
inst = self.cfg.AddNewInstance(disk_template=constants.DT_FILE)
op = opcodes.OpBackupExport(instance_name=inst.name,
target_node=self.master.name)
self.ExecOpCodeExpectOpPrereqError(
op, "Export not supported for instances with file-based disks")
class TestLUBackupExportLocalExport(TestLUBackupExportBase):
def setUp(self):
super(TestLUBackupExportLocalExport, self).setUp()
self.inst = self.cfg.AddNewInstance()
self.target_node = self.cfg.AddNewNode()
self.op = opcodes.OpBackupExport(mode=constants.EXPORT_MODE_LOCAL,
instance_name=self.inst.name,
target_node=self.target_node.name)
self.rpc.call_import_start.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.target_node, "import_daemon")
def testExportWithShutdown(self):
inst = self.cfg.AddNewInstance(admin_state=constants.ADMINST_UP)
op = self.CopyOpCode(self.op, instance_name=inst.name, shutdown=True)
self.ExecOpCode(op)
def testExportDeactivatedDisks(self):
self.ExecOpCode(self.op)
def testExportRemoveInstance(self):
op = self.CopyOpCode(self.op, remove_instance=True)
self.ExecOpCode(op)
class TestLUBackupExportRemoteExport(TestLUBackupExportBase):
def setUp(self):
super(TestLUBackupExportRemoteExport, self).setUp()
self.inst = self.cfg.AddNewInstance()
self.op = opcodes.OpBackupExport(mode=constants.EXPORT_MODE_REMOTE,
instance_name=self.inst.name,
target_node=[],
x509_key_name=["mock_key_name"],
destination_x509_ca="mock_dest_ca")
def testRemoteExportWithoutX509KeyName(self):
op = self.CopyOpCode(self.op, x509_key_name=self.REMOVE)
self.ExecOpCodeExpectOpPrereqError(op,
"Missing X509 key name for encryption")
def testRemoteExportWithoutX509DestCa(self):
op = self.CopyOpCode(self.op, destination_x509_ca=self.REMOVE)
self.ExecOpCodeExpectOpPrereqError(op,
"Missing destination X509 CA")
if __name__ == "__main__":
testutils.GanetiTestProgram()
|
gpl-2.0
|
radicalbit/ambari
|
ambari-server/src/main/resources/common-services/HDFS/3.0.0.3.0/package/scripts/balancer-emulator/hdfs-command.py
|
8
|
1394
|
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import time
import sys
from threading import Thread
# default the polling interval so callers such as rebalancer_out() below,
# which omit it, still work
def write_function(path, handle, interval=1.5):
with open(path) as f:
for line in f:
handle.write(line)
handle.flush()
time.sleep(interval)
thread = Thread(target = write_function, args = ('balancer.out', sys.stdout, 1.5))
thread.start()
threaderr = Thread(target = write_function, args = ('balancer.err', sys.stderr, 1.5 * 0.023))
threaderr.start()
thread.join()
threaderr.join()
def rebalancer_out():
    write_function('balancer.out', sys.stdout)
def rebalancer_err():
    write_function('balancer.err', sys.stderr)
|
apache-2.0
|
johnbolia/plyer
|
plyer/facades/orientation.py
|
2
|
1830
|
'''
Orientation
===========
The :class:`Orientation` facade provides access to public methods to set the
orientation of your device.
.. note::
    These settings are generally guidelines; the operating
system may choose to ignore them, or they may be overridden by
other system components.
.. versionadded:: 1.2.4
Simple Examples
---------------
To set landscape::
>>> from plyer import orientation
>>> orientation.set_landscape()
To set portrait::
>>> orientation.set_portrait()
To set sensor::
>>> orientation.set_sensor()
'''
class Orientation(object):
'''
Orientation facade.
'''
def set_landscape(self, reverse=False):
'''
Rotate the app to a landscape orientation.
:param reverse: If True, uses the opposite of the natural
orientation.
'''
self._set_landscape(reverse=reverse)
def set_portrait(self, reverse=False):
'''
Rotate the app to a portrait orientation.
:param reverse: If True, uses the opposite of the natural
orientation.
'''
self._set_portrait(reverse=reverse)
def set_sensor(self, mode='any'):
'''
Rotate freely following sensor information from the device.
:param mode: The rotation mode, should be one of 'any' (rotate
to any orientation), 'landscape' (choose nearest
landscape mode) or 'portrait' (choose nearest
portrait mode). Defaults to 'any'.
'''
self._set_sensor(mode=mode)
# private
def _set_landscape(self, **kwargs):
raise NotImplementedError()
def _set_portrait(self, **kwargs):
raise NotImplementedError()
def _set_sensor(self, **kwargs):
raise NotImplementedError()
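# A minimal sketch of how a platform backend might implement this facade.
# ``_ConsoleOrientation`` is a hypothetical example class, not part of plyer;
# a real backend would call into platform APIs from the ``_set_*`` hooks
# instead of printing.
class _ConsoleOrientation(Orientation):
    def _set_landscape(self, **kwargs):
        print('landscape, reverse={}'.format(kwargs.get('reverse', False)))
    def _set_portrait(self, **kwargs):
        print('portrait, reverse={}'.format(kwargs.get('reverse', False)))
    def _set_sensor(self, **kwargs):
        print('sensor, mode={}'.format(kwargs.get('mode', 'any')))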
|
mit
|
AlceConsorcio/account-closing
|
account_cutoff_prepaid/account_cutoff.py
|
6
|
7911
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Account Cut-off Prepaid module for OpenERP
# Copyright (C) 2013 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
from openerp.tools.translate import _
from datetime import datetime
class account_cutoff(orm.Model):
_inherit = 'account.cutoff'
_columns = {
'source_journal_ids': fields.many2many(
'account.journal', id1='cutoff_id', id2='journal_id',
string='Source Journals', readonly=True,
states={'draft': [('readonly', False)]}),
}
def _get_default_source_journals(self, cr, uid, context=None):
if context is None:
context = {}
journal_obj = self.pool['account.journal']
res = []
type = context.get('type')
mapping = {
'prepaid_expense': ('purchase', 'purchase_refund'),
'prepaid_revenue': ('sale', 'sale_refund'),
}
if type in mapping:
src_journal_ids = journal_obj.search(
cr, uid, [('type', 'in', mapping[type])])
if src_journal_ids:
res = src_journal_ids
return res
_defaults = {
'source_journal_ids': _get_default_source_journals,
}
_sql_constraints = [(
'date_type_company_uniq',
'unique(cutoff_date, company_id, type)',
'A cut-off of the same type already exists with this cut-off date !'
)]
def _prepare_prepaid_lines(
self, cr, uid, ids, aml, cur_cutoff, mapping, context=None):
start_date = datetime.strptime(aml['start_date'], '%Y-%m-%d')
end_date = datetime.strptime(aml['end_date'], '%Y-%m-%d')
cutoff_date_str = cur_cutoff['cutoff_date']
cutoff_date = datetime.strptime(cutoff_date_str, '%Y-%m-%d')
# Here, we compute the amount of the cutoff
# That's the important part !
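        # Worked example (illustrative figures): a 1200.00 move line covering
        # 2014-01-01 .. 2014-12-31 gives total_days = 365; with a cut-off
        # date of 2014-03-31, after_cutoff_days = 275, so the prorated amount
        # is 1200 * 275 / 365 ~= 904.11 (signed according to the credit/debit
        # direction below).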
total_days = (end_date - start_date).days + 1
if aml['start_date'] > cutoff_date_str:
after_cutoff_days = total_days
cutoff_amount = -1 * (aml['credit'] - aml['debit'])
else:
after_cutoff_days = (end_date - cutoff_date).days
if total_days:
cutoff_amount = -1 * (aml['credit'] - aml['debit'])\
* after_cutoff_days / total_days
else:
raise orm.except_orm(
_('Error:'),
"Should never happen. Total days should always be > 0")
# we use account mapping here
if aml['account_id'][0] in mapping:
cutoff_account_id = mapping[aml['account_id'][0]]
else:
cutoff_account_id = aml['account_id'][0]
res = {
'parent_id': ids[0],
'move_line_id': aml['id'],
'partner_id': aml['partner_id'] and aml['partner_id'][0] or False,
'name': aml['name'],
'start_date': aml['start_date'],
'end_date': aml['end_date'],
'account_id': aml['account_id'][0],
'cutoff_account_id': cutoff_account_id,
'analytic_account_id': (aml['analytic_account_id'][0]
if aml['analytic_account_id'] else False),
'total_days': total_days,
'after_cutoff_days': after_cutoff_days,
'amount': aml['credit'] - aml['debit'],
'currency_id': cur_cutoff['company_currency_id'][0],
'cutoff_amount': cutoff_amount,
}
return res
def get_prepaid_lines(self, cr, uid, ids, context=None):
assert len(ids) == 1,\
'This function should only be used for a single id at a time'
aml_obj = self.pool['account.move.line']
line_obj = self.pool['account.cutoff.line']
mapping_obj = self.pool['account.cutoff.mapping']
cur_cutoff = self.read(
cr, uid, ids[0], [
'line_ids', 'source_journal_ids', 'cutoff_date', 'company_id',
'type', 'company_currency_id'
],
context=context)
src_journal_ids = cur_cutoff['source_journal_ids']
if not src_journal_ids:
raise orm.except_orm(
_('Error:'), _("You should set at least one Source Journal."))
cutoff_date_str = cur_cutoff['cutoff_date']
# Delete existing lines
if cur_cutoff['line_ids']:
line_obj.unlink(cr, uid, cur_cutoff['line_ids'], context=context)
# Search for account move lines in the source journals
aml_ids = aml_obj.search(cr, uid, [
('start_date', '!=', False),
('journal_id', 'in', src_journal_ids),
('end_date', '>', cutoff_date_str),
('date', '<=', cutoff_date_str)
], context=context)
# Create mapping dict
mapping = mapping_obj._get_mapping_dict(
cr, uid, cur_cutoff['company_id'][0], cur_cutoff['type'],
context=context)
# Loop on selected account move lines to create the cutoff lines
for aml in aml_obj.read(
cr, uid, aml_ids, [
'credit', 'debit', 'start_date', 'end_date', 'account_id',
'analytic_account_id', 'partner_id', 'name'
],
context=context):
line_obj.create(
cr, uid, self._prepare_prepaid_lines(
cr, uid, ids, aml, cur_cutoff, mapping, context=context),
context=context)
return True
def _inherit_default_cutoff_account_id(self, cr, uid, context=None):
if context is None:
context = {}
account_id = super(account_cutoff, self).\
_inherit_default_cutoff_account_id(cr, uid, context=context)
type = context.get('type')
company = self.pool['res.users'].browse(
cr, uid, uid, context=context).company_id
if type == 'prepaid_revenue':
account_id = company.default_prepaid_revenue_account_id.id or False
elif type == 'prepaid_expense':
account_id = company.default_prepaid_expense_account_id.id or False
return account_id
class account_cutoff_line(orm.Model):
_inherit = 'account.cutoff.line'
_columns = {
'move_line_id': fields.many2one(
            'account.move.line', 'Account Move Line', readonly=True),
'move_date': fields.related(
'move_line_id', 'date', type='date',
string='Account Move Date', readonly=True),
'invoice_id': fields.related(
'move_line_id', 'invoice', type='many2one',
relation='account.invoice', string='Invoice', readonly=True),
'start_date': fields.date('Start Date', readonly=True),
'end_date': fields.date('End Date', readonly=True),
'total_days': fields.integer('Total Number of Days', readonly=True),
'after_cutoff_days': fields.integer(
'Number of Days after Cut-off Date', readonly=True),
}
|
agpl-3.0
|
evidation-health/bokeh
|
bokeh/tests/test_sources.py
|
26
|
3245
|
from __future__ import absolute_import
import unittest
from unittest import skipIf
import warnings
try:
import pandas as pd
is_pandas = True
except ImportError as e:
is_pandas = False
from bokeh.models.sources import DataSource, ColumnDataSource, ServerDataSource
class TestColumnDataSources(unittest.TestCase):
def test_basic(self):
ds = ColumnDataSource()
self.assertTrue(isinstance(ds, DataSource))
def test_init_dict_arg(self):
data = dict(a=[1], b=[2])
ds = ColumnDataSource(data)
self.assertEquals(ds.data, data)
self.assertEquals(set(ds.column_names), set(data.keys()))
def test_init_dict_data_kwarg(self):
data = dict(a=[1], b=[2])
ds = ColumnDataSource(data=data)
self.assertEquals(ds.data, data)
self.assertEquals(set(ds.column_names), set(data.keys()))
@skipIf(not is_pandas, "pandas not installed")
def test_init_pandas_arg(self):
data = dict(a=[1, 2], b=[2, 3])
df = pd.DataFrame(data)
ds = ColumnDataSource(df)
self.assertTrue(set(df.columns).issubset(set(ds.column_names)))
for key in data.keys():
self.assertEquals(list(df[key]), data[key])
self.assertEqual(set(ds.column_names) - set(df.columns), set(["index"]))
@skipIf(not is_pandas, "pandas not installed")
def test_init_pandas_data_kwarg(self):
data = dict(a=[1, 2], b=[2, 3])
df = pd.DataFrame(data)
ds = ColumnDataSource(data=df)
self.assertTrue(set(df.columns).issubset(set(ds.column_names)))
for key in data.keys():
self.assertEquals(list(df[key]), data[key])
self.assertEqual(set(ds.column_names) - set(df.columns), set(["index"]))
def test_add_with_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], name="foo")
self.assertEquals(name, "foo")
name = ds.add([4,5,6], name="bar")
self.assertEquals(name, "bar")
def test_add_without_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3])
self.assertEquals(name, "Series 0")
name = ds.add([4,5,6])
self.assertEquals(name, "Series 1")
def test_add_with_and_without_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], "foo")
self.assertEquals(name, "foo")
name = ds.add([4,5,6])
self.assertEquals(name, "Series 1")
def test_remove_exists(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], "foo")
assert name
ds.remove("foo")
self.assertEquals(ds.column_names, [])
def test_remove_exists2(self):
with warnings.catch_warnings(record=True) as w:
ds = ColumnDataSource()
ds.remove("foo")
self.assertEquals(ds.column_names, [])
self.assertEquals(len(w), 1)
self.assertEquals(w[0].category, UserWarning)
self.assertEquals(str(w[0].message), "Unable to find column 'foo' in data source")
class TestServerDataSources(unittest.TestCase):
def test_basic(self):
ds = ServerDataSource()
self.assertTrue(isinstance(ds, DataSource))
if __name__ == "__main__":
unittest.main()
|
bsd-3-clause
|
jriehl/numba
|
examples/mergesort.py
|
2
|
3166
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
An inplace and an out-of-place implementation of recursive mergesort.
This is not an efficient sort implementation.
The purpose is to demonstrate recursion support.
"""
from __future__ import print_function, division, absolute_import
from timeit import default_timer as timer
import numpy as np
from numba import njit
@njit
def mergesort_inplace(arr):
"Inplace mergesort"
assert arr.ndim == 1
if arr.size > 2:
mid = arr.size // 2
first = arr[:mid]
second = arr[mid:]
mergesort_inplace(first)
mergesort_inplace(second)
left = 0
right = mid
while left < mid and right < arr.size:
if arr[left] <= arr[right]:
left += 1
else:
temp = arr[right]
right += 1
                    # shift the left run right by one to make room for temp
for i in range(mid, left, -1):
arr[i] = arr[i - 1]
arr[left] = temp
left += 1
mid += 1
elif arr.size == 2:
a, b = arr
arr[0], arr[1] = ((a, b) if a <= b else (b, a))
return arr
@njit
def mergesort(arr):
"mergesort"
assert arr.ndim == 1
if arr.size > 2:
mid = arr.size // 2
first = mergesort(arr[:mid].copy())
second = mergesort(arr[mid:].copy())
left = right = 0
writeidx = 0
while left < first.size and right < second.size:
if first[left] <= second[right]:
arr[writeidx] = first[left]
left += 1
else:
arr[writeidx] = second[right]
right += 1
writeidx += 1
while left < first.size:
arr[writeidx] = first[left]
writeidx += 1
left += 1
while right < second.size:
arr[writeidx] = second[right]
writeidx += 1
right += 1
elif arr.size == 2:
a, b = arr
arr[0], arr[1] = ((a, b) if a <= b else (b, a))
return arr
def run(mergesort):
print(('Running %s' % mergesort.py_func.__name__).center(80, '='))
# Small case (warmup)
print("Warmup")
arr = np.random.random(6)
expect = arr.copy()
expect.sort()
print("unsorted", arr)
res = mergesort(arr)
print(" sorted", res)
    # Test correctness
assert np.all(expect == res)
print()
# Large case
nelem = 10**3
print("Sorting %d float64" % nelem)
arr = np.random.random(nelem)
expect = arr.copy()
# Run pure python version
ts = timer()
mergesort.py_func(arr.copy())
te = timer()
print('python took %.3fms' % (1000 * (te - ts)))
# Run numpy version
ts = timer()
expect.sort()
te = timer()
print('numpy took %.3fms' % (1000 * (te - ts)))
# Run numba version
ts = timer()
res = mergesort(arr)
te = timer()
print('numba took %.3fms' % (1000 * (te - ts)))
    # Test correctness
assert np.all(expect == res)
def main():
run(mergesort)
run(mergesort_inplace)
if __name__ == '__main__':
main()
|
bsd-2-clause
|
nischalsheth/contrail-controller
|
src/config/common/svc_info.py
|
6
|
2572
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
_MGMT_STR = "management"
_LEFT_STR = "left"
_RIGHT_STR = "right"
_SVC_VN_MGMT = "svc-vn-mgmt"
_SVC_VN_LEFT = "svc-vn-left"
_SVC_VN_RIGHT = "svc-vn-right"
_VN_MGMT_SUBNET_CIDR = '10.250.1.0/24'
_VN_LEFT_SUBNET_CIDR = '10.250.2.0/24'
_VN_RIGHT_SUBNET_CIDR = '10.250.3.0/24'
_VN_MGMT_SUBNET_CIDR6 = 'fd12:3456:789a:1::/64'
_VN_LEFT_SUBNET_CIDR6 = 'fd12:3456:789a:2::/64'
_VN_RIGHT_SUBNET_CIDR6 = 'fd12:3456:789a:3::/64'
_VN_SNAT_PREFIX_NAME = 'snat-si-left'
_VN_SNAT_SUBNET_CIDR = '100.64.0.0/29'
_CHECK_SVC_VM_HEALTH_INTERVAL = 60
_VM_INSTANCE_TYPE = 'virtual-machine'
_NETNS_INSTANCE_TYPE = 'network-namespace'
_SNAT_SVC_TYPE = 'source-nat'
_LB_SVC_TYPE = 'loadbalancer'
_ACTIVE_LOCAL_PREFERENCE = 200
_STANDBY_LOCAL_PREFERENCE = 100
# Version from which the vrouter agent can manage service instances
_VROUTER_NETNS_SUPPORTED_VERSION = '1.10'
def get_management_if_str():
return _MGMT_STR
def get_left_if_str():
return _LEFT_STR
def get_right_if_str():
return _RIGHT_STR
def get_if_str_list():
if_str_list = []
if_str_list.append(get_management_if_str())
if_str_list.append(get_left_if_str())
if_str_list.append(get_right_if_str())
return if_str_list
def get_management_vn_name():
return _SVC_VN_MGMT
def get_left_vn_name():
return _SVC_VN_LEFT
def get_right_vn_name():
return _SVC_VN_RIGHT
def get_shared_vn_list():
shared_vn_list = []
shared_vn_list.append(get_management_vn_name())
shared_vn_list.append(get_left_vn_name())
shared_vn_list.append(get_right_vn_name())
return shared_vn_list
def get_management_vn_subnet():
return _VN_MGMT_SUBNET_CIDR
def get_left_vn_subnet():
return _VN_LEFT_SUBNET_CIDR
def get_right_vn_subnet():
return _VN_RIGHT_SUBNET_CIDR
def get_management_vn_subnet6():
return _VN_MGMT_SUBNET_CIDR6
def get_left_vn_subnet6():
return _VN_LEFT_SUBNET_CIDR6
def get_right_vn_subnet6():
return _VN_RIGHT_SUBNET_CIDR6
def get_snat_left_vn_prefix():
return _VN_SNAT_PREFIX_NAME
def get_snat_left_subnet():
return _VN_SNAT_SUBNET_CIDR
def get_vm_instance_type():
return _VM_INSTANCE_TYPE
def get_netns_instance_type():
return _NETNS_INSTANCE_TYPE
def get_snat_service_type():
return _SNAT_SVC_TYPE
def get_lb_service_type():
return _LB_SVC_TYPE
def get_vm_health_interval():
return _CHECK_SVC_VM_HEALTH_INTERVAL
def get_active_preference():
return _ACTIVE_LOCAL_PREFERENCE
def get_standby_preference():
return _STANDBY_LOCAL_PREFERENCE
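# Usage sketch: consumers import this module and call the accessors, e.g.
#   svc_info.get_if_str_list()     # -> ['management', 'left', 'right']
#   svc_info.get_left_vn_subnet()  # -> '10.250.2.0/24'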
|
apache-2.0
|
ericholscher/merchant
|
billing/forms/authorize_net_forms.py
|
7
|
1214
|
from django import forms
class AuthorizeNetDPMForm(forms.Form):
x_card_num = forms.CharField(max_length=16, label="Credit Card #")
x_exp_date = forms.CharField(max_length=5, label="Exp Date (mm/yy)")
x_card_code = forms.CharField(max_length=4, label="CVV")
x_first_name = forms.CharField(max_length=50, label="First Name")
x_last_name = forms.CharField(max_length=50, label="Last Name")
x_address = forms.CharField(widget=forms.Textarea, max_length=60, label="Address")
x_city = forms.CharField(max_length=40, label="City")
x_state = forms.CharField(max_length=40, label="State")
x_zip = forms.CharField(max_length=20, label="Zip")
x_country = forms.CharField(max_length=60, label="Country")
x_amount = forms.CharField(label="Amount (in USD)")
x_login = forms.CharField(widget=forms.HiddenInput(), required=False)
x_fp_sequence = forms.CharField(widget=forms.HiddenInput(), required=False)
x_fp_timestamp = forms.CharField(widget=forms.HiddenInput())
x_fp_hash = forms.CharField(widget=forms.HiddenInput())
x_type = forms.CharField(widget=forms.HiddenInput())
x_relay_response = forms.CharField(initial="TRUE", widget=forms.HiddenInput())
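# Usage sketch (values are placeholders): the merchant integration normally
# computes the x_fp_* fingerprint fields before rendering, e.g.
#   form = AuthorizeNetDPMForm(initial={'x_amount': '10.00',
#                                       'x_type': 'AUTH_CAPTURE'})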
|
bsd-3-clause
|
nrackleff/capstone
|
vendor/psy/psysh/test/tools/vis.py
|
710
|
3428
|
"""
vis.py
======
Ctypes based module to access libbsd's strvis & strunvis functions.
The `vis` function is the equivalent of strvis.
The `unvis` function is the equivalent of strunvis.
All functions accept a unicode string as input and return a unicode string.
Constants:
----------
* to select alternate encoding format
`VIS_OCTAL`: use octal \ddd format
    `VIS_CSTYLE`: use \[nrft0..] where appropriate
* to alter set of characters encoded
(default is to encode all non-graphic except space, tab, and newline).
`VIS_SP`: also encode space
`VIS_TAB`: also encode tab
`VIS_NL`: also encode newline
`VIS_WHITE`: same as (VIS_SP | VIS_TAB | VIS_NL)
`VIS_SAFE`: only encode "unsafe" characters
* other
`VIS_NOSLASH`: inhibit printing '\'
`VIS_HTTP1808`: http-style escape % hex hex
`VIS_HTTPSTYLE`: http-style escape % hex hex
`VIS_MIMESTYLE`: mime-style escape = HEX HEX
`VIS_HTTP1866`: http-style &#num; or &string;
`VIS_NOESCAPE`: don't decode `\'
`VIS_GLOB`: encode glob(3) magic characters
:Authors:
- ju1ius (http://github.com/ju1ius)
:Version: 1
:Date: 2014-01-05
"""
from ctypes import CDLL, create_string_buffer, c_char_p, c_int
from ctypes.util import find_library
__all__ = [
'vis', 'unvis',
'VIS_OCTAL', 'VIS_CSTYLE',
'VIS_SP', 'VIS_TAB', 'VIS_NL', 'VIS_WHITE', 'VIS_SAFE',
'VIS_NOSLASH', 'VIS_HTTP1808', 'VIS_HTTPSTYLE', 'VIS_MIMESTYLE',
'VIS_HTTP1866', 'VIS_NOESCAPE', 'VIS_GLOB'
]
#############################################################
# Constants from bsd/vis.h
#############################################################
#to select alternate encoding format
VIS_OCTAL = 0x0001
VIS_CSTYLE = 0x0002
# to alter set of characters encoded
# (default is to encode all non-graphic except space, tab, and newline).
VIS_SP = 0x0004
VIS_TAB = 0x0008
VIS_NL = 0x0010
VIS_WHITE = VIS_SP | VIS_TAB | VIS_NL
VIS_SAFE = 0x0020
# other
VIS_NOSLASH = 0x0040
VIS_HTTP1808 = 0x0080
VIS_HTTPSTYLE = 0x0080
VIS_MIMESTYLE = 0x0100
VIS_HTTP1866 = 0x0200
VIS_NOESCAPE = 0x0400
VIS_GLOB = 0x1000
#############################################################
# Import libbsd/vis functions
#############################################################
_libbsd = CDLL(find_library('bsd'))
_strvis = _libbsd.strvis
_strvis.argtypes = [c_char_p, c_char_p, c_int]
_strvis.restype = c_int
_strunvis = _libbsd.strunvis
_strunvis.argtypes = [c_char_p, c_char_p]
_strunvis.restype = c_int
def vis(src, flags=VIS_WHITE):
"""
Encodes the string `src` into libbsd's vis encoding.
`flags` must be one of the VIS_* constants
C definition:
int strvis(char *dst, char *src, int flags);
"""
src = bytes(src, 'utf-8')
    # strvis may expand every byte to 4 characters, plus a terminating NUL;
    # use a writable ctypes buffer rather than an immutable bytes object
    dst_p = create_string_buffer(4 * len(src) + 1)
src_p = c_char_p(src)
flags = c_int(flags)
bytes_written = _strvis(dst_p, src_p, flags)
if -1 == bytes_written:
raise RuntimeError('vis failed to encode string "{}"'.format(src))
return dst_p.value.decode('utf-8')
def unvis(src):
"""
Decodes a string encoded by vis.
C definition:
int strunvis(char *dst, char *src);
"""
src = bytes(src, 'utf-8')
    # decoding never grows the string; reserve len(src) bytes plus a NUL
    dst_p = create_string_buffer(len(src) + 1)
src_p = c_char_p(src)
bytes_written = _strunvis(dst_p, src_p)
if -1 == bytes_written:
raise RuntimeError('unvis failed to decode string "{}"'.format(src))
return dst_p.value.decode('utf-8')
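# A small round-trip sketch (assumes libbsd is installed, which the CDLL
# call above already requires); the sample string is arbitrary.
if __name__ == '__main__':
    encoded = vis('a\tb\nc')            # tab and newline become visible
    print(encoded)
    assert unvis(encoded) == 'a\tb\nc'  # decoding restores the original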
|
gpl-2.0
|
aimas/TuniErp-8.0
|
addons/portal/mail_thread.py
|
390
|
2004
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-TODAY OpenERP S.A (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import osv
class mail_thread(osv.AbstractModel):
""" Update of mail_mail class, to add the signin URL to notifications. """
_inherit = 'mail.thread'
def _get_inbox_action_xml_id(self, cr, uid, context=None):
""" For a given message, return an action that either
- opens the form view of the related document if model, res_id, and
read access to the document
- opens the Inbox with a default search on the conversation if model,
res_id
- opens the Inbox with context propagated
"""
cur_user = self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context=context)
# if uid is a portal user -> action is different
if any(group.is_portal for group in cur_user.groups_id):
return ('portal', 'action_mail_inbox_feeds_portal')
else:
return super(mail_thread, self)._get_inbox_action_xml_id(cr, uid, context=context)
|
agpl-3.0
|
WhisperingGibbon/Photonic3D
|
docs/conf.py
|
13
|
9208
|
# -*- coding: utf-8 -*-
#
# Photonic3D documentation build configuration file, created by
# sphinx-quickstart on Mon Mar 21 17:22:30 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Photonic3D'
copyright = u'Photonic3D'
author = u'Photonic3D'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.278'
# The full version, including alpha/beta/rc tags.
release = '.278'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Photonic3Ddoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Photonic3D.tex', u'Photonic3D Documentation',
u'WesGilster, jmkao, kloknibor, ergobot', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'photonic3d', u'Photonic3D Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Photonic3D', u'Photonic3D Documentation',
author, 'Photonic3D', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
gpl-3.0
|
AnimationInVR/avango
|
attic/avango-vrpn/python/avango/vrpn/_dtrack_device.py
|
6
|
2118
|
import avango.script
from avango.script import field_has_changed
from _vrpn import *
from _dtrack_device import *
class DTrackTargetIdName(avango.script.Script):
Id = avango.SFLong()
Name = avango.SFString()
def __str__(self):
return str(self.Id.value) + " " + self.Name.value
class DTrackDevice(avango.script.Script):
TrackerInfo = MFTrackerInformation()
Targets = avango.script.MFObject()
TargetIdName = avango.script.MFObject()
def __init__(self):
self.super(DTrackDevice).__init__()
self.id_name_dict = {}
def populate_interested_target_ids(self, targetIDs, clear):
if clear:
self.TargetIdName.value = []
for p in targetIDs:
assert(len(p)==2)
            self.TargetIdName.value.append(DTrackTargetIdName(Id=p[0], Name=p[1]))
def get_name_from_id(self, id):
for p in self.TargetIdName.value:
if id == p.Id.value:
return True, p.Name.value
return False, ""
def get_target_by_name(self, name):
for target in self.Targets.value:
if name == target.TargetName.value:
return True, target
return False, None
@field_has_changed(TrackerInfo)
def tracker_information_changed(self):
        # loop over all tracker information objects (we do not know which one changed)
for trackerInfo in self.TrackerInfo.value:
trackerNum = trackerInfo.Number.value
#check if the target is known
targetFoundInDTrackConfig, targetName = self.get_name_from_id(trackerNum)
if targetFoundInDTrackConfig :
#check if the target is in the subscription list
targetFound, target = self.get_target_by_name(targetName)
if targetFound:
target.Id = trackerInfo.Number.value
target.DTrackMatrix.value = trackerInfo.Matrix.value
target.touch()
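# Usage sketch (made-up target ids/names):
#   device = DTrackDevice()
#   device.populate_interested_target_ids([(1, "head"), (2, "hand")], True)
#   device.get_name_from_id(1)   # -> (True, "head")
# Targets appended to the Targets field are updated whenever TrackerInfo
# changes (see tracker_information_changed above).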
|
lgpl-3.0
|
googlefonts/fontbakery
|
Lib/fontbakery/designers_pb2.py
|
3
|
3980
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: designers.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='designers.proto',
package='',
syntax='proto2',
serialized_options=None,
serialized_pb=_b('\n\x0f\x64\x65signers.proto\"Q\n\x11\x44\x65signerInfoProto\x12\x10\n\x08\x64\x65signer\x18\x01 \x01(\t\x12\x0c\n\x04link\x18\x02 \x01(\t\x12\x1c\n\x06\x61vatar\x18\x03 \x01(\x0b\x32\x0c.AvatarProto\" \n\x0b\x41vatarProto\x12\x11\n\tfile_name\x18\x01 \x01(\t')
)
_DESIGNERINFOPROTO = _descriptor.Descriptor(
name='DesignerInfoProto',
full_name='DesignerInfoProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='designer', full_name='DesignerInfoProto.designer', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='link', full_name='DesignerInfoProto.link', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='avatar', full_name='DesignerInfoProto.avatar', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=19,
serialized_end=100,
)
_AVATARPROTO = _descriptor.Descriptor(
name='AvatarProto',
full_name='AvatarProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='file_name', full_name='AvatarProto.file_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=102,
serialized_end=134,
)
_DESIGNERINFOPROTO.fields_by_name['avatar'].message_type = _AVATARPROTO
DESCRIPTOR.message_types_by_name['DesignerInfoProto'] = _DESIGNERINFOPROTO
DESCRIPTOR.message_types_by_name['AvatarProto'] = _AVATARPROTO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DesignerInfoProto = _reflection.GeneratedProtocolMessageType('DesignerInfoProto', (_message.Message,), dict(
DESCRIPTOR = _DESIGNERINFOPROTO,
__module__ = 'designers_pb2'
# @@protoc_insertion_point(class_scope:DesignerInfoProto)
))
_sym_db.RegisterMessage(DesignerInfoProto)
AvatarProto = _reflection.GeneratedProtocolMessageType('AvatarProto', (_message.Message,), dict(
DESCRIPTOR = _AVATARPROTO,
__module__ = 'designers_pb2'
# @@protoc_insertion_point(class_scope:AvatarProto)
))
_sym_db.RegisterMessage(AvatarProto)
# @@protoc_insertion_point(module_scope)
|
apache-2.0
|
dpmehta02/linkedin-scrapy
|
linkedin/spiders/linkedin_spider.py
|
1
|
3589
|
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from linkedin.items import LinkedinItem
class LinkedinSpider(CrawlSpider):
"""
Define the crawler's start URIs, set its follow rules, parse HTML
and assign values to an item. Processing occurs in ../pipelines.py
"""
name = "linkedin"
allowed_domains = ["linkedin.com"]
# Uncomment the following lines for full spidering
'''
    # lists, not generators: the nested loops below iterate these sequences
    # repeatedly, and a generator is exhausted after a single pass
    centilist_one = [i for i in xrange(1, 100)]
    centilist_two = [i for i in xrange(1, 100)]
    centilist_three = [i for i in xrange(1, 100)]
start_urls = ["http://www.linkedin.com/directory/people-%s-%d-%d-%d"
% (alphanum, num_one, num_two, num_three)
for alphanum in "abcdefghijklmnopqrstuvwxyz"
for num_one in centilist_one
for num_two in centilist_two
for num_three in centilist_three
]
'''
# Temporary start_urls for testing; remove and use the above start_urls in production
start_urls = ["http://www.linkedin.com/directory/people-a-23-23-2"]
# TODO: allow /in/name urls too?
    rules = (Rule(SgmlLinkExtractor(allow=('\/pub\/.+')), callback='parse_item'),)
def parse_item(self, response):
if response:
hxs = HtmlXPathSelector(response)
item = LinkedinItem()
# TODO: is this the best way to check that we're scraping the right page?
item['full_name'] = hxs.select('//*[@id="name"]/span/span/text()').extract()
if not item['full_name']:
# recursively parse list of duplicate profiles
# NOTE: Results page only displays 25 of possibly many more names;
# LinkedIn requests authentication to see the rest. Need to resolve
# TODO: add error checking here to ensure I'm getting the right links
# and links from "next>>" pages
                multi_profile_urls = hxs.select(
                    '//*[@id="result-set"]/li/h2/strong/a/@href').extract()
for profile_url in multi_profile_urls:
yield Request(profile_url, callback=self.parse_item)
else:
                # parenthesised so the seven targets and seven values form a
                # single tuple assignment
                (item['first_name'],
                 item['last_name'],
                 item['full_name'],
                 item['headline_title'],
                 item['locality'],
                 item['industry'],
                 item['current_roles']) = (
                    item['full_name'][0],
                    item['full_name'][1],
                    hxs.select('//*[@id="name"]/span/span/text()').extract(),
                    hxs.select('//*[@id="member-1"]/p/text()').extract(),
                    hxs.select('//*[@id="headline"]/dd[1]/span/text()').extract(),
                    hxs.select('//*[@id="headline"]/dd[2]/text()').extract(),
                    hxs.select('//*[@id="overview"]/dd[1]/ul/li/text()').extract())
# TODO: add metadata fields
if hxs.select('//*[@id="overview"]/dt[2]/text()').extract() == [u' \n Education\n ']:
item['education_institutions'] = hxs.select('//*[@id="overview"]/dd[2]/ul/li/text()').extract()
print item
else:
print "Uh oh, no response."
return
|
mit
|
ldong/vim_youcompleteme
|
third_party/requests/requests/packages/chardet/big5prober.py
|
2931
|
1684
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import Big5DistributionAnalysis
from .mbcssm import Big5SMModel
class Big5Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(Big5SMModel)
self._mDistributionAnalyzer = Big5DistributionAnalysis()
self.reset()
def get_charset_name(self):
return "Big5"
|
gpl-3.0
|
shingonoide/odoo
|
addons/l10n_fr/wizard/fr_report_compute_resultant.py
|
374
|
2312
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
from openerp.osv import fields, osv
class account_cdr_report(osv.osv_memory):
_name = 'account.cdr.report'
_description = 'Account CDR Report'
def _get_defaults(self, cr, uid, context=None):
fiscalyear_id = self.pool.get('account.fiscalyear').find(cr, uid)
return fiscalyear_id
_columns = {
'fiscalyear_id': fields.many2one('account.fiscalyear', 'Fiscal Year', required=True),
}
_defaults = {
'fiscalyear_id': _get_defaults
}
def print_cdr_report(self, cr, uid, ids, context=None):
active_ids = context.get('active_ids', [])
data = {}
data['form'] = {}
data['ids'] = active_ids
data['form']['fiscalyear_id'] = self.browse(cr, uid, ids)[0].fiscalyear_id.id
return self.pool['report'].get_action(
cr, uid, ids, 'l10n_fr.report_l10nfrresultat', data=data, context=context
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
DavidWittman/ansible-modules-extras
|
net_infrastructure/a10_virtual_server.py
|
40
|
11515
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to manage A10 Networks slb virtual server objects
(c) 2014, Mischa Peters <mpeters@a10networks.com>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
DOCUMENTATION = '''
---
module: a10_virtual_server
version_added: 1.8
short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices
description:
- Manage slb virtual server objects on A10 Networks devices via aXAPI
author: Mischa Peters
notes:
- Requires A10 Networks aXAPI 2.1
requirements:
- urllib2
- re
options:
host:
description:
- hostname or ip of your A10 Networks device
required: true
default: null
aliases: []
choices: []
username:
description:
- admin account of your A10 Networks device
required: true
default: null
aliases: ['user', 'admin']
choices: []
password:
description:
- admin password of your A10 Networks device
required: true
default: null
aliases: ['pass', 'pwd']
choices: []
virtual_server:
description:
- slb virtual server name
required: true
default: null
aliases: ['vip', 'virtual']
choices: []
virtual_server_ip:
description:
- slb virtual server ip address
required: false
default: null
aliases: ['ip', 'address']
choices: []
virtual_server_status:
description:
- slb virtual server status
required: false
    default: enabled
aliases: ['status']
choices: ['enabled', 'disabled']
virtual_server_ports:
description:
- A list of ports to create for the virtual server. Each list item should be a
dictionary which specifies the C(port:) and C(type:), but can also optionally
specify the C(service_group:) as well as the C(status:). See the examples
below for details. This parameter is required when C(state) is C(present).
required: false
write_config:
description:
- If C(yes), any changes will cause a write of the running configuration
to non-volatile memory. This will save I(all) configuration changes,
including those that may have been made manually or through other modules,
so care should be taken when specifying C(yes).
required: false
default: "no"
choices: ["yes", "no"]
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled devices using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
'''
EXAMPLES = '''
# Create a new virtual server
- a10_virtual_server:
host: a10.mydomain.com
username: myadmin
password: mypassword
virtual_server: vserver1
virtual_server_ip: 1.1.1.1
virtual_server_ports:
- port: 80
protocol: TCP
service_group: sg-80-tcp
- port: 443
protocol: HTTPS
service_group: sg-443-https
- port: 8080
protocol: http
status: disabled
'''
VALID_PORT_FIELDS = ['port', 'protocol', 'service_group', 'status']
def validate_ports(module, ports):
for item in ports:
for key in item:
if key not in VALID_PORT_FIELDS:
module.fail_json(msg="invalid port field (%s), must be one of: %s" % (key, ','.join(VALID_PORT_FIELDS)))
# validate the port number is present and an integer
if 'port' in item:
try:
item['port'] = int(item['port'])
            except (TypeError, ValueError):
module.fail_json(msg="port definitions must be integers")
else:
module.fail_json(msg="port definitions must define the port field")
# validate the port protocol is present, and convert it to
# the internal API integer value (and validate it)
if 'protocol' in item:
protocol = axapi_get_vport_protocol(item['protocol'])
if not protocol:
module.fail_json(msg="invalid port protocol, must be one of: %s" % ','.join(AXAPI_VPORT_PROTOCOLS))
else:
item['protocol'] = protocol
else:
module.fail_json(msg="port definitions must define the port protocol (%s)" % ','.join(AXAPI_VPORT_PROTOCOLS))
# convert the status to the internal API integer value
if 'status' in item:
item['status'] = axapi_enabled_disabled(item['status'])
else:
item['status'] = 1
# ensure the service_group field is at least present
if 'service_group' not in item:
item['service_group'] = ''
def main():
argument_spec = a10_argument_spec()
argument_spec.update(url_argument_spec())
argument_spec.update(
dict(
state=dict(type='str', default='present', choices=['present', 'absent']),
virtual_server=dict(type='str', aliases=['vip', 'virtual'], required=True),
virtual_server_ip=dict(type='str', aliases=['ip', 'address'], required=True),
virtual_server_status=dict(type='str', default='enabled', aliases=['status'], choices=['enabled', 'disabled']),
virtual_server_ports=dict(type='list', required=True),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=False
)
host = module.params['host']
username = module.params['username']
password = module.params['password']
state = module.params['state']
write_config = module.params['write_config']
slb_virtual = module.params['virtual_server']
slb_virtual_ip = module.params['virtual_server_ip']
slb_virtual_status = module.params['virtual_server_status']
slb_virtual_ports = module.params['virtual_server_ports']
if slb_virtual is None:
module.fail_json(msg='virtual_server is required')
validate_ports(module, slb_virtual_ports)
axapi_base_url = 'https://%s/services/rest/V2.1/?format=json' % host
session_url = axapi_authenticate(module, axapi_base_url, username, password)
slb_virtual_data = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual}))
slb_virtual_exists = not axapi_failure(slb_virtual_data)
changed = False
if state == 'present':
json_post = {
'virtual_server': {
'name': slb_virtual,
'address': slb_virtual_ip,
'status': axapi_enabled_disabled(slb_virtual_status),
'vport_list': slb_virtual_ports,
}
}
        # before creating/updating we need to validate that any
        # service groups defined in the ports list exist, since
        # the API will still create port definitions for them
        # while indicating a failure occurred
checked_service_groups = []
for port in slb_virtual_ports:
if 'service_group' in port and port['service_group'] not in checked_service_groups:
# skip blank service group entries
if port['service_group'] == '':
continue
result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': port['service_group']}))
if axapi_failure(result):
module.fail_json(msg="the service group %s specified in the ports list does not exist" % port['service_group'])
checked_service_groups.append(port['service_group'])
if not slb_virtual_exists:
result = axapi_call(module, session_url + '&method=slb.virtual_server.create', json.dumps(json_post))
if axapi_failure(result):
module.fail_json(msg="failed to create the virtual server: %s" % result['response']['err']['msg'])
changed = True
else:
def needs_update(src_ports, dst_ports):
'''
Checks to determine if the port definitions of the src_ports
array are in or different from those in dst_ports. If there is
a difference, this function returns true, otherwise false.
'''
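                # Example: src_ports=[{'port': 80, 'protocol': 2, 'status': 1,
                # 'service_group': 'sg-80'}] against an identical dst_ports
                # list returns False; if any of those fields differ, or port
                # 80 is missing from dst_ports, it returns True.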
for src_port in src_ports:
found = False
different = False
for dst_port in dst_ports:
if src_port['port'] == dst_port['port']:
found = True
for valid_field in VALID_PORT_FIELDS:
if src_port[valid_field] != dst_port[valid_field]:
different = True
break
if found or different:
break
if not found or different:
return True
# every port from the src exists in the dst, and none of them were different
return False
defined_ports = slb_virtual_data.get('virtual_server', {}).get('vport_list', [])
# we check for a needed update both ways, in case ports
# are missing from either the ones specified by the user
# or from those on the device
if needs_update(defined_ports, slb_virtual_ports) or needs_update(slb_virtual_ports, defined_ports):
result = axapi_call(module, session_url + '&method=slb.virtual_server.update', json.dumps(json_post))
if axapi_failure(result):
module.fail_json(msg="failed to create the virtual server: %s" % result['response']['err']['msg'])
changed = True
# if we changed things, get the full info regarding
# the service group for the return data below
if changed:
result = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual}))
else:
result = slb_virtual_data
elif state == 'absent':
if slb_virtual_exists:
result = axapi_call(module, session_url + '&method=slb.virtual_server.delete', json.dumps({'name': slb_virtual}))
changed = True
else:
result = dict(msg="the virtual server was not present")
# if the config has changed, save the config unless otherwise requested
if changed and write_config:
write_result = axapi_call(module, session_url + '&method=system.action.write_memory')
if axapi_failure(write_result):
module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])
# log out of the session nicely and exit
axapi_call(module, session_url + '&method=session.close')
module.exit_json(changed=changed, content=result)
# standard ansible module imports
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
from ansible.module_utils.a10 import *
main()
|
gpl-3.0
|
catapult-project/catapult-csm
|
third_party/google-endpoints/requests/packages/urllib3/contrib/ntlmpool.py
|
312
|
4478
|
"""
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
from __future__ import absolute_import
from logging import getLogger
from ntlm import ntlm
from .. import HTTPSConnectionPool
from ..packages.six.moves.http_client import HTTPSConnection
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
"""
Implements an NTLM authentication version of an urllib3 connection pool
"""
scheme = 'https'
def __init__(self, user, pw, authurl, *args, **kwargs):
"""
authurl is a random URL on the server that is protected by NTLM.
user is the Windows user, probably in the DOMAIN\\username format.
pw is the password for the user.
"""
super(NTLMConnectionPool, self).__init__(*args, **kwargs)
self.authurl = authurl
self.rawuser = user
user_parts = user.split('\\', 1)
self.domain = user_parts[0].upper()
self.user = user_parts[1]
self.pw = pw
def _new_conn(self):
# Performs the NTLM handshake that secures the connection. The socket
# must be kept open while requests are performed.
self.num_connections += 1
log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s',
self.num_connections, self.host, self.authurl)
headers = {}
headers['Connection'] = 'Keep-Alive'
req_header = 'Authorization'
resp_header = 'www-authenticate'
conn = HTTPSConnection(host=self.host, port=self.port)
# Send negotiation message
headers[req_header] = (
'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
log.debug('Request headers: %s', headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
reshdr = dict(res.getheaders())
log.debug('Response status: %s %s', res.status, res.reason)
log.debug('Response headers: %s', reshdr)
log.debug('Response data: %s [...]', res.read(100))
# Remove the reference to the socket, so that it can not be closed by
# the response object (we want to keep the socket open)
res.fp = None
# Server should respond with a challenge message
auth_header_values = reshdr[resp_header].split(', ')
auth_header_value = None
for s in auth_header_values:
if s[:5] == 'NTLM ':
auth_header_value = s[5:]
if auth_header_value is None:
raise Exception('Unexpected %s response header: %s' %
(resp_header, reshdr[resp_header]))
# Send authentication message
ServerChallenge, NegotiateFlags = \
ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
self.user,
self.domain,
self.pw,
NegotiateFlags)
headers[req_header] = 'NTLM %s' % auth_msg
log.debug('Request headers: %s', headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
log.debug('Response status: %s %s', res.status, res.reason)
log.debug('Response headers: %s', dict(res.getheaders()))
log.debug('Response data: %s [...]', res.read()[:100])
if res.status != 200:
if res.status == 401:
raise Exception('Server rejected request: wrong '
'username or password')
raise Exception('Wrong server response: %s %s' %
(res.status, res.reason))
res.fp = None
log.debug('Connection established')
return conn
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True):
if headers is None:
headers = {}
headers['Connection'] = 'Keep-Alive'
return super(NTLMConnectionPool, self).urlopen(method, url, body,
headers, retries,
redirect,
assert_same_host)
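# Illustrative usage sketch, not part of the original module; the host,
# credentials and protected URL below are hypothetical:
#
#   pool = NTLMConnectionPool('EXAMPLE\\jdoe', 'secret', '/protected/',
#                             host='intranet.example.com', port=443)
#   response = pool.urlopen('GET', '/protected/report')
#   print(response.status)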
|
bsd-3-clause
|
RPI-OPENEDX/edx-platform
|
common/djangoapps/course_action_state/migrations/0001_initial.py
|
50
|
1808
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
import xmodule_django.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='CourseRerunState',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_time', models.DateTimeField(auto_now_add=True)),
('updated_time', models.DateTimeField(auto_now=True)),
('course_key', xmodule_django.models.CourseKeyField(max_length=255, db_index=True)),
('action', models.CharField(max_length=100, db_index=True)),
('state', models.CharField(max_length=50)),
('should_display', models.BooleanField(default=False)),
('message', models.CharField(max_length=1000)),
('source_course_key', xmodule_django.models.CourseKeyField(max_length=255, db_index=True)),
('display_name', models.CharField(default=b'', max_length=255, blank=True)),
('created_user', models.ForeignKey(related_name='created_by_user+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True)),
('updated_user', models.ForeignKey(related_name='updated_by_user+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True)),
],
),
migrations.AlterUniqueTogether(
name='coursererunstate',
unique_together=set([('course_key', 'action')]),
),
]
|
agpl-3.0
|
blaze/dask
|
dask/dataframe/hyperloglog.py
|
3
|
2433
|
"""Implementation of HyperLogLog
This implements the HyperLogLog algorithm for cardinality estimation, found
in
Philippe Flajolet, Éric Fusy, Olivier Gandouet and Frédéric Meunier.
"HyperLogLog: the analysis of a near-optimal cardinality estimation
algorithm". 2007 Conference on Analysis of Algorithms. Nice, France
(2007)
"""
import numpy as np
import pandas as pd
from pandas.util import hash_pandas_object
def compute_first_bit(a):
"Compute the position of the first nonzero bit for each int in an array."
# TODO: consider making this less memory-hungry
bits = np.bitwise_and.outer(a, 1 << np.arange(32))
bits = bits.cumsum(axis=1).astype(bool)
return 33 - bits.sum(axis=1)
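# Illustrative values, not part of the original module:
#   compute_first_bit(np.array([1, 2, 8, 0])) -> [1, 2, 4, 33]
# i.e. the 1-indexed position of the lowest set bit, with 33 for zero.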
def compute_hll_array(obj, b):
# b is the number of bits
if not 8 <= b <= 16:
raise ValueError("b should be between 8 and 16")
num_bits_discarded = 32 - b
m = 1 << b
# Get an array of the hashes
hashes = hash_pandas_object(obj, index=False)
if isinstance(hashes, pd.Series):
hashes = hashes._values
hashes = hashes.astype(np.uint32)
# Of the first b bits, which is the first nonzero?
j = hashes >> num_bits_discarded
first_bit = compute_first_bit(hashes)
# Pandas can do the max aggregation
df = pd.DataFrame({"j": j, "first_bit": first_bit})
series = df.groupby("j").max()["first_bit"]
# Return a dense array so we can concat them and get a result
# that is easy to deal with
return series.reindex(np.arange(m), fill_value=0).values.astype(np.uint8)
def reduce_state(Ms, b):
m = 1 << b
# We concatenated all of the states, now we need to get the max
# value for each j in both
Ms = Ms.reshape((len(Ms) // m), m)
return Ms.max(axis=0)
def estimate_count(Ms, b):
m = 1 << b
# Combine one last time
M = reduce_state(Ms, b)
# Estimate cardinality, no adjustments
alpha = 0.7213 / (1 + 1.079 / m)
E = alpha * m / (2.0 ** -(M.astype("f8"))).sum() * m
# ^^^^ starts as unsigned, need a signed type for
# negation operator to do something useful
# Apply adjustments for small / big cardinalities, if applicable
if E < 2.5 * m:
V = (M == 0).sum()
if V:
return m * np.log(m / V)
if E > 2 ** 32 / 30.0:
return -(2 ** 32) * np.log1p(-E / 2 ** 32)
return E
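# A minimal usage sketch, not part of the original module: estimate the
# cardinality of a hypothetical Series with b=14 register bits.
if __name__ == "__main__":
    s = pd.Series(np.random.randint(0, 50000, size=1000000))
    M = compute_hll_array(s, 14)
    # A single state array reduces to itself and yields the estimate.
    print(estimate_count(M, 14))  # roughly 50000, within a few percent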
|
bsd-3-clause
|
phpcanada/phpcanada
|
src/AppBundle/Resources/private/node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/tools/pretty_vcproj.py
|
2637
|
9586
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Make the format of a vcproj really pretty.
This script normalizes and sorts an xml. It also fetches all the properties
inside linked vsprops and includes them explicitly in the vcproj.
It outputs the resulting xml to stdout.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import sys
from xml.dom.minidom import parse
from xml.dom.minidom import Node
REPLACEMENTS = dict()
ARGUMENTS = None
class CmpTuple(object):
"""Compare function between 2 tuple."""
def __call__(self, x, y):
return cmp(x[0], y[0])
class CmpNode(object):
"""Compare function between 2 xml nodes."""
def __call__(self, x, y):
def get_string(node):
node_string = "node"
node_string += node.nodeName
if node.nodeValue:
node_string += node.nodeValue
if node.attributes:
# We first sort by name, if present.
node_string += node.getAttribute("Name")
all_nodes = []
for (name, value) in node.attributes.items():
all_nodes.append((name, value))
all_nodes.sort(CmpTuple())
for (name, value) in all_nodes:
node_string += name
node_string += value
return node_string
return cmp(get_string(x), get_string(y))
def PrettyPrintNode(node, indent=0):
if node.nodeType == Node.TEXT_NODE:
if node.data.strip():
print '%s%s' % (' '*indent, node.data.strip())
return
if node.childNodes:
node.normalize()
# Get the number of attributes
attr_count = 0
if node.attributes:
attr_count = node.attributes.length
# Print the main tag
if attr_count == 0:
print '%s<%s>' % (' '*indent, node.nodeName)
else:
print '%s<%s' % (' '*indent, node.nodeName)
all_attributes = []
for (name, value) in node.attributes.items():
all_attributes.append((name, value))
all_attributes.sort(CmpTuple())
for (name, value) in all_attributes:
print '%s %s="%s"' % (' '*indent, name, value)
print '%s>' % (' '*indent)
if node.nodeValue:
print '%s %s' % (' '*indent, node.nodeValue)
for sub_node in node.childNodes:
PrettyPrintNode(sub_node, indent=indent+2)
print '%s</%s>' % (' '*indent, node.nodeName)
def FlattenFilter(node):
"""Returns a list of all the node and sub nodes."""
node_list = []
if (node.attributes and
node.getAttribute('Name') == '_excluded_files'):
# We don't add the "_excluded_files" filter.
return []
for current in node.childNodes:
if current.nodeName == 'Filter':
node_list.extend(FlattenFilter(current))
else:
node_list.append(current)
return node_list
def FixFilenames(filenames, current_directory):
new_list = []
for filename in filenames:
if filename:
for key in REPLACEMENTS:
filename = filename.replace(key, REPLACEMENTS[key])
os.chdir(current_directory)
filename = filename.strip('"\' ')
if filename.startswith('$'):
new_list.append(filename)
else:
new_list.append(os.path.abspath(filename))
return new_list
def AbsoluteNode(node):
"""Makes all the properties we know about in this node absolute."""
if node.attributes:
for (name, value) in node.attributes.items():
if name in ['InheritedPropertySheets', 'RelativePath',
'AdditionalIncludeDirectories',
'IntermediateDirectory', 'OutputDirectory',
'AdditionalLibraryDirectories']:
# We want to fix up these paths
path_list = value.split(';')
new_list = FixFilenames(path_list, os.path.dirname(ARGUMENTS[1]))
node.setAttribute(name, ';'.join(new_list))
if not value:
node.removeAttribute(name)
def CleanupVcproj(node):
"""For each sub node, we call recursively this function."""
for sub_node in node.childNodes:
AbsoluteNode(sub_node)
CleanupVcproj(sub_node)
  # Normalize the node, and remove all extraneous whitespace.
for sub_node in node.childNodes:
if sub_node.nodeType == Node.TEXT_NODE:
sub_node.data = sub_node.data.replace("\r", "")
sub_node.data = sub_node.data.replace("\n", "")
sub_node.data = sub_node.data.rstrip()
  # Sort all the semicolon-separated attributes, and also
  # remove the duplicates.
if node.attributes:
for (name, value) in node.attributes.items():
sorted_list = sorted(value.split(';'))
unique_list = []
for i in sorted_list:
if not unique_list.count(i):
unique_list.append(i)
node.setAttribute(name, ';'.join(unique_list))
if not value:
node.removeAttribute(name)
if node.childNodes:
node.normalize()
# For each node, take a copy, and remove it from the list.
node_array = []
while node.childNodes and node.childNodes[0]:
# Take a copy of the node and remove it from the list.
current = node.childNodes[0]
node.removeChild(current)
# If the child is a filter, we want to append all its children
# to this same list.
if current.nodeName == 'Filter':
node_array.extend(FlattenFilter(current))
else:
node_array.append(current)
# Sort the list.
node_array.sort(CmpNode())
# Insert the nodes in the correct order.
for new_node in node_array:
# But don't append empty tool node.
if new_node.nodeName == 'Tool':
if new_node.attributes and new_node.attributes.length == 1:
# This one was empty.
continue
if new_node.nodeName == 'UserMacro':
continue
node.appendChild(new_node)
def GetConfiguationNodes(vcproj):
#TODO(nsylvain): Find a better way to navigate the xml.
nodes = []
for node in vcproj.childNodes:
if node.nodeName == "Configurations":
for sub_node in node.childNodes:
if sub_node.nodeName == "Configuration":
nodes.append(sub_node)
return nodes
def GetChildrenVsprops(filename):
dom = parse(filename)
if dom.documentElement.attributes:
vsprops = dom.documentElement.getAttribute('InheritedPropertySheets')
return FixFilenames(vsprops.split(';'), os.path.dirname(filename))
return []
def SeekToNode(node1, child2):
# A text node does not have properties.
if child2.nodeType == Node.TEXT_NODE:
return None
# Get the name of the current node.
current_name = child2.getAttribute("Name")
if not current_name:
# There is no name. We don't know how to merge.
return None
# Look through all the nodes to find a match.
for sub_node in node1.childNodes:
if sub_node.nodeName == child2.nodeName:
name = sub_node.getAttribute("Name")
if name == current_name:
return sub_node
# No match. We give up.
return None
def MergeAttributes(node1, node2):
# No attributes to merge?
if not node2.attributes:
return
for (name, value2) in node2.attributes.items():
# Don't merge the 'Name' attribute.
if name == 'Name':
continue
value1 = node1.getAttribute(name)
if value1:
      # The attribute exists in the main node. If it's equal, we leave it
      # untouched, otherwise we concatenate it.
if value1 != value2:
node1.setAttribute(name, ';'.join([value1, value2]))
else:
      # The attribute does not exist in the main node. We append this one.
node1.setAttribute(name, value2)
    # If the attribute was a property sheet attribute, we remove it, since
    # it is useless.
if name == 'InheritedPropertySheets':
node1.removeAttribute(name)
def MergeProperties(node1, node2):
MergeAttributes(node1, node2)
for child2 in node2.childNodes:
child1 = SeekToNode(node1, child2)
if child1:
MergeProperties(child1, child2)
else:
node1.appendChild(child2.cloneNode(True))
def main(argv):
"""Main function of this vcproj prettifier."""
global ARGUMENTS
ARGUMENTS = argv
  # Check that we have at least 1 parameter (the vcproj path).
if len(argv) < 2:
print ('Usage: %s "c:\\path\\to\\vcproj.vcproj" [key1=value1] '
'[key2=value2]' % argv[0])
return 1
# Parse the keys
for i in range(2, len(argv)):
(key, value) = argv[i].split('=')
REPLACEMENTS[key] = value
# Open the vcproj and parse the xml.
dom = parse(argv[1])
# First thing we need to do is find the Configuration Node and merge them
# with the vsprops they include.
for configuration_node in GetConfiguationNodes(dom.documentElement):
# Get the property sheets associated with this configuration.
vsprops = configuration_node.getAttribute('InheritedPropertySheets')
# Fix the filenames to be absolute.
vsprops_list = FixFilenames(vsprops.strip().split(';'),
os.path.dirname(argv[1]))
# Extend the list of vsprops with all vsprops contained in the current
# vsprops.
for current_vsprops in vsprops_list:
vsprops_list.extend(GetChildrenVsprops(current_vsprops))
# Now that we have all the vsprops, we need to merge them.
for current_vsprops in vsprops_list:
MergeProperties(configuration_node,
parse(current_vsprops).documentElement)
# Now that everything is merged, we need to cleanup the xml.
CleanupVcproj(dom.documentElement)
  # Finally, we use the pretty xml function to print the vcproj back to the
  # user.
#print dom.toprettyxml(newl="\n")
PrettyPrintNode(dom.documentElement)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
mit
|
dls-controls/pymalcolm
|
tests/test_modules/test_ADCore/test_exposuredeadtimepart.py
|
1
|
2080
|
import unittest
from mock import MagicMock, call
from scanpointgenerator import CompoundGenerator, LineGenerator
from malcolm.core import PartRegistrar
from malcolm.modules.scanning.parts import ExposureDeadtimePart
def make_generator(duration):
line1 = LineGenerator("y", "mm", 0, 2, 3)
line2 = LineGenerator("x", "mm", 0, 2, 2)
compound = CompoundGenerator([line1, line2], [], [], duration=duration)
return compound
class TestExposureDeadtimePart(unittest.TestCase):
def setUp(self):
self.o = ExposureDeadtimePart(name="n", min_exposure=0.01)
def test_init(self):
registrar = MagicMock(spec=PartRegistrar)
self.o.setup(registrar)
assert registrar.add_attribute_model.mock_calls == [
call("readoutTime", self.o.readout_time, self.o.readout_time.set_value),
call(
"frequencyAccuracy",
self.o.frequency_accuracy,
self.o.frequency_accuracy.set_value,
),
call("exposure", self.o.exposure),
]
assert self.o.exposure.value == 0.0
def test_validate_exposure_too_fast(self):
tweak = self.o.on_validate(
generator=make_generator(duration=0.1), exposure=0.001
)
assert tweak.parameter == "exposure"
assert tweak.value == 0.01
def test_validate_no_duration(self):
with self.assertRaises(AssertionError) as cm:
self.o.on_validate(generator=make_generator(duration=0.0))
assert (
str(cm.exception)
== "Duration 0.0 for generator must be >0 to signify constant exposure"
)
def test_good_validate(self):
self.o.on_validate(generator=make_generator(duration=0.1))
def test_configure(self):
self.o.on_configure(exposure=0.099995)
assert self.o.exposure.value == 0.099995
def test_report_status(self):
info = self.o.on_report_status()
assert info.readout_time == 0.0
assert info.frequency_accuracy == 50
assert info.min_exposure == 0.01
|
apache-2.0
|
sonuyos/couchpotato
|
libs/xmpp/features.py
|
199
|
8578
|
## features.py
##
## Copyright (C) 2003-2004 Alexey "Snake" Nezhdanov
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
# $Id: features.py,v 1.25 2009/04/07 07:11:48 snakeru Exp $
"""
This module contains assorted functionality that is not worth splitting into separate modules.
Here is:
DISCO client and agents-to-DISCO and browse-to-DISCO emulators.
IBR and password manager.
jabber:iq:privacy methods
All these methods take a 'disp' first argument that should be an already connected
(and in most cases already authorised) dispatcher instance.
"""
from protocol import *
REGISTER_DATA_RECEIVED='REGISTER DATA RECEIVED'
### DISCO ### http://jabber.org/protocol/disco ### JEP-0030 ####################
### Browse ### jabber:iq:browse ### JEP-0030 ###################################
### Agents ### jabber:iq:agents ### JEP-0030 ###################################
def _discover(disp,ns,jid,node=None,fb2b=0,fb2a=1):
""" Try to obtain info from the remote object.
        If the remote object doesn't support disco, fall back to browse (if fb2b is true),
        and if it doesn't support browse (or fb2b is not true) fall back to the agents protocol
        (if fb2a is true). Returns obtained info. Used internally. """
iq=Iq(to=jid,typ='get',queryNS=ns)
if node: iq.setQuerynode(node)
rep=disp.SendAndWaitForResponse(iq)
if fb2b and not isResultNode(rep): rep=disp.SendAndWaitForResponse(Iq(to=jid,typ='get',queryNS=NS_BROWSE)) # Fallback to browse
if fb2a and not isResultNode(rep): rep=disp.SendAndWaitForResponse(Iq(to=jid,typ='get',queryNS=NS_AGENTS)) # Fallback to agents
if isResultNode(rep): return [n for n in rep.getQueryPayload() if isinstance(n, Node)]
return []
def discoverItems(disp,jid,node=None):
""" Query remote object about any items that it contains. Return items list. """
""" According to JEP-0030:
query MAY have node attribute
item: MUST HAVE jid attribute and MAY HAVE name, node, action attributes.
action attribute of item can be either of remove or update value."""
ret=[]
for i in _discover(disp,NS_DISCO_ITEMS,jid,node):
if i.getName()=='agent' and i.getTag('name'): i.setAttr('name',i.getTagData('name'))
ret.append(i.attrs)
return ret
def discoverInfo(disp,jid,node=None):
""" Query remote object about info that it publishes. Returns identities and features lists."""
""" According to JEP-0030:
query MAY have node attribute
identity: MUST HAVE category and name attributes and MAY HAVE type attribute.
feature: MUST HAVE var attribute"""
identities , features = [] , []
for i in _discover(disp,NS_DISCO_INFO,jid,node):
if i.getName()=='identity': identities.append(i.attrs)
elif i.getName()=='feature': features.append(i.getAttr('var'))
elif i.getName()=='agent':
if i.getTag('name'): i.setAttr('name',i.getTagData('name'))
if i.getTag('description'): i.setAttr('name',i.getTagData('description'))
identities.append(i.attrs)
if i.getTag('groupchat'): features.append(NS_GROUPCHAT)
if i.getTag('register'): features.append(NS_REGISTER)
if i.getTag('search'): features.append(NS_SEARCH)
return identities , features
### Registration ### jabber:iq:register ### JEP-0077 ###########################
def getRegInfo(disp,host,info={},sync=True):
""" Gets registration form from remote host.
You can pre-fill the info dictionary.
F.e. if you are requesting info on registering user joey than specify
info as {'username':'joey'}. See JEP-0077 for details.
'disp' must be connected dispatcher instance."""
iq=Iq('get',NS_REGISTER,to=host)
for i in info.keys(): iq.setTagData(i,info[i])
if sync:
resp=disp.SendAndWaitForResponse(iq)
_ReceivedRegInfo(disp.Dispatcher,resp, host)
return resp
else: disp.SendAndCallForResponse(iq,_ReceivedRegInfo, {'agent': host})
def _ReceivedRegInfo(con, resp, agent):
iq=Iq('get',NS_REGISTER,to=agent)
if not isResultNode(resp): return
df=resp.getTag('query',namespace=NS_REGISTER).getTag('x',namespace=NS_DATA)
if df:
con.Event(NS_REGISTER,REGISTER_DATA_RECEIVED,(agent, DataForm(node=df)))
return
df=DataForm(typ='form')
for i in resp.getQueryPayload():
if type(i)<>type(iq): pass
elif i.getName()=='instructions': df.addInstructions(i.getData())
else: df.setField(i.getName()).setValue(i.getData())
con.Event(NS_REGISTER,REGISTER_DATA_RECEIVED,(agent, df))
def register(disp,host,info):
""" Perform registration on remote server with provided info.
disp must be connected dispatcher instance.
Returns true or false depending on registration result.
If registration fails you can get additional info from the dispatcher's owner
attributes lastErrNode, lastErr and lastErrCode.
"""
iq=Iq('set',NS_REGISTER,to=host)
if type(info)<>type({}): info=info.asDict()
for i in info.keys(): iq.setTag('query').setTagData(i,info[i])
resp=disp.SendAndWaitForResponse(iq)
if isResultNode(resp): return 1
def unregister(disp,host):
""" Unregisters with host (permanently removes account).
disp must be connected and authorized dispatcher instance.
Returns true on success."""
resp=disp.SendAndWaitForResponse(Iq('set',NS_REGISTER,to=host,payload=[Node('remove')]))
if isResultNode(resp): return 1
def changePasswordTo(disp,newpassword,host=None):
""" Changes password on specified or current (if not specified) server.
disp must be connected and authorized dispatcher instance.
Returns true on success."""
if not host: host=disp._owner.Server
    resp=disp.SendAndWaitForResponse(Iq('set',NS_REGISTER,to=host,payload=[Node('username',payload=[disp._owner.User]),Node('password',payload=[newpassword])]))
if isResultNode(resp): return 1
### Privacy ### jabber:iq:privacy ### draft-ietf-xmpp-im-19 ####################
#type=[jid|group|subscription]
#action=[allow|deny]
def getPrivacyLists(disp):
""" Requests privacy lists from connected server.
Returns dictionary of existing lists on success."""
try:
dict={'lists':[]}
resp=disp.SendAndWaitForResponse(Iq('get',NS_PRIVACY))
if not isResultNode(resp): return
for list in resp.getQueryPayload():
if list.getName()=='list': dict['lists'].append(list.getAttr('name'))
else: dict[list.getName()]=list.getAttr('name')
return dict
except: pass
def getPrivacyList(disp,listname):
""" Requests specific privacy list listname. Returns list of XML nodes (rules)
        taken from the server response."""
try:
resp=disp.SendAndWaitForResponse(Iq('get',NS_PRIVACY,payload=[Node('list',{'name':listname})]))
if isResultNode(resp): return resp.getQueryPayload()[0]
except: pass
def setActivePrivacyList(disp,listname=None,typ='active'):
""" Switches privacy list 'listname' to specified type.
By default the type is 'active'. Returns true on success."""
if listname: attrs={'name':listname}
else: attrs={}
resp=disp.SendAndWaitForResponse(Iq('set',NS_PRIVACY,payload=[Node(typ,attrs)]))
if isResultNode(resp): return 1
def setDefaultPrivacyList(disp,listname=None):
""" Sets the default privacy list as 'listname'. Returns true on success."""
return setActivePrivacyList(disp,listname,'default')
def setPrivacyList(disp,list):
""" Set the ruleset. 'list' should be the simpleXML node formatted
according to RFC 3921 (XMPP-IM) (I.e. Node('list',{'name':listname},payload=[...]) )
Returns true on success."""
resp=disp.SendAndWaitForResponse(Iq('set',NS_PRIVACY,payload=[list]))
if isResultNode(resp): return 1
def delPrivacyList(disp,listname):
""" Deletes privacy list 'listname'. Returns true on success."""
resp=disp.SendAndWaitForResponse(Iq('set',NS_PRIVACY,payload=[Node('list',{'name':listname})]))
if isResultNode(resp): return 1
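# Illustrative privacy-list flow, not part of the original module; assumes
# `disp` is an already connected and authorised dispatcher instance and the
# list name 'invisible' is hypothetical:
#   lists = getPrivacyLists(disp)            # e.g. {'lists': ['invisible']}
#   rules = getPrivacyList(disp, 'invisible')
#   setActivePrivacyList(disp, 'invisible')  # returns 1 on success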
|
gpl-3.0
|
coder-james/mxnet
|
python/mxnet/contrib/tensorboard.py
|
14
|
2390
|
# coding: utf-8
"""TensorBoard functions that can be used to log various status during epoch."""
from __future__ import absolute_import
import logging
class LogMetricsCallback(object):
"""Log metrics periodically in TensorBoard.
    This callback works almost the same as `callback.Speedometer`, but writes a TensorBoard event file
    for visualization. For more usage, please refer to https://github.com/dmlc/tensorboard
Parameters
----------
logging_dir : str
TensorBoard event file directory.
After that, use `tensorboard --logdir=path/to/logs` to launch TensorBoard visualization.
prefix : str
Prefix for a metric name of `scalar` value.
        You might want to use this param to leverage the TensorBoard plot feature,
        where TensorBoard plots different curves in one graph when they have the same `name`.
        The following example shows the usage (how to compare a train and eval metric in the same graph).
Examples
--------
>>> # log train and eval metrics under different directories.
>>> training_log = 'logs/train'
>>> evaluation_log = 'logs/eval'
>>> # in this case, each training and evaluation metric pairs has same name,
>>> # you can add a prefix to make it separate.
>>> batch_end_callbacks = [mx.tensorboard.LogMetricsCallback(training_log)]
>>> eval_end_callbacks = [mx.tensorboard.LogMetricsCallback(evaluation_log)]
>>> # run
>>> model.fit(train,
>>> ...
>>> batch_end_callback = batch_end_callbacks,
>>> eval_end_callback = eval_end_callbacks)
>>> # Then use `tensorboard --logdir=logs/` to launch TensorBoard visualization.
"""
def __init__(self, logging_dir, prefix=None):
self.prefix = prefix
try:
from tensorboard import SummaryWriter
self.summary_writer = SummaryWriter(logging_dir)
except ImportError:
logging.error('You can install tensorboard via `pip install tensorboard`.')
def __call__(self, param):
"""Callback to log training speed and metrics in TensorBoard."""
if param.eval_metric is None:
return
name_value = param.eval_metric.get_name_value()
for name, value in name_value:
if self.prefix is not None:
name = '%s-%s' % (self.prefix, name)
self.summary_writer.add_scalar(name, value)
|
apache-2.0
|
lordB8r/polls
|
ENV/lib/python2.7/site-packages/django/contrib/comments/moderation.py
|
101
|
13555
|
"""
A generic comment-moderation system which allows configuration of
moderation options on a per-model basis.
To use, do two things:
1. Create or import a subclass of ``CommentModerator`` defining the
options you want.
2. Import ``moderator`` from this module and register one or more
models, passing the models and the ``CommentModerator`` options
class you want to use.
Example
-------
First, we define a simple model class which might represent entries in
a Weblog::
from django.db import models
class Entry(models.Model):
        title = models.CharField(max_length=250)
body = models.TextField()
pub_date = models.DateField()
enable_comments = models.BooleanField()
Then we create a ``CommentModerator`` subclass specifying some
moderation options::
from django.contrib.comments.moderation import CommentModerator, moderator
class EntryModerator(CommentModerator):
email_notification = True
enable_field = 'enable_comments'
And finally register it for moderation::
moderator.register(Entry, EntryModerator)
This sample class would apply two moderation steps to each new
comment submitted on an Entry:
* If the entry's ``enable_comments`` field is set to ``False``, the
comment will be rejected (immediately deleted).
* If the comment is successfully posted, an email notification of the
comment will be sent to site staff.
For a full list of built-in moderation options and other
configurability, see the documentation for the ``CommentModerator``
class.
"""
import datetime
from django.conf import settings
from django.core.mail import send_mail
from django.contrib.comments import signals
from django.db.models.base import ModelBase
from django.template import Context, loader
from django.contrib import comments
from django.contrib.sites.models import get_current_site
from django.utils import timezone
class AlreadyModerated(Exception):
"""
Raised when a model which is already registered for moderation is
attempting to be registered again.
"""
pass
class NotModerated(Exception):
"""
Raised when a model which is not registered for moderation is
attempting to be unregistered.
"""
pass
class CommentModerator(object):
"""
Encapsulates comment-moderation options for a given model.
This class is not designed to be used directly, since it doesn't
enable any of the available moderation options. Instead, subclass
it and override attributes to enable different options::
``auto_close_field``
If this is set to the name of a ``DateField`` or
``DateTimeField`` on the model for which comments are
being moderated, new comments for objects of that model
will be disallowed (immediately deleted) when a certain
number of days have passed after the date specified in
that field. Must be used in conjunction with
``close_after``, which specifies the number of days past
which comments should be disallowed. Default value is
``None``.
``auto_moderate_field``
Like ``auto_close_field``, but instead of outright
deleting new comments when the requisite number of days
have elapsed, it will simply set the ``is_public`` field
of new comments to ``False`` before saving them. Must be
used in conjunction with ``moderate_after``, which
specifies the number of days past which comments should be
moderated. Default value is ``None``.
``close_after``
If ``auto_close_field`` is used, this must specify the
number of days past the value of the field specified by
``auto_close_field`` after which new comments for an
object should be disallowed. Default value is ``None``.
``email_notification``
If ``True``, any new comment on an object of this model
which survives moderation will generate an email to site
staff. Default value is ``False``.
``enable_field``
If this is set to the name of a ``BooleanField`` on the
model for which comments are being moderated, new comments
on objects of that model will be disallowed (immediately
deleted) whenever the value of that field is ``False`` on
the object the comment would be attached to. Default value
is ``None``.
``moderate_after``
If ``auto_moderate_field`` is used, this must specify the number
of days past the value of the field specified by
``auto_moderate_field`` after which new comments for an
object should be marked non-public. Default value is
``None``.
Most common moderation needs can be covered by changing these
attributes, but further customization can be obtained by
subclassing and overriding the following methods. Each method will
be called with three arguments: ``comment``, which is the comment
being submitted, ``content_object``, which is the object the
comment will be attached to, and ``request``, which is the
``HttpRequest`` in which the comment is being submitted::
``allow``
Should return ``True`` if the comment should be allowed to
post on the content object, and ``False`` otherwise (in
which case the comment will be immediately deleted).
``email``
If email notification of the new comment should be sent to
site staff or moderators, this method is responsible for
sending the email.
``moderate``
Should return ``True`` if the comment should be moderated
(in which case its ``is_public`` field will be set to
``False`` before saving), and ``False`` otherwise (in
which case the ``is_public`` field will not be changed).
Subclasses which want to introspect the model for which comments
are being moderated can do so through the attribute ``_model``,
which will be the model class.
"""
auto_close_field = None
auto_moderate_field = None
close_after = None
email_notification = False
enable_field = None
moderate_after = None
def __init__(self, model):
self._model = model
def _get_delta(self, now, then):
"""
Internal helper which will return a ``datetime.timedelta``
representing the time between ``now`` and ``then``. Assumes
``now`` is a ``datetime.date`` or ``datetime.datetime`` later
than ``then``.
If ``now`` and ``then`` are not of the same type due to one of
them being a ``datetime.date`` and the other being a
``datetime.datetime``, both will be coerced to
``datetime.date`` before calculating the delta.
"""
if now.__class__ is not then.__class__:
now = datetime.date(now.year, now.month, now.day)
then = datetime.date(then.year, then.month, then.day)
if now < then:
raise ValueError("Cannot determine moderation rules because date field is set to a value in the future")
return now - then
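    # Illustrative value, not in the original source:
    #   _get_delta(datetime.date(2012, 1, 10), datetime.date(2012, 1, 1))
    #   -> datetime.timedelta(days=9)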
def allow(self, comment, content_object, request):
"""
Determine whether a given comment is allowed to be posted on
a given object.
Return ``True`` if the comment should be allowed, ``False
otherwise.
"""
if self.enable_field:
if not getattr(content_object, self.enable_field):
return False
if self.auto_close_field and self.close_after is not None:
close_after_date = getattr(content_object, self.auto_close_field)
if close_after_date is not None and self._get_delta(timezone.now(), close_after_date).days >= self.close_after:
return False
return True
def moderate(self, comment, content_object, request):
"""
Determine whether a given comment on a given object should be
allowed to show up immediately, or should be marked non-public
and await approval.
Return ``True`` if the comment should be moderated (marked
non-public), ``False`` otherwise.
"""
if self.auto_moderate_field and self.moderate_after is not None:
moderate_after_date = getattr(content_object, self.auto_moderate_field)
if moderate_after_date is not None and self._get_delta(timezone.now(), moderate_after_date).days >= self.moderate_after:
return True
return False
def email(self, comment, content_object, request):
"""
Send email notification of a new comment to site staff when email
notifications have been requested.
"""
if not self.email_notification:
return
recipient_list = [manager_tuple[1] for manager_tuple in settings.MANAGERS]
t = loader.get_template('comments/comment_notification_email.txt')
c = Context({ 'comment': comment,
'content_object': content_object })
subject = '[%s] New comment posted on "%s"' % (get_current_site(request).name,
content_object)
message = t.render(c)
send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, recipient_list, fail_silently=True)
class Moderator(object):
"""
Handles moderation of a set of models.
An instance of this class will maintain a list of one or more
models registered for comment moderation, and their associated
moderation classes, and apply moderation to all incoming comments.
To register a model, obtain an instance of ``Moderator`` (this
module exports one as ``moderator``), and call its ``register``
method, passing the model class and a moderation class (which
should be a subclass of ``CommentModerator``). Note that both of
these should be the actual classes, not instances of the classes.
To cease moderation for a model, call the ``unregister`` method,
passing the model class.
For convenience, both ``register`` and ``unregister`` can also
accept a list of model classes in place of a single model; this
allows easier registration of multiple models with the same
``CommentModerator`` class.
The actual moderation is applied in two phases: one prior to
saving a new comment, and the other immediately after saving. The
pre-save moderation may mark a comment as non-public or mark it to
be removed; the post-save moderation may delete a comment which
was disallowed (there is currently no way to prevent the comment
being saved once before removal) and, if the comment is still
around, will send any notification emails the comment generated.
"""
def __init__(self):
self._registry = {}
self.connect()
def connect(self):
"""
Hook up the moderation methods to pre- and post-save signals
from the comment models.
"""
signals.comment_will_be_posted.connect(self.pre_save_moderation, sender=comments.get_model())
signals.comment_was_posted.connect(self.post_save_moderation, sender=comments.get_model())
def register(self, model_or_iterable, moderation_class):
"""
Register a model or a list of models for comment moderation,
using a particular moderation class.
Raise ``AlreadyModerated`` if any of the models are already
registered.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model in self._registry:
raise AlreadyModerated("The model '%s' is already being moderated" % model._meta.module_name)
self._registry[model] = moderation_class(model)
def unregister(self, model_or_iterable):
"""
Remove a model or a list of models from the list of models
whose comments will be moderated.
Raise ``NotModerated`` if any of the models are not currently
registered for moderation.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model not in self._registry:
raise NotModerated("The model '%s' is not currently being moderated" % model._meta.module_name)
del self._registry[model]
def pre_save_moderation(self, sender, comment, request, **kwargs):
"""
Apply any necessary pre-save moderation steps to new
comments.
"""
model = comment.content_type.model_class()
if model not in self._registry:
return
content_object = comment.content_object
moderation_class = self._registry[model]
# Comment will be disallowed outright (HTTP 403 response)
if not moderation_class.allow(comment, content_object, request):
return False
if moderation_class.moderate(comment, content_object, request):
comment.is_public = False
def post_save_moderation(self, sender, comment, request, **kwargs):
"""
Apply any necessary post-save moderation steps to new
comments.
"""
model = comment.content_type.model_class()
if model not in self._registry:
return
self._registry[model].email(comment, comment.content_object, request)
# Import this instance in your own code to use in registering
# your models for moderation.
moderator = Moderator()
|
mit
|
jscott1989/django-allauth
|
allauth/socialaccount/south_migrations/0006_auto__del_field_socialapp_site.py
|
80
|
6229
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'SocialApp.site'
db.delete_column('socialaccount_socialapp', 'site_id')
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'SocialApp.site'
raise RuntimeError("Cannot reverse this migration. 'SocialApp.site' and its values cannot be restored.")
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'socialaccount.socialaccount': {
'Meta': {'unique_together': "(('provider', 'uid'),)", 'object_name': 'SocialAccount'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'extra_data': ('allauth.socialaccount.fields.JSONField', [], {'default': "'{}'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'socialaccount.socialapp': {
'Meta': {'object_name': 'SocialApp'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False'})
},
'socialaccount.socialtoken': {
'Meta': {'unique_together': "(('app', 'account'),)", 'object_name': 'SocialToken'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['socialaccount.SocialAccount']"}),
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['socialaccount.SocialApp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'token_secret': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
}
}
complete_apps = ['socialaccount']
|
mit
|
glovebx/odoo
|
addons/point_of_sale/controllers/main.py
|
243
|
1576
|
# -*- coding: utf-8 -*-
import logging
import simplejson
import os
import openerp
import time
import random
import werkzeug.utils
from openerp import http
from openerp.http import request
from openerp.addons.web.controllers.main import module_boot, login_redirect
_logger = logging.getLogger(__name__)
class PosController(http.Controller):
@http.route('/pos/web', type='http', auth='user')
def a(self, debug=False, **k):
cr, uid, context, session = request.cr, request.uid, request.context, request.session
if not session.uid:
return login_redirect()
PosSession = request.registry['pos.session']
pos_session_ids = PosSession.search(cr, uid, [('state','=','opened'),('user_id','=',session.uid)], context=context)
if not pos_session_ids:
return werkzeug.utils.redirect('/web#action=point_of_sale.action_pos_session_opening')
PosSession.login(cr,uid,pos_session_ids,context=context)
modules = simplejson.dumps(module_boot(request.db))
init = """
var wc = new s.web.WebClient();
wc._title_changed = function() {}
wc.show_application = function(){
wc.action_manager.do_action("pos.ui");
};
wc.setElement($(document.body));
wc.start();
"""
html = request.registry.get('ir.ui.view').render(cr, session.uid,'point_of_sale.index',{
'modules': modules,
'init': init,
})
return html
|
agpl-3.0
|
tensorflow/nmt
|
nmt/utils/common_test_utils.py
|
4
|
4744
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utility functions for tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import lookup_ops
from ..utils import iterator_utils
from ..utils import standard_hparams_utils
def create_test_hparams(unit_type="lstm",
encoder_type="uni",
num_layers=4,
attention="",
attention_architecture=None,
use_residual=False,
inference_indices=None,
num_translations_per_input=1,
beam_width=0,
init_op="uniform"):
"""Create training and inference test hparams."""
num_residual_layers = 0
if use_residual:
# TODO(rzhao): Put num_residual_layers computation logic into
# `model_utils.py`, so we can also test it here.
num_residual_layers = 2
standard_hparams = standard_hparams_utils.create_standard_hparams()
# Networks
standard_hparams.num_units = 5
standard_hparams.num_encoder_layers = num_layers
standard_hparams.num_decoder_layers = num_layers
standard_hparams.dropout = 0.5
standard_hparams.unit_type = unit_type
standard_hparams.encoder_type = encoder_type
standard_hparams.residual = use_residual
standard_hparams.num_residual_layers = num_residual_layers
# Attention mechanisms
standard_hparams.attention = attention
standard_hparams.attention_architecture = attention_architecture
# Train
standard_hparams.init_op = init_op
standard_hparams.num_train_steps = 1
standard_hparams.decay_scheme = ""
# Infer
standard_hparams.tgt_max_len_infer = 100
standard_hparams.beam_width = beam_width
standard_hparams.num_translations_per_input = num_translations_per_input
# Misc
standard_hparams.forget_bias = 0.0
standard_hparams.random_seed = 3
standard_hparams.language_model = False
# Vocab
standard_hparams.src_vocab_size = 5
standard_hparams.tgt_vocab_size = 5
standard_hparams.eos = "</s>"
standard_hparams.sos = "<s>"
standard_hparams.src_vocab_file = ""
standard_hparams.tgt_vocab_file = ""
standard_hparams.src_embed_file = ""
standard_hparams.tgt_embed_file = ""
# For inference.py test
standard_hparams.subword_option = "bpe"
standard_hparams.src = "src"
standard_hparams.tgt = "tgt"
standard_hparams.src_max_len = 400
standard_hparams.tgt_eos_id = 0
standard_hparams.inference_indices = inference_indices
return standard_hparams
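# Illustrative call, not part of the original module; the attention settings
# below are only one plausible combination:
#   hparams = create_test_hparams(encoder_type="bi", num_layers=2,
#                                 attention="scaled_luong",
#                                 attention_architecture="standard")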
def create_test_iterator(hparams, mode):
"""Create test iterator."""
src_vocab_table = lookup_ops.index_table_from_tensor(
tf.constant([hparams.eos, "a", "b", "c", "d"]))
tgt_vocab_mapping = tf.constant([hparams.sos, hparams.eos, "a", "b", "c"])
tgt_vocab_table = lookup_ops.index_table_from_tensor(tgt_vocab_mapping)
if mode == tf.contrib.learn.ModeKeys.INFER:
reverse_tgt_vocab_table = lookup_ops.index_to_string_table_from_tensor(
tgt_vocab_mapping)
src_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["a a b b c", "a b b"]))
if mode != tf.contrib.learn.ModeKeys.INFER:
tgt_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["a b c b c", "a b c b"]))
return (
iterator_utils.get_iterator(
src_dataset=src_dataset,
tgt_dataset=tgt_dataset,
src_vocab_table=src_vocab_table,
tgt_vocab_table=tgt_vocab_table,
batch_size=hparams.batch_size,
sos=hparams.sos,
eos=hparams.eos,
random_seed=hparams.random_seed,
num_buckets=hparams.num_buckets),
src_vocab_table,
tgt_vocab_table)
else:
return (
iterator_utils.get_infer_iterator(
src_dataset=src_dataset,
src_vocab_table=src_vocab_table,
eos=hparams.eos,
batch_size=hparams.batch_size),
src_vocab_table,
tgt_vocab_table,
reverse_tgt_vocab_table)
|
apache-2.0
|
ros/catkin
|
cmake/test/download_checkmd5.py
|
1
|
5773
|
from __future__ import print_function
import errno
import hashlib
import os
import sys
try:
from urllib.request import addinfourl, BaseHandler, build_opener, Request, URLError
except ImportError:
from urllib2 import addinfourl, BaseHandler, build_opener, Request, URLError
from argparse import ArgumentParser
NAME = 'download_checkmd5.py'
class HTTPRangeHandler(BaseHandler):
def http_error_206(self, req, fp, code, msg, hdrs):
r = addinfourl(fp, hdrs, req.get_full_url())
r.code = code
r.msg = msg
return r
def http_error_416(self, req, fp, code, msg, hdrs):
raise URLError('Requested Range Not Satisfiable')
def download_with_resume(uri, dest):
handler = HTTPRangeHandler()
opener = build_opener(handler)
offset = 0
content_length = None
accept_ranges = False
while True:
req = Request(uri)
if offset:
req.add_header('Range', 'bytes=%d-' % offset)
src_file = None
try:
src_file = opener.open(req)
headers = src_file.info()
if not offset:
# on first connection check server capabilities
if 'Content-Length' in headers:
content_length = int(headers['Content-Length'])
if 'Accept-Ranges' in headers:
accept_ranges = headers['Accept-Ranges'] != 'none'
else:
# on resume verify that server understood range header and responded accordingly
if 'Content-Range' not in headers:
raise IOError('Download aborted and server does not support resuming download')
if int(headers['Content-Range'][len('bytes '):].split('-')[0]) != offset:
                    raise IOError('Download aborted because server replied with a different content range than requested')
sys.stdout.write(' resume from %d...' % offset)
sys.stdout.flush()
with open(dest, 'ab' if offset else 'wb') as dst_file:
progress = False
while True:
data = src_file.read(8192)
if not data:
break
progress = True
dst_file.write(data)
offset += len(data)
if not progress:
# if no bytes have been received abort download
raise IOError("No progress when trying to download '%s'" % uri)
except Exception:
if src_file:
src_file.close()
raise
# when content length is unknown it is assumed that the download is complete
if content_length is None:
break
# or when enough data has been downloaded (> is especially a valid case)
if offset >= content_length:
break
if not accept_ranges:
raise IOError('Server does not accept ranges to resume download')
def download_md5(uri, dest):
"""Download file from uri to file dest."""
# Create intermediate directories as necessary, #2970
dirname = os.path.dirname(dest)
if len(dirname):
try:
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise
sys.stdout.write('Downloading %s to %s...' % (uri, dest))
sys.stdout.flush()
try:
download_with_resume(uri, dest)
sys.stdout.write(' done.\n')
except Exception as e:
# delete partially downloaded data
if os.path.exists(dest):
os.unlink(dest)
sys.stdout.write(' failed (%s)!\n' % e)
raise
def checkmd5(dest, md5sum=None):
"""
Check file at dest against md5.
    :returns (boolean, hexdigest): True if dest contents match md5sum
"""
if not os.path.exists(dest):
return False, 'null'
with open(dest, 'rb') as f:
md5value = hashlib.md5()
while True:
buf = f.read(4096)
if not buf:
break
md5value.update(buf)
hexdigest = md5value.hexdigest()
print('Checking md5sum on %s' % (dest))
return hexdigest == md5sum, hexdigest
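# Illustrative check, not part of the original script; the path and digest
# below are hypothetical:
#   ok, digest = checkmd5('/tmp/file.tgz', 'd41d8cd98f00b204e9800998ecf8427e')
#   # ok is True only when the file exists and its md5 matches.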
def main(argv=sys.argv[1:]):
"""Dowloads URI to file dest and checks md5 if given."""
parser = ArgumentParser(description='Dowloads URI to file dest. If md5sum is given, checks md5sum. If file existed and mismatch, downloads and checks again')
parser.add_argument('uri')
parser.add_argument('dest')
parser.add_argument('md5sum', nargs='?')
parser.add_argument('--ignore-error', action='store_true', help='Ignore download errors')
args = parser.parse_args(argv)
uri = args.uri
if '://' not in uri:
uri = 'file://' + uri
fresh = False
if not os.path.exists(args.dest):
try:
download_md5(uri, args.dest)
except Exception:
if args.ignore_error:
return 0
raise
fresh = True
if args.md5sum:
result, hexdigest = checkmd5(args.dest, args.md5sum)
if result is False and fresh is False:
print('WARNING: md5sum mismatch (%s != %s); re-downloading file %s' % (hexdigest, args.md5sum, args.dest))
os.remove(args.dest)
try:
download_md5(uri, args.dest)
except Exception:
if args.ignore_error:
return 0
raise
result, hexdigest = checkmd5(args.dest, args.md5sum)
if result is False:
return 'ERROR: md5sum mismatch (%s != %s) on %s; aborting' % (hexdigest, args.md5sum, args.dest)
return 0
if __name__ == '__main__':
sys.exit(main())
|
bsd-3-clause
|
pfmoore/invoke
|
invoke/parser/context.py
|
1
|
9145
|
import itertools
from ..vendor.lexicon import Lexicon
from .argument import Argument
def translate_underscores(name):
return name.lstrip('_').rstrip('_').replace('_', '-')
def to_flag(name):
name = translate_underscores(name)
if len(name) == 1:
return '-' + name
return '--' + name
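# Illustrative values, not in the original source:
#   to_flag('foo_bar') -> '--foo-bar'
#   to_flag('v')       -> '-v'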
def sort_candidate(arg):
names = arg.names
# TODO: is there no "split into two buckets on predicate" builtin?
shorts = set(x for x in names if len(x.strip('-')) == 1)
longs = set(x for x in names if x not in shorts)
return sorted(shorts if shorts else longs)[0]
def flag_key(x):
"""
Obtain useful key list-of-ints for sorting CLI flags.
"""
# Setup
ret = []
x = sort_candidate(x)
# Long-style flags win over short-style ones, so the first item of
# comparison is simply whether the flag is a single character long (with
# non-length-1 flags coming "first" [lower number])
ret.append(1 if len(x) == 1 else 0)
# Next item of comparison is simply the strings themselves,
# case-insensitive. They will compare alphabetically if compared at this
# stage.
ret.append(x.lower())
# Finally, if the case-insensitive test also matched, compare
# case-sensitive, but inverse (with lowercase letters coming first)
inversed = ''
for char in x:
inversed += char.lower() if char.isupper() else char.upper()
ret.append(inversed)
return ret
# Named slightly more verbose so Sphinx references can be unambiguous.
# Got real sick of fully qualified paths.
class ParserContext(object):
"""
Parsing context with knowledge of flags & their format.
Generally associated with the core program or a task.
When run through a parser, will also hold runtime values filled in by the
parser.
"""
def __init__(self, name=None, aliases=(), args=()):
"""
Create a new ``ParserContext`` named ``name``, with ``aliases``.
``name`` is optional, and should be a string if given. It's used to
tell ParserContext objects apart, and for use in a Parser when
determining what chunk of input might belong to a given ParserContext.
``aliases`` is also optional and should be an iterable containing
strings. Parsing will honor any aliases when trying to "find" a given
context in its input.
May give one or more ``args``, which is a quick alternative to calling
``for arg in args: self.add_arg(arg)`` after initialization.
"""
self.args = Lexicon()
self.positional_args = []
self.flags = Lexicon()
self.inverse_flags = {} # No need for Lexicon here
self.name = name
self.aliases = aliases
for arg in args:
self.add_arg(arg)
def __str__(self):
aliases = ""
if self.aliases:
aliases = " ({0})".format(', '.join(self.aliases))
name = (" {0!r}{1}".format(self.name, aliases)) if self.name else ""
args = (": {0!r}".format(self.args)) if self.args else ""
return "<parser/Context{0}{1}>".format(name, args)
def __repr__(self):
return str(self)
def add_arg(self, *args, **kwargs):
"""
Adds given ``Argument`` (or constructor args for one) to this context.
The Argument in question is added to the following dict attributes:
* ``args``: "normal" access, i.e. the given names are directly exposed
as keys.
* ``flags``: "flaglike" access, i.e. the given names are translated
into CLI flags, e.g. ``"foo"`` is accessible via ``flags['--foo']``.
* ``inverse_flags``: similar to ``flags`` but containing only the
"inverse" versions of boolean flags which default to True. This
allows the parser to track e.g. ``--no-myflag`` and turn it into a
False value for the ``myflag`` Argument.
"""
# Normalize
if len(args) == 1 and isinstance(args[0], Argument):
arg = args[0]
else:
arg = Argument(*args, **kwargs)
# Uniqueness constraint: no name collisions
for name in arg.names:
if name in self.args:
msg = "Tried to add an argument named {0!r} but one already exists!" # noqa
raise ValueError(msg.format(name))
# First name used as "main" name for purposes of aliasing
main = arg.names[0] # NOT arg.name
self.args[main] = arg
# Note positionals in distinct, ordered list attribute
if arg.positional:
self.positional_args.append(arg)
# Add names & nicknames to flags, args
self.flags[to_flag(main)] = arg
for name in arg.nicknames:
self.args.alias(name, to=main)
self.flags.alias(to_flag(name), to=to_flag(main))
# Add attr_name to args, but not flags
if arg.attr_name:
self.args.alias(arg.attr_name, to=main)
# Add to inverse_flags if required
if arg.kind == bool and arg.default is True:
# Invert the 'main' flag name here, which will be a dashed version
# of the primary argument name if underscore-to-dash transformation
# occurred.
inverse_name = to_flag("no-{0}".format(main))
self.inverse_flags[inverse_name] = to_flag(main)
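    # Minimal usage sketch for add_arg (illustrative; the exact Argument
    # kwargs are assumptions about .argument, not confirmed here):
    #   ctx = ParserContext(name='build')
    #   ctx.add_arg(names=('clean', 'c'), kind=bool, default=True)
    #   ctx.flags['--clean']   # same Argument as ctx.flags['-c'] via alias
    #   ctx.inverse_flags      # -> {'--no-clean': '--clean'}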
@property
def needs_positional_arg(self):
return any(x.value is None for x in self.positional_args)
@property
def as_kwargs(self):
"""
This context's arguments' values keyed by their ``.name`` attribute.
Results in a dict suitable for use in Python contexts, where e.g. an
arg named ``foo-bar`` becomes accessible as ``foo_bar``.
"""
ret = {}
for arg in self.args.values():
ret[arg.name] = arg.value
return ret
def names_for(self, flag):
# TODO: should probably be a method on Lexicon/AliasDict
return list(set([flag] + self.flags.aliases_of(flag)))
def help_for(self, flag):
"""
Return 2-tuple of ``(flag-spec, help-string)`` for given ``flag``.
"""
# Obtain arg obj
if flag not in self.flags:
err = "{0!r} is not a valid flag for this context! Valid flags are: {1!r}" # noqa
raise ValueError(err.format(flag, self.flags.keys()))
arg = self.flags[flag]
# Determine expected value type, if any
value = {
str: 'STRING',
}.get(arg.kind)
# Format & go
full_names = []
for name in self.names_for(flag):
if value:
# Short flags are -f VAL, long are --foo=VAL
# When optional, also, -f [VAL] and --foo[=VAL]
if len(name.strip('-')) == 1:
value_ = ("[{0}]".format(value)) if arg.optional else value
valuestr = " {0}".format(value_)
else:
valuestr = "={0}".format(value)
if arg.optional:
valuestr = "[{0}]".format(valuestr)
else:
# no value => boolean
# check for inverse
if name in self.inverse_flags.values():
name = "--[no-]{0}".format(name[2:])
valuestr = ""
# Tack together
full_names.append(name + valuestr)
namestr = ", ".join(sorted(full_names, key=len))
helpstr = arg.help or ""
return namestr, helpstr
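    # Illustrative result (hand-derived from the logic above): for a
    # str-kind Argument with names ('foo', 'f') and help "Foo it.",
    # help_for('--foo') returns roughly
    #   ('-f STRING, --foo=STRING', 'Foo it.')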
def help_tuples(self):
"""
Return sorted iterable of help tuples for all member Arguments.
Sorts like so:
* General sort is alphanumerically
* Short flags win over long flags
* Arguments with *only* long flags and *no* short flags will come
first.
* When an Argument has multiple long or short flags, it will sort using
the most favorable (lowest alphabetically) candidate.
This will result in a help list like so::
--alpha, --zeta # 'alpha' wins
--beta
-a, --query # short flag wins
-b, --argh
-c
"""
# TODO: argument/flag API must change :(
# having to call to_flag on 1st name of an Argument is just dumb.
# To pass in an Argument object to help_for may require moderate
# changes?
# Cast to list to ensure non-generator on Python 3.
return list(map(
lambda x: self.help_for(to_flag(x.name)),
sorted(self.flags.values(), key=flag_key)
))
def flag_names(self):
"""
Similar to `help_tuples` but returns flag names only, no helpstrs.
Specifically, all flag names, flattened, in rough order.
"""
# Regular flag names
flags = sorted(self.flags.values(), key=flag_key)
names = [self.names_for(to_flag(x.name)) for x in flags]
# Inverse flag names sold separately
names.append(self.inverse_flags.keys())
return tuple(itertools.chain.from_iterable(names))
|
bsd-2-clause
|
PinguinoIDE/pinguino-multilanguage
|
files/ide/methods/syntax.py
|
2
|
3043
|
#! /usr/bin/python
#-*- coding: utf-8 -*-
import datetime
today = datetime.datetime.now()
Autocompleter = {}
Autocompleter["directive"] = ["define", "include", "ifndef", "endif", "undef", "if", "elif", "else", "error", "warning"]
#from const.h
const = [ "PI", "HALF_PI", "TWO_PI", "DEG_TO_RAD", "RAD_TO_DEG", "NULL", "ON", "OFF", "FALSE", "TRUE", "True", "False", "false", "true",
"INPUT", "OUTPUT", "HIGH",
"LOW", "AND", "OR",
"BYTE", "BIN", "OCT", "DEC", "HEX", "FLOAT",
"LED1", "LED2", "LED3", "LED4", "WHITELED", "GREENLED", "USERLED", "YELLOWLED", "REDLED", "PROGBUTTON", "USERBUTTON",
"RTCC", "PMCS1", "PMRD", "PMWR", "PMA1",
#Others, not in cons.h
"FOSC", "MIPS",
"PORTA", "PORTB", "PORTC", "PORTD", "PORTE", "PORTF", "PORTG",
"void", "const", "BOOL", "char", "unsigned", "short", "int", "long", "float", "double", "byte", "word",
"u8", "s8", "u16", "s16", "u32", "s32", "u64", "s64",
"struct", "union", "typedef", "enum", "register", "static", "extern", "volatile",
"loop", "setup", "INT_MILLISEC", "INT_MICROSEC", "INT_FALLING_EDGE", "interrupt",
#C syntax
"if", "switch", "for", "while", "do", "continue", "break", "else", "return", "case", "default",
]
const += ["P1_%d"%p for p in range(1, 17)]
const += ["P2_%d"%p for p in range(1, 17)]
const += ["D%d"%p for p in range(14)]
const += ["A%d"%p for p in range(8)]
const += ["PWM%d"%p for p in range(5)]
const += ["PWD%d"%p for p in range(8)]
Autocompleter["reserved"] = const
Snippet = {}
# Snippet["name {snippet}"] = "text to insert, with cursor mark [!]"  ## [!] marks the cursor position
Snippet["void {snippet}"] = "void [!]() {\n\n}"
Snippet["while( ) {snippet}"] = "while ([!]) {\n\n}"
Snippet["do {...} while {snippet}"] = "do {\n\t[!]\n} while();"
Snippet["for {snippet}"] = "for ([!]) {\n\n}"
Snippet["for (i=0;...{snippet}"] = "for (i=[!]; i<; i++) {\n\n}"
Snippet["if...else {snippet}"] = "if ([!]) {\n\n\t}\nelse {\n\n}"
Snippet["if...else if...else {snippet}"] = "if ([!]) {\n\n}\nelse if () {\n\n}\nelse {\n\n}"
Snippet["switch( ) {snippet}"] = "switch ([!]) {\n\tcase:\n\n\t\tbreak;\n\n\tdefault:\n\n\t\tbreak;}"
Snippet["case {snippet}"] = "case [!]: \n\n\tbreak;"
Snippet["struct {snippet}"] = "struct {\n\t[!]\n};"
Snippet["typedef ... struct {snippet}"] = "typedef struct {\n\t[!]\n}TYPE;"
Snippet["union {snippet}"] = "union [!] {\n\t\n};"
Snippet["typedef ... union {snippet}"] = "typedef union {\n\t[!]\n}TYPE;"
Snippet["Bare minimum {snippet}"] = "void setup() {\n\t// put your setup code here, to run once:\n\n\t[!]\n}\n\nvoid loop() {\n\t// put your main code here, to run repeatedly:\n\n}\n"
Snippet["file {snippet}"] = """/*-----------------------------------------------------
%s: [!] --<>
%s: %s
%s:
-----------------------------------------------------*/
""" %("Author", "Date", today.strftime("%Y-%m-%d"), "Description")
for key in Snippet.keys():
Snippet[key] = Snippet[key].replace("\t", " "*4)
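# Illustrative expansion (hand-derived): inserting Snippet["for {snippet}"]
# yields
#   for () {
#
#   }
# with the editor caret placed where the [!] marker was.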
|
gpl-2.0
|
facebook/fbthrift
|
thrift/lib/py/Thrift.py
|
1
|
11042
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import six
import sys
import threading
UEXW_MAX_LENGTH = 1024
class TType:
STOP = 0
VOID = 1
BOOL = 2
BYTE = 3
I08 = 3
DOUBLE = 4
I16 = 6
I32 = 8
I64 = 10
STRING = 11
UTF7 = 11
STRUCT = 12
MAP = 13
SET = 14
LIST = 15
UTF8 = 16
UTF16 = 17
FLOAT = 19
class TMessageType:
CALL = 1
REPLY = 2
EXCEPTION = 3
ONEWAY = 4
class TPriority:
""" apache::thrift::concurrency::PRIORITY """
HIGH_IMPORTANT = 0
HIGH = 1
IMPORTANT = 2
NORMAL = 3
BEST_EFFORT = 4
N_PRIORITIES = 5
class TRequestContext:
def __init__(self):
self._headers = None
def getHeaders(self):
return self._headers
def setHeaders(self, headers):
self._headers = headers
class TProcessorEventHandler:
"""Event handler for thrift processors"""
# TODO: implement asyncComplete for Twisted
def getHandlerContext(self, fn_name, server_context):
"""Called at the start of processing a handler method"""
return None
def preRead(self, handler_context, fn_name, args):
"""Called before the handler method's argument are read"""
pass
def postRead(self, handler_context, fn_name, args):
"""Called after the handler method's argument are read"""
pass
def preWrite(self, handler_context, fn_name, result):
"""Called before the handler method's results are written"""
pass
def postWrite(self, handler_context, fn_name, result):
"""Called after the handler method's results are written"""
pass
def handlerException(self, handler_context, fn_name, exception):
"""Called if (and only if) the handler threw an expected exception."""
pass
def handlerError(self, handler_context, fn_name, exception):
"""Called if (and only if) the handler threw an unexpected exception.
Note that this method is NOT called if the handler threw an
exception that is declared in the thrift service specification"""
logging.exception("Unexpected error in service handler " + fn_name + ":")
class TServerInterface:
def __init__(self):
self._tl_request_context = threading.local()
def setRequestContext(self, request_context):
self._tl_request_context.ctx = request_context
def getRequestContext(self):
return self._tl_request_context.ctx
class TProcessor:
"""Base class for processor, which works on two streams."""
def __init__(self):
self._event_handler = TProcessorEventHandler() # null object handler
self._handler = None
self._processMap = {}
self._priorityMap = {}
def setEventHandler(self, event_handler):
self._event_handler = event_handler
def getEventHandler(self):
return self._event_handler
def process(self, iprot, oprot, server_context=None):
pass
def onewayMethods(self):
return ()
def readMessageBegin(self, iprot):
name, _, seqid = iprot.readMessageBegin()
if six.PY3:
name = name.decode('utf8')
return name, seqid
def skipMessageStruct(self, iprot):
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
def doesKnowFunction(self, name):
return name in self._processMap
def callFunction(self, name, seqid, iprot, oprot, server_ctx):
process_fn = self._processMap[name]
return process_fn(self, seqid, iprot, oprot, server_ctx)
def readArgs(self, iprot, handler_ctx, fn_name, argtype):
args = argtype()
self._event_handler.preRead(handler_ctx, fn_name, args)
args.read(iprot)
iprot.readMessageEnd()
self._event_handler.postRead(handler_ctx, fn_name, args)
return args
def writeException(self, oprot, name, seqid, exc):
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
exc.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def get_priority(self, fname):
return self._priorityMap.get(fname, TPriority.NORMAL)
def _getReplyType(self, result):
if isinstance(result, TApplicationException):
return TMessageType.EXCEPTION
return TMessageType.REPLY
@staticmethod
def _get_exception_from_thrift_result(result):
"""Returns the wrapped exception, if pressent. None if not.
result is a generated *_result object. This object either has a
'success' field set indicating the call succeeded, or a field set
indicating the exception thrown.
"""
fields = (
result.__dict__.keys()
if hasattr(result, "__dict__") else result.__slots__
)
for field in fields:
value = getattr(result, field)
if value is None:
continue
elif field == 'success':
return None
else:
return value
return None
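    # Illustrative sketch (the *_result class below is hypothetical; real
    # shapes come from generated thrift code):
    #   class add_result(object):
    #       __slots__ = ('success', 'overflow_error')
    #   r = add_result(); r.success = None; r.overflow_error = TException('boom')
    #   TProcessor._get_exception_from_thrift_result(r)  # -> the TException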
def writeReply(self, oprot, handler_ctx, fn_name, seqid, result, server_ctx=None):
self._event_handler.preWrite(handler_ctx, fn_name, result)
reply_type = self._getReplyType(result)
if server_ctx is not None and hasattr(server_ctx, 'context_data'):
ex = (result if reply_type == TMessageType.EXCEPTION
else self._get_exception_from_thrift_result(result))
if ex:
server_ctx.context_data.setHeaderEx(ex.__class__.__name__)
server_ctx.context_data.setHeaderExWhat(str(ex)[:UEXW_MAX_LENGTH])
try:
oprot.writeMessageBegin(fn_name, reply_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
except Exception as e:
# Handle any thrift serialization exceptions
# Transport is likely in a messed up state. Some data may already have
# been written and it may not be possible to recover. Doing nothing
# causes the client to wait until the request times out. Try to
# close the connection to trigger a quicker failure on client side
oprot.trans.close()
# Let application know that there has been an exception
self._event_handler.handlerError(handler_ctx, fn_name, e)
# We raise the exception again to avoid any further processing
raise
finally:
# Since we called preWrite, we should also call postWrite to
# allow application to properly log their requests.
self._event_handler.postWrite(handler_ctx, fn_name, result)
class TException(Exception):
"""Base class for all thrift exceptions."""
# BaseException.message is deprecated in Python v[2.6,3.0)
if (2, 6, 0) <= sys.version_info < (3, 0):
def _get_message(self):
return self._message
def _set_message(self, message):
self._message = message
message = property(_get_message, _set_message)
def __init__(self, message=None):
Exception.__init__(self, message)
self.message = message
class TApplicationException(TException):
"""Application level thrift exceptions."""
UNKNOWN = 0
UNKNOWN_METHOD = 1
INVALID_MESSAGE_TYPE = 2
WRONG_METHOD_NAME = 3
BAD_SEQUENCE_ID = 4
MISSING_RESULT = 5
INTERNAL_ERROR = 6
PROTOCOL_ERROR = 7
INVALID_TRANSFORM = 8
INVALID_PROTOCOL = 9
UNSUPPORTED_CLIENT_TYPE = 10
LOADSHEDDING = 11
TIMEOUT = 12
INJECTED_FAILURE = 13
EXTYPE_TO_STRING = {
UNKNOWN_METHOD: 'Unknown method',
INVALID_MESSAGE_TYPE: 'Invalid message type',
WRONG_METHOD_NAME: 'Wrong method name',
BAD_SEQUENCE_ID: 'Bad sequence ID',
MISSING_RESULT: 'Missing result',
INTERNAL_ERROR: 'Internal error',
PROTOCOL_ERROR: 'Protocol error',
INVALID_TRANSFORM: 'Invalid transform',
INVALID_PROTOCOL: 'Invalid protocol',
UNSUPPORTED_CLIENT_TYPE: 'Unsupported client type',
LOADSHEDDING: 'Loadshedding request',
TIMEOUT: 'Task timeout',
INJECTED_FAILURE: 'Injected Failure',
}
def __init__(self, type=UNKNOWN, message=None):
TException.__init__(self, message)
self.type = type
def __str__(self):
if self.message:
return self.message
else:
return self.EXTYPE_TO_STRING.get(
self.type,
'Default (unknown) TApplicationException')
def read(self, iprot):
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
message = iprot.readString()
if sys.version_info.major >= 3 and isinstance(message,
bytes):
try:
message = message.decode('utf-8')
except UnicodeDecodeError:
pass
self.message = message
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.type = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
oprot.writeStructBegin(b'TApplicationException')
if self.message is not None:
oprot.writeFieldBegin(b'message', TType.STRING, 1)
oprot.writeString(self.message.encode('utf-8')
if not isinstance(self.message, bytes)
else self.message)
oprot.writeFieldEnd()
if self.type is not None:
oprot.writeFieldBegin(b'type', TType.I32, 2)
oprot.writeI32(self.type)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
class UnimplementedTypedef:
pass
|
apache-2.0
|
natefoo/ansible-modules-extras
|
database/vertica/vertica_role.py
|
148
|
8202
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: vertica_role
version_added: '2.0'
short_description: Adds or removes Vertica database roles and assigns roles to them.
description:
- Adds or removes Vertica database role and, optionally, assign other roles.
options:
name:
description:
- Name of the role to add or remove.
required: true
assigned_roles:
description:
- Comma separated list of roles to assign to the role.
aliases: ['assigned_role']
required: false
default: null
state:
description:
      - Whether to create C(present) or drop C(absent) a role.
required: false
choices: ['present', 'absent']
default: present
db:
description:
- Name of the Vertica database.
required: false
default: null
cluster:
description:
- Name of the Vertica cluster.
required: false
default: localhost
port:
description:
- Vertica cluster port to connect to.
required: false
default: 5433
login_user:
description:
- The username used to authenticate with.
required: false
default: dbadmin
login_password:
description:
- The password used to authenticate with.
required: false
default: null
notes:
- The default authentication assumes that you are either logging in as or sudo'ing
to the C(dbadmin) account on the host.
- This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
- Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
requirements: [ 'unixODBC', 'pyodbc' ]
author: "Dariusz Owczarek (@dareko)"
"""
EXAMPLES = """
- name: creating a new vertica role
vertica_role: name=role_name db=db_name state=present
- name: creating a new vertica role with other role assigned
vertica_role: name=role_name assigned_role=other_role_name state=present
"""
try:
import pyodbc
except ImportError:
pyodbc_found = False
else:
pyodbc_found = True
class NotSupportedError(Exception):
pass
class CannotDropError(Exception):
pass
# module specific functions
def get_role_facts(cursor, role=''):
facts = {}
cursor.execute("""
select r.name, r.assigned_roles
from roles r
where (? = '' or r.name ilike ?)
""", role, role)
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
role_key = row.name.lower()
facts[role_key] = {
'name': row.name,
'assigned_roles': []}
if row.assigned_roles:
facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',')
return facts
def update_roles(role_facts, cursor, role,
existing, required):
for assigned_role in set(existing) - set(required):
cursor.execute("revoke {0} from {1}".format(assigned_role, role))
for assigned_role in set(required) - set(existing):
cursor.execute("grant {0} to {1}".format(assigned_role, role))
def check(role_facts, role, assigned_roles):
role_key = role.lower()
if role_key not in role_facts:
return False
if assigned_roles and cmp(sorted(assigned_roles), sorted(role_facts[role_key]['assigned_roles'])) != 0:
return False
return True
def present(role_facts, cursor, role, assigned_roles):
role_key = role.lower()
if role_key not in role_facts:
cursor.execute("create role {0}".format(role))
update_roles(role_facts, cursor, role, [], assigned_roles)
role_facts.update(get_role_facts(cursor, role))
return True
else:
changed = False
if assigned_roles and cmp(sorted(assigned_roles), sorted(role_facts[role_key]['assigned_roles'])) != 0:
update_roles(role_facts, cursor, role,
role_facts[role_key]['assigned_roles'], assigned_roles)
changed = True
if changed:
role_facts.update(get_role_facts(cursor, role))
return changed
def absent(role_facts, cursor, role, assigned_roles):
role_key = role.lower()
if role_key in role_facts:
update_roles(role_facts, cursor, role,
role_facts[role_key]['assigned_roles'], [])
cursor.execute("drop role {0} cascade".format(role_facts[role_key]['name']))
del role_facts[role_key]
return True
else:
return False
# module logic
def main():
module = AnsibleModule(
argument_spec=dict(
role=dict(required=True, aliases=['name']),
assigned_roles=dict(default=None, aliases=['assigned_role']),
state=dict(default='present', choices=['absent', 'present']),
db=dict(default=None),
cluster=dict(default='localhost'),
port=dict(default='5433'),
login_user=dict(default='dbadmin'),
login_password=dict(default=None),
), supports_check_mode = True)
if not pyodbc_found:
module.fail_json(msg="The python pyodbc module is required.")
role = module.params['role']
assigned_roles = []
if module.params['assigned_roles']:
assigned_roles = module.params['assigned_roles'].split(',')
assigned_roles = filter(None, assigned_roles)
state = module.params['state']
db = ''
if module.params['db']:
db = module.params['db']
changed = False
try:
dsn = (
"Driver=Vertica;"
"Server={0};"
"Port={1};"
"Database={2};"
"User={3};"
"Password={4};"
"ConnectionLoadBalance={5}"
).format(module.params['cluster'], module.params['port'], db,
module.params['login_user'], module.params['login_password'], 'true')
db_conn = pyodbc.connect(dsn, autocommit=True)
cursor = db_conn.cursor()
except Exception, e:
module.fail_json(msg="Unable to connect to database: {0}.".format(e))
try:
role_facts = get_role_facts(cursor)
if module.check_mode:
changed = not check(role_facts, role, assigned_roles)
elif state == 'absent':
try:
changed = absent(role_facts, cursor, role, assigned_roles)
except pyodbc.Error, e:
module.fail_json(msg=str(e))
elif state == 'present':
try:
changed = present(role_facts, cursor, role, assigned_roles)
except pyodbc.Error, e:
module.fail_json(msg=str(e))
except NotSupportedError, e:
module.fail_json(msg=str(e), ansible_facts={'vertica_roles': role_facts})
except CannotDropError, e:
module.fail_json(msg=str(e), ansible_facts={'vertica_roles': role_facts})
except SystemExit:
# avoid catching this on python 2.4
raise
except Exception, e:
module.fail_json(msg=e)
module.exit_json(changed=changed, role=role, ansible_facts={'vertica_roles': role_facts})
# import ansible utilities
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
40223119/cdaw11
|
static/Brython3.1.0-20150301-090019/Lib/site-packages/turtle.py
|
619
|
105984
|
import math
from javascript import console
from browser import document, html
import _svg
_CFG = {"width" : 0.5, # Screen
"height" : 0.75,
"canvwidth" : 400,
"canvheight": 300,
"leftright": None,
"topbottom": None,
"mode": "standard", # TurtleScreen
"colormode": 1.0,
"delay": 10,
"undobuffersize": 1000, # RawTurtle
"shape": "classic",
"pencolor" : "black",
"fillcolor" : "black",
"resizemode" : "noresize",
"visible" : True,
"language": "english", # docstrings
"exampleturtle": "turtle",
"examplescreen": "screen",
"title": "Python Turtle Graphics",
"using_IDLE": False
}
class Vec2D(tuple):
"""A 2 dimensional vector class, used as a helper class
for implementing turtle graphics.
May be useful for turtle graphics programs also.
Derived from tuple, so a vector is a tuple!
Provides (for a, b vectors, k number):
a+b vector addition
a-b vector subtraction
a*b inner product
k*a and a*k multiplication with scalar
|a| absolute value of a
a.rotate(angle) rotation
"""
def __new__(cls, x, y):
return tuple.__new__(cls, (x, y))
def __add__(self, other):
return Vec2D(self[0]+other[0], self[1]+other[1])
def __mul__(self, other):
if isinstance(other, Vec2D):
return self[0]*other[0]+ self[1]*other[1]
return Vec2D(self[0]*other, self[1]*other)
def __rmul__(self, other):
if isinstance(other, int) or isinstance(other, float):
return Vec2D(self[0]*other, self[1]*other)
def __sub__(self, other):
return Vec2D(self[0]-other[0], self[1]-other[1])
def __neg__(self):
return Vec2D(-self[0], -self[1])
def __abs__(self):
return (self[0]**2 + self[1]**2)**0.5
def rotate(self, angle):
"""rotate self counterclockwise by angle
"""
perp = Vec2D(-self[1], self[0])
angle = angle * math.pi / 180.0
c, s = math.cos(angle), math.sin(angle)
return Vec2D(self[0]*c+perp[0]*s, self[1]*c+perp[1]*s)
def __getnewargs__(self):
return (self[0], self[1])
def __repr__(self):
return "(%.2f,%.2f)" % self
##############################################################################
### From here up to line : Tkinter - Interface for turtle.py ###
### May be replaced by an interface to some different graphics toolkit ###
##############################################################################
class _Root:
"""Root class for Screen based on Tkinter."""
def setupcanvas(self, width, height, cwidth, cheight):
self._svg=_svg.svg(Id="mycanvas", width=cwidth, height=cheight)
self._canvas=_svg.g(transform="translate(%d,%d)" % (cwidth//2, cheight//2))
self._svg <= self._canvas
def end(self):
def set_svg():
#have to do this to get animate to work...
document['container'].html=document['container'].html
if "mycanvas" not in document:
document["container"] <= self._svg
from browser import timer
#need this for chrome so that first few draw commands are viewed properly.
timer.set_timeout(set_svg, 1)
def _getcanvas(self):
return self._canvas
def win_width(self):
return self._canvas.width
def win_height(self):
return self._canvas.height
class TurtleScreenBase:
"""Provide the basic graphics functionality.
Interface between Tkinter and turtle.py.
To port turtle.py to some different graphics toolkit
a corresponding TurtleScreenBase class has to be implemented.
"""
#@staticmethod
#def _blankimage():
# """return a blank image object
# """
# pass
#@staticmethod
#def _image(filename):
# """return an image object containing the
# imagedata from a gif-file named filename.
# """
# pass
def __init__(self, cv):
self.cv = cv
self._previous_turtle_attributes={}
self._draw_pos=0
self.canvwidth = cv.width
self.canvheight = cv.height
self.xscale = self.yscale = 1.0
def _createpoly(self):
"""Create an invisible polygon item on canvas self.cv)
"""
#console.log("_createpoly")
pass
def _drawpoly(self, polyitem, coordlist, fill=None,
outline=None, width=None, top=False):
"""Configure polygonitem polyitem according to provided
arguments:
coordlist is sequence of coordinates
fill is filling color
outline is outline color
top is a boolean value, which specifies if polyitem
will be put on top of the canvas' displaylist so it
will not be covered by other items.
"""
#console.log("_drawpoly")
pass
def _drawline(self, lineitem, coordlist=None,
fill=None, width=None, top=False):
"""Configure lineitem according to provided arguments:
coordlist is sequence of coordinates
fill is drawing color
width is width of drawn line.
top is a boolean value, which specifies if polyitem
will be put on top of the canvas' displaylist so it
will not be covered by other items.
"""
#console.log("_drawline")
#if not isinstance(lineitem, Turtle):
# return
if coordlist is not None:
_x0, _y0=coordlist[0]
_x1, _y1=coordlist[1]
_dist=math.sqrt( (_x0-_x1)*(_x0-_x1) + (_y0-_y1)*(_y0-_y1) )
_dur="%4.2fs" % (0.01*_dist)
if _dur == '0.00s':
_dur='0.1s'
#_dur="%ss" % 1
self._draw_pos+=1
_shape=["%s,%s" % (_x, _y) for _x,_y in lineitem.get_shapepoly()]
if 0:
#if lineitem.isvisible():
if lineitem in self._previous_turtle_attributes:
_previous=self._previous_turtle_attributes[lineitem]
if _previous.heading() != lineitem.heading():
#if self._turtle_heading[lineitem] != lineitem.heading():
_rotate=_previous.heading()
_turtle=_svg.polygon(points=" ".join(_shape),
transform="rotate(%s)" % (_rotate-90),
style={'stroke': fill, 'fill': fill,
'stroke-width': width, 'display': 'none'})
# we need to rotate our turtle..
_turtle <= _svg.animateTransform(
Id="animateLine%s" % self._draw_pos,
attributeName="transform",
type="rotate",
attributeType="XML",
From=_rotate - 90,
to=lineitem.heading() -90,
dur=_dur,
begin="animateLine%s.end" % (self._draw_pos-1))
_turtle <= _svg.set(attributeName="display",
attributeType="CSS", to="block",
begin="animateLine%s.begin" % self._draw_pos,
end="animateLine%s.end" % self._draw_pos)
#_turtle <= _svg.animateMotion(From="%s,%s" % (_x0*self.xscale, _y0*self.yscale),
# to="%s,%s" % (_x0*self.xscale, _y0*self.yscale),
# begin="animateLine%s.begin" % self._draw_pos,
# end="animateLine%s.end" % self._draw_pos)
#_turtle <= _svg.animate(attributeName="fill",
# From=_previous.fill, to=fill, dur=_dur,
# begin="animateLine%s.begin" % self._draw_pos)
self._draw_pos+=1
self._canvas <= _turtle
_line= _svg.line(x1=_x0*self.xscale, y1=_y0*self.yscale,
x2=_x0*self.xscale, y2=_y0*self.yscale,
style={'stroke': fill, 'stroke-width': width})
_an1=_svg.animate(Id="animateLine%s" % self._draw_pos,
attributeName="x2", attributeType="XML",
From=_x0*self.xscale, to=_x1*self.xscale,
dur=_dur, fill='freeze')
_an2=_svg.animate(attributeName="y2", attributeType="XML",
begin="animateLine%s.begin" % self._draw_pos,
From=_y0*self.xscale, to=_y1*self.xscale,
dur=_dur, fill='freeze')
# draw turtle
if lineitem.isvisible():
_turtle=_svg.polygon(points=" ".join(_shape),
transform="rotate(%s)" % (lineitem.heading() - 90),
style={'stroke': fill, 'fill': fill,
'stroke-width': width, 'display': 'none'})
_turtle <= _svg.animateMotion(From="%s,%s" % (_x0*self.xscale, _y0*self.yscale),
to="%s,%s" % (_x1*self.xscale, _y1*self.yscale),
dur=_dur, begin="animateLine%s.begin" % self._draw_pos)
_turtle <= _svg.set(attributeName="display", attributeType="CSS",
to="block",
begin="animateLine%s.begin" % self._draw_pos,
end="animateLine%s.end" % self._draw_pos)
self._canvas <= _turtle
self._previous_turtle_attributes[lineitem]=lineitem
if self._draw_pos == 1:
_an1.setAttribute('begin', "0s")
else:
_an1.setAttribute('begin', "animateLine%s.end" % (self._draw_pos-1))
_line <= _an1
_line <= _an2
self._canvas <= _line
def _delete(self, item):
"""Delete graphics item from canvas.
If item is"all" delete all graphics items.
"""
pass
def _update(self):
"""Redraw graphics items on canvas
"""
pass
def _delay(self, delay):
"""Delay subsequent canvas actions for delay ms."""
pass
def _iscolorstring(self, color):
"""Check if the string color is a legal Tkinter color string.
"""
return True #fix me
#try:
# rgb = self.cv.winfo_rgb(color)
# ok = True
#except TK.TclError:
# ok = False
#return ok
def _bgcolor(self, color=None):
"""Set canvas' backgroundcolor if color is not None,
else return backgroundcolor."""
if color is not None:
self.cv.style.backgroundColor=color
else:
return self.cv.style.backgroundColor
def _write(self, pos, txt, align, font, pencolor):
"""Write txt at pos in canvas with specified font
and color.
Return text item and x-coord of right bottom corner
of text's bounding box."""
self._draw_pos+=1
_text= _svg.text(txt, x=pos[0], y=pos[1], fill=pencolor,
style={'display': 'none'})
_text <= _svg.animate(Id="animateLine%s" % self._draw_pos,
attributeName="display", attributeType="CSS",
From="block", to="block", dur="1s", fill='freeze',
begin="animateLine%s.end" % (self._draw_pos-1))
self._canvas <= _text
return Vec2D(pos[0]+50, pos[1]+50) #fix me
## def _dot(self, pos, size, color):
## """may be implemented for some other graphics toolkit"""
def _createimage(self, image):
"""Create and return image item on canvas.
"""
pass
def _drawimage(self, item, pos, image):
"""Configure image item as to draw image object
        at position (x,y) on canvas.
"""
pass
def _setbgpic(self, item, image):
"""Configure image item as to draw image object
at center of canvas. Set item to the first item
in the displaylist, so it will be drawn below
        any other item."""
pass
def _type(self, item):
"""Return 'line' or 'polygon' or 'image' depending on
type of item.
"""
pass
def _resize(self, canvwidth=None, canvheight=None, bg=None):
"""Resize the canvas the turtles are drawing on. Does
not alter the drawing window.
"""
self.cv.style.width=canvwidth
self.cv.style.height=canvheight
if bg is not None:
self.cv.style.backgroundColor=bg
def _window_size(self):
""" Return the width and height of the turtle window.
"""
#for now just return canvas width/height
return self.cv.width, self.cv.height
def mainloop(self):
"""Starts event loop - calling Tkinter's mainloop function.
No argument.
Must be last statement in a turtle graphics program.
Must NOT be used if a script is run from within IDLE in -n mode
(No subprocess) - for interactive use of turtle graphics.
Example (for a TurtleScreen instance named screen):
>>> screen.mainloop()
"""
pass
def textinput(self, title, prompt):
"""Pop up a dialog window for input of a string.
Arguments: title is the title of the dialog window,
prompt is a text mostly describing what information to input.
Return the string input
If the dialog is canceled, return None.
Example (for a TurtleScreen instance named screen):
>>> screen.textinput("NIM", "Name of first player:")
"""
pass
def numinput(self, title, prompt, default=None, minval=None, maxval=None):
"""Pop up a dialog window for input of a number.
Arguments: title is the title of the dialog window,
prompt is a text mostly describing what numerical information to input.
default: default value
        minval: minimum value for input
maxval: maximum value for input
The number input must be in the range minval .. maxval if these are
given. If not, a hint is issued and the dialog remains open for
correction. Return the number input.
If the dialog is canceled, return None.
Example (for a TurtleScreen instance named screen):
>>> screen.numinput("Poker", "Your stakes:", 1000, minval=10, maxval=10000)
"""
pass
##############################################################################
### End of Tkinter - interface ###
##############################################################################
class Terminator (Exception):
"""Will be raised in TurtleScreen.update, if _RUNNING becomes False.
This stops execution of a turtle graphics script.
Main purpose: use in the Demo-Viewer turtle.Demo.py.
"""
pass
class TurtleGraphicsError(Exception):
"""Some TurtleGraphics Error
"""
pass
class Shape:
"""Data structure modeling shapes.
attribute _type is one of "polygon", "image", "compound"
    attribute _data is - depending on _type - a polygon-tuple,
an image or a list constructed using the addcomponent method.
"""
def __init__(self, type_, data=None):
self._type = type_
if type_ == "polygon":
if isinstance(data, list):
data = tuple(data)
elif type_ == "image":
if isinstance(data, str):
if data.lower().endswith(".gif") and isfile(data):
data = TurtleScreen._image(data)
# else data assumed to be Photoimage
elif type_ == "compound":
data = []
else:
raise TurtleGraphicsError("There is no shape type %s" % type_)
self._data = data
def addcomponent(self, poly, fill, outline=None):
"""Add component to a shape of type compound.
Arguments: poly is a polygon, i. e. a tuple of number pairs.
fill is the fillcolor of the component,
outline is the outline color of the component.
        call (for a Shape object named s):
-- s.addcomponent(((0,0), (10,10), (-10,10)), "red", "blue")
Example:
>>> poly = ((0,0),(10,-5),(0,10),(-10,-5))
>>> s = Shape("compound")
>>> s.addcomponent(poly, "red", "blue")
>>> # .. add more components and then use register_shape()
"""
if self._type != "compound":
raise TurtleGraphicsError("Cannot add component to %s Shape"
% self._type)
if outline is None:
outline = fill
self._data.append([poly, fill, outline])
class TurtleScreen(TurtleScreenBase):
"""Provides screen oriented methods like setbg etc.
Only relies upon the methods of TurtleScreenBase and NOT
upon components of the underlying graphics toolkit -
which is Tkinter in this case.
"""
_RUNNING = True
def __init__(self, cv, mode=_CFG["mode"],
colormode=_CFG["colormode"], delay=_CFG["delay"]):
self._shapes = {
"arrow" : Shape("polygon", ((-10,0), (10,0), (0,10))),
"turtle" : Shape("polygon", ((0,16), (-2,14), (-1,10), (-4,7),
(-7,9), (-9,8), (-6,5), (-7,1), (-5,-3), (-8,-6),
(-6,-8), (-4,-5), (0,-7), (4,-5), (6,-8), (8,-6),
(5,-3), (7,1), (6,5), (9,8), (7,9), (4,7), (1,10),
(2,14))),
"circle" : Shape("polygon", ((10,0), (9.51,3.09), (8.09,5.88),
(5.88,8.09), (3.09,9.51), (0,10), (-3.09,9.51),
(-5.88,8.09), (-8.09,5.88), (-9.51,3.09), (-10,0),
(-9.51,-3.09), (-8.09,-5.88), (-5.88,-8.09),
(-3.09,-9.51), (-0.00,-10.00), (3.09,-9.51),
(5.88,-8.09), (8.09,-5.88), (9.51,-3.09))),
"square" : Shape("polygon", ((10,-10), (10,10), (-10,10),
(-10,-10))),
"triangle" : Shape("polygon", ((10,-5.77), (0,11.55),
(-10,-5.77))),
"classic": Shape("polygon", ((0,0),(-5,-9),(0,-7),(5,-9))),
"blank" : Shape("image", None) #self._blankimage())
}
self._bgpics = {"nopic" : ""}
TurtleScreenBase.__init__(self, cv)
self._mode = mode
self._delayvalue = delay
self._colormode = _CFG["colormode"]
self._keys = []
self.clear()
def clear(self):
"""Delete all drawings and all turtles from the TurtleScreen.
No argument.
Reset empty TurtleScreen to its initial state: white background,
no backgroundimage, no eventbindings and tracing on.
Example (for a TurtleScreen instance named screen):
>>> screen.clear()
Note: this method is not available as function.
"""
self._delayvalue = _CFG["delay"]
self._colormode = _CFG["colormode"]
self._delete("all")
self._bgpic = self._createimage("")
self._bgpicname = "nopic"
self._tracing = 1
self._updatecounter = 0
self._turtles = []
self.bgcolor("white")
#for btn in 1, 2, 3:
# self.onclick(None, btn)
#self.onkeypress(None)
#for key in self._keys[:]:
# self.onkey(None, key)
# self.onkeypress(None, key)
Turtle._pen = None
def mode(self, mode=None):
"""Set turtle-mode ('standard', 'logo' or 'world') and perform reset.
Optional argument:
        mode -- one of the strings 'standard', 'logo' or 'world'
Mode 'standard' is compatible with turtle.py.
Mode 'logo' is compatible with most Logo-Turtle-Graphics.
Mode 'world' uses userdefined 'worldcoordinates'. *Attention*: in
this mode angles appear distorted if x/y unit-ratio doesn't equal 1.
If mode is not given, return the current mode.
        Mode         Initial turtle heading     positive angles
        ------------|-------------------------|-------------------
        'standard'   to the right (east)       counterclockwise
        'logo'       upward (north)            clockwise
Examples:
>>> mode('logo') # resets turtle heading to north
>>> mode()
'logo'
"""
if mode is None:
return self._mode
mode = mode.lower()
if mode not in ["standard", "logo", "world"]:
raise TurtleGraphicsError("No turtle-graphics-mode %s" % mode)
self._mode = mode
if mode in ["standard", "logo"]:
self._setscrollregion(-self.canvwidth//2, -self.canvheight//2,
self.canvwidth//2, self.canvheight//2)
self.xscale = self.yscale = 1.0
self.reset()
def setworldcoordinates(self, llx, lly, urx, ury):
"""Set up a user defined coordinate-system.
Arguments:
llx -- a number, x-coordinate of lower left corner of canvas
lly -- a number, y-coordinate of lower left corner of canvas
urx -- a number, x-coordinate of upper right corner of canvas
ury -- a number, y-coordinate of upper right corner of canvas
        Set up a user coordinate system and switch to mode 'world' if necessary.
This performs a screen.reset. If mode 'world' is already active,
all drawings are redrawn according to the new coordinates.
        But ATTENTION: in user-defined coordinate systems angles may appear
distorted. (see Screen.mode())
Example (for a TurtleScreen instance named screen):
>>> screen.setworldcoordinates(-10,-0.5,50,1.5)
>>> for _ in range(36):
... left(10)
... forward(0.5)
"""
if self.mode() != "world":
self.mode("world")
xspan = float(urx - llx)
yspan = float(ury - lly)
wx, wy = self._window_size()
self.screensize(wx-20, wy-20)
oldxscale, oldyscale = self.xscale, self.yscale
self.xscale = self.canvwidth / xspan
self.yscale = self.canvheight / yspan
srx1 = llx * self.xscale
sry1 = -ury * self.yscale
srx2 = self.canvwidth + srx1
sry2 = self.canvheight + sry1
self._setscrollregion(srx1, sry1, srx2, sry2)
self._rescale(self.xscale/oldxscale, self.yscale/oldyscale)
#self.update()
def register_shape(self, name, shape=None):
"""Adds a turtle shape to TurtleScreen's shapelist.
Arguments:
(1) name is the name of a gif-file and shape is None.
Installs the corresponding image shape.
!! Image-shapes DO NOT rotate when turning the turtle,
!! so they do not display the heading of the turtle!
(2) name is an arbitrary string and shape is a tuple
of pairs of coordinates. Installs the corresponding
polygon shape
(3) name is an arbitrary string and shape is a
(compound) Shape object. Installs the corresponding
compound shape.
To use a shape, you have to issue the command shape(shapename).
call: register_shape("turtle.gif")
--or: register_shape("tri", ((0,0), (10,10), (-10,10)))
Example (for a TurtleScreen instance named screen):
>>> screen.register_shape("triangle", ((5,-3),(0,5),(-5,-3)))
"""
if shape is None:
# image
if name.lower().endswith(".gif"):
shape = Shape("image", self._image(name))
else:
raise TurtleGraphicsError("Bad arguments for register_shape.\n"
+ "Use help(register_shape)" )
elif isinstance(shape, tuple):
shape = Shape("polygon", shape)
## else shape assumed to be Shape-instance
self._shapes[name] = shape
def _colorstr(self, color):
"""Return color string corresponding to args.
Argument may be a string or a tuple of three
numbers corresponding to actual colormode,
i.e. in the range 0<=n<=colormode.
If the argument doesn't represent a color,
an error is raised.
"""
if len(color) == 1:
color = color[0]
if isinstance(color, str):
if self._iscolorstring(color) or color == "":
return color
else:
raise TurtleGraphicsError("bad color string: %s" % str(color))
try:
r, g, b = color
except:
raise TurtleGraphicsError("bad color arguments: %s" % str(color))
if self._colormode == 1.0:
r, g, b = [round(255.0*x) for x in (r, g, b)]
if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)):
raise TurtleGraphicsError("bad color sequence: %s" % str(color))
return "#%02x%02x%02x" % (r, g, b)
def _color(self, cstr):
if not cstr.startswith("#"):
return cstr
if len(cstr) == 7:
cl = [int(cstr[i:i+2], 16) for i in (1, 3, 5)]
elif len(cstr) == 4:
cl = [16*int(cstr[h], 16) for h in cstr[1:]]
else:
raise TurtleGraphicsError("bad colorstring: %s" % cstr)
return tuple([c * self._colormode/255 for c in cl])
def colormode(self, cmode=None):
"""Return the colormode or set it to 1.0 or 255.
Optional argument:
cmode -- one of the values 1.0 or 255
r, g, b values of colortriples have to be in range 0..cmode.
Example (for a TurtleScreen instance named screen):
>>> screen.colormode()
1.0
>>> screen.colormode(255)
>>> pencolor(240,160,80)
"""
if cmode is None:
return self._colormode
if cmode == 1.0:
self._colormode = float(cmode)
elif cmode == 255:
self._colormode = int(cmode)
def reset(self):
"""Reset all Turtles on the Screen to their initial state.
No argument.
Example (for a TurtleScreen instance named screen):
>>> screen.reset()
"""
for turtle in self._turtles:
turtle._setmode(self._mode)
turtle.reset()
def turtles(self):
"""Return the list of turtles on the screen.
Example (for a TurtleScreen instance named screen):
>>> screen.turtles()
[<turtle.Turtle object at 0x00E11FB0>]
"""
return self._turtles
def bgcolor(self, *args):
"""Set or return backgroundcolor of the TurtleScreen.
Arguments (if given): a color string or three numbers
in the range 0..colormode or a 3-tuple of such numbers.
Example (for a TurtleScreen instance named screen):
>>> screen.bgcolor("orange")
>>> screen.bgcolor()
'orange'
>>> screen.bgcolor(0.5,0,0.5)
>>> screen.bgcolor()
'#800080'
"""
if args:
color = self._colorstr(args)
else:
color = None
color = self._bgcolor(color)
if color is not None:
color = self._color(color)
return color
def tracer(self, n=None, delay=None):
"""Turns turtle animation on/off and set delay for update drawings.
Optional arguments:
n -- nonnegative integer
delay -- nonnegative integer
If n is given, only each n-th regular screen update is really performed.
(Can be used to accelerate the drawing of complex graphics.)
Second arguments sets delay value (see RawTurtle.delay())
Example (for a TurtleScreen instance named screen):
>>> screen.tracer(8, 25)
>>> dist = 2
>>> for i in range(200):
... fd(dist)
... rt(90)
... dist += 2
"""
if n is None:
return self._tracing
self._tracing = int(n)
self._updatecounter = 0
if delay is not None:
self._delayvalue = int(delay)
if self._tracing:
self.update()
def delay(self, delay=None):
""" Return or set the drawing delay in milliseconds.
Optional argument:
delay -- positive integer
Example (for a TurtleScreen instance named screen):
>>> screen.delay(15)
>>> screen.delay()
15
"""
if delay is None:
return self._delayvalue
self._delayvalue = int(delay)
def _incrementudc(self):
"""Increment update counter."""
if not TurtleScreen._RUNNING:
            TurtleScreen._RUNNING = True
raise Terminator
if self._tracing > 0:
self._updatecounter += 1
self._updatecounter %= self._tracing
def update(self):
"""Perform a TurtleScreen update.
"""
return
tracing = self._tracing
self._tracing = True
for t in self.turtles():
#t._update_data()
t._drawturtle()
self._tracing = tracing
self._update()
def window_width(self):
""" Return the width of the turtle window.
Example (for a TurtleScreen instance named screen):
>>> screen.window_width()
640
"""
return self._window_size()[0]
def window_height(self):
""" Return the height of the turtle window.
Example (for a TurtleScreen instance named screen):
>>> screen.window_height()
480
"""
return self._window_size()[1]
def getcanvas(self):
"""Return the Canvas of this TurtleScreen.
No argument.
Example (for a Screen instance named screen):
>>> cv = screen.getcanvas()
>>> cv
<turtle.ScrolledCanvas instance at 0x010742D8>
"""
return self.cv
def getshapes(self):
"""Return a list of names of all currently available turtle shapes.
No argument.
Example (for a TurtleScreen instance named screen):
>>> screen.getshapes()
['arrow', 'blank', 'circle', ... , 'turtle']
"""
return sorted(self._shapes.keys())
def onclick(self, fun, btn=1, add=None):
"""Bind fun to mouse-click event on canvas.
Arguments:
fun -- a function with two arguments, the coordinates of the
clicked point on the canvas.
num -- the number of the mouse-button, defaults to 1
Example (for a TurtleScreen instance named screen)
>>> screen.onclick(goto)
>>> # Subsequently clicking into the TurtleScreen will
>>> # make the turtle move to the clicked point.
>>> screen.onclick(None)
"""
self._onscreenclick(fun, btn, add)
def onkey(self, fun, key):
"""Bind fun to key-release event of key.
Arguments:
fun -- a function with no arguments
key -- a string: key (e.g. "a") or key-symbol (e.g. "space")
In order to be able to register key-events, TurtleScreen
must have focus. (See method listen.)
Example (for a TurtleScreen instance named screen):
>>> def f():
... fd(50)
... lt(60)
...
>>> screen.onkey(f, "Up")
>>> screen.listen()
Subsequently the turtle can be moved by repeatedly pressing
the up-arrow key, consequently drawing a hexagon
"""
if fun is None:
if key in self._keys:
self._keys.remove(key)
elif key not in self._keys:
self._keys.append(key)
self._onkeyrelease(fun, key)
def onkeypress(self, fun, key=None):
"""Bind fun to key-press event of key if key is given,
or to any key-press-event if no key is given.
Arguments:
fun -- a function with no arguments
key -- a string: key (e.g. "a") or key-symbol (e.g. "space")
In order to be able to register key-events, TurtleScreen
must have focus. (See method listen.)
Example (for a TurtleScreen instance named screen
and a Turtle instance named turtle):
>>> def f():
... fd(50)
... lt(60)
...
>>> screen.onkeypress(f, "Up")
>>> screen.listen()
Subsequently the turtle can be moved by repeatedly pressing
        the up-arrow key, or by keeping the up-arrow key pressed,
        consequently drawing a hexagon.
"""
if fun is None:
if key in self._keys:
self._keys.remove(key)
elif key is not None and key not in self._keys:
self._keys.append(key)
self._onkeypress(fun, key)
def listen(self, xdummy=None, ydummy=None):
"""Set focus on TurtleScreen (in order to collect key-events)
No arguments.
Dummy arguments are provided in order
to be able to pass listen to the onclick method.
Example (for a TurtleScreen instance named screen):
>>> screen.listen()
"""
self._listen()
def ontimer(self, fun, t=0):
"""Install a timer, which calls fun after t milliseconds.
Arguments:
fun -- a function with no arguments.
t -- a number >= 0
Example (for a TurtleScreen instance named screen):
>>> running = True
>>> def f():
... if running:
... fd(50)
... lt(60)
... screen.ontimer(f, 250)
...
>>> f() # makes the turtle marching around
>>> running = False
"""
self._ontimer(fun, t)
def bgpic(self, picname=None):
"""Set background image or return name of current backgroundimage.
Optional argument:
picname -- a string, name of a gif-file or "nopic".
If picname is a filename, set the corresponding image as background.
If picname is "nopic", delete backgroundimage, if present.
If picname is None, return the filename of the current backgroundimage.
Example (for a TurtleScreen instance named screen):
>>> screen.bgpic()
'nopic'
>>> screen.bgpic("landscape.gif")
>>> screen.bgpic()
'landscape.gif'
"""
if picname is None:
return self._bgpicname
if picname not in self._bgpics:
self._bgpics[picname] = self._image(picname)
self._setbgpic(self._bgpic, self._bgpics[picname])
self._bgpicname = picname
def screensize(self, canvwidth=None, canvheight=None, bg=None):
"""Resize the canvas the turtles are drawing on.
Optional arguments:
canvwidth -- positive integer, new width of canvas in pixels
canvheight -- positive integer, new height of canvas in pixels
bg -- colorstring or color-tuple, new backgroundcolor
If no arguments are given, return current (canvaswidth, canvasheight)
Do not alter the drawing window. To observe hidden parts of
the canvas use the scrollbars. (Can make visible those parts
of a drawing, which were outside the canvas before!)
Example (for a Turtle instance named turtle):
>>> turtle.screensize(2000,1500)
>>> # e.g. to search for an erroneously escaped turtle ;-)
"""
return self._resize(canvwidth, canvheight, bg)
onscreenclick = onclick
resetscreen = reset
clearscreen = clear
addshape = register_shape
onkeyrelease = onkey
class TNavigator:
"""Navigation part of the RawTurtle.
Implements methods for turtle movement.
"""
START_ORIENTATION = {
"standard": Vec2D(1.0, 0.0),
"world" : Vec2D(1.0, 0.0),
"logo" : Vec2D(0.0, 1.0) }
DEFAULT_MODE = "standard"
DEFAULT_ANGLEOFFSET = 0
DEFAULT_ANGLEORIENT = 1
def __init__(self, mode=DEFAULT_MODE):
self._angleOffset = self.DEFAULT_ANGLEOFFSET
self._angleOrient = self.DEFAULT_ANGLEORIENT
self._mode = mode
self.undobuffer = None
self.degrees()
self._mode = None
self._setmode(mode)
TNavigator.reset(self)
def reset(self):
"""reset turtle to its initial values
Will be overwritten by parent class
"""
self._position = Vec2D(0.0, 0.0)
self._orient = TNavigator.START_ORIENTATION[self._mode]
def _setmode(self, mode=None):
"""Set turtle-mode to 'standard', 'world' or 'logo'.
"""
if mode is None:
return self._mode
if mode not in ["standard", "logo", "world"]:
return
self._mode = mode
if mode in ["standard", "world"]:
self._angleOffset = 0
self._angleOrient = 1
else: # mode == "logo":
self._angleOffset = self._fullcircle/4.
self._angleOrient = -1
def _setDegreesPerAU(self, fullcircle):
"""Helper function for degrees() and radians()"""
self._fullcircle = fullcircle
self._degreesPerAU = 360/fullcircle
if self._mode == "standard":
self._angleOffset = 0
else:
self._angleOffset = fullcircle/4.
def degrees(self, fullcircle=360.0):
""" Set angle measurement units to degrees.
Optional argument:
fullcircle - a number
Set angle measurement units, i. e. set number
        of 'degrees' for a full circle. Default value is
360 degrees.
Example (for a Turtle instance named turtle):
>>> turtle.left(90)
>>> turtle.heading()
90
Change angle measurement unit to grad (also known as gon,
grade, or gradian and equals 1/100-th of the right angle.)
>>> turtle.degrees(400.0)
>>> turtle.heading()
100
"""
self._setDegreesPerAU(fullcircle)
def radians(self):
""" Set the angle measurement units to radians.
No arguments.
Example (for a Turtle instance named turtle):
>>> turtle.heading()
90
>>> turtle.radians()
>>> turtle.heading()
1.5707963267948966
"""
self._setDegreesPerAU(2*math.pi)
def _go(self, distance):
"""move turtle forward by specified distance"""
#console.log('_go')
ende = self._position + self._orient * distance
self._goto(ende)
def _rotate(self, angle):
"""Turn turtle counterclockwise by specified angle if angle > 0."""
#console.log('_rotate')
angle *= self._degreesPerAU
self._orient = self._orient.rotate(angle)
def _goto(self, end):
"""move turtle to position end."""
#console.log('_goto')
self._position = end
def forward(self, distance):
"""Move the turtle forward by the specified distance.
Aliases: forward | fd
Argument:
distance -- a number (integer or float)
Move the turtle forward by the specified distance, in the direction
the turtle is headed.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00, 0.00)
>>> turtle.forward(25)
>>> turtle.position()
(25.00,0.00)
>>> turtle.forward(-75)
>>> turtle.position()
(-50.00,0.00)
"""
self._go(distance)
def back(self, distance):
"""Move the turtle backward by distance.
Aliases: back | backward | bk
Argument:
distance -- a number
        Move the turtle backward by distance, opposite to the direction the
turtle is headed. Do not change the turtle's heading.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00, 0.00)
>>> turtle.backward(30)
>>> turtle.position()
(-30.00, 0.00)
"""
self._go(-distance)
def right(self, angle):
"""Turn turtle right by angle units.
Aliases: right | rt
Argument:
angle -- a number (integer or float)
Turn turtle right by angle units. (Units are by default degrees,
but can be set via the degrees() and radians() functions.)
Angle orientation depends on mode. (See this.)
Example (for a Turtle instance named turtle):
>>> turtle.heading()
22.0
>>> turtle.right(45)
>>> turtle.heading()
337.0
"""
self._rotate(-angle)
def left(self, angle):
"""Turn turtle left by angle units.
Aliases: left | lt
Argument:
angle -- a number (integer or float)
Turn turtle left by angle units. (Units are by default degrees,
but can be set via the degrees() and radians() functions.)
Angle orientation depends on mode. (See this.)
Example (for a Turtle instance named turtle):
>>> turtle.heading()
22.0
>>> turtle.left(45)
>>> turtle.heading()
67.0
"""
self._rotate(angle)
def pos(self):
"""Return the turtle's current location (x,y), as a Vec2D-vector.
Aliases: pos | position
No arguments.
Example (for a Turtle instance named turtle):
>>> turtle.pos()
(0.00, 240.00)
"""
return self._position
def xcor(self):
""" Return the turtle's x coordinate.
No arguments.
Example (for a Turtle instance named turtle):
>>> reset()
>>> turtle.left(60)
>>> turtle.forward(100)
>>> print turtle.xcor()
50.0
"""
return self._position[0]
def ycor(self):
""" Return the turtle's y coordinate
---
No arguments.
Example (for a Turtle instance named turtle):
>>> reset()
>>> turtle.left(60)
>>> turtle.forward(100)
>>> print turtle.ycor()
86.6025403784
"""
return self._position[1]
def goto(self, x, y=None):
"""Move turtle to an absolute position.
Aliases: setpos | setposition | goto:
Arguments:
x -- a number or a pair/vector of numbers
y -- a number or None
call: goto(x, y) # two coordinates
--or: goto((x, y)) # a pair (tuple) of coordinates
--or: goto(vec) # e.g. as returned by pos()
Move turtle to an absolute position. If the pen is down,
a line will be drawn. The turtle's orientation does not change.
Example (for a Turtle instance named turtle):
>>> tp = turtle.pos()
>>> tp
(0.00, 0.00)
>>> turtle.setpos(60,30)
>>> turtle.pos()
(60.00,30.00)
>>> turtle.setpos((20,80))
>>> turtle.pos()
(20.00,80.00)
>>> turtle.setpos(tp)
>>> turtle.pos()
(0.00,0.00)
"""
if y is None:
self._goto(Vec2D(*x))
else:
self._goto(Vec2D(x, y))
def home(self):
"""Move turtle to the origin - coordinates (0,0).
No arguments.
Move turtle to the origin - coordinates (0,0) and set its
heading to its start-orientation (which depends on mode).
Example (for a Turtle instance named turtle):
>>> turtle.home()
"""
self.goto(0, 0)
self.setheading(0)
def setx(self, x):
"""Set the turtle's first coordinate to x
Argument:
x -- a number (integer or float)
Set the turtle's first coordinate to x, leave second coordinate
unchanged.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00, 240.00)
>>> turtle.setx(10)
>>> turtle.position()
(10.00, 240.00)
"""
self._goto(Vec2D(x, self._position[1]))
def sety(self, y):
"""Set the turtle's second coordinate to y
Argument:
y -- a number (integer or float)
Set the turtle's second coordinate to y, first coordinate remains
unchanged.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00, 40.00)
>>> turtle.sety(-10)
>>> turtle.position()
(0.00, -10.00)
"""
self._goto(Vec2D(self._position[0], y))
def distance(self, x, y=None):
"""Return the distance from the turtle to (x,y) in turtle step units.
Arguments:
x -- a number or a pair/vector of numbers or a turtle instance
y -- a number or None
call: distance(x, y) # two coordinates
--or: distance((x, y)) # a pair (tuple) of coordinates
--or: distance(vec) # e.g. as returned by pos()
--or: distance(mypen) # where mypen is another turtle
Example (for a Turtle instance named turtle):
>>> turtle.pos()
(0.00, 0.00)
>>> turtle.distance(30,40)
50.0
>>> pen = Turtle()
>>> pen.forward(77)
>>> turtle.distance(pen)
77.0
"""
if y is not None:
pos = Vec2D(x, y)
if isinstance(x, Vec2D):
pos = x
elif isinstance(x, tuple):
pos = Vec2D(*x)
elif isinstance(x, TNavigator):
pos = x._position
return abs(pos - self._position)
def towards(self, x, y=None):
"""Return the angle of the line from the turtle's position to (x, y).
Arguments:
x -- a number or a pair/vector of numbers or a turtle instance
y -- a number or None
call: towards(x, y) # two coordinates
--or: towards((x, y)) # a pair (tuple) of coordinates
--or: towards(vec) # e.g. as returned by pos()
--or: towards(mypen) # where mypen is another turtle
Return the angle, between the line from turtle-position to position
specified by x, y and the turtle's start orientation. (Depends on
modes - "standard" or "logo")
Example (for a Turtle instance named turtle):
>>> turtle.pos()
(10.00, 10.00)
>>> turtle.towards(0,0)
225.0
"""
if y is not None:
pos = Vec2D(x, y)
if isinstance(x, Vec2D):
pos = x
elif isinstance(x, tuple):
pos = Vec2D(*x)
elif isinstance(x, TNavigator):
pos = x._position
x, y = pos - self._position
result = round(math.atan2(y, x)*180.0/math.pi, 10) % 360.0
result /= self._degreesPerAU
return (self._angleOffset + self._angleOrient*result) % self._fullcircle
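# Illustrative note (not in the original source, assuming 360-degree units):
# the conversion above maps the raw atan2 angle into the current mode's
# units. Pointing due east from the origin, atan2 gives 0.0, so towards()
# returns 0.0 in "standard" mode but (90 - 0) % 360 = 90.0 in "logo" mode,
# where 0 is north and angles grow clockwise.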
def heading(self):
""" Return the turtle's current heading.
No arguments.
Example (for a Turtle instance named turtle):
>>> turtle.left(67)
>>> turtle.heading()
67.0
"""
x, y = self._orient
result = round(math.atan2(y, x)*180.0/math.pi, 10) % 360.0
result /= self._degreesPerAU
return (self._angleOffset + self._angleOrient*result) % self._fullcircle
def setheading(self, to_angle):
"""Set the orientation of the turtle to to_angle.
Aliases: setheading | seth
Argument:
to_angle -- a number (integer or float)
Set the orientation of the turtle to to_angle.
Here are some common directions in degrees:
standard - mode: logo-mode:
-------------------|--------------------
0 - east 0 - north
90 - north 90 - east
180 - west 180 - south
270 - south 270 - west
Example (for a Turtle instance named turtle):
>>> turtle.setheading(90)
>>> turtle.heading()
90
"""
angle = (to_angle - self.heading())*self._angleOrient
full = self._fullcircle
angle = (angle+full/2.)%full - full/2.
self._rotate(angle)
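# Sketch of the normalization above (assuming _fullcircle == 360): turning
# from heading 350 to 10 gives angle = -340, and
#   (-340 + 180) % 360 - 180 == 20
# so the turtle makes the short 20-degree left turn instead of a 340-degree
# right turn.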
def circle(self, radius, extent = None, steps = None):
""" Draw a circle with given radius.
Arguments:
radius -- a number
extent (optional) -- a number
steps (optional) -- an integer
Draw a circle with given radius. The center is radius units left
of the turtle; extent - an angle - determines which part of the
circle is drawn. If extent is not given, draw the entire circle.
If extent is not a full circle, one endpoint of the arc is the
current pen position. Draw the arc in counterclockwise direction
if radius is positive, otherwise in clockwise direction. Finally
the direction of the turtle is changed by the amount of extent.
As the circle is approximated by an inscribed regular polygon,
steps determines the number of steps to use. If not given,
it will be calculated automatically. May be used to draw regular
polygons.
call: circle(radius) # full circle
--or: circle(radius, extent) # arc
--or: circle(radius, extent, steps)
--or: circle(radius, steps=6) # 6-sided polygon
Example (for a Turtle instance named turtle):
>>> turtle.circle(50)
>>> turtle.circle(120, 180) # semicircle
"""
if self.undobuffer:
self.undobuffer.push(["seq"])
self.undobuffer.cumulate = True
speed = self.speed()
if extent is None:
extent = self._fullcircle
if steps is None:
frac = abs(extent)/self._fullcircle
steps = 1+int(min(11+abs(radius)/6.0, 59.0)*frac)
w = 1.0 * extent / steps
w2 = 0.5 * w
l = 2.0 * radius * math.sin(w2*math.pi/180.0*self._degreesPerAU)
if radius < 0:
l, w, w2 = -l, -w, -w2
tr = self._tracer()
dl = self._delay()
if speed == 0:
self._tracer(0, 0)
else:
self.speed(0)
self._rotate(w2)
for i in range(steps):
self.speed(speed)
self._go(l)
self.speed(0)
self._rotate(w)
self._rotate(-w2)
if speed == 0:
self._tracer(tr, dl)
self.speed(speed)
if self.undobuffer:
self.undobuffer.cumulate = False
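# Worked example (a sketch, not part of the original code): circle(50) with
# the defaults computes
#   frac  = 360/360 = 1.0
#   steps = 1 + int(min(11 + 50/6.0, 59.0)) = 20
#   w     = 360.0/20 = 18.0 degrees per step
#   l     = 2*50*sin(9 deg) ~= 15.64
# so the "circle" is really an inscribed regular 20-gon with sides of about
# 15.64 turtle steps.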
## three dummy methods to be implemented by child class:
def speed(self, s=0):
"""dummy method - to be overwritten by child class"""
def _tracer(self, a=None, b=None):
"""dummy method - to be overwritten by child class"""
def _delay(self, n=None):
"""dummy method - to be overwritten by child class"""
fd = forward
bk = back
backward = back
rt = right
lt = left
position = pos
setpos = goto
setposition = goto
seth = setheading
class TPen:
"""Drawing part of the RawTurtle.
Implements drawing properties.
"""
def __init__(self, resizemode=_CFG["resizemode"]):
self._resizemode = resizemode # or "user" or "noresize"
self.undobuffer = None
TPen._reset(self)
def _reset(self, pencolor=_CFG["pencolor"],
fillcolor=_CFG["fillcolor"]):
self._pensize = 1
self._shown = True
self._pencolor = pencolor
self._fillcolor = fillcolor
self._drawing = True
self._speed = 3
self._stretchfactor = (1., 1.)
self._shearfactor = 0.
self._tilt = 0.
self._shapetrafo = (1., 0., 0., 1.)
self._outlinewidth = 1
def resizemode(self, rmode=None):
"""Set resizemode to one of the values: "auto", "user", "noresize".
(Optional) Argument:
rmode -- one of the strings "auto", "user", "noresize"
Different resizemodes have the following effects:
- "auto" adapts the appearance of the turtle
corresponding to the value of pensize.
- "user" adapts the appearance of the turtle according to the
values of stretchfactor and outlinewidth (outline),
which are set by shapesize()
- "noresize" no adaption of the turtle's appearance takes place.
If no argument is given, return current resizemode.
resizemode("user") is called by a call of shapesize with arguments.
Examples (for a Turtle instance named turtle):
>>> turtle.resizemode("noresize")
>>> turtle.resizemode()
'noresize'
"""
if rmode is None:
return self._resizemode
rmode = rmode.lower()
if rmode in ["auto", "user", "noresize"]:
self.pen(resizemode=rmode)
def pensize(self, width=None):
"""Set or return the line thickness.
Aliases: pensize | width
Argument:
width -- positive number
Set the line thickness to width or return it. If resizemode is set
to "auto" and turtleshape is a polygon, that polygon is drawn with
the same line thickness. If no argument is given, current pensize
is returned.
Example (for a Turtle instance named turtle):
>>> turtle.pensize()
1
>>> turtle.pensize(10) # from here on lines of width 10 are drawn
"""
if width is None:
return self._pensize
self.pen(pensize=width)
def penup(self):
"""Pull the pen up -- no drawing when moving.
Aliases: penup | pu | up
No argument
Example (for a Turtle instance named turtle):
>>> turtle.penup()
"""
if not self._drawing:
return
self.pen(pendown=False)
def pendown(self):
"""Pull the pen down -- drawing when moving.
Aliases: pendown | pd | down
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.pendown()
"""
if self._drawing:
return
self.pen(pendown=True)
def isdown(self):
"""Return True if pen is down, False if it's up.
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.penup()
>>> turtle.isdown()
False
>>> turtle.pendown()
>>> turtle.isdown()
True
"""
return self._drawing
def speed(self, speed=None):
""" Return or set the turtle's speed.
Optional argument:
speed -- an integer in the range 0..10 or a speedstring (see below)
Set the turtle's speed to an integer value in the range 0 .. 10.
If no argument is given: return current speed.
If input is a number greater than 10 or smaller than 0.5,
speed is set to 0.
Speedstrings are mapped to speedvalues in the following way:
'fastest' : 0
'fast' : 10
'normal' : 6
'slow' : 3
'slowest' : 1
speeds from 1 to 10 enforce increasingly faster animation of
line drawing and turtle turning.
Attention:
speed = 0 : *no* animation takes place. forward/back makes turtle jump
and likewise left/right make the turtle turn instantly.
Example (for a Turtle instance named turtle):
>>> turtle.speed(3)
"""
speeds = {'fastest':0, 'fast':10, 'normal':6, 'slow':3, 'slowest':1 }
if speed is None:
return self._speed
if speed in speeds:
speed = speeds[speed]
elif 0.5 < speed < 10.5:
speed = int(round(speed))
else:
speed = 0
self.pen(speed=speed)
def color(self, *args):
"""Return or set the pencolor and fillcolor.
Arguments:
Several input formats are allowed.
They use 0, 1, 2, or 3 arguments as follows:
color()
Return the current pencolor and the current fillcolor
as a pair of color specification strings as are returned
by pencolor and fillcolor.
color(colorstring), color((r,g,b)), color(r,g,b)
inputs as in pencolor, set both, fillcolor and pencolor,
to the given value.
color(colorstring1, colorstring2),
color((r1,g1,b1), (r2,g2,b2))
equivalent to pencolor(colorstring1) and fillcolor(colorstring2)
and analogously, if the other input format is used.
If turtleshape is a polygon, outline and interior of that polygon
is drawn with the newly set colors.
For more info see: pencolor, fillcolor
Example (for a Turtle instance named turtle):
>>> turtle.color('red', 'green')
>>> turtle.color()
('red', 'green')
>>> colormode(255)
>>> color((40, 80, 120), (160, 200, 240))
>>> color()
('#285078', '#a0c8f0')
"""
if args:
l = len(args)
if l == 1:
pcolor = fcolor = args[0]
elif l == 2:
pcolor, fcolor = args
elif l == 3:
pcolor = fcolor = args
pcolor = self._colorstr(pcolor)
fcolor = self._colorstr(fcolor)
self.pen(pencolor=pcolor, fillcolor=fcolor)
else:
return self._color(self._pencolor), self._color(self._fillcolor)
def pencolor(self, *args):
""" Return or set the pencolor.
Arguments:
Four input formats are allowed:
- pencolor()
Return the current pencolor as color specification string,
possibly in hex-number format (see example).
May be used as input to another color/pencolor/fillcolor call.
- pencolor(colorstring)
s is a Tk color specification string, such as "red" or "yellow"
- pencolor((r, g, b))
*a tuple* of r, g, and b, which represent an RGB color,
and each of r, g, and b are in the range 0..colormode,
where colormode is either 1.0 or 255
- pencolor(r, g, b)
r, g, and b represent an RGB color, and each of r, g, and b
are in the range 0..colormode
If turtleshape is a polygon, the outline of that polygon is drawn
with the newly set pencolor.
Example (for a Turtle instance named turtle):
>>> turtle.pencolor('brown')
>>> tup = (0.2, 0.8, 0.55)
>>> turtle.pencolor(tup)
>>> turtle.pencolor()
'#33cc8c'
"""
if args:
color = self._colorstr(args)
if color == self._pencolor:
return
self.pen(pencolor=color)
else:
return self._color(self._pencolor)
def fillcolor(self, *args):
""" Return or set the fillcolor.
Arguments:
Four input formats are allowed:
- fillcolor()
Return the current fillcolor as color specification string,
possibly in hex-number format (see example).
May be used as input to another color/pencolor/fillcolor call.
- fillcolor(colorstring)
s is a Tk color specification string, such as "red" or "yellow"
- fillcolor((r, g, b))
*a tuple* of r, g, and b, which represent an RGB color,
and each of r, g, and b are in the range 0..colormode,
where colormode is either 1.0 or 255
- fillcolor(r, g, b)
r, g, and b represent an RGB color, and each of r, g, and b
are in the range 0..colormode
If turtleshape is a polygon, the interior of that polygon is drawn
with the newly set fillcolor.
Example (for a Turtle instance named turtle):
>>> turtle.fillcolor('violet')
>>> col = turtle.pencolor()
>>> turtle.fillcolor(col)
>>> turtle.fillcolor(0, .5, 0)
"""
if args:
color = self._colorstr(args)
if color == self._fillcolor:
return
self.pen(fillcolor=color)
else:
return self._color(self._fillcolor)
def showturtle(self):
"""Makes the turtle visible.
Aliases: showturtle | st
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.hideturtle()
>>> turtle.showturtle()
"""
self.pen(shown=True)
def hideturtle(self):
"""Makes the turtle invisible.
Aliases: hideturtle | ht
No argument.
It's a good idea to do this while you're in the
middle of a complicated drawing, because hiding
the turtle speeds up the drawing observably.
Example (for a Turtle instance named turtle):
>>> turtle.hideturtle()
"""
self.pen(shown=False)
def isvisible(self):
"""Return True if the Turtle is shown, False if it's hidden.
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.hideturtle()
>>> print turtle.isvisible()
False
"""
return self._shown
def pen(self, pen=None, **pendict):
"""Return or set the pen's attributes.
Arguments:
pen -- a dictionary with some or all of the below listed keys.
**pendict -- one or more keyword-arguments with the below
listed keys as keywords.
Return or set the pen's attributes in a 'pen-dictionary'
with the following key/value pairs:
"shown" : True/False
"pendown" : True/False
"pencolor" : color-string or color-tuple
"fillcolor" : color-string or color-tuple
"pensize" : positive number
"speed" : number in range 0..10
"resizemode" : "auto" or "user" or "noresize"
"stretchfactor": (positive number, positive number)
"shearfactor": number
"outline" : positive number
"tilt" : number
This dictionary can be used as argument for a subsequent
pen()-call to restore the former pen-state. Moreover one
or more of these attributes can be provided as keyword-arguments.
This can be used to set several pen attributes in one statement.
Examples (for a Turtle instance named turtle):
>>> turtle.pen(fillcolor="black", pencolor="red", pensize=10)
>>> turtle.pen()
{'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1,
'pencolor': 'red', 'pendown': True, 'fillcolor': 'black',
'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0}
>>> penstate=turtle.pen()
>>> turtle.color("yellow","")
>>> turtle.penup()
>>> turtle.pen()
{'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1,
'pencolor': 'yellow', 'pendown': False, 'fillcolor': '',
'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0}
>>> p.pen(penstate, fillcolor="green")
>>> p.pen()
{'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1,
'pencolor': 'red', 'pendown': True, 'fillcolor': 'green',
'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0}
"""
_pd = {"shown" : self._shown,
"pendown" : self._drawing,
"pencolor" : self._pencolor,
"fillcolor" : self._fillcolor,
"pensize" : self._pensize,
"speed" : self._speed,
"resizemode" : self._resizemode,
"stretchfactor" : self._stretchfactor,
"shearfactor" : self._shearfactor,
"outline" : self._outlinewidth,
"tilt" : self._tilt
}
#console.log('pen')
if not (pen or pendict):
return _pd
if isinstance(pen, dict):
p = pen
else:
p = {}
p.update(pendict)
_p_buf = {}
for key in p:
_p_buf[key] = _pd[key]
if self.undobuffer:
self.undobuffer.push(("pen", _p_buf))
newLine = False
if "pendown" in p:
if self._drawing != p["pendown"]:
newLine = True
if "pencolor" in p:
if isinstance(p["pencolor"], tuple):
p["pencolor"] = self._colorstr((p["pencolor"],))
if self._pencolor != p["pencolor"]:
newLine = True
if "pensize" in p:
if self._pensize != p["pensize"]:
newLine = True
if newLine:
self._newLine()
if "pendown" in p:
self._drawing = p["pendown"]
if "pencolor" in p:
self._pencolor = p["pencolor"]
if "pensize" in p:
self._pensize = p["pensize"]
if "fillcolor" in p:
if isinstance(p["fillcolor"], tuple):
p["fillcolor"] = self._colorstr((p["fillcolor"],))
self._fillcolor = p["fillcolor"]
if "speed" in p:
self._speed = p["speed"]
if "resizemode" in p:
self._resizemode = p["resizemode"]
if "stretchfactor" in p:
sf = p["stretchfactor"]
if isinstance(sf, (int, float)):
sf = (sf, sf)
self._stretchfactor = sf
if "shearfactor" in p:
self._shearfactor = p["shearfactor"]
if "outline" in p:
self._outlinewidth = p["outline"]
if "shown" in p:
self._shown = p["shown"]
if "tilt" in p:
self._tilt = p["tilt"]
if "stretchfactor" in p or "tilt" in p or "shearfactor" in p:
scx, scy = self._stretchfactor
shf = self._shearfactor
sa, ca = math.sin(self._tilt), math.cos(self._tilt)
self._shapetrafo = ( scx*ca, scy*(shf*ca + sa),
-scx*sa, scy*(ca - shf*sa))
self._update()
## three dummy methods to be implemented by child class:
def _newLine(self, usePos = True):
"""dummy method - to be overwritten by child class"""
def _update(self, count=True, forced=False):
"""dummy method - to be overwritten by child class"""
def _color(self, args):
"""dummy method - to be overwritten by child class"""
def _colorstr(self, args):
"""dummy method - to be overwritten by child class"""
width = pensize
up = penup
pu = penup
pd = pendown
down = pendown
st = showturtle
ht = hideturtle
class _TurtleImage:
"""Helper class: Datatype to store Turtle attributes
"""
def __init__(self, screen, shapeIndex):
self.screen = screen
self._type = None
self._setshape(shapeIndex)
def _setshape(self, shapeIndex):
#console.log("_setshape", self._type)
screen = self.screen
self.shapeIndex = shapeIndex
if self._type == "polygon" == screen._shapes[shapeIndex]._type:
return
if self._type == "image" == screen._shapes[shapeIndex]._type:
return
if self._type in ["image", "polygon"]:
screen._delete(self._item)
elif self._type == "compound":
for item in self._item:
screen._delete(item)
self._type = screen._shapes[shapeIndex]._type
return
#console.log(self._type)
if self._type == "polygon":
self._item = screen._createpoly()
elif self._type == "image":
self._item = screen._createimage(screen._shapes["blank"]._data)
elif self._type == "compound":
self._item = [screen._createpoly() for item in
screen._shapes[shapeIndex]._data]
#console.log(self._item)
class RawTurtle(TPen, TNavigator):
"""Animation part of the RawTurtle.
Puts RawTurtle upon a TurtleScreen and provides tools for
its animation.
"""
screens = []
def __init__(self, canvas=None,
shape=_CFG["shape"],
undobuffersize=_CFG["undobuffersize"],
visible=_CFG["visible"]):
if isinstance(canvas, _Screen):
self.screen = canvas
elif isinstance(canvas, TurtleScreen):
if canvas not in RawTurtle.screens:
RawTurtle.screens.append(canvas)
self.screen = canvas
#elif isinstance(canvas, (ScrolledCanvas, Canvas)):
# for screen in RawTurtle.screens:
# if screen.cv == canvas:
# self.screen = screen
# break
# else:
# self.screen = TurtleScreen(canvas)
# RawTurtle.screens.append(self.screen)
else:
raise TurtleGraphicsError("bad canvas argument %s" % canvas)
screen = self.screen
TNavigator.__init__(self, screen.mode())
TPen.__init__(self)
screen._turtles.append(self)
#self.drawingLineItem = screen._createline()
self.turtle = _TurtleImage(screen, shape)
self._poly = None
self._creatingPoly = False
self._fillitem = self._fillpath = None
self._shown = visible
self._hidden_from_screen = False
#self.currentLineItem = screen._createline()
self.currentLine = [self._position]
#self.items = [] #[self.currentLineItem]
self.stampItems = []
self._undobuffersize = undobuffersize
self.undobuffer = None #Tbuffer(undobuffersize)
#self._update()
def reset(self):
"""Delete the turtle's drawings and restore its default values.
No argument.
Delete the turtle's drawings from the screen, re-center the turtle
and set variables to the default values.
Example (for a Turtle instance named turtle):
>>> turtle.position()
(0.00,-22.00)
>>> turtle.heading()
100.0
>>> turtle.reset()
>>> turtle.position()
(0.00,0.00)
>>> turtle.heading()
0.0
"""
TNavigator.reset(self)
TPen._reset(self)
self._clear()
self._drawturtle()
#self._update()
def setundobuffer(self, size):
"""Set or disable undobuffer.
Argument:
size -- an integer or None
If size is an integer an empty undobuffer of given size is installed.
Size gives the maximum number of turtle-actions that can be undone
by the undo() function.
If size is None, no undobuffer is present.
Example (for a Turtle instance named turtle):
>>> turtle.setundobuffer(42)
"""
if size is None:
self.undobuffer = None
else:
self.undobuffer = Tbuffer(size)
def undobufferentries(self):
"""Return count of entries in the undobuffer.
No argument.
Example (for a Turtle instance named turtle):
>>> while undobufferentries():
... undo()
"""
if self.undobuffer is None:
return 0
return self.undobuffer.nr_of_items()
def _clear(self):
"""Delete all of pen's drawings"""
self._fillitem = self._fillpath = None
#for item in self.items:
# self.screen._delete(item)
#self.currentLineItem = #self.screen._createline()
self.currentLine = []
if self._drawing:
self.currentLine.append(self._position)
#self.items = [self.currentLineItem]
self.clearstamps()
#self.setundobuffer(self._undobuffersize)
def clear(self):
"""Delete the turtle's drawings from the screen. Do not move turtle.
No arguments.
Delete the turtle's drawings from the screen. Do not move turtle.
State and position of the turtle as well as drawings of other
turtles are not affected.
Examples (for a Turtle instance named turtle):
>>> turtle.clear()
"""
self._clear()
#self._update()
#def _update_data(self):
# self.screen._incrementudc()
# if self.screen._updatecounter != 0:
# return
# if len(self.currentLine)>1:
# self.screen._drawline(self.currentLineItem, self.currentLine,
# self._pencolor, self._pensize)
def _update(self):
"""Perform a Turtle-data update.
"""
return
screen = self.screen
if screen._tracing == 0:
return
elif screen._tracing == 1:
#self._update_data()
self._drawturtle()
#screen._update() # TurtleScreenBase
#screen._delay(screen._delayvalue) # TurtleScreenBase
else:
#self._update_data()
if screen._updatecounter == 0:
for t in screen.turtles():
t._drawturtle()
#screen._update()
def _tracer(self, flag=None, delay=None):
Turns turtle animation on/off and sets delay for update drawings.
Optional arguments:
n -- nonnegative integer
delay -- nonnegative integer
If n is given, only each n-th regular screen update is really performed.
(Can be used to accelerate the drawing of complex graphics.)
Second argument sets delay value (see RawTurtle.delay())
Example (for a Turtle instance named turtle):
>>> turtle.tracer(8, 25)
>>> dist = 2
>>> for i in range(200):
... turtle.fd(dist)
... turtle.rt(90)
... dist += 2
"""
return self.screen.tracer(flag, delay)
def _color(self, args):
return self.screen._color(args)
def _colorstr(self, args):
return self.screen._colorstr(args)
def _cc(self, args):
"""Convert colortriples to hexstrings.
"""
if isinstance(args, str):
return args
try:
r, g, b = args
except:
raise TurtleGraphicsError("bad color arguments: %s" % str(args))
if self.screen._colormode == 1.0:
r, g, b = [round(255.0*x) for x in (r, g, b)]
if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)):
raise TurtleGraphicsError("bad color sequence: %s" % str(args))
return "#%02x%02x%02x" % (r, g, b)
def shape(self, name=None):
"""Set turtle shape to shape with given name / return current shapename.
Optional argument:
name -- a string, which is a valid shapename
Set turtle shape to shape with given name or, if name is not given,
return name of current shape.
Shape with name must exist in the TurtleScreen's shape dictionary.
Initially there are the following polygon shapes:
'arrow', 'turtle', 'circle', 'square', 'triangle', 'classic'.
To learn about how to deal with shapes see Screen-method register_shape.
Example (for a Turtle instance named turtle):
>>> turtle.shape()
'arrow'
>>> turtle.shape("turtle")
>>> turtle.shape()
'turtle'
"""
if name is None:
return self.turtle.shapeIndex
if not name in self.screen.getshapes():
raise TurtleGraphicsError("There is no shape named %s" % name)
self.turtle._setshape(name)
#self._update()
def shapesize(self, stretch_wid=None, stretch_len=None, outline=None):
"""Set/return turtle's stretchfactors/outline. Set resizemode to "user".
Optional arguments:
stretch_wid : positive number
stretch_len : positive number
outline : positive number
Return or set the pen's attributes x/y-stretchfactors and/or outline.
Set resizemode to "user".
If and only if resizemode is set to "user", the turtle will be displayed
stretched according to its stretchfactors:
stretch_wid is stretchfactor perpendicular to orientation
stretch_len is stretchfactor in direction of the turtle's orientation.
outline determines the width of the shape's outline.
Examples (for a Turtle instance named turtle):
>>> turtle.resizemode("user")
>>> turtle.shapesize(5, 5, 12)
>>> turtle.shapesize(outline=8)
"""
if stretch_wid is stretch_len is outline is None:
stretch_wid, stretch_len = self._stretchfactor
return stretch_wid, stretch_len, self._outlinewidth
if stretch_wid == 0 or stretch_len == 0:
raise TurtleGraphicsError("stretch_wid/stretch_len must not be zero")
if stretch_wid is not None:
if stretch_len is None:
stretchfactor = stretch_wid, stretch_wid
else:
stretchfactor = stretch_wid, stretch_len
elif stretch_len is not None:
stretchfactor = self._stretchfactor[0], stretch_len
else:
stretchfactor = self._stretchfactor
if outline is None:
outline = self._outlinewidth
self.pen(resizemode="user",
stretchfactor=stretchfactor, outline=outline)
def shearfactor(self, shear=None):
"""Set or return the current shearfactor.
Optional argument: shear -- number, tangent of the shear angle
Shear the turtleshape according to the given shearfactor shear,
which is the tangent of the shear angle. DO NOT change the
turtle's heading (direction of movement).
If shear is not given: return the current shearfactor, i. e. the
tangent of the shear angle, by which lines parallel to the
heading of the turtle are sheared.
Examples (for a Turtle instance named turtle):
>>> turtle.shape("circle")
>>> turtle.shapesize(5,2)
>>> turtle.shearfactor(0.5)
>>> turtle.shearfactor()
0.5
"""
if shear is None:
return self._shearfactor
self.pen(resizemode="user", shearfactor=shear)
def settiltangle(self, angle):
"""Rotate the turtleshape to point in the specified direction
Argument: angle -- number
Rotate the turtleshape to point in the direction specified by angle,
regardless of its current tilt-angle. DO NOT change the turtle's
heading (direction of movement).
Examples (for a Turtle instance named turtle):
>>> turtle.shape("circle")
>>> turtle.shapesize(5,2)
>>> turtle.settiltangle(45)
>>> stamp()
>>> turtle.fd(50)
>>> turtle.settiltangle(-45)
>>> stamp()
>>> turtle.fd(50)
"""
tilt = -angle * self._degreesPerAU * self._angleOrient
tilt = (tilt * math.pi / 180.0) % (2*math.pi)
self.pen(resizemode="user", tilt=tilt)
def tiltangle(self, angle=None):
"""Set or return the current tilt-angle.
Optional argument: angle -- number
Rotate the turtleshape to point in the direction specified by angle,
regardless of its current tilt-angle. DO NOT change the turtle's
heading (direction of movement).
If angle is not given: return the current tilt-angle, i. e. the angle
between the orientation of the turtleshape and the heading of the
turtle (its direction of movement).
Deprecated since Python 3.1
Examples (for a Turtle instance named turtle):
>>> turtle.shape("circle")
>>> turtle.shapesize(5,2)
>>> turtle.tilt(45)
>>> turtle.tiltangle()
"""
if angle is None:
tilt = -self._tilt * (180.0/math.pi) * self._angleOrient
return (tilt / self._degreesPerAU) % self._fullcircle
else:
self.settiltangle(angle)
def tilt(self, angle):
"""Rotate the turtleshape by angle.
Argument:
angle - a number
Rotate the turtleshape by angle from its current tilt-angle,
but do NOT change the turtle's heading (direction of movement).
Examples (for a Turtle instance named turtle):
>>> turtle.shape("circle")
>>> turtle.shapesize(5,2)
>>> turtle.tilt(30)
>>> turtle.fd(50)
>>> turtle.tilt(30)
>>> turtle.fd(50)
"""
self.settiltangle(angle + self.tiltangle())
def shapetransform(self, t11=None, t12=None, t21=None, t22=None):
"""Set or return the current transformation matrix of the turtle shape.
Optional arguments: t11, t12, t21, t22 -- numbers.
If none of the matrix elements are given, return the transformation
matrix.
Otherwise set the given elements and transform the turtleshape
according to the matrix consisting of first row t11, t12 and
second row t21, t22.
Modify stretchfactor, shearfactor and tiltangle according to the
given matrix.
Examples (for a Turtle instance named turtle):
>>> turtle.shape("square")
>>> turtle.shapesize(4,2)
>>> turtle.shearfactor(-0.5)
>>> turtle.shapetransform()
(4.0, -1.0, -0.0, 2.0)
"""
#console.log("shapetransform")
if t11 is t12 is t21 is t22 is None:
return self._shapetrafo
m11, m12, m21, m22 = self._shapetrafo
if t11 is not None: m11 = t11
if t12 is not None: m12 = t12
if t21 is not None: m21 = t21
if t22 is not None: m22 = t22
if m11 * m22 - m12 * m21 == 0: # use merged values: any t may be None
raise TurtleGraphicsError("Bad shape transform matrix: must not be singular")
self._shapetrafo = (m11, m12, m21, m22)
alfa = math.atan2(-m21, m11) % (2 * math.pi)
sa, ca = math.sin(alfa), math.cos(alfa)
a11, a12, a21, a22 = (ca*m11 - sa*m21, ca*m12 - sa*m22,
sa*m11 + ca*m21, sa*m12 + ca*m22)
self._stretchfactor = a11, a22
self._shearfactor = a12/a22
self._tilt = alfa
self._update()
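# Worked decomposition (a sketch matching the docstring example): for the
# matrix (4.0, -1.0, -0.0, 2.0),
#   alfa          = atan2(0.0, 4.0) % (2*pi) = 0.0   (no tilt)
#   stretchfactor = (4.0, 2.0)
#   shearfactor   = -1.0/2.0 = -0.5
# recovering the original shapesize(4, 2) and shearfactor(-0.5) settings.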
def _polytrafo(self, poly):
"""Computes transformed polygon shapes from a shape
according to current position and heading.
"""
screen = self.screen
p0, p1 = self._position
e0, e1 = self._orient
e = Vec2D(e0, e1 * screen.yscale / screen.xscale)
e0, e1 = (1.0 / abs(e)) * e
return [(p0+(e1*x+e0*y)/screen.xscale, p1+(-e0*x+e1*y)/screen.yscale)
for (x, y) in poly]
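# Roughly: the expression above rotates each shape point by the turtle's
# heading (with unit orientation (e0, e1), the shape's y-axis is mapped onto
# the direction of movement), shifts it to the turtle's position, and applies
# the screen's x/y scale factors to land in canvas coordinates.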
def get_shapepoly(self):
"""Return the current shape polygon as tuple of coordinate pairs.
No argument.
Examples (for a Turtle instance named turtle):
>>> turtle.shape("square")
>>> turtle.shapetransform(4, -1, 0, 2)
>>> turtle.get_shapepoly()
((50, -20), (30, 20), (-50, 20), (-30, -20))
"""
shape = self.screen._shapes[self.turtle.shapeIndex]
if shape._type == "polygon":
return self._getshapepoly(shape._data, shape._type == "compound")
# else return None
def _getshapepoly(self, polygon, compound=False):
"""Calculate transformed shape polygon according to resizemode
and shapetransform.
"""
if self._resizemode == "user" or compound:
t11, t12, t21, t22 = self._shapetrafo
elif self._resizemode == "auto":
l = max(1, self._pensize/5.0)
t11, t12, t21, t22 = l, 0, 0, l
elif self._resizemode == "noresize":
return polygon
return tuple([(t11*x + t12*y, t21*x + t22*y) for (x, y) in polygon])
def _drawturtle(self):
"""Manages the correct rendering of the turtle with respect to
its shape, resizemode, stretch and tilt etc."""
return
############################## stamp stuff ###############################
def stamp(self):
"""Stamp a copy of the turtleshape onto the canvas and return its id.
No argument.
Stamp a copy of the turtle shape onto the canvas at the current
turtle position. Return a stamp_id for that stamp, which can be
used to delete it by calling clearstamp(stamp_id).
Example (for a Turtle instance named turtle):
>>> turtle.color("blue")
>>> turtle.stamp()
13
>>> turtle.fd(50)
"""
screen = self.screen
shape = screen._shapes[self.turtle.shapeIndex]
ttype = shape._type
tshape = shape._data
if ttype == "polygon":
stitem = screen._createpoly()
if self._resizemode == "noresize": w = 1
elif self._resizemode == "auto": w = self._pensize
else: w = self._outlinewidth
shape = self._polytrafo(self._getshapepoly(tshape))
fc, oc = self._fillcolor, self._pencolor
screen._drawpoly(stitem, shape, fill=fc, outline=oc,
width=w, top=True)
elif ttype == "image":
stitem = screen._createimage("")
screen._drawimage(stitem, self._position, tshape)
elif ttype == "compound":
stitem = []
for element in tshape:
item = screen._createpoly()
stitem.append(item)
stitem = tuple(stitem)
for item, (poly, fc, oc) in zip(stitem, tshape):
poly = self._polytrafo(self._getshapepoly(poly, True))
screen._drawpoly(item, poly, fill=self._cc(fc),
outline=self._cc(oc), width=self._outlinewidth, top=True)
self.stampItems.append(stitem)
self.undobuffer.push(("stamp", stitem))
return stitem
def _clearstamp(self, stampid):
"""does the work for clearstamp() and clearstamps()
"""
if stampid in self.stampItems:
if isinstance(stampid, tuple):
for subitem in stampid:
self.screen._delete(subitem)
else:
self.screen._delete(stampid)
self.stampItems.remove(stampid)
# Delete stampitem from undobuffer if necessary
# if clearstamp is called directly.
item = ("stamp", stampid)
buf = self.undobuffer
if buf is None or item not in buf.buffer:
return
index = buf.buffer.index(item)
buf.buffer.remove(item)
if index <= buf.ptr:
buf.ptr = (buf.ptr - 1) % buf.bufsize
buf.buffer.insert((buf.ptr+1)%buf.bufsize, [None])
def clearstamp(self, stampid):
"""Delete stamp with given stampid
Argument:
stampid - an integer, must be return value of previous stamp() call.
Example (for a Turtle instance named turtle):
>>> turtle.color("blue")
>>> astamp = turtle.stamp()
>>> turtle.fd(50)
>>> turtle.clearstamp(astamp)
"""
self._clearstamp(stampid)
self._update()
def clearstamps(self, n=None):
"""Delete all or first/last n of turtle's stamps.
Optional argument:
n -- an integer
If n is None, delete all of pen's stamps,
else if n > 0 delete first n stamps
else if n < 0 delete last n stamps.
Example (for a Turtle instance named turtle):
>>> for i in range(8):
... turtle.stamp(); turtle.fd(30)
...
>>> turtle.clearstamps(2)
>>> turtle.clearstamps(-2)
>>> turtle.clearstamps()
"""
if n is None:
toDelete = self.stampItems[:]
elif n >= 0:
toDelete = self.stampItems[:n]
else:
toDelete = self.stampItems[n:]
for item in toDelete:
self._clearstamp(item)
self._update()
def _goto(self, end):
"""Move the pen to the point end, thereby drawing a line
if pen is down. All other methods for turtle movement depend
on this one.
"""
if self._speed and self.screen._tracing == 1:
if self._drawing:
#console.log('%s:%s:%s:%s:%s' % (self, start, end, self._pencolor,
# self._pensize))
self.screen._drawline(self, #please remove me eventually
(self._position, end),
self._pencolor, self._pensize, False)
if isinstance(self._fillpath, list):
self._fillpath.append(end)
###### inheritance!!!!!!!!!!!!!!!!!!!!!!
self._position = end
def _rotate(self, angle):
"""Turns pen clockwise by angle.
"""
#console.log('_rotate')
if self.undobuffer:
self.undobuffer.push(("rot", angle, self._degreesPerAU))
angle *= self._degreesPerAU
neworient = self._orient.rotate(angle)
tracing = self.screen._tracing
self._orient = neworient
#self._update()
def _newLine(self, usePos=True):
"""Closes current line item and starts a new one.
Remark: if the current line becomes too long, animation
performance (via _drawline) slows down considerably.
"""
#console.log('_newLine')
return
def filling(self):
"""Return fillstate (True if filling, False else).
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.begin_fill()
>>> if turtle.filling():
... turtle.pensize(5)
... else:
... turtle.pensize(3)
"""
return isinstance(self._fillpath, list)
def begin_fill(self):
"""Called just before drawing a shape to be filled.
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.color("black", "red")
>>> turtle.begin_fill()
>>> turtle.circle(60)
>>> turtle.end_fill()
"""
if not self.filling():
self._fillitem = self.screen._createpoly()
#self.items.append(self._fillitem)
self._fillpath = [self._position]
#self._newLine()
if self.undobuffer:
self.undobuffer.push(("beginfill", self._fillitem))
#self._update()
def end_fill(self):
"""Fill the shape drawn after the call begin_fill().
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.color("black", "red")
>>> turtle.begin_fill()
>>> turtle.circle(60)
>>> turtle.end_fill()
"""
if self.filling():
if len(self._fillpath) > 2:
self.screen._drawpoly(self._fillitem, self._fillpath,
fill=self._fillcolor)
if self.undobuffer:
self.undobuffer.push(("dofill", self._fillitem))
self._fillitem = self._fillpath = None
self._update()
def dot(self, size=None, *color):
"""Draw a dot with diameter size, using color.
Optional arguments:
size -- an integer >= 1 (if given)
color -- a colorstring or a numeric color tuple
Draw a circular dot with diameter size, using color.
If size is not given, the maximum of pensize+4 and 2*pensize is used.
Example (for a Turtle instance named turtle):
>>> turtle.dot()
>>> turtle.fd(50); turtle.dot(20, "blue"); turtle.fd(50)
"""
if not color:
if isinstance(size, (str, tuple)):
color = self._colorstr(size)
size = self._pensize + max(self._pensize, 4)
else:
color = self._pencolor
if not size:
size = self._pensize + max(self._pensize, 4)
else:
if size is None:
size = self._pensize + max(self._pensize, 4)
color = self._colorstr(color)
if hasattr(self.screen, "_dot"):
item = self.screen._dot(self._position, size, color)
#self.items.append(item)
if self.undobuffer:
self.undobuffer.push(("dot", item))
else:
pen = self.pen()
if self.undobuffer:
self.undobuffer.push(["seq"])
self.undobuffer.cumulate = True
try:
if self.resizemode() == 'auto':
self.ht()
self.pendown()
self.pensize(size)
self.pencolor(color)
self.forward(0)
finally:
self.pen(pen)
if self.undobuffer:
self.undobuffer.cumulate = False
def _write(self, txt, align, font):
"""Performs the writing for write()
"""
item, end = self.screen._write(self._position, txt, align, font,
self._pencolor)
#self.items.append(item)
if self.undobuffer:
self.undobuffer.push(("wri", item))
return end
def write(self, arg, move=False, align="left", font=("Arial", 8, "normal")):
"""Write text at the current turtle position.
Arguments:
arg -- info, which is to be written to the TurtleScreen
move (optional) -- True/False
align (optional) -- one of the strings "left", "center" or "right"
font (optional) -- a triple (fontname, fontsize, fonttype)
Write text - the string representation of arg - at the current
turtle position according to align ("left", "center" or "right")
and with the given font.
If move is True, the pen is moved to the bottom-right corner
of the text. By default, move is False.
Example (for a Turtle instance named turtle):
>>> turtle.write('Home = ', True, align="center")
>>> turtle.write((0,0), True)
"""
if self.undobuffer:
self.undobuffer.push(["seq"])
self.undobuffer.cumulate = True
end = self._write(str(arg), align.lower(), font)
if move:
x, y = self.pos()
self.setpos(end, y)
if self.undobuffer:
self.undobuffer.cumulate = False
def begin_poly(self):
"""Start recording the vertices of a polygon.
No argument.
Start recording the vertices of a polygon. Current turtle position
is first point of polygon.
Example (for a Turtle instance named turtle):
>>> turtle.begin_poly()
"""
self._poly = [self._position]
self._creatingPoly = True
def end_poly(self):
"""Stop recording the vertices of a polygon.
No argument.
Stop recording the vertices of a polygon. Current turtle position is
last point of polygon. This will be connected with the first point.
Example (for a Turtle instance named turtle):
>>> turtle.end_poly()
"""
self._creatingPoly = False
def get_poly(self):
"""Return the lastly recorded polygon.
No argument.
Example (for a Turtle instance named turtle):
>>> p = turtle.get_poly()
>>> turtle.register_shape("myFavouriteShape", p)
"""
## check if there is any poly?
if self._poly is not None:
return tuple(self._poly)
def getscreen(self):
"""Return the TurtleScreen object, the turtle is drawing on.
No argument.
Return the TurtleScreen object, the turtle is drawing on.
So TurtleScreen-methods can be called for that object.
Example (for a Turtle instance named turtle):
>>> ts = turtle.getscreen()
>>> ts
<turtle.TurtleScreen object at 0x0106B770>
>>> ts.bgcolor("pink")
"""
return self.screen
def getturtle(self):
"""Return the Turtleobject itself.
No argument.
Only reasonable use: as a function to return the 'anonymous turtle':
Example:
>>> pet = getturtle()
>>> pet.fd(50)
>>> pet
<turtle.Turtle object at 0x0187D810>
>>> turtles()
[<turtle.Turtle object at 0x0187D810>]
"""
return self
getpen = getturtle
################################################################
### screen oriented methods recurring to methods of TurtleScreen
################################################################
def _delay(self, delay=None):
"""Set delay value which determines speed of turtle animation.
"""
return self.screen.delay(delay)
turtlesize = shapesize
RawPen = RawTurtle
### Screen - Singleton ########################
def Screen():
"""Return the singleton screen object.
If none exists at the moment, create a new one and return it,
else return the existing one."""
if Turtle._screen is None:
Turtle._screen = _Screen()
return Turtle._screen
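# Usage sketch: Screen() is a singleton accessor, so repeated calls return
# the very same object:
#   s1 = Screen(); s2 = Screen()
#   assert s1 is s2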
class _Screen(TurtleScreen):
_root = None
_canvas = None
_title = _CFG["title"]
def __init__(self):
# XXX there is no need for this code to be conditional,
# as there will be only a single _Screen instance, anyway
# XXX actually, the turtle demo is injecting root window,
# so perhaps the conditional creation of a root should be
# preserved (perhaps by passing it as an optional parameter)
if _Screen._root is None:
_Screen._root = self._root = _Root()
#self._root.title(_Screen._title)
#self._root.ondestroy(self._destroy)
if _Screen._canvas is None:
width = _CFG["width"]
height = _CFG["height"]
canvwidth = _CFG["canvwidth"]
canvheight = _CFG["canvheight"]
leftright = _CFG["leftright"]
topbottom = _CFG["topbottom"]
self._root.setupcanvas(width, height, canvwidth, canvheight)
_Screen._canvas = self._root._getcanvas()
TurtleScreen.__init__(self, _Screen._canvas)
self.setup(width, height, leftright, topbottom)
def end(self):
self._root.end()
def setup(self, width=_CFG["width"], height=_CFG["height"],
startx=_CFG["leftright"], starty=_CFG["topbottom"]):
""" Set the size and position of the main window.
Arguments:
width: as integer a size in pixels, as float a fraction of the screen.
Default is 50% of screen.
height: as integer the height in pixels, as float a fraction of the
screen. Default is 75% of screen.
startx: if positive, starting position in pixels from the left
edge of the screen, if negative from the right edge
Default, startx=None is to center window horizontally.
starty: if positive, starting position in pixels from the top
edge of the screen, if negative from the bottom edge
Default, starty=None is to center window vertically.
Examples (for a Screen instance named screen):
>>> screen.setup (width=200, height=200, startx=0, starty=0)
sets window to 200x200 pixels, in upper left of screen
>>> screen.setup(width=.75, height=0.5, startx=None, starty=None)
sets window to 75% of screen by 50% of screen and centers
"""
if not hasattr(self._root, "set_geometry"):
return
sw = self._root.win_width()
sh = self._root.win_height()
if isinstance(width, float) and 0 <= width <= 1:
width = sw*width
if startx is None:
startx = (sw - width) / 2
if isinstance(height, float) and 0 <= height <= 1:
height = sh*height
if starty is None:
starty = (sh - height) / 2
self._root.set_geometry(width, height, startx, starty)
self.update()
class Turtle(RawTurtle):
"""RawTurtle auto-creating (scrolled) canvas.
When a Turtle object is created or a function derived from some
Turtle method is called a TurtleScreen object is automatically created.
"""
_pen = None
_screen = None
def __init__(self,
shape=_CFG["shape"],
undobuffersize=_CFG["undobuffersize"],
visible=_CFG["visible"]):
if Turtle._screen is None:
Turtle._screen = Screen()
RawTurtle.__init__(self, Turtle._screen,
shape=shape,
undobuffersize=undobuffersize,
visible=visible)
Pen = Turtle
def _getpen():
"""Create the 'anonymous' turtle if not already present."""
if Turtle._pen is None:
Turtle._pen = Turtle()
return Turtle._pen
def _getscreen():
"""Create a TurtleScreen if not already present."""
if Turtle._screen is None:
Turtle._screen = Screen()
return Turtle._screen
if __name__ == "__main__":
def switchpen():
if isdown():
pu()
else:
pd()
def demo1():
"""Demo of old turtle.py - module"""
reset()
tracer(True)
up()
backward(100)
down()
# draw 3 squares; the last filled
width(3)
for i in range(3):
if i == 2:
begin_fill()
for _ in range(4):
forward(20)
left(90)
if i == 2:
color("maroon")
end_fill()
up()
forward(30)
down()
width(1)
color("black")
# move out of the way
tracer(False)
up()
right(90)
forward(100)
right(90)
forward(100)
right(180)
down()
# some text
write("startstart", 1)
write("start", 1)
color("red")
# staircase
for i in range(5):
forward(20)
left(90)
forward(20)
right(90)
# filled staircase
tracer(True)
begin_fill()
for i in range(5):
forward(20)
left(90)
forward(20)
right(90)
end_fill()
# more text
def demo2():
"""Demo of some new features."""
speed(1)
st()
pensize(3)
setheading(towards(0, 0))
radius = distance(0, 0)/2.0
rt(90)
for _ in range(18):
switchpen()
circle(radius, 10)
write("wait a moment...")
while undobufferentries():
undo()
reset()
lt(90)
colormode(255)
laenge = 10
pencolor("green")
pensize(3)
lt(180)
for i in range(-2, 16):
if i > 0:
begin_fill()
fillcolor(255-15*i, 0, 15*i)
for _ in range(3):
fd(laenge)
lt(120)
end_fill()
laenge += 10
lt(15)
speed((speed()+1)%12)
#end_fill()
lt(120)
pu()
fd(70)
rt(30)
pd()
color("red","yellow")
speed(0)
begin_fill()
for _ in range(4):
circle(50, 90)
rt(90)
fd(30)
rt(90)
end_fill()
lt(90)
pu()
fd(30)
pd()
shape("turtle")
tri = getturtle()
tri.resizemode("auto")
turtle = Turtle()
turtle.resizemode("auto")
turtle.shape("turtle")
turtle.reset()
turtle.left(90)
turtle.speed(0)
turtle.up()
turtle.goto(280, 40)
turtle.lt(30)
turtle.down()
turtle.speed(6)
turtle.color("blue","orange")
turtle.pensize(2)
tri.speed(6)
setheading(towards(turtle))
count = 1
while tri.distance(turtle) > 4:
turtle.fd(3.5)
turtle.lt(0.6)
tri.setheading(tri.towards(turtle))
tri.fd(4)
if count % 20 == 0:
turtle.stamp()
tri.stamp()
switchpen()
count += 1
tri.write("CAUGHT! ", font=("Arial", 16, "bold"), align="right")
tri.pencolor("black")
tri.pencolor("red")
def baba(xdummy, ydummy):
clearscreen()
bye()
time.sleep(2)
while undobufferentries():
tri.undo()
turtle.undo()
tri.fd(50)
tri.write(" Click me!", font = ("Courier", 12, "bold") )
tri.onclick(baba, 1)
demo1()
demo2()
exitonclick()
|
gpl-3.0
|
singular78/shadowsocks
|
shadowsocks/shell.py
|
270
|
12676
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import json
import sys
import getopt
import logging
from shadowsocks.common import to_bytes, to_str, IPNetwork
from shadowsocks import encrypt
VERBOSE_LEVEL = 5
verbose = 0
def check_python():
info = sys.version_info
if info[0] == 2 and not info[1] >= 6:
print('Python 2.6+ required')
sys.exit(1)
elif info[0] == 3 and not info[1] >= 3:
print('Python 3.3+ required')
sys.exit(1)
elif info[0] not in [2, 3]:
print('Python version not supported')
sys.exit(1)
def print_exception(e):
global verbose
logging.error(e)
if verbose > 0:
import traceback
traceback.print_exc()
def print_shadowsocks():
version = ''
try:
import pkg_resources
version = pkg_resources.get_distribution('shadowsocks').version
except Exception:
pass
print('Shadowsocks %s' % version)
def find_config():
config_path = 'config.json'
if os.path.exists(config_path):
return config_path
config_path = os.path.join(os.path.dirname(__file__), '../', 'config.json')
if os.path.exists(config_path):
return config_path
return None
def check_config(config, is_local):
if config.get('daemon', None) == 'stop':
# no need to specify configuration for daemon stop
return
if is_local and not config.get('password', None):
logging.error('password not specified')
print_help(is_local)
sys.exit(2)
if not is_local and not config.get('password', None) \
and not config.get('port_password', None):
logging.error('password or port_password not specified')
print_help(is_local)
sys.exit(2)
if 'local_port' in config:
config['local_port'] = int(config['local_port'])
if 'server_port' in config and type(config['server_port']) != list:
config['server_port'] = int(config['server_port'])
if config.get('local_address', '') in [b'0.0.0.0']:
logging.warn('warning: local set to listen on 0.0.0.0, it\'s not safe')
if config.get('server', '') in ['127.0.0.1', 'localhost']:
logging.warn('warning: server set to listen on %s:%s, are you sure?' %
(to_str(config['server']), config['server_port']))
if (config.get('method', '') or '').lower() == 'table':
logging.warn('warning: table is not safe; please use a safer cipher, '
'like AES-256-CFB')
if (config.get('method', '') or '').lower() == 'rc4':
logging.warn('warning: RC4 is not safe; please use a safer cipher, '
'like AES-256-CFB')
if config.get('timeout', 300) < 100:
logging.warn('warning: your timeout %d seems too short' %
int(config.get('timeout')))
if config.get('timeout', 300) > 600:
logging.warn('warning: your timeout %d seems too long' %
int(config.get('timeout')))
if config.get('password') in [b'mypassword']:
logging.error('DON\'T USE DEFAULT PASSWORD! Please change it in your '
'config.json!')
sys.exit(1)
if config.get('user', None) is not None:
if os.name != 'posix':
logging.error('user can be used only on Unix')
sys.exit(1)
encrypt.try_cipher(config['password'], config['method'])
def get_config(is_local):
global verbose
logging.basicConfig(level=logging.INFO,
format='%(levelname)-s: %(message)s')
if is_local:
shortopts = 'hd:s:b:p:k:l:m:c:t:vq'
longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'user=',
'version']
else:
shortopts = 'hd:s:p:k:m:c:t:vq'
longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'workers=',
'forbidden-ip=', 'user=', 'manager-address=', 'version']
try:
config_path = find_config()
optlist, args = getopt.getopt(sys.argv[1:], shortopts, longopts)
for key, value in optlist:
if key == '-c':
config_path = value
if config_path:
logging.info('loading config from %s' % config_path)
with open(config_path, 'rb') as f:
try:
config = parse_json_in_str(f.read().decode('utf8'))
except ValueError as e:
logging.error('found an error in config.json: %s', e)
sys.exit(1)
else:
config = {}
v_count = 0
for key, value in optlist:
if key == '-p':
config['server_port'] = int(value)
elif key == '-k':
config['password'] = to_bytes(value)
elif key == '-l':
config['local_port'] = int(value)
elif key == '-s':
config['server'] = to_str(value)
elif key == '-m':
config['method'] = to_str(value)
elif key == '-b':
config['local_address'] = to_str(value)
elif key == '-v':
v_count += 1
# '-vv' turns on more verbose mode
config['verbose'] = v_count
elif key == '-t':
config['timeout'] = int(value)
elif key == '--fast-open':
config['fast_open'] = True
elif key == '--workers':
config['workers'] = int(value)
elif key == '--manager-address':
config['manager_address'] = value
elif key == '--user':
config['user'] = to_str(value)
elif key == '--forbidden-ip':
config['forbidden_ip'] = to_str(value).split(',')
elif key in ('-h', '--help'):
if is_local:
print_local_help()
else:
print_server_help()
sys.exit(0)
elif key == '--version':
print_shadowsocks()
sys.exit(0)
elif key == '-d':
config['daemon'] = to_str(value)
elif key == '--pid-file':
config['pid-file'] = to_str(value)
elif key == '--log-file':
config['log-file'] = to_str(value)
elif key == '-q':
v_count -= 1
config['verbose'] = v_count
except getopt.GetoptError as e:
print(e, file=sys.stderr)
print_help(is_local)
sys.exit(2)
if not config:
logging.error('config not specified')
print_help(is_local)
sys.exit(2)
config['password'] = to_bytes(config.get('password', b''))
config['method'] = to_str(config.get('method', 'aes-256-cfb'))
config['port_password'] = config.get('port_password', None)
config['timeout'] = int(config.get('timeout', 300))
config['fast_open'] = config.get('fast_open', False)
config['workers'] = config.get('workers', 1)
config['pid-file'] = config.get('pid-file', '/var/run/shadowsocks.pid')
config['log-file'] = config.get('log-file', '/var/log/shadowsocks.log')
config['verbose'] = config.get('verbose', False)
config['local_address'] = to_str(config.get('local_address', '127.0.0.1'))
config['local_port'] = config.get('local_port', 1080)
if is_local:
if config.get('server', None) is None:
logging.error('server addr not specified')
print_local_help()
sys.exit(2)
else:
config['server'] = to_str(config['server'])
else:
config['server'] = to_str(config.get('server', '0.0.0.0'))
try:
config['forbidden_ip'] = \
IPNetwork(config.get('forbidden_ip', '127.0.0.0/8,::1/128'))
except Exception as e:
logging.error(e)
sys.exit(2)
config['server_port'] = config.get('server_port', 8388)
logging.getLogger('').handlers = []
logging.addLevelName(VERBOSE_LEVEL, 'VERBOSE')
if config['verbose'] >= 2:
level = VERBOSE_LEVEL
elif config['verbose'] == 1:
level = logging.DEBUG
elif config['verbose'] == -1:
level = logging.WARN
elif config['verbose'] <= -2:
level = logging.ERROR
else:
level = logging.INFO
verbose = config['verbose']
logging.basicConfig(level=level,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
check_config(config, is_local)
return config
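# Illustrative note (hypothetical invocation): running
#   sslocal -c config.json -p 8389 -v
# loads config.json first, then the option loop above overwrites
# server_port with 8389 and sets verbose to 1, so command line flags win
# over values from the file.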
def print_help(is_local):
if is_local:
print_local_help()
else:
print_server_help()
def print_local_help():
print('''usage: sslocal [OPTION]...
A fast tunnel proxy that helps you bypass firewalls.
You can supply configurations via either config file or command line arguments.
Proxy options:
-c CONFIG path to config file
-s SERVER_ADDR server address
-p SERVER_PORT server port, default: 8388
-b LOCAL_ADDR local binding address, default: 127.0.0.1
-l LOCAL_PORT local port, default: 1080
-k PASSWORD password
-m METHOD encryption method, default: aes-256-cfb
-t TIMEOUT timeout in seconds, default: 300
--fast-open use TCP_FASTOPEN, requires Linux 3.7+
General options:
-h, --help show this help message and exit
-d start/stop/restart daemon mode
--pid-file PID_FILE pid file for daemon mode
--log-file LOG_FILE log file for daemon mode
--user USER username to run as
-v, -vv verbose mode
-q, -qq quiet mode, only show warnings/errors
--version show version information
Online help: <https://github.com/shadowsocks/shadowsocks>
''')
def print_server_help():
print('''usage: ssserver [OPTION]...
A fast tunnel proxy that helps you bypass firewalls.
You can supply configurations via either config file or command line arguments.
Proxy options:
-c CONFIG path to config file
-s SERVER_ADDR server address, default: 0.0.0.0
-p SERVER_PORT server port, default: 8388
-k PASSWORD password
-m METHOD encryption method, default: aes-256-cfb
-t TIMEOUT timeout in seconds, default: 300
--fast-open use TCP_FASTOPEN, requires Linux 3.7+
--workers WORKERS number of workers, available on Unix/Linux
  --forbidden-ip IPLIST  comma separated IP list forbidden to connect
--manager-address ADDR optional server manager UDP address, see wiki
General options:
-h, --help show this help message and exit
-d start/stop/restart daemon mode
--pid-file PID_FILE pid file for daemon mode
--log-file LOG_FILE log file for daemon mode
--user USER username to run as
-v, -vv verbose mode
-q, -qq quiet mode, only show warnings/errors
--version show version information
Online help: <https://github.com/shadowsocks/shadowsocks>
''')
def _decode_list(data):
rv = []
for item in data:
if hasattr(item, 'encode'):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
rv = {}
for key, value in data.items():
if hasattr(value, 'encode'):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
def parse_json_in_str(data):
# parse json and convert everything from unicode to str
return json.loads(data, object_hook=_decode_dict)
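# A minimal usage sketch (illustrative, not part of the original module): on
# Python 2, json.loads() yields unicode strings, which _decode_dict and
# _decode_list recursively re-encode to byte strings:
#
#   config = parse_json_in_str('{"server": "0.0.0.0", "local_port": 1080}')
#   # config == {'server': '0.0.0.0', 'local_port': 1080} with str, not
#   # unicode, keys and values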
|
apache-2.0
|
zubair-arbi/edx-platform
|
lms/djangoapps/django_comment_client/base/tests.py
|
1
|
70746
|
"""Tests for django comment client views."""
from contextlib import contextmanager
import logging
import json
import ddt
from django.conf import settings
from django.core.cache import get_cache
from django.test.client import Client, RequestFactory
from django.contrib.auth.models import User
from django.core.management import call_command
from django.core.urlresolvers import reverse
from request_cache.middleware import RequestCache
from mock import patch, ANY, Mock
from nose.tools import assert_true, assert_equal # pylint: disable=no-name-in-module
from opaque_keys.edx.keys import CourseKey
from lms.lib.comment_client import Thread
from common.test.utils import MockSignalHandlerMixin, disable_signal
from django_comment_client.base import views
from django_comment_client.tests.group_id import CohortedTopicGroupIdTestMixin, NonCohortedTopicGroupIdTestMixin, GroupIdAssertionMixin
from django_comment_client.tests.utils import CohortedTestCase
from django_comment_client.tests.unicode import UnicodeTestMixin
from django_comment_common.models import Role
from django_comment_common.utils import seed_permissions_roles, ThreadContext
from student.tests.factories import CourseEnrollmentFactory, UserFactory, CourseAccessRoleFactory
from teams.tests.factories import CourseTeamFactory, CourseTeamMembershipFactory
from util.testing import UrlResetMixin
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import check_mongo_calls
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import ModuleStoreEnum
log = logging.getLogger(__name__)
CS_PREFIX = "http://localhost:4567/api/v1"
# pylint: disable=missing-docstring
class MockRequestSetupMixin(object):
def _create_response_mock(self, data):
return Mock(text=json.dumps(data), json=Mock(return_value=data))
def _set_mock_request_data(self, mock_request, data):
mock_request.return_value = self._create_response_mock(data)
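# A minimal usage sketch (illustrative only; SomeTestCase is a hypothetical
# name): test classes mix in MockRequestSetupMixin, patch requests.request,
# and seed the fake comments-service payload per test:
#
#   @patch('lms.lib.comment_client.utils.requests.request')
#   class SomeTestCase(MockRequestSetupMixin, ModuleStoreTestCase):
#       def test_something(self, mock_request):
#           self._set_mock_request_data(mock_request, {"closed": False})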
@patch('lms.lib.comment_client.utils.requests.request')
class CreateThreadGroupIdTestCase(
MockRequestSetupMixin,
CohortedTestCase,
CohortedTopicGroupIdTestMixin,
NonCohortedTopicGroupIdTestMixin
):
cs_endpoint = "/threads"
def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True):
self._set_mock_request_data(mock_request, {})
mock_request.return_value.status_code = 200
request_data = {"body": "body", "title": "title", "thread_type": "discussion"}
if pass_group_id:
request_data["group_id"] = group_id
request = RequestFactory().post("dummy_url", request_data)
request.user = user
request.view_name = "create_thread"
return views.create_thread(
request,
course_id=unicode(self.course.id),
commentable_id=commentable_id
)
def test_group_info_in_response(self, mock_request):
response = self.call_view(
mock_request,
"cohorted_topic",
self.student,
None
)
self._assert_json_response_contains_group_info(response)
@patch('lms.lib.comment_client.utils.requests.request')
@disable_signal(views, 'thread_edited')
@disable_signal(views, 'thread_voted')
@disable_signal(views, 'thread_deleted')
class ThreadActionGroupIdTestCase(
MockRequestSetupMixin,
CohortedTestCase,
GroupIdAssertionMixin
):
def call_view(
self,
view_name,
mock_request,
user=None,
post_params=None,
view_args=None
):
self._set_mock_request_data(
mock_request,
{
"user_id": str(self.student.id),
"group_id": self.student_cohort.id,
"closed": False,
"type": "thread",
"commentable_id": "non_team_dummy_id"
}
)
mock_request.return_value.status_code = 200
request = RequestFactory().post("dummy_url", post_params or {})
request.user = user or self.student
request.view_name = view_name
return getattr(views, view_name)(
request,
course_id=unicode(self.course.id),
thread_id="dummy",
**(view_args or {})
)
def test_update(self, mock_request):
response = self.call_view(
"update_thread",
mock_request,
post_params={"body": "body", "title": "title"}
)
self._assert_json_response_contains_group_info(response)
def test_delete(self, mock_request):
response = self.call_view("delete_thread", mock_request)
self._assert_json_response_contains_group_info(response)
def test_vote(self, mock_request):
response = self.call_view(
"vote_for_thread",
mock_request,
view_args={"value": "up"}
)
self._assert_json_response_contains_group_info(response)
response = self.call_view("undo_vote_for_thread", mock_request)
self._assert_json_response_contains_group_info(response)
def test_flag(self, mock_request):
response = self.call_view("flag_abuse_for_thread", mock_request)
self._assert_json_response_contains_group_info(response)
response = self.call_view("un_flag_abuse_for_thread", mock_request)
self._assert_json_response_contains_group_info(response)
def test_pin(self, mock_request):
response = self.call_view(
"pin_thread",
mock_request,
user=self.moderator
)
self._assert_json_response_contains_group_info(response)
response = self.call_view(
"un_pin_thread",
mock_request,
user=self.moderator
)
self._assert_json_response_contains_group_info(response)
def test_openclose(self, mock_request):
response = self.call_view(
"openclose_thread",
mock_request,
user=self.moderator
)
self._assert_json_response_contains_group_info(
response,
lambda d: d['content']
)
class ViewsTestCaseMixin(object):
"""
This class is used by both ViewsQueryCountTestCase and ViewsTestCase. By
breaking out set_up_course into its own method, ViewsQueryCountTestCase
can build a course in a particular modulestore, while ViewsTestCase can
just run it in setUp for all tests.
"""
def set_up_course(self, module_count=0):
"""
Creates a course, optionally with module_count discussion modules, and
a user with appropriate permissions.
"""
# create a course
self.course = CourseFactory.create(
org='MITx', course='999',
discussion_topics={"Some Topic": {"id": "some_topic"}},
display_name='Robot Super Course',
)
self.course_id = self.course.id
# add some discussion modules
for i in range(module_count):
ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id='id_module_{}'.format(i),
discussion_category='Category {}'.format(i),
discussion_target='Discussion {}'.format(i)
)
# seed the forums permissions and roles
call_command('seed_permissions_roles', unicode(self.course_id))
# Patch the comment client user save method so it does not try
# to create a new cc user when creating a django user
with patch('student.models.cc.User.save'):
uname = 'student'
email = 'student@edx.org'
self.password = 'test' # pylint: disable=attribute-defined-outside-init
# Create the user and make them active so we can log them in.
self.student = User.objects.create_user(uname, email, self.password) # pylint: disable=attribute-defined-outside-init
self.student.is_active = True
self.student.save()
# Add a discussion moderator
self.moderator = UserFactory.create(password=self.password) # pylint: disable=attribute-defined-outside-init
# Enroll the student in the course
CourseEnrollmentFactory(user=self.student,
course_id=self.course_id)
# Enroll the moderator and give them the appropriate roles
CourseEnrollmentFactory(user=self.moderator, course_id=self.course.id)
self.moderator.roles.add(Role.objects.get(name="Moderator", course_id=self.course.id))
self.client = Client()
assert_true(self.client.login(username='student', password=self.password))
def _setup_mock_request(self, mock_request, include_depth=False):
"""
Ensure that mock_request returns the data necessary to make views
function correctly
"""
mock_request.return_value.status_code = 200
data = {
"user_id": str(self.student.id),
"closed": False,
"commentable_id": "non_team_dummy_id"
}
if include_depth:
data["depth"] = 0
self._set_mock_request_data(mock_request, data)
def create_thread_helper(self, mock_request, extra_request_data=None, extra_response_data=None):
"""
Issues a request to create a thread and verifies the result.
"""
mock_request.return_value.status_code = 200
self._set_mock_request_data(mock_request, {
"thread_type": "discussion",
"title": "Hello",
"body": "this is a post",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": False,
"id": "518d4237b023791dca00000d",
"user_id": "1",
"username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [],
"type": "thread",
"group_id": None,
"pinned": False,
"endorsed": False,
"unread_comments_count": 0,
"read": False,
"comments_count": 0,
})
thread = {
"thread_type": "discussion",
"body": ["this is a post"],
"anonymous_to_peers": ["false"],
"auto_subscribe": ["false"],
"anonymous": ["false"],
"title": ["Hello"],
}
if extra_request_data:
thread.update(extra_request_data)
url = reverse('create_thread', kwargs={'commentable_id': 'i4x-MITx-999-course-Robot_Super_Course',
'course_id': unicode(self.course_id)})
response = self.client.post(url, data=thread)
assert_true(mock_request.called)
expected_data = {
'thread_type': 'discussion',
'body': u'this is a post',
'context': ThreadContext.COURSE,
'anonymous_to_peers': False, 'user_id': 1,
'title': u'Hello',
'commentable_id': u'i4x-MITx-999-course-Robot_Super_Course',
'anonymous': False,
'course_id': unicode(self.course_id),
}
if extra_response_data:
expected_data.update(extra_response_data)
mock_request.assert_called_with(
'post',
'{prefix}/i4x-MITx-999-course-Robot_Super_Course/threads'.format(prefix=CS_PREFIX),
data=expected_data,
params={'request_id': ANY},
headers=ANY,
timeout=5
)
assert_equal(response.status_code, 200)
def update_thread_helper(self, mock_request):
"""
Issues a request to update a thread and verifies the result.
"""
self._setup_mock_request(mock_request)
# Mock out saving in order to test that content is correctly
# updated. Otherwise, the call to thread.save() receives the
# same mocked request data that the original call to retrieve
# the thread did, overwriting any changes.
with patch.object(Thread, 'save'):
response = self.client.post(
reverse("update_thread", kwargs={
"thread_id": "dummy",
"course_id": unicode(self.course_id)
}),
data={"body": "foo", "title": "foo", "commentable_id": "some_topic"}
)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEqual(data['body'], 'foo')
self.assertEqual(data['title'], 'foo')
self.assertEqual(data['commentable_id'], 'some_topic')
@ddt.ddt
@patch('lms.lib.comment_client.utils.requests.request')
@disable_signal(views, 'thread_created')
@disable_signal(views, 'thread_edited')
class ViewsQueryCountTestCase(UrlResetMixin, ModuleStoreTestCase, MockRequestSetupMixin, ViewsTestCaseMixin):
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(ViewsQueryCountTestCase, self).setUp(create_user=False)
def clear_caches(self):
"""Clears caches so that query count numbers are accurate."""
for cache in settings.CACHES:
get_cache(cache).clear()
RequestCache.clear_request_cache()
def count_queries(func): # pylint: disable=no-self-argument
"""
Decorates test methods to count mongo and SQL calls for a
particular modulestore.
"""
def inner(self, default_store, module_count, mongo_calls, sql_queries, *args, **kwargs):
with modulestore().default_store(default_store):
self.set_up_course(module_count=module_count)
self.clear_caches()
with self.assertNumQueries(sql_queries):
with check_mongo_calls(mongo_calls):
func(self, *args, **kwargs)
return inner
@ddt.data(
(ModuleStoreEnum.Type.mongo, 3, 4, 22),
(ModuleStoreEnum.Type.mongo, 20, 4, 22),
(ModuleStoreEnum.Type.split, 3, 13, 22),
(ModuleStoreEnum.Type.split, 20, 13, 22),
)
@ddt.unpack
@count_queries
def test_create_thread(self, mock_request):
self.create_thread_helper(mock_request)
@ddt.data(
(ModuleStoreEnum.Type.mongo, 3, 3, 16),
(ModuleStoreEnum.Type.mongo, 20, 3, 16),
(ModuleStoreEnum.Type.split, 3, 10, 16),
(ModuleStoreEnum.Type.split, 20, 10, 16),
)
@ddt.unpack
@count_queries
def test_update_thread(self, mock_request):
self.update_thread_helper(mock_request)
@ddt.ddt
@patch('lms.lib.comment_client.utils.requests.request')
class ViewsTestCase(
UrlResetMixin,
ModuleStoreTestCase,
MockRequestSetupMixin,
ViewsTestCaseMixin,
MockSignalHandlerMixin
):
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
# Patching the ENABLE_DISCUSSION_SERVICE value affects the contents of urls.py,
# so we need to call super.setUp() which reloads urls.py (because
# of the UrlResetMixin)
super(ViewsTestCase, self).setUp(create_user=False)
self.set_up_course()
@contextmanager
def assert_discussion_signals(self, signal, user=None):
if user is None:
user = self.student
with self.assert_signal_sent(views, signal, sender=None, user=user, exclude_args=('post',)):
yield
def test_create_thread(self, mock_request):
with self.assert_discussion_signals('thread_created'):
self.create_thread_helper(mock_request)
def test_create_thread_standalone(self, mock_request):
team = CourseTeamFactory.create(
name="A Team",
course_id=self.course_id,
topic_id='topic_id',
discussion_topic_id="i4x-MITx-999-course-Robot_Super_Course"
)
# Add the student to the team so they can post to the commentable.
team.add_user(self.student)
# create_thread_helper verifies that extra data are passed through to the comments service
self.create_thread_helper(mock_request, extra_response_data={'context': ThreadContext.STANDALONE})
def test_delete_thread(self, mock_request):
self._set_mock_request_data(mock_request, {
"user_id": str(self.student.id),
"closed": False,
})
test_thread_id = "test_thread_id"
request = RequestFactory().post("dummy_url", {"id": test_thread_id})
request.user = self.student
request.view_name = "delete_thread"
with self.assert_discussion_signals('thread_deleted'):
response = views.delete_thread(
request,
course_id=unicode(self.course.id),
thread_id=test_thread_id
)
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
def test_delete_comment(self, mock_request):
self._set_mock_request_data(mock_request, {
"user_id": str(self.student.id),
"closed": False,
})
test_comment_id = "test_comment_id"
request = RequestFactory().post("dummy_url", {"id": test_comment_id})
request.user = self.student
request.view_name = "delete_comment"
with self.assert_discussion_signals('comment_deleted'):
response = views.delete_comment(
request,
course_id=unicode(self.course.id),
comment_id=test_comment_id
)
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
args = mock_request.call_args[0]
self.assertEqual(args[0], "delete")
self.assertTrue(args[1].endswith("/{}".format(test_comment_id)))
def _test_request_error(self, view_name, view_kwargs, data, mock_request):
"""
Submit a request against the given view with the given data and ensure
that the result is a 400 error and that no data was posted using
mock_request
"""
self._setup_mock_request(mock_request, include_depth=(view_name == "create_sub_comment"))
response = self.client.post(reverse(view_name, kwargs=view_kwargs), data=data)
self.assertEqual(response.status_code, 400)
for call in mock_request.call_args_list:
self.assertEqual(call[0][0].lower(), "get")
def test_create_thread_no_title(self, mock_request):
self._test_request_error(
"create_thread",
{"commentable_id": "dummy", "course_id": unicode(self.course_id)},
{"body": "foo"},
mock_request
)
def test_create_thread_empty_title(self, mock_request):
self._test_request_error(
"create_thread",
{"commentable_id": "dummy", "course_id": unicode(self.course_id)},
{"body": "foo", "title": " "},
mock_request
)
def test_create_thread_no_body(self, mock_request):
self._test_request_error(
"create_thread",
{"commentable_id": "dummy", "course_id": unicode(self.course_id)},
{"title": "foo"},
mock_request
)
def test_create_thread_empty_body(self, mock_request):
self._test_request_error(
"create_thread",
{"commentable_id": "dummy", "course_id": unicode(self.course_id)},
{"body": " ", "title": "foo"},
mock_request
)
def test_update_thread_no_title(self, mock_request):
self._test_request_error(
"update_thread",
{"thread_id": "dummy", "course_id": unicode(self.course_id)},
{"body": "foo"},
mock_request
)
def test_update_thread_empty_title(self, mock_request):
self._test_request_error(
"update_thread",
{"thread_id": "dummy", "course_id": unicode(self.course_id)},
{"body": "foo", "title": " "},
mock_request
)
def test_update_thread_no_body(self, mock_request):
self._test_request_error(
"update_thread",
{"thread_id": "dummy", "course_id": unicode(self.course_id)},
{"title": "foo"},
mock_request
)
def test_update_thread_empty_body(self, mock_request):
self._test_request_error(
"update_thread",
{"thread_id": "dummy", "course_id": unicode(self.course_id)},
{"body": " ", "title": "foo"},
mock_request
)
def test_update_thread_course_topic(self, mock_request):
with self.assert_discussion_signals('thread_edited'):
self.update_thread_helper(mock_request)
@patch('django_comment_client.utils.get_discussion_categories_ids', return_value=["test_commentable"])
def test_update_thread_wrong_commentable_id(self, mock_get_discussion_id_map, mock_request):
self._test_request_error(
"update_thread",
{"thread_id": "dummy", "course_id": unicode(self.course_id)},
{"body": "foo", "title": "foo", "commentable_id": "wrong_commentable"},
mock_request
)
def test_create_comment(self, mock_request):
self._setup_mock_request(mock_request)
with self.assert_discussion_signals('comment_created'):
response = self.client.post(
reverse(
"create_comment",
kwargs={"course_id": unicode(self.course_id), "thread_id": "dummy"}
),
data={"body": "body"}
)
self.assertEqual(response.status_code, 200)
def test_create_comment_no_body(self, mock_request):
self._test_request_error(
"create_comment",
{"thread_id": "dummy", "course_id": unicode(self.course_id)},
{},
mock_request
)
def test_create_comment_empty_body(self, mock_request):
self._test_request_error(
"create_comment",
{"thread_id": "dummy", "course_id": unicode(self.course_id)},
{"body": " "},
mock_request
)
def test_create_sub_comment_no_body(self, mock_request):
self._test_request_error(
"create_sub_comment",
{"comment_id": "dummy", "course_id": unicode(self.course_id)},
{},
mock_request
)
def test_create_sub_comment_empty_body(self, mock_request):
self._test_request_error(
"create_sub_comment",
{"comment_id": "dummy", "course_id": unicode(self.course_id)},
{"body": " "},
mock_request
)
def test_update_comment_no_body(self, mock_request):
self._test_request_error(
"update_comment",
{"comment_id": "dummy", "course_id": unicode(self.course_id)},
{},
mock_request
)
def test_update_comment_empty_body(self, mock_request):
self._test_request_error(
"update_comment",
{"comment_id": "dummy", "course_id": unicode(self.course_id)},
{"body": " "},
mock_request
)
def test_update_comment_basic(self, mock_request):
self._setup_mock_request(mock_request)
comment_id = "test_comment_id"
updated_body = "updated body"
with self.assert_discussion_signals('comment_edited'):
response = self.client.post(
reverse(
"update_comment",
kwargs={"course_id": unicode(self.course_id), "comment_id": comment_id}
),
data={"body": updated_body}
)
self.assertEqual(response.status_code, 200)
mock_request.assert_called_with(
"put",
"{prefix}/comments/{comment_id}".format(prefix=CS_PREFIX, comment_id=comment_id),
headers=ANY,
params=ANY,
timeout=ANY,
data={"body": updated_body}
)
def test_flag_thread_open(self, mock_request):
self.flag_thread(mock_request, False)
def test_flag_thread_close(self, mock_request):
self.flag_thread(mock_request, True)
def flag_thread(self, mock_request, is_closed):
mock_request.return_value.status_code = 200
self._set_mock_request_data(mock_request, {
"title": "Hello",
"body": "this is a post",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": is_closed,
"id": "518d4237b023791dca00000d",
"user_id": "1", "username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [1],
"type": "thread",
"group_id": None,
"pinned": False,
"endorsed": False,
"unread_comments_count": 0,
"read": False,
"comments_count": 0,
})
url = reverse('flag_abuse_for_thread', kwargs={
'thread_id': '518d4237b023791dca00000d',
'course_id': unicode(self.course_id)
})
response = self.client.post(url)
assert_true(mock_request.called)
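        # The view should hit the comments service three times: GET the
        # thread, PUT the abuse flag, then GET the refreshed thread.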
call_list = [
(
('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'mark_as_read': True, 'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('put', '{prefix}/threads/518d4237b023791dca00000d/abuse_flag'.format(prefix=CS_PREFIX)),
{
'data': {'user_id': '1'},
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'mark_as_read': True, 'request_id': ANY},
'headers': ANY,
'timeout': 5
}
)
]
assert_equal(call_list, mock_request.call_args_list)
assert_equal(response.status_code, 200)
def test_un_flag_thread_open(self, mock_request):
self.un_flag_thread(mock_request, False)
def test_un_flag_thread_close(self, mock_request):
self.un_flag_thread(mock_request, True)
def un_flag_thread(self, mock_request, is_closed):
mock_request.return_value.status_code = 200
self._set_mock_request_data(mock_request, {
"title": "Hello",
"body": "this is a post",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": is_closed,
"id": "518d4237b023791dca00000d",
"user_id": "1",
"username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [],
"type": "thread",
"group_id": None,
"pinned": False,
"endorsed": False,
"unread_comments_count": 0,
"read": False,
"comments_count": 0
})
url = reverse('un_flag_abuse_for_thread', kwargs={
'thread_id': '518d4237b023791dca00000d',
'course_id': unicode(self.course_id)
})
response = self.client.post(url)
assert_true(mock_request.called)
call_list = [
(
('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'mark_as_read': True, 'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('put', '{prefix}/threads/518d4237b023791dca00000d/abuse_unflag'.format(prefix=CS_PREFIX)),
{
'data': {'user_id': '1'},
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'mark_as_read': True, 'request_id': ANY},
'headers': ANY,
'timeout': 5
}
)
]
assert_equal(call_list, mock_request.call_args_list)
assert_equal(response.status_code, 200)
def test_flag_comment_open(self, mock_request):
self.flag_comment(mock_request, False)
def test_flag_comment_close(self, mock_request):
self.flag_comment(mock_request, True)
def flag_comment(self, mock_request, is_closed):
mock_request.return_value.status_code = 200
self._set_mock_request_data(mock_request, {
"body": "this is a comment",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": is_closed,
"id": "518d4237b023791dca00000d",
"user_id": "1",
"username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [1],
"type": "comment",
"endorsed": False
})
url = reverse('flag_abuse_for_comment', kwargs={
'comment_id': '518d4237b023791dca00000d',
'course_id': unicode(self.course_id)
})
response = self.client.post(url)
assert_true(mock_request.called)
call_list = [
(
('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('put', '{prefix}/comments/518d4237b023791dca00000d/abuse_flag'.format(prefix=CS_PREFIX)),
{
'data': {'user_id': '1'},
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
)
]
assert_equal(call_list, mock_request.call_args_list)
assert_equal(response.status_code, 200)
def test_un_flag_comment_open(self, mock_request):
self.un_flag_comment(mock_request, False)
def test_un_flag_comment_close(self, mock_request):
self.un_flag_comment(mock_request, True)
def un_flag_comment(self, mock_request, is_closed):
mock_request.return_value.status_code = 200
self._set_mock_request_data(mock_request, {
"body": "this is a comment",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": is_closed,
"id": "518d4237b023791dca00000d",
"user_id": "1",
"username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [],
"type": "comment",
"endorsed": False
})
url = reverse('un_flag_abuse_for_comment', kwargs={
'comment_id': '518d4237b023791dca00000d',
'course_id': unicode(self.course_id)
})
response = self.client.post(url)
assert_true(mock_request.called)
call_list = [
(
('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('put', '{prefix}/comments/518d4237b023791dca00000d/abuse_unflag'.format(prefix=CS_PREFIX)),
{
'data': {'user_id': '1'},
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
)
]
assert_equal(call_list, mock_request.call_args_list)
assert_equal(response.status_code, 200)
@ddt.data(
('upvote_thread', 'thread_id', 'thread_voted'),
('upvote_comment', 'comment_id', 'comment_voted'),
('downvote_thread', 'thread_id', 'thread_voted'),
('downvote_comment', 'comment_id', 'comment_voted')
)
@ddt.unpack
def test_voting(self, view_name, item_id, signal, mock_request):
self._setup_mock_request(mock_request)
with self.assert_discussion_signals(signal):
response = self.client.post(
reverse(
view_name,
kwargs={item_id: 'dummy', 'course_id': unicode(self.course_id)}
)
)
self.assertEqual(response.status_code, 200)
def test_endorse_comment(self, mock_request):
self._setup_mock_request(mock_request)
self.client.login(username=self.moderator.username, password=self.password)
with self.assert_discussion_signals('comment_endorsed', user=self.moderator):
response = self.client.post(
reverse(
'endorse_comment',
kwargs={'comment_id': 'dummy', 'course_id': unicode(self.course_id)}
)
)
self.assertEqual(response.status_code, 200)
@patch("lms.lib.comment_client.utils.requests.request")
@disable_signal(views, 'comment_endorsed')
class ViewPermissionsTestCase(UrlResetMixin, ModuleStoreTestCase, MockRequestSetupMixin):
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(ViewPermissionsTestCase, self).setUp()
self.password = "test password"
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create(password=self.password)
self.moderator = UserFactory.create(password=self.password)
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
CourseEnrollmentFactory(user=self.moderator, course_id=self.course.id)
self.moderator.roles.add(Role.objects.get(name="Moderator", course_id=self.course.id))
def test_pin_thread_as_student(self, mock_request):
self._set_mock_request_data(mock_request, {})
self.client.login(username=self.student.username, password=self.password)
response = self.client.post(
reverse("pin_thread", kwargs={"course_id": unicode(self.course.id), "thread_id": "dummy"})
)
self.assertEqual(response.status_code, 401)
def test_pin_thread_as_moderator(self, mock_request):
self._set_mock_request_data(mock_request, {})
self.client.login(username=self.moderator.username, password=self.password)
response = self.client.post(
reverse("pin_thread", kwargs={"course_id": unicode(self.course.id), "thread_id": "dummy"})
)
self.assertEqual(response.status_code, 200)
def test_un_pin_thread_as_student(self, mock_request):
self._set_mock_request_data(mock_request, {})
self.client.login(username=self.student.username, password=self.password)
response = self.client.post(
reverse("un_pin_thread", kwargs={"course_id": unicode(self.course.id), "thread_id": "dummy"})
)
self.assertEqual(response.status_code, 401)
def test_un_pin_thread_as_moderator(self, mock_request):
self._set_mock_request_data(mock_request, {})
self.client.login(username=self.moderator.username, password=self.password)
response = self.client.post(
reverse("un_pin_thread", kwargs={"course_id": unicode(self.course.id), "thread_id": "dummy"})
)
self.assertEqual(response.status_code, 200)
def _set_mock_request_thread_and_comment(self, mock_request, thread_data, comment_data):
def handle_request(*args, **kwargs):
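            # args mirror requests.request(method, url, ...); route on the
            # URL so thread and comment lookups get different payloads.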
url = args[1]
if "/threads/" in url:
return self._create_response_mock(thread_data)
elif "/comments/" in url:
return self._create_response_mock(comment_data)
else:
                raise ValueError("Bad URL to mock request")
mock_request.side_effect = handle_request
def test_endorse_response_as_staff(self, mock_request):
self._set_mock_request_thread_and_comment(
mock_request,
{"type": "thread", "thread_type": "question", "user_id": str(self.student.id)},
{"type": "comment", "thread_id": "dummy"}
)
self.client.login(username=self.moderator.username, password=self.password)
response = self.client.post(
reverse("endorse_comment", kwargs={"course_id": unicode(self.course.id), "comment_id": "dummy"})
)
self.assertEqual(response.status_code, 200)
def test_endorse_response_as_student(self, mock_request):
self._set_mock_request_thread_and_comment(
mock_request,
{"type": "thread", "thread_type": "question", "user_id": str(self.moderator.id)},
{"type": "comment", "thread_id": "dummy"}
)
self.client.login(username=self.student.username, password=self.password)
response = self.client.post(
reverse("endorse_comment", kwargs={"course_id": unicode(self.course.id), "comment_id": "dummy"})
)
self.assertEqual(response.status_code, 401)
def test_endorse_response_as_student_question_author(self, mock_request):
self._set_mock_request_thread_and_comment(
mock_request,
{"type": "thread", "thread_type": "question", "user_id": str(self.student.id)},
{"type": "comment", "thread_id": "dummy"}
)
self.client.login(username=self.student.username, password=self.password)
response = self.client.post(
reverse("endorse_comment", kwargs={"course_id": unicode(self.course.id), "comment_id": "dummy"})
)
self.assertEqual(response.status_code, 200)
class CreateThreadUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
def setUp(self):
super(CreateThreadUnicodeTestCase, self).setUp()
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('lms.lib.comment_client.utils.requests.request')
    def _test_unicode_data(self, text, mock_request):
"""
Test to make sure unicode data in a thread doesn't break it.
"""
self._set_mock_request_data(mock_request, {})
request = RequestFactory().post("dummy_url", {"thread_type": "discussion", "body": text, "title": text})
request.user = self.student
request.view_name = "create_thread"
response = views.create_thread(
request, course_id=unicode(self.course.id), commentable_id="non_team_dummy_id"
)
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
self.assertEqual(mock_request.call_args[1]["data"]["title"], text)
@disable_signal(views, 'thread_edited')
class UpdateThreadUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
def setUp(self):
super(UpdateThreadUnicodeTestCase, self).setUp()
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('django_comment_client.utils.get_discussion_categories_ids', return_value=["test_commentable"])
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request, mock_get_discussion_id_map):
self._set_mock_request_data(mock_request, {
"user_id": str(self.student.id),
"closed": False,
})
request = RequestFactory().post("dummy_url", {"body": text, "title": text, "thread_type": "question", "commentable_id": "test_commentable"})
request.user = self.student
request.view_name = "update_thread"
response = views.update_thread(request, course_id=unicode(self.course.id), thread_id="dummy_thread_id")
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
self.assertEqual(mock_request.call_args[1]["data"]["title"], text)
self.assertEqual(mock_request.call_args[1]["data"]["thread_type"], "question")
self.assertEqual(mock_request.call_args[1]["data"]["commentable_id"], "test_commentable")
@disable_signal(views, 'comment_created')
class CreateCommentUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
def setUp(self):
super(CreateCommentUnicodeTestCase, self).setUp()
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request):
commentable_id = "non_team_dummy_id"
self._set_mock_request_data(mock_request, {
"closed": False,
"commentable_id": commentable_id
})
        # We have to get clever here due to Thread's setters and getters:
        # mock.patch won't work with them, so set the class attribute directly.
try:
Thread.commentable_id = commentable_id
request = RequestFactory().post("dummy_url", {"body": text})
request.user = self.student
request.view_name = "create_comment"
response = views.create_comment(
request, course_id=unicode(self.course.id), thread_id="dummy_thread_id"
)
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
finally:
del Thread.commentable_id
@disable_signal(views, 'comment_edited')
class UpdateCommentUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
def setUp(self):
super(UpdateCommentUnicodeTestCase, self).setUp()
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request):
self._set_mock_request_data(mock_request, {
"user_id": str(self.student.id),
"closed": False,
})
request = RequestFactory().post("dummy_url", {"body": text})
request.user = self.student
request.view_name = "update_comment"
response = views.update_comment(request, course_id=unicode(self.course.id), comment_id="dummy_comment_id")
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
@disable_signal(views, 'comment_created')
class CreateSubCommentUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
"""
Make sure comments under a response can handle unicode.
"""
def setUp(self):
super(CreateSubCommentUnicodeTestCase, self).setUp()
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request):
"""
Create a comment with unicode in it.
"""
self._set_mock_request_data(mock_request, {
"closed": False,
"depth": 1,
"thread_id": "test_thread",
"commentable_id": "non_team_dummy_id"
})
request = RequestFactory().post("dummy_url", {"body": text})
request.user = self.student
request.view_name = "create_sub_comment"
Thread.commentable_id = "test_commentable"
try:
response = views.create_sub_comment(
request, course_id=unicode(self.course.id), comment_id="dummy_comment_id"
)
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
finally:
del Thread.commentable_id
@ddt.ddt
@patch("lms.lib.comment_client.utils.requests.request")
@disable_signal(views, 'thread_voted')
@disable_signal(views, 'thread_edited')
@disable_signal(views, 'comment_created')
@disable_signal(views, 'comment_voted')
@disable_signal(views, 'comment_deleted')
class TeamsPermissionsTestCase(UrlResetMixin, ModuleStoreTestCase, MockRequestSetupMixin):
# Most of the test points use the same ddt data.
# args: user, commentable_id, status_code
ddt_permissions_args = [
# Student in team can do operations on threads/comments within the team commentable.
('student_in_team', 'team_commentable_id', 200),
# Non-team commentables can be edited by any student.
('student_in_team', 'course_commentable_id', 200),
# Student not in team cannot do operations within the team commentable.
('student_not_in_team', 'team_commentable_id', 401),
# Non-team commentables can be edited by any student.
('student_not_in_team', 'course_commentable_id', 200),
        # Moderators can always operate on threads within a team, regardless of team membership.
('moderator', 'team_commentable_id', 200)
]
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(TeamsPermissionsTestCase, self).setUp()
self.password = "test password"
teams_configuration = {
'topics': [{'id': "topic_id", 'name': 'Solar Power', 'description': 'Solar power is hot'}]
}
self.course = CourseFactory.create(teams_configuration=teams_configuration)
seed_permissions_roles(self.course.id)
# Create 3 users-- student in team, student not in team, discussion moderator
self.student_in_team = UserFactory.create(password=self.password)
self.student_not_in_team = UserFactory.create(password=self.password)
self.moderator = UserFactory.create(password=self.password)
CourseEnrollmentFactory(user=self.student_in_team, course_id=self.course.id)
CourseEnrollmentFactory(user=self.student_not_in_team, course_id=self.course.id)
CourseEnrollmentFactory(user=self.moderator, course_id=self.course.id)
self.moderator.roles.add(Role.objects.get(name="Moderator", course_id=self.course.id))
# Create a team.
self.team_commentable_id = "team_discussion_id"
self.team = CourseTeamFactory.create(
name=u'The Only Team',
course_id=self.course.id,
topic_id='topic_id',
discussion_topic_id=self.team_commentable_id
)
self.team.add_user(self.student_in_team)
# Dummy commentable ID not linked to a team
self.course_commentable_id = "course_level_commentable"
def _setup_mock(self, user, mock_request, data):
user = getattr(self, user)
self._set_mock_request_data(mock_request, data)
self.client.login(username=user.username, password=self.password)
@ddt.data(
# student_in_team will be able to update his own post, regardless of team membership
('student_in_team', 'student_in_team', 'team_commentable_id', 200),
('student_in_team', 'student_in_team', 'course_commentable_id', 200),
# students can only update their own posts
('student_in_team', 'moderator', 'team_commentable_id', 401),
# Even though student_not_in_team is not in the team, he can still modify posts he created while in the team.
('student_not_in_team', 'student_not_in_team', 'team_commentable_id', 200),
# Moderators can change their own posts and other people's posts.
('moderator', 'moderator', 'team_commentable_id', 200),
('moderator', 'student_in_team', 'team_commentable_id', 200),
)
@ddt.unpack
def test_update_thread(self, user, thread_author, commentable_id, status_code, mock_request):
"""
Verify that update_thread is limited to thread authors and privileged users (team membership does not matter).
"""
commentable_id = getattr(self, commentable_id)
# thread_author is who is marked as the author of the thread being updated.
thread_author = getattr(self, thread_author)
self._setup_mock(
user, mock_request, # user is the person making the request.
{
"user_id": str(thread_author.id),
"closed": False, "commentable_id": commentable_id,
"context": "standalone"
}
)
response = self.client.post(
reverse(
"update_thread",
kwargs={
"course_id": unicode(self.course.id),
"thread_id": "dummy"
}
),
data={"body": "foo", "title": "foo", "commentable_id": commentable_id}
)
self.assertEqual(response.status_code, status_code)
@ddt.data(
# Students can delete their own posts
('student_in_team', 'student_in_team', 'team_commentable_id', 200),
# Moderators can delete any post
('moderator', 'student_in_team', 'team_commentable_id', 200),
# Others cannot delete posts
('student_in_team', 'moderator', 'team_commentable_id', 401),
('student_not_in_team', 'student_in_team', 'team_commentable_id', 401)
)
@ddt.unpack
def test_delete_comment(self, user, comment_author, commentable_id, status_code, mock_request):
commentable_id = getattr(self, commentable_id)
comment_author = getattr(self, comment_author)
self._setup_mock(user, mock_request, {
"closed": False,
"commentable_id": commentable_id,
"user_id": str(comment_author.id)
})
response = self.client.post(
reverse(
"delete_comment",
kwargs={
"course_id": unicode(self.course.id),
"comment_id": "dummy"
}
),
data={"body": "foo", "title": "foo"}
)
self.assertEqual(response.status_code, status_code)
@ddt.data(*ddt_permissions_args)
@ddt.unpack
def test_create_comment(self, user, commentable_id, status_code, mock_request):
"""
Verify that create_comment is limited to members of the team or users with 'edit_content' permission.
"""
commentable_id = getattr(self, commentable_id)
self._setup_mock(user, mock_request, {"closed": False, "commentable_id": commentable_id})
response = self.client.post(
reverse(
"create_comment",
kwargs={
"course_id": unicode(self.course.id),
"thread_id": "dummy"
}
),
data={"body": "foo", "title": "foo"}
)
self.assertEqual(response.status_code, status_code)
@ddt.data(*ddt_permissions_args)
@ddt.unpack
def test_create_sub_comment(self, user, commentable_id, status_code, mock_request):
"""
Verify that create_subcomment is limited to members of the team or users with 'edit_content' permission.
"""
commentable_id = getattr(self, commentable_id)
self._setup_mock(
user, mock_request,
{"closed": False, "commentable_id": commentable_id, "thread_id": "dummy_thread"},
)
response = self.client.post(
reverse(
"create_sub_comment",
kwargs={
"course_id": unicode(self.course.id),
"comment_id": "dummy_comment"
}
),
data={"body": "foo", "title": "foo"}
)
self.assertEqual(response.status_code, status_code)
@ddt.data(*ddt_permissions_args)
@ddt.unpack
def test_comment_actions(self, user, commentable_id, status_code, mock_request):
"""
Verify that voting and flagging of comments is limited to members of the team or users with
'edit_content' permission.
"""
commentable_id = getattr(self, commentable_id)
self._setup_mock(
user, mock_request,
{"closed": False, "commentable_id": commentable_id, "thread_id": "dummy_thread"},
)
for action in ["upvote_comment", "downvote_comment", "un_flag_abuse_for_comment", "flag_abuse_for_comment"]:
response = self.client.post(
reverse(
action,
kwargs={"course_id": unicode(self.course.id), "comment_id": "dummy_comment"}
)
)
self.assertEqual(response.status_code, status_code)
@ddt.data(*ddt_permissions_args)
@ddt.unpack
def test_threads_actions(self, user, commentable_id, status_code, mock_request):
"""
Verify that voting, flagging, and following of threads is limited to members of the team or users with
'edit_content' permission.
"""
commentable_id = getattr(self, commentable_id)
self._setup_mock(
user, mock_request,
{"closed": False, "commentable_id": commentable_id},
)
for action in ["upvote_thread", "downvote_thread", "un_flag_abuse_for_thread", "flag_abuse_for_thread",
"follow_thread", "unfollow_thread"]:
response = self.client.post(
reverse(
action,
kwargs={"course_id": unicode(self.course.id), "thread_id": "dummy_thread"}
)
)
self.assertEqual(response.status_code, status_code)
@ddt.data(*ddt_permissions_args)
@ddt.unpack
def test_create_thread(self, user, commentable_id, status_code, __):
"""
Verify that creation of threads is limited to members of the team or users with 'edit_content' permission.
"""
commentable_id = getattr(self, commentable_id)
        # mock_request is not used because Commentables don't exist in the comment service.
self.client.login(username=getattr(self, user).username, password=self.password)
response = self.client.post(
reverse(
"create_thread",
kwargs={"course_id": unicode(self.course.id), "commentable_id": commentable_id}
),
data={"body": "foo", "title": "foo", "thread_type": "discussion"}
)
self.assertEqual(response.status_code, status_code)
@ddt.data(*ddt_permissions_args)
@ddt.unpack
def test_commentable_actions(self, user, commentable_id, status_code, __):
"""
Verify that following of commentables is limited to members of the team or users with
'edit_content' permission.
"""
commentable_id = getattr(self, commentable_id)
        # mock_request is not used because Commentables don't exist in the comment service.
self.client.login(username=getattr(self, user).username, password=self.password)
for action in ["follow_commentable", "unfollow_commentable"]:
response = self.client.post(
reverse(
action,
kwargs={"course_id": unicode(self.course.id), "commentable_id": commentable_id}
)
)
self.assertEqual(response.status_code, status_code)
TEAM_COMMENTABLE_ID = 'test-team-discussion'
@disable_signal(views, 'comment_created')
@ddt.ddt
class ForumEventTestCase(ModuleStoreTestCase, MockRequestSetupMixin):
"""
Forum actions are expected to launch analytics events. Test these here.
"""
def setUp(self):
super(ForumEventTestCase, self).setUp()
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
self.student.roles.add(Role.objects.get(name="Student", course_id=self.course.id))
CourseAccessRoleFactory(course_id=self.course.id, user=self.student, role='Wizard')
@patch('eventtracking.tracker.emit')
@patch('lms.lib.comment_client.utils.requests.request')
def test_thread_event(self, __, mock_emit):
request = RequestFactory().post(
"dummy_url", {
"thread_type": "discussion",
"body": "Test text",
"title": "Test",
"auto_subscribe": True
}
)
request.user = self.student
request.view_name = "create_thread"
views.create_thread(request, course_id=unicode(self.course.id), commentable_id="test_commentable")
event_name, event = mock_emit.call_args[0]
self.assertEqual(event_name, 'edx.forum.thread.created')
self.assertEqual(event['body'], 'Test text')
self.assertEqual(event['title'], 'Test')
self.assertEqual(event['commentable_id'], 'test_commentable')
self.assertEqual(event['user_forums_roles'], ['Student'])
self.assertEqual(event['options']['followed'], True)
self.assertEqual(event['user_course_roles'], ['Wizard'])
self.assertEqual(event['anonymous'], False)
self.assertEqual(event['group_id'], None)
self.assertEqual(event['thread_type'], 'discussion')
        self.assertEqual(event['anonymous_to_peers'], False)
@patch('eventtracking.tracker.emit')
@patch('lms.lib.comment_client.utils.requests.request')
def test_response_event(self, mock_request, mock_emit):
"""
Check to make sure an event is fired when a user responds to a thread.
"""
mock_request.return_value.status_code = 200
self._set_mock_request_data(mock_request, {
"closed": False,
"commentable_id": 'test_commentable_id',
'thread_id': 'test_thread_id',
})
request = RequestFactory().post("dummy_url", {"body": "Test comment", 'auto_subscribe': True})
request.user = self.student
request.view_name = "create_comment"
views.create_comment(request, course_id=unicode(self.course.id), thread_id='test_thread_id')
event_name, event = mock_emit.call_args[0]
self.assertEqual(event_name, 'edx.forum.response.created')
self.assertEqual(event['body'], "Test comment")
self.assertEqual(event['commentable_id'], 'test_commentable_id')
self.assertEqual(event['user_forums_roles'], ['Student'])
self.assertEqual(event['user_course_roles'], ['Wizard'])
self.assertEqual(event['discussion']['id'], 'test_thread_id')
self.assertEqual(event['options']['followed'], True)
@patch('eventtracking.tracker.emit')
@patch('lms.lib.comment_client.utils.requests.request')
def test_comment_event(self, mock_request, mock_emit):
"""
Ensure an event is fired when someone comments on a response.
"""
self._set_mock_request_data(mock_request, {
"closed": False,
"depth": 1,
"thread_id": "test_thread_id",
"commentable_id": "test_commentable_id",
"parent_id": "test_response_id"
})
request = RequestFactory().post("dummy_url", {"body": "Another comment"})
request.user = self.student
request.view_name = "create_sub_comment"
views.create_sub_comment(request, course_id=unicode(self.course.id), comment_id="dummy_comment_id")
event_name, event = mock_emit.call_args[0]
self.assertEqual(event_name, "edx.forum.comment.created")
self.assertEqual(event['body'], 'Another comment')
self.assertEqual(event['discussion']['id'], 'test_thread_id')
self.assertEqual(event['response']['id'], 'test_response_id')
self.assertEqual(event['user_forums_roles'], ['Student'])
self.assertEqual(event['user_course_roles'], ['Wizard'])
self.assertEqual(event['options']['followed'], False)
@patch('eventtracking.tracker.emit')
@patch('lms.lib.comment_client.utils.requests.request')
@ddt.data((
'create_thread',
'edx.forum.thread.created', {
'thread_type': 'discussion',
'body': 'Test text',
'title': 'Test',
'auto_subscribe': True
},
{'commentable_id': TEAM_COMMENTABLE_ID}
), (
'create_comment',
'edx.forum.response.created',
{'body': 'Test comment', 'auto_subscribe': True},
{'thread_id': 'test_thread_id'}
), (
'create_sub_comment',
'edx.forum.comment.created',
{'body': 'Another comment'},
{'comment_id': 'dummy_comment_id'}
))
@ddt.unpack
def test_team_events(self, view_name, event_name, view_data, view_kwargs, mock_request, mock_emit):
user = self.student
team = CourseTeamFactory.create(discussion_topic_id=TEAM_COMMENTABLE_ID)
CourseTeamMembershipFactory.create(team=team, user=user)
mock_request.return_value.status_code = 200
self._set_mock_request_data(mock_request, {
'closed': False,
'commentable_id': TEAM_COMMENTABLE_ID,
'thread_id': 'test_thread_id',
})
request = RequestFactory().post('dummy_url', view_data)
request.user = user
request.view_name = view_name
getattr(views, view_name)(request, course_id=unicode(self.course.id), **view_kwargs)
name, event = mock_emit.call_args[0]
self.assertEqual(name, event_name)
self.assertEqual(event['team_id'], team.team_id)
@ddt.data(
('vote_for_thread', 'thread_id', 'thread'),
('undo_vote_for_thread', 'thread_id', 'thread'),
('vote_for_comment', 'comment_id', 'response'),
('undo_vote_for_comment', 'comment_id', 'response'),
)
@ddt.unpack
@patch('eventtracking.tracker.emit')
@patch('lms.lib.comment_client.utils.requests.request')
def test_thread_voted_event(self, view_name, obj_id_name, obj_type, mock_request, mock_emit):
undo = view_name.startswith('undo')
self._set_mock_request_data(mock_request, {
'closed': False,
'commentable_id': 'test_commentable_id',
'username': 'gumprecht',
})
request = RequestFactory().post('dummy_url', {})
request.user = self.student
request.view_name = view_name
view_function = getattr(views, view_name)
kwargs = dict(course_id=unicode(self.course.id))
kwargs[obj_id_name] = obj_id_name
if not undo:
kwargs.update(value='up')
view_function(request, **kwargs)
self.assertTrue(mock_emit.called)
event_name, event = mock_emit.call_args[0]
self.assertEqual(event_name, 'edx.forum.{}.voted'.format(obj_type))
self.assertEqual(event['target_username'], 'gumprecht')
self.assertEqual(event['undo_vote'], undo)
self.assertEqual(event['vote_value'], 'up')
class UsersEndpointTestCase(ModuleStoreTestCase, MockRequestSetupMixin):
def set_post_counts(self, mock_request, threads_count=1, comments_count=1):
"""
sets up a mock response from the comments service for getting post counts for our other_user
"""
self._set_mock_request_data(mock_request, {
"threads_count": threads_count,
"comments_count": comments_count,
})
def setUp(self):
super(UsersEndpointTestCase, self).setUp()
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
self.enrollment = CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
self.other_user = UserFactory.create(username="other")
CourseEnrollmentFactory(user=self.other_user, course_id=self.course.id)
def make_request(self, method='get', course_id=None, **kwargs):
course_id = course_id or self.course.id
request = getattr(RequestFactory(), method)("dummy_url", kwargs)
request.user = self.student
request.view_name = "users"
return views.users(request, course_id=course_id.to_deprecated_string())
@patch('lms.lib.comment_client.utils.requests.request')
def test_finds_exact_match(self, mock_request):
self.set_post_counts(mock_request)
response = self.make_request(username="other")
self.assertEqual(response.status_code, 200)
self.assertEqual(
json.loads(response.content)["users"],
[{"id": self.other_user.id, "username": self.other_user.username}]
)
@patch('lms.lib.comment_client.utils.requests.request')
def test_finds_no_match(self, mock_request):
self.set_post_counts(mock_request)
response = self.make_request(username="othor")
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)["users"], [])
def test_requires_GET(self):
response = self.make_request(method='post', username="other")
self.assertEqual(response.status_code, 405)
def test_requires_username_param(self):
response = self.make_request()
self.assertEqual(response.status_code, 400)
content = json.loads(response.content)
self.assertIn("errors", content)
self.assertNotIn("users", content)
def test_course_does_not_exist(self):
course_id = CourseKey.from_string("does/not/exist")
response = self.make_request(course_id=course_id, username="other")
self.assertEqual(response.status_code, 404)
content = json.loads(response.content)
self.assertIn("errors", content)
self.assertNotIn("users", content)
def test_requires_requestor_enrolled_in_course(self):
# unenroll self.student from the course.
self.enrollment.delete()
response = self.make_request(username="other")
self.assertEqual(response.status_code, 404)
content = json.loads(response.content)
self.assertIn("errors", content)
self.assertNotIn("users", content)
@patch('lms.lib.comment_client.utils.requests.request')
def test_requires_matched_user_has_forum_content(self, mock_request):
self.set_post_counts(mock_request, 0, 0)
response = self.make_request(username="other")
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)["users"], [])
|
agpl-3.0
|
ilay09/keystone
|
keystone/resource/routers.py
|
3
|
4658
|
# Copyright 2013 Metacloud, Inc.
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""WSGI Routers for the Resource service."""
from keystone.common import json_home
from keystone.common import router
from keystone.common import wsgi
from keystone.resource import controllers
class Admin(wsgi.ComposableRouter):
def add_routes(self, mapper):
# Tenant Operations
tenant_controller = controllers.Tenant()
mapper.connect('/tenants',
controller=tenant_controller,
action='get_all_projects',
conditions=dict(method=['GET']))
mapper.connect('/tenants/{tenant_id}',
controller=tenant_controller,
action='get_project',
conditions=dict(method=['GET']))
class Routers(wsgi.RoutersBase):
def append_v3_routers(self, mapper, routers):
routers.append(
router.Router(controllers.DomainV3(),
'domains', 'domain',
resource_descriptions=self.v3_resources))
config_controller = controllers.DomainConfigV3()
self._add_resource(
mapper, config_controller,
path='/domains/{domain_id}/config',
get_head_action='get_domain_config',
put_action='create_domain_config',
patch_action='update_domain_config_only',
delete_action='delete_domain_config',
rel=json_home.build_v3_resource_relation('domain_config'),
path_vars={
'domain_id': json_home.Parameters.DOMAIN_ID
})
config_group_param = (
json_home.build_v3_parameter_relation('config_group'))
self._add_resource(
mapper, config_controller,
path='/domains/{domain_id}/config/{group}',
get_head_action='get_domain_config_wrapper',
patch_action='update_domain_config_group',
delete_action='delete_domain_config',
rel=json_home.build_v3_resource_relation('domain_config_group'),
path_vars={
'domain_id': json_home.Parameters.DOMAIN_ID,
'group': config_group_param
})
self._add_resource(
mapper, config_controller,
path='/domains/{domain_id}/config/{group}/{option}',
get_head_action='get_domain_config_wrapper',
patch_action='update_domain_config',
delete_action='delete_domain_config',
rel=json_home.build_v3_resource_relation('domain_config_option'),
path_vars={
'domain_id': json_home.Parameters.DOMAIN_ID,
'group': config_group_param,
'option': json_home.build_v3_parameter_relation(
'config_option')
})
self._add_resource(
mapper, config_controller,
path='/domains/config/default',
get_action='get_domain_config_default',
rel=json_home.build_v3_resource_relation('domain_config_default'))
self._add_resource(
mapper, config_controller,
path='/domains/config/{group}/default',
get_action='get_domain_config_default',
rel=json_home.build_v3_resource_relation(
'domain_config_default_group'),
path_vars={
'group': config_group_param
})
self._add_resource(
mapper, config_controller,
path='/domains/config/{group}/{option}/default',
get_action='get_domain_config_default',
rel=json_home.build_v3_resource_relation(
'domain_config_default_option'),
path_vars={
'group': config_group_param,
'option': json_home.build_v3_parameter_relation(
'config_option')
})
routers.append(
router.Router(controllers.ProjectV3(),
'projects', 'project',
resource_descriptions=self.v3_resources))
|
apache-2.0
|
yavuzovski/playground
|
python/Udacity/cs215/find_eulerian_tour.py
|
1
|
1219
|
def find_eulerian_tour(graph):
    # find the node with the biggest degree (count, for each endpoint of each
    # edge, how many edges touch it)
    biggest_degree, biggest_node = 0, None
    for edge in graph:
        for e in edge:
            count = sum(1 for other_edge in graph if e in other_edge)
            if count > biggest_degree:
                biggest_degree = count
                biggest_node = e
# set the starting point
    result = []
    for i, edge in enumerate(graph):
        if biggest_node in edge:
            current_node = edge[1] if edge[0] == biggest_node else edge[0]
            result = [biggest_node, current_node]
            graph.pop(i)
            break
# find the eulerian tour
i = 0
while i < len(graph):
if current_node == graph[i][0] or current_node == graph[i][1]:
current_node = (graph[i][1] if current_node == graph[i][0] else graph[i][0])
result.append(current_node)
graph.pop(i)
i = 0
else:
i += 1
return result
print(find_eulerian_tour(
[
(0, 1), (1, 5), (1, 7), (4, 5),
(4, 8), (1, 6), (3, 7), (5, 9),
(2, 4), (0, 4), (2, 5), (3, 6), (8, 9)
]
))
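# A minimal verification helper (an addition, not part of the original script):
# checks that a candidate tour uses every edge of the input exactly once and
# only steps along existing edges.
def is_eulerian_tour(edges, tour):
    remaining = list(edges)
    for a, b in zip(tour, tour[1:]):
        if (a, b) in remaining:
            remaining.remove((a, b))
        elif (b, a) in remaining:
            remaining.remove((b, a))
        else:
            return False
    return not remaining
# Hypothetical usage -- note the copy, since find_eulerian_tour consumes its input:
#   edge_list = [(0, 1), (1, 2), (2, 0)]
#   assert is_eulerian_tour(edge_list, find_eulerian_tour(edge_list[:]))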
|
gpl-3.0
|
diplomacy/research
|
diplomacy_research/models/layers/noisy_networks.py
|
1
|
4039
|
# ==============================================================================
# Copyright 2019 - Philip Paquette
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" Noisy Networks
- Converts variables in a graph to their noisy equivalent
"""
from math import sqrt
import sys
assert 'tensorflow' in sys.modules, 'You need to import TF before importing this module.'
from diplomacy_research.utils.tensorflow import tf
from diplomacy_research.utils.tensorflow import graph_editor
def convert_to_noisy_variables(variables, activation=None):
""" Converts a list of variables to noisy variables
:param variables: A list of variables to make noisy
:param activation: Optional. The activation function to use on the linear noisy transformation
:return: Nothing, but modifies the graph in-place
Reference: 1706.10295 - Noisy Networks for exploration
"""
if tf.get_collection(tf.GraphKeys.TRAIN_OP):
raise RuntimeError('You must call convert_to_noisy_variables before applying an optimizer on the graph.')
graph = tf.get_default_graph()
if not isinstance(variables, list):
variables = list(variables)
# Replacing each variable
for variable in variables:
variable_read_op = _get_variable_read_op(variable, graph)
variable_outputs = _get_variable_outputs(variable_read_op, graph)
variable_scope = variable.name.split(':')[0]
variable_shape = variable.shape.as_list()
fan_in = variable_shape[0]
# Creating noisy variables
with tf.variable_scope(variable_scope + '_noisy'):
with tf.device(variable.device):
s_init = tf.constant_initializer(0.5 / sqrt(fan_in))
noisy_u = tf.identity(variable, name='mu')
noisy_s = tf.get_variable(name='sigma',
shape=variable.shape,
dtype=tf.float32,
initializer=s_init,
caching_device=variable._caching_device) # pylint: disable=protected-access
noise = tf.random.normal(shape=variable_shape)
replaced_var = noisy_u + noisy_s * noise
replaced_var = activation(replaced_var) if activation else replaced_var
# Replacing in-place
inputs_index = [var_index for var_index, var_input in enumerate(graph_editor.sgv(*variable_outputs).inputs)
if var_input.name.split(':')[0] == variable_read_op.name.split(':')[0]]
graph_editor.connect(graph_editor.sgv(replaced_var.op),
graph_editor.sgv(*variable_outputs).remap_inputs(inputs_index),
disconnect_first=True)
def _get_variable_read_op(variable, graph):
""" Returns the /read operation for a variable """
return graph.get_operation_by_name(variable.name.split(':')[0] + '/read')
def _get_variable_outputs(variable_read_op, graph):
""" Returns the list of tensors that have the variable as input """
outputs = []
for graph_op in graph.get_operations():
for var_input in graph_op.inputs._inputs: # pylint: disable=protected-access
if var_input in variable_read_op.outputs:
outputs += [graph_op]
return outputs
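# Hedged usage sketch (an addition; layer and scope names are illustrative
# assumptions, not part of this module). Conversion must happen before the
# optimizer is applied, as enforced by the TRAIN_OP check above:
#
#   with tf.variable_scope('policy'):
#       logits = tf.layers.dense(states, num_actions)
#   kernels = [var for var in tf.trainable_variables('policy')
#              if 'kernel' in var.name]
#   convert_to_noisy_variables(kernels, activation=None)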
|
mit
|
techtonik/warehouse
|
warehouse/packaging/urls.py
|
3
|
1076
|
# Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
from werkzeug.routing import Rule, EndpointPrefix
urls = [
EndpointPrefix("warehouse.packaging.views.", [
Rule(
"/project/<project_name>/",
methods=["GET"],
endpoint="project_detail",
),
Rule(
"/project/<project_name>/<version>/",
methods=["GET"],
endpoint="project_detail",
),
]),
]
|
apache-2.0
|
ksrajkumar/openerp-6.1
|
openerp/addons/l10n_fr/wizard/fr_report_compute_resultant.py
|
7
|
2284
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from osv import osv, fields
class account_cdr_report(osv.osv_memory):
_name = 'account.cdr.report'
_description = 'Account CDR Report'
def _get_defaults(self, cr, uid, context=None):
fiscalyear_id = self.pool.get('account.fiscalyear').find(cr, uid)
return fiscalyear_id
_columns = {
'fiscalyear_id': fields.many2one('account.fiscalyear', 'Fiscal Year', required=True),
}
_defaults = {
'fiscalyear_id': _get_defaults
}
    def print_cdr_report(self, cr, uid, ids, context=None):
        if context is None:
            context = {}
        active_ids = context.get('active_ids', [])
data = {}
data['form'] = {}
data['ids'] = active_ids
data['form']['fiscalyear'] = self.browse(cr, uid, ids)[0].fiscalyear_id.id
return { 'type': 'ir.actions.report.xml', 'report_name': 'l10n.fr.compute_resultant', 'datas': data}
account_cdr_report()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
FarnazH/horton
|
setup.py
|
4
|
17908
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2017 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
import ConfigParser
import distutils.ccompiler
from distutils.command.install_data import install_data
from distutils.command.install_headers import install_headers
from distutils.core import setup
from distutils.extension import Extension
from glob import glob
import json
import os
import platform
import subprocess
import sys
import numpy as np
from Cython.Distutils import build_ext
# Distutils optimizations
# -----------------------
def parallelCCompile(self, sources, output_dir=None, macros=None,
include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None,
depends=None):
"""Monkey-patch for distutils compiler to run in parallel."""
# these lines are copied from distutils.ccompiler.CCompiler directly
macros, objects, extra_postargs, pp_opts, build = self._setup_compile(
output_dir, macros, include_dirs, sources, depends, extra_postargs)
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
# parallel code
N = 2 # number of parallel compilations
import multiprocessing.pool
def _single_compile(obj):
try:
src, ext = build[obj]
except KeyError:
return
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
# convert to list, imap is evaluated on-demand
list(multiprocessing.pool.ThreadPool(N).imap(_single_compile, objects))
return objects
distutils.ccompiler.CCompiler.compile = parallelCCompile
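# Hedged variant (an assumption, not in the original): derive the worker count
# from the host instead of hard-coding N = 2 above, e.g.
#
#   import multiprocessing
#   N = multiprocessing.cpu_count()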
# Utility functions
# -----------------
def get_sources(dirname):
'''Get all cpp files and the cext.pyx file of a package'''
# avoid accidental inclusion of in-place build files and inc files
result = [fn for fn in glob('%s/*.cpp' % dirname)
if not (('ext.cpp' in fn) or ('_inc.cpp' in fn))]
result.append('%s/cext.pyx' % dirname)
return result
def get_depends(dirname):
'''Get all files that should trigger a recompilation of the C extension of a package'''
result = glob('%s/*.h' % dirname)
result += glob('%s/*.pxd' % dirname)
return result
def get_headers():
'''Get all header-like files that need to be installed'''
result = []
for dn in ['horton/'] + glob('horton/*/'):
result.extend(glob('%s/*.h' % dn))
return result
class my_install_data(install_data):
"""Add a datadir.txt file that points to the root for the data files. It is
otherwise impossible to figure out the location of these data files at
runtime.
"""
def run(self):
# Do the normal install_data
install_data.run(self)
        # Create the file datadir.txt. Its exact content is only known
# at installation time. By default, it is the installation prefix
# passed to setup.py, but one can override it using the env var
# INSTALL_DIR, which may be useful for packaging, or any other
# situation where the installed files are moved to a new location
# afterwards.
my_install_dir = os.getenv("INSTALL_DIR", self.install_dir)
# Loop over all packages in this project and write the data_dir.txt
        # file only in the main package. Usually, there is only one that matters.
dist = self.distribution
libdir = dist.command_obj["install_lib"].install_dir
for name in dist.packages:
# If a package contains a dot, e.g. horton.test, then don't write
# the file data_dir.txt.
if '.' not in name:
destination = os.path.join(libdir, name, "data_dir.txt")
print "install_dir={}".format(my_install_dir)
print "Creating {}".format(destination)
if not self.dry_run:
with open(destination, "w") as f:
print >> f, my_install_dir
class my_install_headers(install_headers):
def run(self):
headers = self.distribution.headers
if not headers:
return
self.mkpath(self.install_dir)
for header in headers:
dest = os.path.join(os.path.dirname(self.install_dir), header)
dest_dn = os.path.dirname(dest)
if not os.path.isdir(dest_dn):
self.mkpath(dest_dn)
(out, _) = self.copy_file(header, dest)
self.outfiles.append(out)
# Library configuration functions
# -------------------------------
lib_config_keys = ['include_dirs', 'library_dirs', 'libraries', 'extra_objects',
'extra_compile_args', 'extra_link_args']
def print_lib_config(heading, lib_config):
'''Print (partial) lib_config'''
print ' %s' % heading
if len(lib_config) == 0:
print ' -'
else:
for key, value in sorted(lib_config.iteritems()):
if len(value) > 0:
print ' %s: %s' % (key, value)
def get_lib_config_setup(prefix, fn_setup_cfg):
'''Get library configuration from a setup.cfg'''
lib_config = {}
if os.path.isfile(fn_setup_cfg):
config = ConfigParser.ConfigParser()
config.read(fn_setup_cfg)
if config.has_section(prefix):
for key in lib_config_keys:
if config.has_option(prefix, key):
value = config.get(prefix, key).strip()
if value is not None and len(value) > 0:
lib_config[key] = value.split(':')
print_lib_config('From %s' % fn_setup_cfg, lib_config)
else:
print ' File %s not found. Skipping.' % fn_setup_cfg
return lib_config
def get_lib_config_env(prefix):
'''Read library config from the environment variables'''
lib_config = {}
for key in lib_config_keys:
varname = ('%s_%s' % (prefix, key)).upper()
value = os.getenv(varname)
if value is not None:
lib_config[key] = value.split(':')
print_lib_config('From environment variables', lib_config)
return lib_config
class PkgConfigError(Exception):
pass
def run_pkg_config(libname, option):
'''Safely try to call pkg-config'''
try:
return subprocess.check_output(['pkg-config', libname, '--' + option],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
raise PkgConfigError('pkg-config did not exit properly')
except OSError:
raise PkgConfigError('pkg-config not installed')
def get_lib_config_pkg(libname):
'''Get library config from the pkg-config program'''
lib_config = {
'include_dirs': [word[2:] for word in run_pkg_config(libname, 'cflags-only-I').split()],
'library_dirs': [word[2:] for word in run_pkg_config(libname, 'libs-only-L').split()],
'libraries': [word[2:] for word in run_pkg_config(libname, 'libs-only-l').split()],
'extra_compile_args': run_pkg_config(libname, 'cflags-only-other').split(),
'extra_link_args': run_pkg_config(libname, 'libs-only-other').split(),
}
print_lib_config('From pkg-config', lib_config)
return lib_config
def all_empty(lib_config):
'''Test if all lib_config fields are empty'''
if len(lib_config) == 0:
return True
return all(len(value) == 0 for value in lib_config.itervalues())
def all_exist(lib_config):
'''Test if all paths in the lib_config exist'''
for key, value in lib_config.iteritems():
for path in value:
if not os.path.exists(path):
return False
return True
def lib_config_magic(prefix, libname, static_config={}, known_include_dirs=[]):
'''Detect the configuration of a given library
Parameters
----------
prefix : str
The prefix for this library. This is a name that HORTON uses to refer to the
library.
libname : str
The library name as it is known to the compiler and to pkg-config. For example, if
the shared object is libfoo.so, then the library name is foo.
static_config : dict
If given, this static library configuration is attempted. Ignored when empty, or
when it contains non-existing files.
known_include_dirs : list of str
When all other methods of finding the library settings fail, the first existing
directory in this list is added to the include path. This is useful when header
files are commonly installed in a place that is not considered by default by most
compilers.
'''
print '%s Configuration' % prefix.upper()
# Start out empty
lib_config = dict((key, []) for key in lib_config_keys)
# Update with info from setup.cfg
lib_config.update(get_lib_config_setup(prefix, 'setup.cfg'))
# Override with environment variables
lib_config.update(get_lib_config_env(prefix))
# If no environment variables were set, attempt to use the static config.
if all_empty(lib_config):
if all_empty(static_config):
print ' No static config available for this library'
elif not all_exist(static_config):
print_lib_config('Static lib not found in ${QAWORKDIR}', static_config)
else:
# If the static build is present, use it.
print_lib_config('Static lib config in ${QAWORKDIR}', static_config)
lib_config.update(static_config)
# If also the static config did not work, try pkg-config
if all_empty(lib_config):
try:
# Try to get dynamic link info from pkg-config
lib_config.update(get_lib_config_pkg(libname))
except PkgConfigError:
print ' pkg-config failed.'
# Uber-dumb fallback. It works most of the times.
if all_empty(lib_config):
lib_config['libraries'] = [libname]
for include_dir in known_include_dirs:
if os.path.isdir(include_dir):
lib_config['include_dirs'] = [include_dir]
break
print_lib_config('Last resort fallback plan', lib_config)
print_lib_config('Final', lib_config)
return lib_config
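# Illustrative setup.cfg fragment (an assumption, not shipped with this repo)
# that get_lib_config_setup() above would pick up for LibXC; values are
# colon-separated, matching the value.split(':') parsing above:
#
#   [libxc]
#   include_dirs: /opt/libxc/include
#   library_dirs: /opt/libxc/lib
#   libraries: xc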
# Print the Machine name on screen
# --------------------------------
print 'PLATFORM={}'.format(platform.platform())
# Load dependency information
# ---------------------------
with open('dependencies.json') as f:
dependencies = json.load(f)
# Order does not matter here. Just make it easy to look things up
dependencies = dict((d['name'], d) for d in dependencies)
# Locate ${QAWORKDIR}
# -------------------
qaworkdir = os.getenv('QAWORKDIR')
if qaworkdir is None:
qaworkdir = 'qaworkdir'
# Configuration of LibXC
# ----------------------
# Static build info in the QAWORKDIR:
libxc_dir = '%s/cached/libxc-%s' % (qaworkdir, str(dependencies['libxc']['version_ci']))
libxc_static_config = {
'extra_objects': ['%s/lib/libxc.a' % libxc_dir],
'include_dirs': ['%s/include' % libxc_dir],
}
# Common include dirs that are not considered by the compiler by default:
known_libxc_include_dirs = ['/opt/local/include']
# Detect the configuration for LibXC
libxc_config = lib_config_magic(
'libxc', 'xc', libxc_static_config, known_libxc_include_dirs)
# Configuration of LibInt2
# ------------------------
# Static build info in the QAWORKDIR:
libint2_dir = '%s/cached/libint-%s' % (qaworkdir, str(dependencies['libint']['version_ci']))
libint2_static_config = {
'extra_objects': ['%s/lib/libint2.a' % libint2_dir],
'include_dirs': ['%s/include/libint2' % libint2_dir],
}
# Common include dirs that are not considered by the compiler by default:
known_libint2_include_dirs = ['/usr/include/libint2', '/opt/local/include/libint2']
libint2_config = lib_config_magic(
'libint2', 'int2', libint2_static_config, known_libint2_include_dirs)
# Print versions of (almost) all dependencies
# -------------------------------------------
print 'Version of dependencies:'
for name, dependency in sorted(dependencies.iteritems()):
version_command = dependency.get('version_command')
if version_command is not None:
try:
version_info = subprocess.check_output(
dependency['version_command'], shell=True,
stderr=subprocess.STDOUT).strip()
except subprocess.CalledProcessError:
version_info = '-- not found --'
print '{:>20}: {}'.format(name, version_info)
# Define extension modules
# ------------------------
ext_modules = [
Extension(
"horton.cext",
sources=get_sources('horton'),
depends=get_depends('horton'),
include_dirs=[np.get_include(), '.'],
extra_compile_args=['-std=c++11'],
language="c++"),
Extension(
"horton.gbasis.cext",
sources=get_sources('horton/gbasis') + ['horton/moments.cpp'],
depends=get_depends('horton/gbasis') + ['horton/moments.pxd', 'horton/moments.h'],
include_dirs=[np.get_include(), '.'] +
libint2_config['include_dirs'],
library_dirs=libint2_config['library_dirs'],
libraries=libint2_config['libraries'],
extra_objects=libint2_config['extra_objects'],
extra_compile_args=libint2_config['extra_compile_args'] +
['-std=c++11'],
extra_link_args=libint2_config['extra_link_args'],
language="c++"),
Extension(
"horton.grid.cext",
sources=get_sources('horton/grid') + [
'horton/cell.cpp',
'horton/moments.cpp'],
depends=get_depends('horton/grid') + [
'horton/cell.pxd', 'horton/cell.h',
'horton/moments.pxd', 'horton/moments.h'],
include_dirs=[np.get_include(), '.'],
extra_compile_args=['-std=c++11'],
language="c++", ),
Extension(
"horton.meanfield.cext",
sources=get_sources('horton/meanfield'),
depends=get_depends('horton/meanfield'),
include_dirs=[np.get_include(), '.'] + libxc_config['include_dirs'],
library_dirs=libxc_config['library_dirs'],
libraries=libxc_config['libraries'],
extra_objects=libxc_config['extra_objects'],
extra_compile_args=libxc_config['extra_compile_args'] + ['-std=c++11'],
extra_link_args=libxc_config['extra_link_args'],
language="c++"),
Extension(
"horton.espfit.cext",
sources=get_sources('horton/espfit') + [
'horton/cell.cpp',
'horton/grid/uniform.cpp'],
depends=get_depends('horton/espfit') + [
'horton/cell.pxd', 'horton/cell.h',
'horton/grid/uniform.pxd', 'horton/grid/uniform.h'],
include_dirs=[np.get_include(), '.'],
extra_compile_args=['-std=c++11'],
language="c++"),
]
for e in ext_modules:
e.cython_directives = {"embedsignature": True}
# Call distutils setup
# --------------------
setup(
name='horton',
version='2.1.0',
description='HORTON: Helpful Open-source Research TOol for N-fermion systems.',
author='Toon Verstraelen',
author_email='Toon.Verstraelen@UGent.be',
url='http://theochem.github.com/horton/',
scripts=glob("scripts/*.py"),
package_dir={'horton': 'horton'},
packages=[
'horton', 'horton.test',
'horton.espfit', 'horton.espfit.test',
'horton.gbasis', 'horton.gbasis.test',
'horton.grid', 'horton.grid.test',
'horton.io', 'horton.io.test',
'horton.meanfield', 'horton.meanfield.test',
'horton.part', 'horton.part.test',
'horton.scripts', 'horton.scripts.test',
'horton.modelhamiltonians', 'horton.modelhamiltonians.test'],
cmdclass={
'build_ext': build_ext,
'install_data': my_install_data,
'install_headers': my_install_headers,
},
data_files=[
('share/horton', glob('data/*.*')),
('share/horton/test', glob('data/test/*.*')),
('share/horton/basis', glob('data/basis/*.*')),
('share/horton/grids', glob('data/grids/*.txt')),
('share/horton/refatoms', glob('data/refatoms/*.h5')),
] + [
('share/horton/examples/%s' % os.path.basename(dn[:-1]),
glob('%s/*.py' % dn) + glob('%s/README' % dn))
for dn in glob('data/examples/*/')
] + [
('include/horton', glob('horton/*.h')),
('include/horton/grid', glob('horton/grid/*.h')),
('include/horton/gbasis', glob('horton/gbasis/*.h')),
('include/horton/espfit', glob('horton/espfit/*.h')),
],
package_data={
'horton': ['*.pxd'],
'horton.espfit': ['*.pxd'],
'horton.gbasis': ['*.pxd'],
'horton.grid': ['*.pxd'],
},
ext_modules=ext_modules,
headers=get_headers(),
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2',
'Programming Language :: Cython',
'Programming Language :: C++',
'Topic :: Science/Engineering :: Molecular Science'
],
)
|
gpl-3.0
|
khertan/ownNotes
|
python/requests/packages/charade/big5prober.py
|
206
|
1726
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import Big5DistributionAnalysis
from .mbcssm import Big5SMModel
class Big5Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(Big5SMModel)
self._mDistributionAnalyzer = Big5DistributionAnalysis()
self.reset()
def get_charset_name(self):
return "Big5"
|
gpl-3.0
|
vmturbo/nova
|
nova/tests/unit/virt/ironic/test_client_wrapper.py
|
2
|
6018
|
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironicclient import client as ironic_client
from ironicclient import exc as ironic_exception
import keystoneauth1.session
import mock
from oslo_config import cfg
import nova.conf
from nova import exception
from nova import test
from nova.tests.unit.virt.ironic import utils as ironic_utils
from nova.virt.ironic import client_wrapper
CONF = nova.conf.CONF
FAKE_CLIENT = ironic_utils.FakeClient()
def get_new_fake_client(*args, **kwargs):
return ironic_utils.FakeClient()
class IronicClientWrapperTestCase(test.NoDBTestCase):
def setUp(self):
super(IronicClientWrapperTestCase, self).setUp()
self.ironicclient = client_wrapper.IronicClientWrapper()
# Do not waste time sleeping
cfg.CONF.set_override('api_retry_interval', 0, 'ironic')
@mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr')
@mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client')
def test_call_good_no_args(self, mock_get_client, mock_multi_getattr):
mock_get_client.return_value = FAKE_CLIENT
self.ironicclient.call("node.list")
mock_get_client.assert_called_once_with(retry_on_conflict=True)
mock_multi_getattr.assert_called_once_with(FAKE_CLIENT, "node.list")
mock_multi_getattr.return_value.assert_called_once_with()
@mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr')
@mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client')
def test_call_good_with_args(self, mock_get_client, mock_multi_getattr):
mock_get_client.return_value = FAKE_CLIENT
self.ironicclient.call("node.list", 'test', associated=True)
mock_get_client.assert_called_once_with(retry_on_conflict=True)
mock_multi_getattr.assert_called_once_with(FAKE_CLIENT, "node.list")
mock_multi_getattr.return_value.assert_called_once_with(
'test', associated=True)
@mock.patch.object(keystoneauth1.session, 'Session')
@mock.patch.object(ironic_client, 'get_client')
def test__get_client_session(self, mock_ir_cli, mock_session):
"""An Ironicclient is called with a keystoneauth1 Session"""
mock_session.return_value = 'session'
ironicclient = client_wrapper.IronicClientWrapper()
# dummy call to have _get_client() called
ironicclient.call("node.list")
expected = {'session': 'session',
'max_retries': CONF.ironic.api_max_retries,
'retry_interval': CONF.ironic.api_retry_interval,
'os_ironic_api_version': '1.29',
'ironic_url': None}
mock_ir_cli.assert_called_once_with(1, **expected)
@mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr')
@mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client')
def test_call_fail_exception(self, mock_get_client, mock_multi_getattr):
test_obj = mock.Mock()
test_obj.side_effect = ironic_exception.HTTPNotFound
mock_multi_getattr.return_value = test_obj
mock_get_client.return_value = FAKE_CLIENT
self.assertRaises(ironic_exception.HTTPNotFound,
self.ironicclient.call, "node.list")
@mock.patch.object(ironic_client, 'get_client')
def test__get_client_unauthorized(self, mock_get_client):
mock_get_client.side_effect = ironic_exception.Unauthorized
self.assertRaises(exception.NovaException,
self.ironicclient._get_client)
@mock.patch.object(ironic_client, 'get_client')
def test__get_client_unexpected_exception(self, mock_get_client):
mock_get_client.side_effect = ironic_exception.ConnectionRefused
self.assertRaises(ironic_exception.ConnectionRefused,
self.ironicclient._get_client)
def test__multi_getattr_good(self):
response = self.ironicclient._multi_getattr(FAKE_CLIENT, "node.list")
self.assertEqual(FAKE_CLIENT.node.list, response)
def test__multi_getattr_fail(self):
self.assertRaises(AttributeError, self.ironicclient._multi_getattr,
FAKE_CLIENT, "nonexistent")
@mock.patch.object(ironic_client, 'get_client')
def test__client_is_cached(self, mock_get_client):
mock_get_client.side_effect = get_new_fake_client
ironicclient = client_wrapper.IronicClientWrapper()
first_client = ironicclient._get_client()
second_client = ironicclient._get_client()
self.assertEqual(id(first_client), id(second_client))
@mock.patch.object(ironic_client, 'get_client')
def test__invalidate_cached_client(self, mock_get_client):
mock_get_client.side_effect = get_new_fake_client
ironicclient = client_wrapper.IronicClientWrapper()
first_client = ironicclient._get_client()
ironicclient._invalidate_cached_client()
second_client = ironicclient._get_client()
self.assertNotEqual(id(first_client), id(second_client))
@mock.patch.object(ironic_client, 'get_client')
def test_call_uses_cached_client(self, mock_get_client):
mock_get_client.side_effect = get_new_fake_client
ironicclient = client_wrapper.IronicClientWrapper()
for n in range(0, 4):
ironicclient.call("node.list")
self.assertEqual(1, mock_get_client.call_count)
|
apache-2.0
|
sudheesh001/oh-mainline
|
vendor/packages/sphinx/sphinx/search/__init__.py
|
15
|
10995
|
# -*- coding: utf-8 -*-
"""
sphinx.search
~~~~~~~~~~~~~
Create a full-text search index for offline search.
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import with_statement
import re
import itertools
import cPickle as pickle
from docutils.nodes import comment, title, Text, NodeVisitor, SkipNode
from sphinx.util import jsdump, rpartition
class SearchLanguage(object):
"""
This class is the base class for search natural language preprocessors. If
you want to add support for a new language, you should override the methods
of this class.
    You should override the `lang` class attribute too (e.g. 'en', 'fr', and so on).
.. attribute:: stopwords
       This is a set of stop words of the target language. Default `stopwords`
       is empty. These words are used when building the index and are also
       embedded in the JavaScript search code.
.. attribute:: js_stemmer_code
       The JavaScript version of the stemmer class. The class must be named
       ``Stemmer`` and must provide a ``stemWord`` method. This string is
       embedded as-is in searchtools.js.
       It is used to preprocess the search words that readers type into the
       Sphinx HTML search box before the index is searched. The default
       implementation does nothing.
"""
lang = None
stopwords = set()
js_stemmer_code = """
/**
* Dummy stemmer for languages without stemming rules.
*/
var Stemmer = function() {
this.stemWord = function(w) {
return w;
}
}
"""
_word_re = re.compile(r'\w+(?u)')
def __init__(self, options):
self.options = options
self.init(options)
def init(self, options):
"""
Initialize the class with the options the user has given.
"""
def split(self, input):
"""
This method splits a sentence into words. Default splitter splits input
at white spaces, which should be enough for most languages except CJK
languages.
"""
return self._word_re.findall(input)
def stem(self, word):
"""
This method implements stemming algorithm of the Python version.
Default implementation does nothing. You should implement this if the
language has any stemming rules.
This class is used to preprocess search words before registering them in
the search index. The stemming of the Python version and the JS version
(given in the js_stemmer_code attribute) must be compatible.
"""
return word
def word_filter(self, word):
"""
Return true if the target word should be registered in the search index.
This method is called after stemming.
"""
return not (((len(word) < 3) and (12353 < ord(word[0]) < 12436)) or
(ord(word[0]) < 256 and (len(word) < 3 or word in self.stopwords or
word.isdigit())))
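# Hedged sketch (an addition, not part of Sphinx): a minimal SearchLanguage
# subclass overriding only the members documented above as overridable. The
# language code and stop words are illustrative assumptions.
#
#   class SearchDanish(SearchLanguage):
#       lang = 'da'
#       stopwords = set(['og', 'i', 'det'])
#
#       def stem(self, word):
#           return word.lower()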
from sphinx.search import en, ja
languages = {
'en': en.SearchEnglish,
'ja': ja.SearchJapanese,
}
class _JavaScriptIndex(object):
"""
The search index as javascript file that calls a function
on the documentation search object to register the index.
"""
PREFIX = 'Search.setIndex('
SUFFIX = ')'
def dumps(self, data):
return self.PREFIX + jsdump.dumps(data) + self.SUFFIX
def loads(self, s):
data = s[len(self.PREFIX):-len(self.SUFFIX)]
if not data or not s.startswith(self.PREFIX) or not \
s.endswith(self.SUFFIX):
raise ValueError('invalid data')
return jsdump.loads(data)
def dump(self, data, f):
f.write(self.dumps(data))
def load(self, f):
return self.loads(f.read())
js_index = _JavaScriptIndex()
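# Minimal round-trip sketch (an addition, not part of Sphinx): dumps() wraps a
# payload in the Search.setIndex(...) call and loads() strips and parses it.
#
#   >>> js_index.loads(js_index.dumps({'docnames': []}))
#   {'docnames': []}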
class WordCollector(NodeVisitor):
"""
A special visitor that collects words for the `IndexBuilder`.
"""
def __init__(self, document, lang):
NodeVisitor.__init__(self, document)
self.found_words = []
self.found_title_words = []
self.lang = lang
def dispatch_visit(self, node):
if node.__class__ is comment:
raise SkipNode
elif node.__class__ is Text:
self.found_words.extend(self.lang.split(node.astext()))
elif node.__class__ is title:
self.found_title_words.extend(self.lang.split(node.astext()))
class IndexBuilder(object):
"""
Helper class that creates a searchindex based on the doctrees
passed to the `feed` method.
"""
formats = {
'jsdump': jsdump,
'pickle': pickle
}
def __init__(self, env, lang, options, scoring):
self.env = env
# filename -> title
self._titles = {}
# stemmed word -> set(filenames)
self._mapping = {}
# stemmed words in titles -> set(filenames)
self._title_mapping = {}
# word -> stemmed word
self._stem_cache = {}
# objtype -> index
self._objtypes = {}
# objtype index -> (domain, type, objname (localized))
self._objnames = {}
# add language-specific SearchLanguage instance
self.lang = languages[lang](options)
if scoring:
with open(scoring, 'rb') as fp:
self.js_scorer_code = fp.read().decode('utf-8')
else:
self.js_scorer_code = u''
def load(self, stream, format):
"""Reconstruct from frozen data."""
if isinstance(format, basestring):
format = self.formats[format]
frozen = format.load(stream)
# if an old index is present, we treat it as not existing.
if not isinstance(frozen, dict) or \
frozen.get('envversion') != self.env.version:
raise ValueError('old format')
index2fn = frozen['filenames']
self._titles = dict(zip(index2fn, frozen['titles']))
def load_terms(mapping):
rv = {}
for k, v in mapping.iteritems():
if isinstance(v, int):
rv[k] = set([index2fn[v]])
else:
rv[k] = set(index2fn[i] for i in v)
return rv
self._mapping = load_terms(frozen['terms'])
self._title_mapping = load_terms(frozen['titleterms'])
# no need to load keywords/objtypes
def dump(self, stream, format):
"""Dump the frozen index to a stream."""
if isinstance(format, basestring):
format = self.formats[format]
format.dump(self.freeze(), stream)
def get_objects(self, fn2index):
rv = {}
otypes = self._objtypes
onames = self._objnames
for domainname, domain in self.env.domains.iteritems():
for fullname, dispname, type, docname, anchor, prio in \
domain.get_objects():
# XXX use dispname?
if docname not in fn2index:
continue
if prio < 0:
continue
prefix, name = rpartition(fullname, '.')
pdict = rv.setdefault(prefix, {})
try:
typeindex = otypes[domainname, type]
except KeyError:
typeindex = len(otypes)
otypes[domainname, type] = typeindex
otype = domain.object_types.get(type)
if otype:
# use unicode() to fire translation proxies
onames[typeindex] = (domainname, type,
unicode(domain.get_type_name(otype)))
else:
onames[typeindex] = (domainname, type, type)
if anchor == fullname:
shortanchor = ''
elif anchor == type + '-' + fullname:
shortanchor = '-'
else:
shortanchor = anchor
pdict[name] = (fn2index[docname], typeindex, prio, shortanchor)
return rv
def get_terms(self, fn2index):
rvs = {}, {}
for rv, mapping in zip(rvs, (self._mapping, self._title_mapping)):
for k, v in mapping.iteritems():
if len(v) == 1:
fn, = v
if fn in fn2index:
rv[k] = fn2index[fn]
else:
rv[k] = [fn2index[fn] for fn in v if fn in fn2index]
return rvs
def freeze(self):
"""Create a usable data structure for serializing."""
filenames = self._titles.keys()
titles = self._titles.values()
fn2index = dict((f, i) for (i, f) in enumerate(filenames))
terms, title_terms = self.get_terms(fn2index)
objects = self.get_objects(fn2index) # populates _objtypes
objtypes = dict((v, k[0] + ':' + k[1])
for (k, v) in self._objtypes.iteritems())
objnames = self._objnames
return dict(filenames=filenames, titles=titles, terms=terms,
objects=objects, objtypes=objtypes, objnames=objnames,
titleterms=title_terms, envversion=self.env.version)
def prune(self, filenames):
"""Remove data for all filenames not in the list."""
new_titles = {}
for filename in filenames:
if filename in self._titles:
new_titles[filename] = self._titles[filename]
self._titles = new_titles
for wordnames in self._mapping.itervalues():
wordnames.intersection_update(filenames)
for wordnames in self._title_mapping.itervalues():
wordnames.intersection_update(filenames)
def feed(self, filename, title, doctree):
"""Feed a doctree to the index."""
self._titles[filename] = title
visitor = WordCollector(doctree, self.lang)
doctree.walk(visitor)
# memoize self.lang.stem
def stem(word):
try:
return self._stem_cache[word]
except KeyError:
self._stem_cache[word] = self.lang.stem(word)
return self._stem_cache[word]
_filter = self.lang.word_filter
for word in itertools.chain(visitor.found_title_words,
self.lang.split(title)):
word = stem(word)
if _filter(word):
self._title_mapping.setdefault(word, set()).add(filename)
for word in visitor.found_words:
word = stem(word)
if word not in self._title_mapping and _filter(word):
self._mapping.setdefault(word, set()).add(filename)
def context_for_searchtool(self):
return dict(
search_language_stemming_code = self.lang.js_stemmer_code,
search_language_stop_words =
jsdump.dumps(sorted(self.lang.stopwords)),
search_scorer_tool = self.js_scorer_code,
)
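# Hedged usage sketch (an addition; 'env' and 'doctree' stand in for a Sphinx
# BuildEnvironment and a parsed document, both assumptions here):
#
#   builder = IndexBuilder(env, 'en', options={}, scoring=None)
#   builder.feed('index', 'Home', doctree)
#   searchindex = builder.freeze()  # serializable via the formats above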
|
agpl-3.0
|
plang85/rough_surfaces
|
rough_surfaces/surface.py
|
1
|
2746
|
import numpy as np
class Surface(np.ndarray):
"""
One- or two-dimensional surface height representation.
The assumption upon which this framework is based is a uniform lattice size in both directions.
This is tightly integrated here. 'Surface' is the fundamental class that most modules build
    upon. It usually represents the model or computational domain, as it may discretize either
    individual or composite surfaces, i.e., rough surfaces and aperture fields.
Standard initialization is from two-dimensional ndarray and lattice size:
>>> import numpy as np
>>> N, dxy = 100, 0.1
>>> h = np.zeros((N,N))
>>> s = Surface(h, dxy)
    >>> length(s) # edge length in x-direction
    10.0
    >>> length(s, 1) # edge length in y-direction
10.0
Surfaces can also be one-dimensional, e.g., represent traces or cross-sections:
>>> import numpy as np
>>> N, dxy = 100, 0.1
>>> h = np.zeros((N))
>>> s = Surface(h, dxy)
>>> length(s) # length
10.0
>>> length(s, 1) # there is no second axis for one-dimensional surfaces
Traceback (most recent call last):
...
IndexError: tuple index out of range
"""
def __new__(cls, input_array, dxy):
obj = np.asarray(input_array).view(cls)
obj.dxy = float(dxy)
return obj
    def __array_finalize__(self, obj):
        if obj is None:
            return
        self.dxy = getattr(obj, 'dxy', None)
def rms(surface):
""""Returns root-mean-square roughness [L]."""
return np.sqrt(np.mean(surface**2))
def length(surface, axis=0):
""""Returns length [L] of surface in x- or y-direction, for axis=0 and 1, respectively."""
return surface.shape[axis] * surface.dxy
def nominal_area(surface):
    """Returns length() [L] for 1D, area [L^2] for 2D."""
    a = 1.0
    for i in range(len(surface.shape)):
        a *= length(surface, i)
    return a
def shift_to_zero_mean(surface):
""""Returns shifted surface such that <h> = 0."""
return Surface(surface - np.mean(surface), surface.dxy)
def mean_aperture(surface):
""""Composite surface assumption: mean of difference field to highest point."""
return np.mean(np.abs(np.subtract(surface, np.max(surface))))
def pore_volume(surface):
""""Composite surface assumption: mean aperture times area (2D-->[L^3]) or length (1D-->[L^2])."""
return mean_aperture(surface) * nominal_area(surface)
def scale_to_rms(surface, rms_target):
"""
Scales height to fit target property, which must be name of scalar returning method.
"""
rms_current = rms(surface)
return Surface(surface * (rms_target / rms_current), surface.dxy)
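# Hedged example in the module's own doctest style (inert as a comment; the
# values follow from the definitions above):
#
#   >>> s2 = scale_to_rms(Surface(np.ones((4, 4)), 0.1), 2.0)
#   >>> float(rms(s2))
#   2.0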
if __name__ == '__main__':
import doctest
doctest.testmod()
|
mit
|
bryx-inc/boto
|
boto/configservice/__init__.py
|
107
|
1672
|
# Copyright (c) 2015 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo, get_regions
def regions():
"""
Get all available regions for the AWS Config service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo`
"""
from boto.configservice.layer1 import ConfigServiceConnection
return get_regions('configservice', connection_cls=ConfigServiceConnection)
def connect_to_region(region_name, **kw_params):
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
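# Hedged usage sketch (an addition; the region name is an illustrative
# assumption): connect_to_region() returns None for unknown region names.
#
#   conn = connect_to_region('us-east-1')
#   if conn is None:
#       raise ValueError('unsupported region')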
|
mit
|
amyvmiwei/kbengine
|
kbe/src/lib/python/Lib/test/test_importlib/test_abc.py
|
10
|
33823
|
import contextlib
import inspect
import io
import marshal
import os
import sys
from test import support
import types
import unittest
from unittest import mock
import warnings
from . import util
frozen_init, source_init = util.import_importlib('importlib')
frozen_abc, source_abc = util.import_importlib('importlib.abc')
machinery = util.import_importlib('importlib.machinery')
frozen_util, source_util = util.import_importlib('importlib.util')
##### Inheritance ##############################################################
class InheritanceTests:
"""Test that the specified class is a subclass/superclass of the expected
classes."""
subclasses = []
superclasses = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.superclasses = [getattr(self.abc, class_name)
for class_name in self.superclass_names]
if hasattr(self, 'subclass_names'):
# Because test.support.import_fresh_module() creates a new
# importlib._bootstrap per module, inheritance checks fail when
# checking across module boundaries (i.e. the _bootstrap in abc is
# not the same as the one in machinery). That means stealing one of
# the modules from the other to make sure the same instance is used.
self.subclasses = [getattr(self.abc.machinery, class_name)
for class_name in self.subclass_names]
assert self.subclasses or self.superclasses, self.__class__
testing = self.__class__.__name__.partition('_')[2]
self.__test = getattr(self.abc, testing)
def test_subclasses(self):
# Test that the expected subclasses inherit.
for subclass in self.subclasses:
self.assertTrue(issubclass(subclass, self.__test),
"{0} is not a subclass of {1}".format(subclass, self.__test))
def test_superclasses(self):
# Test that the class inherits from the expected superclasses.
for superclass in self.superclasses:
self.assertTrue(issubclass(self.__test, superclass),
"{0} is not a superclass of {1}".format(superclass, self.__test))
def create_inheritance_tests(base_class):
def set_frozen(ns):
ns['abc'] = frozen_abc
def set_source(ns):
ns['abc'] = source_abc
classes = []
for prefix, ns_set in [('Frozen', set_frozen), ('Source', set_source)]:
classes.append(types.new_class('_'.join([prefix, base_class.__name__]),
(base_class, unittest.TestCase),
exec_body=ns_set))
return classes
class MetaPathFinder(InheritanceTests):
superclass_names = ['Finder']
subclass_names = ['BuiltinImporter', 'FrozenImporter', 'PathFinder',
'WindowsRegistryFinder']
tests = create_inheritance_tests(MetaPathFinder)
Frozen_MetaPathFinderInheritanceTests, Source_MetaPathFinderInheritanceTests = tests
class PathEntryFinder(InheritanceTests):
superclass_names = ['Finder']
subclass_names = ['FileFinder']
tests = create_inheritance_tests(PathEntryFinder)
Frozen_PathEntryFinderInheritanceTests, Source_PathEntryFinderInheritanceTests = tests
class ResourceLoader(InheritanceTests):
superclass_names = ['Loader']
tests = create_inheritance_tests(ResourceLoader)
Frozen_ResourceLoaderInheritanceTests, Source_ResourceLoaderInheritanceTests = tests
class InspectLoader(InheritanceTests):
superclass_names = ['Loader']
subclass_names = ['BuiltinImporter', 'FrozenImporter', 'ExtensionFileLoader']
tests = create_inheritance_tests(InspectLoader)
Frozen_InspectLoaderInheritanceTests, Source_InspectLoaderInheritanceTests = tests
class ExecutionLoader(InheritanceTests):
superclass_names = ['InspectLoader']
subclass_names = ['ExtensionFileLoader']
tests = create_inheritance_tests(ExecutionLoader)
Frozen_ExecutionLoaderInheritanceTests, Source_ExecutionLoaderInheritanceTests = tests
class FileLoader(InheritanceTests):
superclass_names = ['ResourceLoader', 'ExecutionLoader']
subclass_names = ['SourceFileLoader', 'SourcelessFileLoader']
tests = create_inheritance_tests(FileLoader)
Frozen_FileLoaderInheritanceTests, Source_FileLoaderInheritanceTests = tests
class SourceLoader(InheritanceTests):
superclass_names = ['ResourceLoader', 'ExecutionLoader']
subclass_names = ['SourceFileLoader']
tests = create_inheritance_tests(SourceLoader)
Frozen_SourceLoaderInheritanceTests, Source_SourceLoaderInheritanceTests = tests
##### Default return values ####################################################
def make_abc_subclasses(base_class):
classes = []
for kind, abc in [('Frozen', frozen_abc), ('Source', source_abc)]:
name = '_'.join([kind, base_class.__name__])
base_classes = base_class, getattr(abc, base_class.__name__)
classes.append(types.new_class(name, base_classes))
return classes
def make_return_value_tests(base_class, test_class):
frozen_class, source_class = make_abc_subclasses(base_class)
tests = []
for prefix, class_in_test in [('Frozen', frozen_class), ('Source', source_class)]:
def set_ns(ns):
ns['ins'] = class_in_test()
tests.append(types.new_class('_'.join([prefix, test_class.__name__]),
(test_class, unittest.TestCase),
exec_body=set_ns))
return tests
class MetaPathFinder:
def find_module(self, fullname, path):
return super().find_module(fullname, path)
Frozen_MPF, Source_MPF = make_abc_subclasses(MetaPathFinder)
class MetaPathFinderDefaultsTests:
def test_find_module(self):
# Default should return None.
self.assertIsNone(self.ins.find_module('something', None))
def test_invalidate_caches(self):
# Calling the method is a no-op.
self.ins.invalidate_caches()
tests = make_return_value_tests(MetaPathFinder, MetaPathFinderDefaultsTests)
Frozen_MPFDefaultTests, Source_MPFDefaultTests = tests
class PathEntryFinder:
def find_loader(self, fullname):
return super().find_loader(fullname)
Frozen_PEF, Source_PEF = make_abc_subclasses(PathEntryFinder)
class PathEntryFinderDefaultsTests:
def test_find_loader(self):
self.assertEqual((None, []), self.ins.find_loader('something'))
    def test_find_module(self):
        self.assertEqual(None, self.ins.find_module('something'))
def test_invalidate_caches(self):
# Should be a no-op.
self.ins.invalidate_caches()
tests = make_return_value_tests(PathEntryFinder, PathEntryFinderDefaultsTests)
Frozen_PEFDefaultTests, Source_PEFDefaultTests = tests
class Loader:
def load_module(self, fullname):
return super().load_module(fullname)
Frozen_L, Source_L = make_abc_subclasses(Loader)
class LoaderDefaultsTests:
def test_load_module(self):
with self.assertRaises(ImportError):
self.ins.load_module('something')
def test_module_repr(self):
mod = types.ModuleType('blah')
with self.assertRaises(NotImplementedError):
self.ins.module_repr(mod)
original_repr = repr(mod)
mod.__loader__ = self.ins
# Should still return a proper repr.
self.assertTrue(repr(mod))
tests = make_return_value_tests(Loader, LoaderDefaultsTests)
Frozen_LDefaultTests, SourceLDefaultTests = tests
class ResourceLoader(Loader):
def get_data(self, path):
return super().get_data(path)
Frozen_RL, Source_RL = make_abc_subclasses(ResourceLoader)
class ResourceLoaderDefaultsTests:
def test_get_data(self):
with self.assertRaises(IOError):
self.ins.get_data('/some/path')
tests = make_return_value_tests(ResourceLoader, ResourceLoaderDefaultsTests)
Frozen_RLDefaultTests, Source_RLDefaultTests = tests
class InspectLoader(Loader):
def is_package(self, fullname):
return super().is_package(fullname)
def get_source(self, fullname):
return super().get_source(fullname)
Frozen_IL, Source_IL = make_abc_subclasses(InspectLoader)
class InspectLoaderDefaultsTests:
def test_is_package(self):
with self.assertRaises(ImportError):
self.ins.is_package('blah')
def test_get_source(self):
with self.assertRaises(ImportError):
self.ins.get_source('blah')
tests = make_return_value_tests(InspectLoader, InspectLoaderDefaultsTests)
Frozen_ILDefaultTests, Source_ILDefaultTests = tests
class ExecutionLoader(InspectLoader):
def get_filename(self, fullname):
return super().get_filename(fullname)
Frozen_EL, Source_EL = make_abc_subclasses(ExecutionLoader)
class ExecutionLoaderDefaultsTests:
def test_get_filename(self):
with self.assertRaises(ImportError):
self.ins.get_filename('blah')
tests = make_return_value_tests(ExecutionLoader, ExecutionLoaderDefaultsTests)
Frozen_ELDefaultTests, Source_ELDefaultsTests = tests
##### MetaPathFinder concrete methods ##########################################
class MetaPathFinderFindModuleTests:
@classmethod
def finder(cls, spec):
class MetaPathSpecFinder(cls.abc.MetaPathFinder):
def find_spec(self, fullname, path, target=None):
self.called_for = fullname, path
return spec
return MetaPathSpecFinder()
def test_no_spec(self):
finder = self.finder(None)
path = ['a', 'b', 'c']
name = 'blah'
found = finder.find_module(name, path)
self.assertIsNone(found)
self.assertEqual(name, finder.called_for[0])
self.assertEqual(path, finder.called_for[1])
def test_spec(self):
loader = object()
spec = self.util.spec_from_loader('blah', loader)
finder = self.finder(spec)
found = finder.find_module('blah', None)
self.assertIs(found, spec.loader)
Frozen_MPFFindModuleTests, Source_MPFFindModuleTests = util.test_both(
MetaPathFinderFindModuleTests,
abc=(frozen_abc, source_abc),
util=(frozen_util, source_util))
##### PathEntryFinder concrete methods #########################################
class PathEntryFinderFindLoaderTests:
@classmethod
def finder(cls, spec):
class PathEntrySpecFinder(cls.abc.PathEntryFinder):
def find_spec(self, fullname, target=None):
self.called_for = fullname
return spec
return PathEntrySpecFinder()
def test_no_spec(self):
finder = self.finder(None)
name = 'blah'
found = finder.find_loader(name)
self.assertIsNone(found[0])
self.assertEqual([], found[1])
self.assertEqual(name, finder.called_for)
def test_spec_with_loader(self):
loader = object()
spec = self.util.spec_from_loader('blah', loader)
finder = self.finder(spec)
found = finder.find_loader('blah')
self.assertIs(found[0], spec.loader)
def test_spec_with_portions(self):
spec = self.machinery.ModuleSpec('blah', None)
paths = ['a', 'b', 'c']
spec.submodule_search_locations = paths
finder = self.finder(spec)
found = finder.find_loader('blah')
self.assertIsNone(found[0])
self.assertEqual(paths, found[1])
Frozen_PEFFindLoaderTests, Source_PEFFindLoaderTests = util.test_both(
PathEntryFinderFindLoaderTests,
abc=(frozen_abc, source_abc),
machinery=machinery,
util=(frozen_util, source_util))
##### Loader concrete methods ##################################################
class LoaderLoadModuleTests:
def loader(self):
class SpecLoader(self.abc.Loader):
found = None
def exec_module(self, module):
self.found = module
def is_package(self, fullname):
"""Force some non-default module state to be set."""
return True
return SpecLoader()
def test_fresh(self):
loader = self.loader()
name = 'blah'
with util.uncache(name):
loader.load_module(name)
module = loader.found
self.assertIs(sys.modules[name], module)
self.assertEqual(loader, module.__loader__)
self.assertEqual(loader, module.__spec__.loader)
self.assertEqual(name, module.__name__)
self.assertEqual(name, module.__spec__.name)
self.assertIsNotNone(module.__path__)
self.assertEqual(module.__path__,
module.__spec__.submodule_search_locations)
def test_reload(self):
name = 'blah'
loader = self.loader()
module = types.ModuleType(name)
module.__spec__ = self.util.spec_from_loader(name, loader)
module.__loader__ = loader
with util.uncache(name):
sys.modules[name] = module
loader.load_module(name)
found = loader.found
self.assertIs(found, sys.modules[name])
self.assertIs(module, sys.modules[name])
Frozen_LoaderLoadModuleTests, Source_LoaderLoadModuleTests = util.test_both(
LoaderLoadModuleTests,
abc=(frozen_abc, source_abc),
util=(frozen_util, source_util))
##### InspectLoader concrete methods ###########################################
class InspectLoaderSourceToCodeTests:
def source_to_module(self, data, path=None):
"""Help with source_to_code() tests."""
module = types.ModuleType('blah')
loader = self.InspectLoaderSubclass()
if path is None:
code = loader.source_to_code(data)
else:
code = loader.source_to_code(data, path)
exec(code, module.__dict__)
return module
def test_source_to_code_source(self):
# compile() can handle strings, so source_to_code() should too.
source = 'attr = 42'
module = self.source_to_module(source)
self.assertTrue(hasattr(module, 'attr'))
self.assertEqual(module.attr, 42)
def test_source_to_code_bytes(self):
# compile() can handle bytes, so source_to_code() should too.
source = b'attr = 42'
module = self.source_to_module(source)
self.assertTrue(hasattr(module, 'attr'))
self.assertEqual(module.attr, 42)
def test_source_to_code_path(self):
# Specifying a path should set it for the code object.
path = 'path/to/somewhere'
loader = self.InspectLoaderSubclass()
code = loader.source_to_code('', path)
self.assertEqual(code.co_filename, path)
def test_source_to_code_no_path(self):
# Not setting a path should still work; the filename then defaults to
# '<string>', matching the long-standing default of compile().
loader = self.InspectLoaderSubclass()
code = loader.source_to_code('')
self.assertEqual(code.co_filename, '<string>')
class Frozen_ILSourceToCodeTests(InspectLoaderSourceToCodeTests, unittest.TestCase):
InspectLoaderSubclass = Frozen_IL
class Source_ILSourceToCodeTests(InspectLoaderSourceToCodeTests, unittest.TestCase):
InspectLoaderSubclass = Source_IL
class InspectLoaderGetCodeTests:
def test_get_code(self):
# Test success.
module = types.ModuleType('blah')
with mock.patch.object(self.InspectLoaderSubclass, 'get_source') as mocked:
mocked.return_value = 'attr = 42'
loader = self.InspectLoaderSubclass()
code = loader.get_code('blah')
exec(code, module.__dict__)
self.assertEqual(module.attr, 42)
def test_get_code_source_is_None(self):
# If get_source() returns None then get_code() should return None.
with mock.patch.object(self.InspectLoaderSubclass, 'get_source') as mocked:
mocked.return_value = None
loader = self.InspectLoaderSubclass()
code = loader.get_code('blah')
self.assertIsNone(code)
def test_get_code_source_not_found(self):
# If there is no source then there is no code object.
loader = self.InspectLoaderSubclass()
with self.assertRaises(ImportError):
loader.get_code('blah')
class Frozen_ILGetCodeTests(InspectLoaderGetCodeTests, unittest.TestCase):
InspectLoaderSubclass = Frozen_IL
class Source_ILGetCodeTests(InspectLoaderGetCodeTests, unittest.TestCase):
InspectLoaderSubclass = Source_IL
class InspectLoaderLoadModuleTests:
"""Test InspectLoader.load_module()."""
module_name = 'blah'
def setUp(self):
support.unload(self.module_name)
self.addCleanup(support.unload, self.module_name)
def mock_get_code(self):
return mock.patch.object(self.InspectLoaderSubclass, 'get_code')
def test_get_code_ImportError(self):
# If get_code() raises ImportError, it should propagate.
with self.mock_get_code() as mocked_get_code:
mocked_get_code.side_effect = ImportError
with self.assertRaises(ImportError):
loader = self.InspectLoaderSubclass()
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
loader.load_module(self.module_name)
def test_get_code_None(self):
# If get_code() returns None, raise ImportError.
with self.mock_get_code() as mocked_get_code:
mocked_get_code.return_value = None
with self.assertRaises(ImportError):
loader = self.InspectLoaderSubclass()
loader.load_module(self.module_name)
def test_module_returned(self):
# The loaded module should be returned.
code = compile('attr = 42', '<string>', 'exec')
with self.mock_get_code() as mocked_get_code:
mocked_get_code.return_value = code
loader = self.InspectLoaderSubclass()
module = loader.load_module(self.module_name)
self.assertEqual(module, sys.modules[self.module_name])
class Frozen_ILLoadModuleTests(InspectLoaderLoadModuleTests, unittest.TestCase):
InspectLoaderSubclass = Frozen_IL
class Source_ILLoadModuleTests(InspectLoaderLoadModuleTests, unittest.TestCase):
InspectLoaderSubclass = Source_IL
##### ExecutionLoader concrete methods #########################################
class ExecutionLoaderGetCodeTests:
def mock_methods(self, *, get_source=False, get_filename=False):
source_mock_context, filename_mock_context = None, None
if get_source:
source_mock_context = mock.patch.object(self.ExecutionLoaderSubclass,
'get_source')
if get_filename:
filename_mock_context = mock.patch.object(self.ExecutionLoaderSubclass,
'get_filename')
return source_mock_context, filename_mock_context
def test_get_code(self):
path = 'blah.py'
source_mock_context, filename_mock_context = self.mock_methods(
get_source=True, get_filename=True)
with source_mock_context as source_mock, filename_mock_context as name_mock:
source_mock.return_value = 'attr = 42'
name_mock.return_value = path
loader = self.ExecutionLoaderSubclass()
code = loader.get_code('blah')
self.assertEqual(code.co_filename, path)
module = types.ModuleType('blah')
exec(code, module.__dict__)
self.assertEqual(module.attr, 42)
def test_get_code_source_is_None(self):
# If get_source() returns None then get_code() should return None.
source_mock_context, _ = self.mock_methods(get_source=True)
with source_mock_context as mocked:
mocked.return_value = None
loader = self.ExecutionLoaderSubclass()
code = loader.get_code('blah')
self.assertIsNone(code)
def test_get_code_source_not_found(self):
# If there is no source then there is no code object.
loader = self.ExecutionLoaderSubclass()
with self.assertRaises(ImportError):
loader.get_code('blah')
def test_get_code_no_path(self):
# If get_filename() raises ImportError then simply skip setting the path
# on the code object.
source_mock_context, filename_mock_context = self.mock_methods(
get_source=True, get_filename=True)
with source_mock_context as source_mock, filename_mock_context as name_mock:
source_mock.return_value = 'attr = 42'
name_mock.side_effect = ImportError
loader = self.ExecutionLoaderSubclass()
code = loader.get_code('blah')
self.assertEqual(code.co_filename, '<string>')
module = types.ModuleType('blah')
exec(code, module.__dict__)
self.assertEqual(module.attr, 42)
class Frozen_ELGetCodeTests(ExecutionLoaderGetCodeTests, unittest.TestCase):
ExecutionLoaderSubclass = Frozen_EL
class Source_ELGetCodeTests(ExecutionLoaderGetCodeTests, unittest.TestCase):
ExecutionLoaderSubclass = Source_EL
##### SourceLoader concrete methods ############################################
class SourceLoader:
# Globals that should be defined for all modules.
source = (b"_ = '::'.join([__name__, __file__, __cached__, __package__, "
b"repr(__loader__)])")
def __init__(self, path):
self.path = path
def get_data(self, path):
if path != self.path:
raise IOError
return self.source
def get_filename(self, fullname):
return self.path
def module_repr(self, module):
return '<module>'
Frozen_SourceOnlyL, Source_SourceOnlyL = make_abc_subclasses(SourceLoader)
class SourceLoader(SourceLoader):
source_mtime = 1
def __init__(self, path, magic=None):
super().__init__(path)
self.bytecode_path = self.util.cache_from_source(self.path)
self.source_size = len(self.source)
if magic is None:
magic = self.util.MAGIC_NUMBER
data = bytearray(magic)
data.extend(self.init._w_long(self.source_mtime))
data.extend(self.init._w_long(self.source_size))
code_object = compile(self.source, self.path, 'exec',
dont_inherit=True)
data.extend(marshal.dumps(code_object))
self.bytecode = bytes(data)
self.written = {}
def get_data(self, path):
if path == self.path:
return super().get_data(path)
elif path == self.bytecode_path:
return self.bytecode
else:
raise OSError
def path_stats(self, path):
if path != self.path:
raise IOError
return {'mtime': self.source_mtime, 'size': self.source_size}
def set_data(self, path, data):
self.written[path] = bytes(data)
return path == self.bytecode_path
Frozen_SL, Source_SL = make_abc_subclasses(SourceLoader)
Frozen_SL.util = frozen_util
Source_SL.util = source_util
Frozen_SL.init = frozen_init
Source_SL.init = source_init
class SourceLoaderTestHarness:
def setUp(self, *, is_package=True, **kwargs):
self.package = 'pkg'
if is_package:
self.path = os.path.join(self.package, '__init__.py')
self.name = self.package
else:
module_name = 'mod'
self.path = os.path.join(self.package, '.'.join(['mod', 'py']))
self.name = '.'.join([self.package, module_name])
self.cached = self.util.cache_from_source(self.path)
self.loader = self.loader_mock(self.path, **kwargs)
def verify_module(self, module):
self.assertEqual(module.__name__, self.name)
self.assertEqual(module.__file__, self.path)
self.assertEqual(module.__cached__, self.cached)
self.assertEqual(module.__package__, self.package)
self.assertEqual(module.__loader__, self.loader)
values = module._.split('::')
self.assertEqual(values[0], self.name)
self.assertEqual(values[1], self.path)
self.assertEqual(values[2], self.cached)
self.assertEqual(values[3], self.package)
self.assertEqual(values[4], repr(self.loader))
def verify_code(self, code_object):
module = types.ModuleType(self.name)
module.__file__ = self.path
module.__cached__ = self.cached
module.__package__ = self.package
module.__loader__ = self.loader
module.__path__ = []
exec(code_object, module.__dict__)
self.verify_module(module)
class SourceOnlyLoaderTests(SourceLoaderTestHarness):
"""Test importlib.abc.SourceLoader for source-only loading.
Reload testing is subsumed by the tests for
importlib.util.module_for_loader.
"""
def test_get_source(self):
# Verify the source code is returned as a string.
# If an OSError is raised by get_data then raise ImportError.
expected_source = self.loader.source.decode('utf-8')
self.assertEqual(self.loader.get_source(self.name), expected_source)
def raise_OSError(path):
raise OSError
self.loader.get_data = raise_OSError
with self.assertRaises(ImportError) as cm:
self.loader.get_source(self.name)
self.assertEqual(cm.exception.name, self.name)
def test_is_package(self):
# Properly detect when loading a package.
self.setUp(is_package=False)
self.assertFalse(self.loader.is_package(self.name))
self.setUp(is_package=True)
self.assertTrue(self.loader.is_package(self.name))
self.assertFalse(self.loader.is_package(self.name + '.__init__'))
def test_get_code(self):
# Verify the code object is created.
code_object = self.loader.get_code(self.name)
self.verify_code(code_object)
def test_source_to_code(self):
# Verify the compiled code object.
code = self.loader.source_to_code(self.loader.source, self.path)
self.verify_code(code)
def test_load_module(self):
# Loading a module should set __name__, __loader__, __package__,
# __path__ (for packages), __file__, and __cached__.
# The module should also be put into sys.modules.
with util.uncache(self.name):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
module = self.loader.load_module(self.name)
self.verify_module(module)
self.assertEqual(module.__path__, [os.path.dirname(self.path)])
self.assertIn(self.name, sys.modules)
def test_package_settings(self):
# __package__ needs to be set, while __path__ is set only if the module
# is a package.
# Testing the values for a package is covered by test_load_module.
self.setUp(is_package=False)
with util.uncache(self.name):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
module = self.loader.load_module(self.name)
self.verify_module(module)
self.assertTrue(not hasattr(module, '__path__'))
def test_get_source_encoding(self):
# Source is considered encoded in UTF-8 by default unless otherwise
# specified by an encoding line.
source = "_ = 'ü'"
self.loader.source = source.encode('utf-8')
returned_source = self.loader.get_source(self.name)
self.assertEqual(returned_source, source)
source = "# coding: latin-1\n_ = ü"
self.loader.source = source.encode('latin-1')
returned_source = self.loader.get_source(self.name)
self.assertEqual(returned_source, source)
class Frozen_SourceOnlyLTests(SourceOnlyLoaderTests, unittest.TestCase):
loader_mock = Frozen_SourceOnlyL
util = frozen_util
class Source_SourceOnlyLTests(SourceOnlyLoaderTests, unittest.TestCase):
loader_mock = Source_SourceOnlyL
util = source_util
@unittest.skipIf(sys.dont_write_bytecode, "sys.dont_write_bytecode is true")
class SourceLoaderBytecodeTests(SourceLoaderTestHarness):
"""Test importlib.abc.SourceLoader's use of bytecode.
Source-only testing handled by SourceOnlyLoaderTests.
"""
def verify_code(self, code_object, *, bytecode_written=False):
super().verify_code(code_object)
if bytecode_written:
self.assertIn(self.cached, self.loader.written)
data = bytearray(self.util.MAGIC_NUMBER)
data.extend(self.init._w_long(self.loader.source_mtime))
data.extend(self.init._w_long(self.loader.source_size))
data.extend(marshal.dumps(code_object))
self.assertEqual(self.loader.written[self.cached], bytes(data))
def test_code_with_everything(self):
# When everything should work.
code_object = self.loader.get_code(self.name)
self.verify_code(code_object)
def test_no_bytecode(self):
# If no bytecode exists then move on to the source.
self.loader.bytecode_path = "<does not exist>"
# Sanity check
with self.assertRaises(OSError):
bytecode_path = self.util.cache_from_source(self.path)
self.loader.get_data(bytecode_path)
code_object = self.loader.get_code(self.name)
self.verify_code(code_object, bytecode_written=True)
def test_code_bad_timestamp(self):
# Bytecode is only used when the timestamp matches the source EXACTLY.
for source_mtime in (0, 2):
assert source_mtime != self.loader.source_mtime
original = self.loader.source_mtime
self.loader.source_mtime = source_mtime
# If bytecode is used then EOFError would be raised by marshal.
self.loader.bytecode = self.loader.bytecode[8:]
code_object = self.loader.get_code(self.name)
self.verify_code(code_object, bytecode_written=True)
self.loader.source_mtime = original
def test_code_bad_magic(self):
# Skip over bytecode with a bad magic number.
self.setUp(magic=b'0000')
# If bytecode is used then EOFError would be raised by marshal.
self.loader.bytecode = self.loader.bytecode[8:]
code_object = self.loader.get_code(self.name)
self.verify_code(code_object, bytecode_written=True)
def test_dont_write_bytecode(self):
# Bytecode is not written if sys.dont_write_bytecode is true.
# Can assume it is false already thanks to the skipIf class decorator.
try:
sys.dont_write_bytecode = True
self.loader.bytecode_path = "<does not exist>"
code_object = self.loader.get_code(self.name)
self.assertNotIn(self.cached, self.loader.written)
finally:
sys.dont_write_bytecode = False
def test_no_set_data(self):
# If set_data is not defined, one can still read bytecode.
self.setUp(magic=b'0000')
original_set_data = self.loader.__class__.mro()[1].set_data
try:
del self.loader.__class__.mro()[1].set_data
code_object = self.loader.get_code(self.name)
self.verify_code(code_object)
finally:
self.loader.__class__.mro()[1].set_data = original_set_data
def test_set_data_raises_exceptions(self):
# Raising NotImplementedError or OSError is okay for set_data.
def raise_exception(exc):
def closure(*args, **kwargs):
raise exc
return closure
self.setUp(magic=b'0000')
self.loader.set_data = raise_exception(NotImplementedError)
code_object = self.loader.get_code(self.name)
self.verify_code(code_object)
class Frozen_SLBytecodeTests(SourceLoaderBytecodeTests, unittest.TestCase):
loader_mock = Frozen_SL
init = frozen_init
util = frozen_util
class Source_SLBytecodeTests(SourceLoaderBytecodeTests, unittest.TestCase):
loader_mock = Source_SL
init = source_init
util = source_util
class SourceLoaderGetSourceTests:
"""Tests for importlib.abc.SourceLoader.get_source()."""
def test_default_encoding(self):
# Should have no problems with UTF-8 text.
name = 'mod'
mock = self.SourceOnlyLoaderMock('mod.file')
source = 'x = "ü"'
mock.source = source.encode('utf-8')
returned_source = mock.get_source(name)
self.assertEqual(returned_source, source)
def test_decoded_source(self):
# Decoding should work.
name = 'mod'
mock = self.SourceOnlyLoaderMock("mod.file")
source = "# coding: Latin-1\nx='ü'"
assert source.encode('latin-1') != source.encode('utf-8')
mock.source = source.encode('latin-1')
returned_source = mock.get_source(name)
self.assertEqual(returned_source, source)
def test_universal_newlines(self):
# PEP 302 says universal newlines should be used.
name = 'mod'
mock = self.SourceOnlyLoaderMock('mod.file')
source = "x = 42\r\ny = -13\r\n"
mock.source = source.encode('utf-8')
expect = io.IncrementalNewlineDecoder(None, True).decode(source)
self.assertEqual(mock.get_source(name), expect)
class Frozen_SourceOnlyLGetSourceTests(SourceLoaderGetSourceTests, unittest.TestCase):
SourceOnlyLoaderMock = Frozen_SourceOnlyL
class Source_SourceOnlyLGetSourceTests(SourceLoaderGetSourceTests, unittest.TestCase):
SourceOnlyLoaderMock = Source_SourceOnlyL
if __name__ == '__main__':
unittest.main()
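# The suites above all follow the same dual-instantiation pattern: each
# shared test mix-in is bound once to the frozen importlib and once to the
# pure-Python source implementation so the two stay in sync. A minimal
# sketch of the pattern (helper and attribute names assumed from this
# file's harness):
#
#   class SharedTests:
#       def test_abc_module_present(self):
#           self.assertIsNotNone(self.abc)
#
#   class Frozen_SharedTests(SharedTests, unittest.TestCase):
#       abc = frozen_abc
#
#   class Source_SharedTests(SharedTests, unittest.TestCase):
#       abc = source_abc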
|
lgpl-3.0
|
Pexego/account-financial-tools
|
account_invoice_constraint_chronology/__init__.py
|
74
|
1270
|
# -*- coding: utf-8 -*-
#
#
# Authors: Adrien Peiffer
# Copyright (c) 2014 Acsone SA/NV (http://www.acsone.eu)
# All Rights Reserved
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contact a Free Software
# Service Company.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from . import model
from . import tests
|
agpl-3.0
|
turbokongen/home-assistant
|
homeassistant/components/ubus/device_tracker.py
|
15
|
7775
|
"""Support for OpenWRT (ubus) routers."""
import json
import logging
import re
import requests
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME, HTTP_OK
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_DHCP_SOFTWARE = "dhcp_software"
DEFAULT_DHCP_SOFTWARE = "dnsmasq"
DHCP_SOFTWARES = ["dnsmasq", "odhcpd", "none"]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_DHCP_SOFTWARE, default=DEFAULT_DHCP_SOFTWARE): vol.In(
DHCP_SOFTWARES
),
}
)
def get_scanner(hass, config):
"""Validate the configuration and return an ubus scanner."""
dhcp_sw = config[DOMAIN][CONF_DHCP_SOFTWARE]
if dhcp_sw == "dnsmasq":
scanner = DnsmasqUbusDeviceScanner(config[DOMAIN])
elif dhcp_sw == "odhcpd":
scanner = OdhcpdUbusDeviceScanner(config[DOMAIN])
else:
scanner = UbusDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
def _refresh_on_access_denied(func):
"""If remove rebooted, it lost our session so rebuild one and try again."""
def decorator(self, *args, **kwargs):
"""Wrap the function to refresh session_id on PermissionError."""
try:
return func(self, *args, **kwargs)
except PermissionError:
_LOGGER.warning(
"Invalid session detected."
" Trying to refresh session_id and re-run RPC"
)
self.session_id = _get_session_id(self.url, self.username, self.password)
return func(self, *args, **kwargs)
return decorator
class UbusDeviceScanner(DeviceScanner):
"""
This class queries a wireless router running OpenWrt firmware.
Adapted from Tomato scanner.
"""
def __init__(self, config):
"""Initialize the scanner."""
host = config[CONF_HOST]
self.username = config[CONF_USERNAME]
self.password = config[CONF_PASSWORD]
self.parse_api_pattern = re.compile(r"(?P<param>\w*) = (?P<value>.*);")
self.last_results = {}
self.url = f"http://{host}/ubus"
self.session_id = _get_session_id(self.url, self.username, self.password)
self.hostapd = []
self.mac2name = None
self.success_init = self.session_id is not None
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return self.last_results
def _generate_mac2name(self):
"""Return empty MAC to name dict. Overridden if DHCP server is set."""
self.mac2name = {}
@_refresh_on_access_denied
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
if self.mac2name is None:
self._generate_mac2name()
if self.mac2name is None:
# Generation of mac2name dictionary failed
return None
name = self.mac2name.get(device.upper(), None)
return name
@_refresh_on_access_denied
def _update_info(self):
"""Ensure the information from the router is up to date.
Returns a boolean indicating whether the scan was successful.
"""
if not self.success_init:
return False
_LOGGER.info("Checking hostapd")
if not self.hostapd:
hostapd = _req_json_rpc(self.url, self.session_id, "list", "hostapd.*", "")
self.hostapd.extend(hostapd.keys())
self.last_results = []
results = 0
# for each access point
for hostapd in self.hostapd:
result = _req_json_rpc(
self.url, self.session_id, "call", hostapd, "get_clients"
)
if result:
results = results + 1
# Check whether each device is authorized (valid wpa key)
for key in result["clients"].keys():
device = result["clients"][key]
if device["authorized"]:
self.last_results.append(key)
return bool(results)
class DnsmasqUbusDeviceScanner(UbusDeviceScanner):
"""Implement the Ubus device scanning for the dnsmasq DHCP server."""
def __init__(self, config):
"""Initialize the scanner."""
super().__init__(config)
self.leasefile = None
def _generate_mac2name(self):
if self.leasefile is None:
result = _req_json_rpc(
self.url,
self.session_id,
"call",
"uci",
"get",
config="dhcp",
type="dnsmasq",
)
if result:
values = result["values"].values()
self.leasefile = next(iter(values))["leasefile"]
else:
return
result = _req_json_rpc(
self.url, self.session_id, "call", "file", "read", path=self.leasefile
)
if result:
self.mac2name = {}
for line in result["data"].splitlines():
hosts = line.split(" ")
self.mac2name[hosts[1].upper()] = hosts[3]
else:
# Error, handled in the _req_json_rpc
return
class OdhcpdUbusDeviceScanner(UbusDeviceScanner):
"""Implement the Ubus device scanning for the odhcp DHCP server."""
def _generate_mac2name(self):
result = _req_json_rpc(self.url, self.session_id, "call", "dhcp", "ipv4leases")
if result:
self.mac2name = {}
for device in result["device"].values():
for lease in device["leases"]:
mac = lease["mac"] # mac = aabbccddeeff
# Convert it to expected format with colon
mac = ":".join(mac[i : i + 2] for i in range(0, len(mac), 2))
self.mac2name[mac.upper()] = lease["hostname"]
else:
# Error, handled in the _req_json_rpc
return
def _req_json_rpc(url, session_id, rpcmethod, subsystem, method, **params):
"""Perform one JSON RPC operation."""
data = json.dumps(
{
"jsonrpc": "2.0",
"id": 1,
"method": rpcmethod,
"params": [session_id, subsystem, method, params],
}
)
try:
res = requests.post(url, data=data, timeout=5)
except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
return
if res.status_code == HTTP_OK:
response = res.json()
if "error" in response:
if (
"message" in response["error"]
and response["error"]["message"] == "Access denied"
):
raise PermissionError(response["error"]["message"])
raise HomeAssistantError(response["error"]["message"])
if rpcmethod == "call":
try:
return response["result"][1]
except IndexError:
return
else:
return response["result"]
def _get_session_id(url, username, password):
"""Get the authentication token for the given host+username+password."""
res = _req_json_rpc(
url,
"00000000000000000000000000000000",
"call",
"session",
"login",
username=username,
password=password,
)
return res["ubus_rpc_session"]
|
apache-2.0
|
duramato/SickRage
|
lib/concurrent/futures/_compat.py
|
106
|
4976
|
from keyword import iskeyword as _iskeyword
from operator import itemgetter as _itemgetter
import sys as _sys
def namedtuple(typename, field_names):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', 'x y')
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Parse and validate the field names. Validation serves two purposes,
# generating informative error messages and preventing template injection attacks.
if isinstance(field_names, basestring):
field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
field_names = tuple(map(str, field_names))
for name in (typename,) + field_names:
if not all(c.isalnum() or c == '_' for c in name):
raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a keyword: %r' % name)
if name[0].isdigit():
raise ValueError('Type names and field names cannot start with a number: %r' % name)
seen_names = set()
for name in field_names:
if name.startswith('_'):
raise ValueError('Field names cannot start with an underscore: %r' % name)
if name in seen_names:
raise ValueError('Encountered duplicate field name: %r' % name)
seen_names.add(name)
# Create and fill-in the class template
numfields = len(field_names)
argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes
reprtxt = ', '.join('%s=%%r' % name for name in field_names)
dicttxt = ', '.join('%r: t[%d]' % (name, pos) for pos, name in enumerate(field_names))
template = '''class %(typename)s(tuple):
'%(typename)s(%(argtxt)s)' \n
__slots__ = () \n
_fields = %(field_names)r \n
def __new__(_cls, %(argtxt)s):
return _tuple.__new__(_cls, (%(argtxt)s)) \n
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new %(typename)s object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != %(numfields)d:
raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
return result \n
def __repr__(self):
return '%(typename)s(%(reprtxt)s)' %% self \n
def _asdict(t):
'Return a new dict which maps field names to their values'
return {%(dicttxt)s} \n
def _replace(_self, **kwds):
'Return a new %(typename)s object replacing specified fields with new values'
result = _self._make(map(kwds.pop, %(field_names)r, _self))
if kwds:
raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
return result \n
def __getnewargs__(self):
return tuple(self) \n\n''' % locals()
for i, name in enumerate(field_names):
template += ' %s = _property(_itemgetter(%d))\n' % (name, i)
# Execute the template string in a temporary namespace and
# support tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
_property=property, _tuple=tuple)
try:
exec(template, namespace)
except SyntaxError:
e = _sys.exc_info()[1]
raise SyntaxError(e.message + ':\n' + template)
result = namespace[typename]
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example).
if hasattr(_sys, '_getframe'):
result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
return result
if _sys.version_info[0] < 3:
def reraise(exc, traceback):
locals_ = {'exc_type': type(exc), 'exc_value': exc, 'traceback': traceback}
exec('raise exc_type, exc_value, traceback', {}, locals_)
else:
def reraise(exc, traceback):
# Tracebacks are embedded in exceptions in Python 3
raise exc
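# Usage sketch (illustrative only; do_work is hypothetical): reraise()
# lets a caller capture an exception in one context and re-raise it
# elsewhere with its original traceback on both Python 2 and 3:
#
#   try:
#       do_work()
#   except Exception:
#       exc_type, exc, tb = _sys.exc_info()
#       reraise(exc, tb)  # traceback preserved even on Python 2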
|
gpl-3.0
|
40223139/203739test
|
static/Brython3.1.0-20150301-090019/Lib/xml/dom/pulldom.py
|
850
|
11761
|
import xml.sax
import xml.sax.handler
START_ELEMENT = "START_ELEMENT"
END_ELEMENT = "END_ELEMENT"
COMMENT = "COMMENT"
START_DOCUMENT = "START_DOCUMENT"
END_DOCUMENT = "END_DOCUMENT"
PROCESSING_INSTRUCTION = "PROCESSING_INSTRUCTION"
IGNORABLE_WHITESPACE = "IGNORABLE_WHITESPACE"
CHARACTERS = "CHARACTERS"
class PullDOM(xml.sax.ContentHandler):
_locator = None
document = None
def __init__(self, documentFactory=None):
from xml.dom import XML_NAMESPACE
self.documentFactory = documentFactory
self.firstEvent = [None, None]
self.lastEvent = self.firstEvent
self.elementStack = []
self.push = self.elementStack.append
try:
self.pop = self.elementStack.pop
except AttributeError:
# use class' pop instead
pass
self._ns_contexts = [{XML_NAMESPACE:'xml'}] # contains uri -> prefix dicts
self._current_context = self._ns_contexts[-1]
self.pending_events = []
def pop(self):
result = self.elementStack[-1]
del self.elementStack[-1]
return result
def setDocumentLocator(self, locator):
self._locator = locator
def startPrefixMapping(self, prefix, uri):
if not hasattr(self, '_xmlns_attrs'):
self._xmlns_attrs = []
self._xmlns_attrs.append((prefix or 'xmlns', uri))
self._ns_contexts.append(self._current_context.copy())
self._current_context[uri] = prefix or None
def endPrefixMapping(self, prefix):
self._current_context = self._ns_contexts.pop()
def startElementNS(self, name, tagName, attrs):
# Retrieve xml namespace declaration attributes.
xmlns_uri = 'http://www.w3.org/2000/xmlns/'
xmlns_attrs = getattr(self, '_xmlns_attrs', None)
if xmlns_attrs is not None:
for aname, value in xmlns_attrs:
attrs._attrs[(xmlns_uri, aname)] = value
self._xmlns_attrs = []
uri, localname = name
if uri:
# When using namespaces, the reader may or may not
# provide us with the original name. If not, create
# *a* valid tagName from the current context.
if tagName is None:
prefix = self._current_context[uri]
if prefix:
tagName = prefix + ":" + localname
else:
tagName = localname
if self.document:
node = self.document.createElementNS(uri, tagName)
else:
node = self.buildDocument(uri, tagName)
else:
# When the tagname is not prefixed, it just appears as
# localname
if self.document:
node = self.document.createElement(localname)
else:
node = self.buildDocument(None, localname)
for aname,value in attrs.items():
a_uri, a_localname = aname
if a_uri == xmlns_uri:
if a_localname == 'xmlns':
qname = a_localname
else:
qname = 'xmlns:' + a_localname
attr = self.document.createAttributeNS(a_uri, qname)
node.setAttributeNodeNS(attr)
elif a_uri:
prefix = self._current_context[a_uri]
if prefix:
qname = prefix + ":" + a_localname
else:
qname = a_localname
attr = self.document.createAttributeNS(a_uri, qname)
node.setAttributeNodeNS(attr)
else:
attr = self.document.createAttribute(a_localname)
node.setAttributeNode(attr)
attr.value = value
self.lastEvent[1] = [(START_ELEMENT, node), None]
self.lastEvent = self.lastEvent[1]
self.push(node)
def endElementNS(self, name, tagName):
self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
self.lastEvent = self.lastEvent[1]
def startElement(self, name, attrs):
if self.document:
node = self.document.createElement(name)
else:
node = self.buildDocument(None, name)
for aname,value in attrs.items():
attr = self.document.createAttribute(aname)
attr.value = value
node.setAttributeNode(attr)
self.lastEvent[1] = [(START_ELEMENT, node), None]
self.lastEvent = self.lastEvent[1]
self.push(node)
def endElement(self, name):
self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
self.lastEvent = self.lastEvent[1]
def comment(self, s):
if self.document:
node = self.document.createComment(s)
self.lastEvent[1] = [(COMMENT, node), None]
self.lastEvent = self.lastEvent[1]
else:
event = [(COMMENT, s), None]
self.pending_events.append(event)
def processingInstruction(self, target, data):
if self.document:
node = self.document.createProcessingInstruction(target, data)
self.lastEvent[1] = [(PROCESSING_INSTRUCTION, node), None]
self.lastEvent = self.lastEvent[1]
else:
event = [(PROCESSING_INSTRUCTION, target, data), None]
self.pending_events.append(event)
def ignorableWhitespace(self, chars):
node = self.document.createTextNode(chars)
self.lastEvent[1] = [(IGNORABLE_WHITESPACE, node), None]
self.lastEvent = self.lastEvent[1]
def characters(self, chars):
node = self.document.createTextNode(chars)
self.lastEvent[1] = [(CHARACTERS, node), None]
self.lastEvent = self.lastEvent[1]
def startDocument(self):
if self.documentFactory is None:
import xml.dom.minidom
self.documentFactory = xml.dom.minidom.Document.implementation
def buildDocument(self, uri, tagname):
# Can't do that in startDocument, since we need the tagname
# XXX: obtain DocumentType
node = self.documentFactory.createDocument(uri, tagname, None)
self.document = node
self.lastEvent[1] = [(START_DOCUMENT, node), None]
self.lastEvent = self.lastEvent[1]
self.push(node)
# Put everything we have seen so far into the document
for e in self.pending_events:
if e[0][0] == PROCESSING_INSTRUCTION:
_,target,data = e[0]
n = self.document.createProcessingInstruction(target, data)
e[0] = (PROCESSING_INSTRUCTION, n)
elif e[0][0] == COMMENT:
n = self.document.createComment(e[0][1])
e[0] = (COMMENT, n)
else:
raise AssertionError("Unknown pending event ", e[0][0])
self.lastEvent[1] = e
self.lastEvent = e
self.pending_events = None
return node.firstChild
def endDocument(self):
self.lastEvent[1] = [(END_DOCUMENT, self.document), None]
self.pop()
def clear(self):
"clear(): Explicitly release parsing structures"
self.document = None
class ErrorHandler:
def warning(self, exception):
print(exception)
def error(self, exception):
raise exception
def fatalError(self, exception):
raise exception
class DOMEventStream:
def __init__(self, stream, parser, bufsize):
self.stream = stream
self.parser = parser
self.bufsize = bufsize
if not hasattr(self.parser, 'feed'):
self.getEvent = self._slurp
self.reset()
def reset(self):
self.pulldom = PullDOM()
# This content handler relies on namespace support
self.parser.setFeature(xml.sax.handler.feature_namespaces, 1)
self.parser.setContentHandler(self.pulldom)
def __getitem__(self, pos):
rc = self.getEvent()
if rc:
return rc
raise IndexError
def __next__(self):
rc = self.getEvent()
if rc:
return rc
raise StopIteration
def __iter__(self):
return self
def expandNode(self, node):
event = self.getEvent()
parents = [node]
while event:
token, cur_node = event
if cur_node is node:
return
if token != END_ELEMENT:
parents[-1].appendChild(cur_node)
if token == START_ELEMENT:
parents.append(cur_node)
elif token == END_ELEMENT:
del parents[-1]
event = self.getEvent()
def getEvent(self):
# use IncrementalParser interface, so we get the desired
# pull effect
if not self.pulldom.firstEvent[1]:
self.pulldom.lastEvent = self.pulldom.firstEvent
while not self.pulldom.firstEvent[1]:
buf = self.stream.read(self.bufsize)
if not buf:
self.parser.close()
return None
self.parser.feed(buf)
rc = self.pulldom.firstEvent[1][0]
self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
return rc
def _slurp(self):
""" Fallback replacement for getEvent() using the
standard SAX2 interface, which means we slurp the
SAX events into memory (no performance gain, but
we are compatible with all SAX parsers).
"""
self.parser.parse(self.stream)
self.getEvent = self._emit
return self._emit()
def _emit(self):
""" Fallback replacement for getEvent() that emits
the events that _slurp() read previously.
"""
rc = self.pulldom.firstEvent[1][0]
self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
return rc
def clear(self):
"""clear(): Explicitly release parsing objects"""
self.pulldom.clear()
del self.pulldom
self.parser = None
self.stream = None
class SAX2DOM(PullDOM):
def startElementNS(self, name, tagName, attrs):
PullDOM.startElementNS(self, name, tagName, attrs)
curNode = self.elementStack[-1]
parentNode = self.elementStack[-2]
parentNode.appendChild(curNode)
def startElement(self, name, attrs):
PullDOM.startElement(self, name, attrs)
curNode = self.elementStack[-1]
parentNode = self.elementStack[-2]
parentNode.appendChild(curNode)
def processingInstruction(self, target, data):
PullDOM.processingInstruction(self, target, data)
node = self.lastEvent[0][1]
parentNode = self.elementStack[-1]
parentNode.appendChild(node)
def ignorableWhitespace(self, chars):
PullDOM.ignorableWhitespace(self, chars)
node = self.lastEvent[0][1]
parentNode = self.elementStack[-1]
parentNode.appendChild(node)
def characters(self, chars):
PullDOM.characters(self, chars)
node = self.lastEvent[0][1]
parentNode = self.elementStack[-1]
parentNode.appendChild(node)
default_bufsize = (2 ** 14) - 20
def parse(stream_or_string, parser=None, bufsize=None):
if bufsize is None:
bufsize = default_bufsize
if isinstance(stream_or_string, str):
stream = open(stream_or_string, 'rb')
else:
stream = stream_or_string
if not parser:
parser = xml.sax.make_parser()
return DOMEventStream(stream, parser, bufsize)
def parseString(string, parser=None):
from io import StringIO
bufsize = len(string)
buf = StringIO(string)
if not parser:
parser = xml.sax.make_parser()
return DOMEventStream(buf, parser, bufsize)
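# Typical pull-parsing usage of this module (sketch; 'data.xml' and the
# 'item' tag are hypothetical):
#
#   doc = parse('data.xml')
#   for event, node in doc:
#       if event == START_ELEMENT and node.tagName == 'item':
#           doc.expandNode(node)  # materialize the subtree as a DOM node
#           print(node.toxml())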
|
gpl-3.0
|
Yannig/ansible
|
lib/ansible/playbook/playbook_include.py
|
55
|
6748
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleParserError, AnsibleError
from ansible.module_utils.six import iteritems
from ansible.parsing.splitter import split_args, parse_kv
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.conditional import Conditional
from ansible.playbook.taggable import Taggable
from ansible.template import Templar
class PlaybookInclude(Base, Conditional, Taggable):
_name = FieldAttribute(isa='string')
_import_playbook = FieldAttribute(isa='string')
_vars = FieldAttribute(isa='dict', default=dict())
@staticmethod
def load(data, basedir, variable_manager=None, loader=None):
return PlaybookInclude().load_data(ds=data, basedir=basedir, variable_manager=variable_manager, loader=loader)
def load_data(self, ds, basedir, variable_manager=None, loader=None):
'''
Overrides the base load_data(), as we're actually going to return a new
Playbook() object rather than a PlaybookInclude object
'''
# import here to avoid a dependency loop
from ansible.playbook import Playbook
from ansible.playbook.play import Play
# first, we use the original parent method to correctly load the object
# via the load_data/preprocess_data system we normally use for other
# playbook objects
new_obj = super(PlaybookInclude, self).load_data(ds, variable_manager, loader)
all_vars = self.vars.copy()
if variable_manager:
all_vars.update(variable_manager.get_vars())
templar = Templar(loader=loader, variables=all_vars)
# then we use the object to load a Playbook
pb = Playbook(loader=loader)
file_name = templar.template(new_obj.import_playbook)
if not os.path.isabs(file_name):
file_name = os.path.join(basedir, file_name)
pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager)
# finally, update each loaded playbook entry with any variables specified
# on the included playbook and/or any tags which may have been set
for entry in pb._entries:
# conditional includes on a playbook need a marker to skip gathering
if new_obj.when and isinstance(entry, Play):
entry._included_conditional = new_obj.when[:]
temp_vars = entry.vars.copy()
temp_vars.update(new_obj.vars)
param_tags = temp_vars.pop('tags', None)
if param_tags is not None:
entry.tags.extend(param_tags.split(','))
entry.vars = temp_vars
entry.tags = list(set(entry.tags).union(new_obj.tags))
if entry._included_path is None:
entry._included_path = os.path.dirname(file_name)
# Check to see if we need to forward the conditionals on to the included
# plays. If so, we can take a shortcut here and simply prepend them to
# those attached to each block (if any)
if new_obj.when:
for task_block in (entry.pre_tasks + entry.roles + entry.tasks + entry.post_tasks):
task_block._attributes['when'] = new_obj.when[:] + task_block.when[:]
return pb
def preprocess_data(self, ds):
'''
Reorganizes the data for a PlaybookInclude data structure to line
up with what we expect the proper attributes to be
'''
assert isinstance(ds, dict), 'ds (%s) should be a dict but was a %s' % (ds, type(ds))
# the new, cleaned datastructure, which will have legacy
# items reduced to a standard structure
new_ds = AnsibleMapping()
if isinstance(ds, AnsibleBaseYAMLObject):
new_ds.ansible_pos = ds.ansible_pos
for (k, v) in iteritems(ds):
if k in ('include', 'import_playbook'):
self._preprocess_import(ds, new_ds, k, v)
else:
# some basic error checking, to make sure vars are properly
# formatted and do not conflict with k=v parameters
if k == 'vars':
if 'vars' in new_ds:
raise AnsibleParserError("import_playbook parameters cannot be mixed with 'vars' entries for import statements", obj=ds)
elif not isinstance(v, dict):
raise AnsibleParserError("vars for import_playbook statements must be specified as a dictionary", obj=ds)
new_ds[k] = v
return super(PlaybookInclude, self).preprocess_data(new_ds)
def _preprocess_import(self, ds, new_ds, k, v):
'''
Splits the playbook import line up into filename and parameters
'''
if v is None:
raise AnsibleParserError("playbook import parameter is missing", obj=ds)
# The import_playbook line must include at least one item, which is the filename
# to import. Anything after that should be regarded as a parameter to the import
items = split_args(v)
if len(items) == 0:
raise AnsibleParserError("import_playbook statements must specify the file name to import", obj=ds)
else:
new_ds['import_playbook'] = items[0]
if len(items) > 1:
# rejoin the parameter portion of the arguments and
# then use parse_kv() to get a dict of params back
params = parse_kv(" ".join(items[1:]))
if 'tags' in params:
new_ds['tags'] = params.pop('tags')
if 'vars' in new_ds:
raise AnsibleParserError("import_playbook parameters cannot be mixed with 'vars' entries for import statements", obj=ds)
new_ds['vars'] = params
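# For illustration (hypothetical values): a playbook entry such as
#   - import_playbook: other.yml tags=deploy foo=bar
# is reorganized by _preprocess_import() above into roughly
#   {'import_playbook': 'other.yml', 'tags': 'deploy', 'vars': {'foo': 'bar'}}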
|
gpl-3.0
|
yati-sagade/RyDyrect
|
djangoappengine/management/commands/runserver.py
|
6
|
6549
|
from optparse import make_option
import logging
import sys
from django.db import connections
from ...boot import PROJECT_DIR
from ...db.base import DatabaseWrapper, get_datastore_paths
from django.core.management.base import BaseCommand
from django.core.management.commands.runserver import BaseRunserverCommand
from django.core.exceptions import ImproperlyConfigured
from google.appengine.tools import dev_appserver_main
class Command(BaseRunserverCommand):
"""Overrides the default Django runserver command.
Instead of starting the default Django development server this command
fires up a copy of the full-fledged App Engine dev_appserver that emulates
the live environment your application will be deployed to.
"""
option_list = BaseCommand.option_list + (
make_option('--debug', action='store_true', default=False,
help='Prints verbose debugging messages to the console while running.'),
make_option('--debug_imports', action='store_true', default=False,
help='Prints debugging messages related to importing modules, including \
search paths and errors.'),
make_option('-c', '--clear_datastore', action='store_true', default=False,
help='Clears the datastore data and history files before starting the web server.'),
make_option('--high_replication', action='store_true', default=False,
help='Use the high replication datastore consistency model.'),
make_option('--require_indexes', action='store_true', default=False,
help="""Disables automatic generation of entries in the index.yaml file. Instead, when
the application makes a query that requires that its index be defined in the
file and the index definition is not found, an exception will be raised,
similar to what would happen when running on App Engine."""),
make_option('--enable_sendmail', action='store_true', default=False,
help='Uses the local computer\'s Sendmail installation for sending email messages.'),
make_option('--datastore_path',
help="""The path to use for the local datastore data file. The server creates this file
if it does not exist."""),
make_option('--history_path',
help="""The path to use for the local datastore history file. The server uses the query
history file to generate entries for index.yaml."""),
make_option('--login_url',
help='The relative URL to use for the Users sign-in page. Default is /_ah/login.'),
make_option('--smtp_host',
help='The hostname of the SMTP server to use for sending email messages.'),
make_option('--smtp_port',
help='The port number of the SMTP server to use for sending email messages.'),
make_option('--smtp_user',
help='The username to use with the SMTP server for sending email messages.'),
make_option('--smtp_password',
help='The password to use with the SMTP server for sending email messages.'),
)
help = 'Runs a copy of the App Engine development server.'
args = '[optional port number, or ipaddr:port]'
def create_parser(self, prog_name, subcommand):
"""
Create and return the ``OptionParser`` which will be used to
parse the arguments to this command.
"""
# hack __main__ so --help in dev_appserver_main works OK.
sys.modules['__main__'] = dev_appserver_main
return super(Command, self).create_parser(prog_name, subcommand)
def run_from_argv(self, argv):
"""
Captures the program name, usually "manage.py"
"""
self.progname = argv[0]
super(Command, self).run_from_argv(argv)
def run(self, *args, **options):
"""
Starts the App Engine dev_appserver program for the Django project.
The appserver is run with default parameters. If you need to pass any special
parameters to the dev_appserver you will have to invoke it manually.
Unlike the normal devserver, this does not use the autoreloader, as the
App Engine dev_appserver needs to be run from the main thread.
"""
args = []
# Set bind ip/port if specified.
if self.addr:
args.extend(["--address", self.addr])
if self.port:
args.extend(["--port", self.port])
# If runserver is called using handle(), progname will not be set
if not hasattr(self, 'progname'):
self.progname = "manage.py"
# Add email settings
from django.conf import settings
if not options.get('smtp_host', None) and not options.get('enable_sendmail', None):
args.extend(['--smtp_host', settings.EMAIL_HOST,
'--smtp_port', str(settings.EMAIL_PORT),
'--smtp_user', settings.EMAIL_HOST_USER,
'--smtp_password', settings.EMAIL_HOST_PASSWORD])
# Pass the application specific datastore location to the server.
for name in connections:
connection = connections[name]
if isinstance(connection, DatabaseWrapper):
for key, path in get_datastore_paths(connection.settings_dict).items():
# XXX/TODO: Remove this when SDK 1.4.3 is released
if key == 'prospective_search_path':
continue
arg = '--' + key
if arg not in args:
args.extend([arg, path])
break
# Process the rest of the options here
bool_options = ['debug', 'debug_imports', 'clear_datastore', 'require_indexes',
'high_replication', 'enable_sendmail', ]
for opt in bool_options:
if options[opt]:
args.append("--%s" % opt)
str_options = ['datastore_path', 'history_path', 'login_url', 'smtp_host', 'smtp_port',
'smtp_user', 'smtp_password',]
for opt in str_options:
if options.get(opt) is not None:
args.extend(["--%s" % opt, options[opt]])
# Reset logging level to INFO as dev_appserver will spew tons of debug logs
logging.getLogger().setLevel(logging.INFO)
# Append the current working directory to the arguments.
dev_appserver_main.main([self.progname] + args + [PROJECT_DIR])
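# Invocation sketch (approximate; the address default is assumed from
# BaseRunserverCommand): running ``manage.py runserver 8080
# --clear_datastore`` builds an argument list along the lines of
#   dev_appserver_main.main(['manage.py', '--address', '127.0.0.1',
#                            '--port', '8080', '--clear_datastore',
#                            PROJECT_DIR])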
|
bsd-3-clause
|
7kbird/chrome
|
tools/telemetry/telemetry/results/buildbot_output_formatter.py
|
41
|
4594
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import perf_tests_helper
from telemetry import value as value_module
from telemetry.results import output_formatter
from telemetry.value import summary as summary_module
class BuildbotOutputFormatter(output_formatter.OutputFormatter):
def __init__(self, output_stream, trace_tag=''):
super(BuildbotOutputFormatter, self).__init__(output_stream)
self._trace_tag = trace_tag
def _PrintPerfResult(self, measurement, trace, v, units,
result_type='default'):
output = perf_tests_helper.PrintPerfResult(
measurement, trace, v, units, result_type, print_to_stdout=False)
self.output_stream.write(output + '\n')
self.output_stream.flush()
def Format(self, page_test_results):
"""Print summary data in a format expected by buildbot for perf dashboards.
If any failed pages exist, only output individual page results, and do
not output any average data.
"""
had_failures = len(page_test_results.failures) > 0
# Print out the list of unique pages.
perf_tests_helper.PrintPages(
[page.display_name for page in page_test_results.pages_that_succeeded])
summary = summary_module.Summary(page_test_results.all_page_specific_values)
for value in summary.interleaved_computed_per_page_values_and_summaries:
if value.page:
self._PrintComputedPerPageValue(value)
else:
self._PrintComputedSummaryValue(value, had_failures)
self._PrintOverallResults(page_test_results)
def _PrintComputedPerPageValue(self, value):
# We don't print per-page values when there is a trace tag.
if self._trace_tag:
return
# Actually print the result.
buildbot_value = value.GetBuildbotValue()
buildbot_data_type = value.GetBuildbotDataType(
output_context=value_module.PER_PAGE_RESULT_OUTPUT_CONTEXT)
if buildbot_value is None or buildbot_data_type is None:
return
buildbot_measurement_name, buildbot_trace_name = (
value.GetChartAndTraceNameForPerPageResult())
self._PrintPerfResult(buildbot_measurement_name,
buildbot_trace_name,
buildbot_value, value.units, buildbot_data_type)
def _PrintComputedSummaryValue(self, value, had_failures):
# If there were any page errors, we typically will print nothing.
#
# Note: this branch is structured less-densely to improve legibility.
if had_failures:
return
buildbot_value = value.GetBuildbotValue()
buildbot_data_type = value.GetBuildbotDataType(
output_context=value_module.COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT)
if buildbot_value is None or buildbot_data_type is None:
return
buildbot_measurement_name, buildbot_trace_name = (
value.GetChartAndTraceNameForComputedSummaryResult(
self._trace_tag))
self._PrintPerfResult(buildbot_measurement_name,
buildbot_trace_name,
buildbot_value, value.units, buildbot_data_type)
def _PrintOverallResults(self, page_test_results):
# If there were no failed pages, output the overall results (results not
# associated with a page).
had_failures = len(page_test_results.failures) > 0
if not had_failures:
for value in page_test_results.all_summary_values:
buildbot_value = value.GetBuildbotValue()
buildbot_data_type = value.GetBuildbotDataType(
output_context=value_module.SUMMARY_RESULT_OUTPUT_CONTEXT)
buildbot_measurement_name, buildbot_trace_name = (
value.GetChartAndTraceNameForComputedSummaryResult(
self._trace_tag))
self._PrintPerfResult(
buildbot_measurement_name,
buildbot_trace_name,
buildbot_value,
value.units,
buildbot_data_type)
# Print the number of failed and errored pages.
self._PrintPerfResult('telemetry_page_measurement_results', 'num_failed',
[len(page_test_results.failures)], 'count',
'unimportant')
# TODO(chrishenry): Remove this in a separate patch to reduce the risk
# of rolling back due to buildbot breakage.
# Also fix src/tools/bisect-perf-regression_test.py when this is
# removed.
self._PrintPerfResult('telemetry_page_measurement_results', 'num_errored',
[0], 'count', 'unimportant')
|
bsd-3-clause
|
jaywreddy/django
|
tests/template_tests/filter_tests/test_make_list.py
|
345
|
1611
|
from django.template.defaultfilters import make_list
from django.test import SimpleTestCase
from django.test.utils import str_prefix
from django.utils.safestring import mark_safe
from ..utils import setup
class MakeListTests(SimpleTestCase):
"""
The make_list filter can destroy existing escaping, so the results are
escaped.
"""
@setup({'make_list01': '{% autoescape off %}{{ a|make_list }}{% endautoescape %}'})
def test_make_list01(self):
output = self.engine.render_to_string('make_list01', {"a": mark_safe("&")})
self.assertEqual(output, str_prefix("[%(_)s'&']"))
@setup({'make_list02': '{{ a|make_list }}'})
def test_make_list02(self):
output = self.engine.render_to_string('make_list02', {"a": mark_safe("&")})
self.assertEqual(output, str_prefix("[%(_)s'&']"))
@setup({'make_list03':
'{% autoescape off %}{{ a|make_list|stringformat:"s"|safe }}{% endautoescape %}'})
def test_make_list03(self):
output = self.engine.render_to_string('make_list03', {"a": mark_safe("&")})
self.assertEqual(output, str_prefix("[%(_)s'&']"))
@setup({'make_list04': '{{ a|make_list|stringformat:"s"|safe }}'})
def test_make_list04(self):
output = self.engine.render_to_string('make_list04', {"a": mark_safe("&")})
self.assertEqual(output, str_prefix("[%(_)s'&']"))
class FunctionTests(SimpleTestCase):
def test_string(self):
self.assertEqual(make_list('abc'), ['a', 'b', 'c'])
def test_integer(self):
self.assertEqual(make_list(1234), ['1', '2', '3', '4'])
|
bsd-3-clause
|
ramineni/myironic
|
ironic/tests/drivers/drac/utils.py
|
10
|
2393
|
# -*- coding: utf-8 -*-
#
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from xml.etree import ElementTree
import mock
def build_soap_xml(items, namespace=None):
"""Build a SOAP XML.
:param items: a list of dictionaries where key is the element name
and the value is the element text.
:param namespace: the namespace for the elements, None for no
namespace. Defaults to None
    :returns: an XML string.
"""
def _create_element(name, value=None):
xml_string = name
if namespace:
xml_string = "{%(namespace)s}%(item)s" % {'namespace': namespace,
'item': xml_string}
element = ElementTree.Element(xml_string)
element.text = value
return element
soap_namespace = "http://www.w3.org/2003/05/soap-envelope"
envelope_element = ElementTree.Element("{%s}Envelope" % soap_namespace)
body_element = ElementTree.Element("{%s}Body" % soap_namespace)
for item in items:
for i in item:
insertion_point = _create_element(i)
if isinstance(item[i], dict):
for j, value in item[i].items():
insertion_point.append(_create_element(j, value))
else:
insertion_point.text = item[i]
body_element.append(insertion_point)
envelope_element.append(body_element)
return ElementTree.tostring(envelope_element)
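# Usage sketch (hypothetical element names; namespace prefixes in the real
# output are assigned by ElementTree and omitted here for brevity):
#
#     xml = build_soap_xml([{'ReturnValue': '0'},
#                           {'Job': {'InstanceID': 'JID_001'}}])
#     # -> an Envelope/Body string containing <ReturnValue>0</ReturnValue>
#     #    and <Job><InstanceID>JID_001</InstanceID></Job>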
def mock_wsman_root(return_value):
"""Helper function to mock the root() from wsman client."""
mock_xml_root = mock.Mock()
mock_xml_root.string.return_value = return_value
mock_xml = mock.Mock()
mock_xml.context.return_value = None
mock_xml.root.return_value = mock_xml_root
return mock_xml
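# Usage sketch: hand a canned XML payload to code that expects a wsman client,
# e.g. client = mock_wsman_root('<s:Envelope>...</s:Envelope>'), after which
# client.root(context).string() returns that payload and client.context()
# returns None, matching how driver code reads responses.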
|
apache-2.0
|
cleemesser/eeg-hdfstorage
|
scripts/edf2eeghdf.py
|
1
|
51534
|
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function # py2.6 with_statement
import sys
import pprint
import h5py
import numpy as np
import os.path
# date related stuff
import datetime
import dateutil
import dateutil.tz
import dateutil.parser
import arrow
# compatibility
import future
from future.utils import iteritems
from builtins import range # range and switch xrange -> range
# from past.builtins import xrange # later, move to from builtins import
import edflib
import eeghdf
# really need to check the original data type and then save as that datatype, along with the necessary conversion factors,
# so the voltages can be reconstructed on our own later
# try with float32 instead?
# LPCH often uses these labels for electrodes
LPCH_COMMON_1020_LABELS = [
'Fp1',
'Fp2',
'F3',
'F4',
'C3',
'C4',
'P3',
'P4',
'O1',
'O2',
'F7',
'F8',
'T3',
'T4',
'T5',
'T6',
'Fz',
'Cz',
'Pz',
'E',
'PG1',
'PG2',
'A1',
'A2',
'T1',
'T2',
'X1',
'X2',
'X3',
'X4',
'X5',
'X6',
'X7',
'EEG Mark1',
'EEG Mark2',
'Events/Markers']
# common 10-20 extended clinical (T1/T2 instead of FT9/FT10)
# will need to specify these as bytes I suppose (or is this ok in utf-8 given the ascii basis)
# keys should be all one case (say upper)
lpch2edf_fixed_len_labels = dict(
FP1='EEG Fp1 ',
F7='EEG F7 ',
T3='EEG T3 ',
T5='EEG T5 ',
O1='EEG O1 ',
F3='EEG F3 ',
C3='EEG C3 ',
P3='EEG P3 ',
FP2='EEG Fp2 ',
F8='EEG F8 ',
T4='EEG T4 ',
T6='EEG T6 ',
O2='EEG O2 ',
F4='EEG F4 ',
C4='EEG C4 ',
P4='EEG P4 ',
CZ='EEG Cz ',
FZ='EEG Fz ',
PZ='EEG Pz ',
T1='EEG FT9 ', # maybe I should map this to FT9/T1
T2='EEG FT10 ', # maybe I should map this to FT10/T2
A1='EEG A1 ',
A2='EEG A2 ',
# these are often (?always) EKG at LPCH, note edfspec says use ECG instead
# of EKG
X1='ECG X1 ', # is this invariant? usually referenced to A1
# this is sometimes ECG but not usually (depends on how squirmy)
X2='X2 ',
PG1='EEG Pg1 ',
PG2='EEG Pg2 ',
# now the uncommon ones
NZ='EEG Nz ',
FPZ='EEG Fpz ',
AF7='EEG AF7 ',
AF8='EEG AF8 ',
AF3='EEG AF3 ',
AFz='EEG AFz ',
AF4='EEG AF4 ',
F9='EEG F9 ',
# F7
F5='EEG F5 ',
# F3 ='EEG F3 ',
F1='EEG F1 ',
# Fz
F2='EEG F2 ',
# F4
F6='EEG F6 ',
# F8
F10='EEG F10 ',
FT9='EEG FT9 ',
FT7='EEG FT7 ',
FC5='EEG FC5 ',
FC3='EEG FC3 ',
FC1='EEG FC1 ',
FCz='EEG FCz ',
FC2='EEG FC2 ',
FC4='EEG FC4 ',
FC6='EEG FC6 ',
FT8='EEG FT8 ',
FT10='EEG FT10 ',
T9='EEG T9 ',
T7='EEG T7 ',
C5='EEG C5 ',
# C3 above
C1='EEG C1 ',
# Cz above
C2='EEG C2 ',
# C4 ='EEG C4 ',
C6='EEG C6 ',
T8='EEG T8 ',
T10='EEG T10 ',
# A2
# T3
# T4
# T5
# T6
TP9='EEG TP9 ',
TP7='EEG TP7 ',
CP5='EEG CP5 ',
CP3='EEG CP3 ',
CP1='EEG CP1 ',
CPZ='EEG CPz ',
CP2='EEG CP2 ',
CP4='EEG CP4 ',
CP6='EEG CP6 ',
TP8='EEG TP8 ',
TP10='EEG TP10 ',
P9='EEG P9 ',
P7='EEG P7 ',
P5='EEG P5 ',
# P3
P1='EEG P1 ',
# Pz
P2='EEG P2 ',
# P4
P6='EEG P6 ',
P8='EEG P8 ',
P10='EEG P10 ',
PO7='EEG PO7 ',
PO3='EEG PO3 ',
POZ='EEG POz ',
PO4='EEG PO4 ',
PO8='EEG PO8 ',
# O1
OZ='EEG Oz ',
# O2
IZ='EEG Iz ',
)
# print("lpch2edf_fixed_len_labels::\n")
# pprint.pprint(lpch2edf_fixed_len_labels)
LPCH_TO_STD_LABELS_STRIP = {k: v.strip()
for k, v in iteritems(lpch2edf_fixed_len_labels)}
# print('LPCH_TO_STD_LABELS_STRIP::\n')
# pprint.pprint(LPCH_TO_STD_LABELS_STRIP)
LPCH_COMMON_1020_LABELS_to_EDF_STANDARD = {
}
def normalize_lpch_signal_label(label):
uplabel = label.upper()
if uplabel in LPCH_TO_STD_LABELS_STRIP:
return LPCH_TO_STD_LABELS_STRIP[uplabel]
else:
return label
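# Examples: normalize_lpch_signal_label('fp1') -> 'EEG Fp1' (case-insensitive
# lookup in LPCH_TO_STD_LABELS_STRIP), while an unmapped label such as
# 'Events/Markers' is returned unchanged.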
def edf2h5_float32(fn, outfn='', hdf_dir='', anonymous=False):
"""
    convert an edf file to hdf5 using a straightforward mapping
    convert to real-valued signals stored as float32's
    just getting started here
--- metadata ---
number_signals
sample_frequency
nsamples
age
signal_labels
Post Menstrual Age
"""
if not outfn:
base = os.path.basename(fn)
base, ext = os.path.splitext(base)
base = base + '.eeghdf5'
outfn = os.path.join(hdf_dir, base)
print('outfn:', outfn)
# outfn = fn+'.eeg.h5'
with edflib.EdfReader(fn) as ef:
nsigs = ef.signals_in_file
# again know/assume that this is uniform sampling across signals
fs = [ef.samplefrequency(ii) for ii in range(nsigs)]
fs0 = fs[0]
if any([ fs0 != xx for xx in fs]):
print("caught multiple sampling frquencies in edf files!!!")
sys.exit(0)
nsamples0 = ef.samples_in_file(0)
print('nsigs=%s, fs0=%s, nsamples0=%s' % (nsigs, fs0, nsamples0))
# create file 'w-' -> fail if exists , w -> truncate if exists
hdf = h5py.File(outfn, 'w')
# use compression? yes! give it a try
eegdata = hdf.create_dataset('eeg', (nsigs, nsamples0), dtype='float32',
# chunks=(nsigs,fs0),
chunks=True,
fletcher32=True,
# compression='gzip',
# compression='lzf',
# maxshape=(256,None)
)
# no compression -> 50 MiB can view eegdata in vitables
# compression='gzip' -> 27 MiB slower
# compression='lzf' -> 35 MiB
# compression='lzf' maxshape=(256,None) -> 36MiB
# szip is unavailable
patient = hdf.create_group('patient')
# add meta data
hdf.attrs['number_signals'] = nsigs
hdf.attrs['sample_frequency'] = fs0
hdf.attrs['nsamples0'] = nsamples0
patient.attrs['gender_b'] = ef.gender_b
patient.attrs['patientname'] = ef.patient_name # PHI
print('birthdate: %s' % ef.birthdate_b, type(ef.birthdate_b))
# this is a string -> date (datetime)
if not ef.birthdate_b:
print("no birthday in this file")
birthdate = None
else:
birthdate = dateutil.parser.parse(ef.birthdate_b)
        print('birthdate (date object):', birthdate)
start_date_time = datetime.datetime(
ef.startdate_year,
ef.startdate_month,
ef.startdate_day,
ef.starttime_hour,
ef.starttime_minute,
ef.starttime_second) # ,tzinfo=dateutil.tz.tzlocal())
print(start_date_time)
if start_date_time and birthdate:
age = start_date_time - birthdate
print('age:', age)
else:
age = None
if age:
patient.attrs['post_natal_age_days'] = age.days
else:
patient.attrs['post_natal_age_days'] = -1
# now start storing the lists of things: labels, units...
# nsigs = len(label_list)
# variable ascii string (or b'' type)
str_dt = h5py.special_dtype(vlen=str)
label_ds = hdf.create_dataset('signal_labels', (nsigs,), dtype=str_dt)
units_ds = hdf.create_dataset('signal_units', (nsigs,), dtype=str_dt)
labels = []
units = list()
# signal_nsamples = []
for ii in range(nsigs):
labels.append(ef.signal_label(ii))
units.append(ef.physical_dimension(ii))
# self.signal_nsamples.append(self.cedf.samples_in_file(ii))
# self.samplefreqs.append(self.cedf.samplefrequency(ii))
# eegdata.signal_labels = labels
# labels are fixed length strings
labels_strip = [ss.strip() for ss in labels]
label_ds[:] = labels_strip
units_ds[:] = units
        # should be more here, plus a switch for anonymous or not
        # need to change this
nchunks = int(nsamples0 // fs0)
samples_per_chunk = int(fs0)
buf = np.zeros((nsigs, samples_per_chunk),
dtype='float64') # buffer is float64_t
print('nchunks: ', nchunks, 'samples_per_chunk:', samples_per_chunk)
        bookmark = 0  # mark where we are in samples
for ii in range(nchunks):
for jj in range(nsigs):
# readsignal(self, signalnum, start, n,
# np.ndarray[np.float64_t, ndim = 1] sigbuf)
# read_phys_signal(chn, 0, nsamples[chn], v)
#read_phys_signal(self, signalnum, start, n, np.ndarray[np.float64_t, ndim=1] sigbuf)
print(ii,jj)
ef.read_phys_signal(jj, bookmark, samples_per_chunk, buf[jj]) # readsignal converts into float
# conversion from float64 to float32
eegdata[:, bookmark:bookmark + samples_per_chunk] = buf
# bookmark should be ii*fs0
bookmark += samples_per_chunk
left_over_samples = nsamples0 - nchunks * samples_per_chunk
print('left_over_samples:', left_over_samples)
if left_over_samples > 0:
for jj in range(nsigs):
ef.read_phys_signal(jj, bookmark, left_over_samples, buf[jj])
eegdata[:,
bookmark:bookmark + left_over_samples] = buf[:,
0:left_over_samples]
hdf.close()
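# Hypothetical invocation of the converter above (paths are placeholders):
#
#     edf2h5_float32('/data/recording.edf', hdf_dir='/tmp')
#     # -> writes /tmp/recording.eeghdf5 containing the float32 'eeg' dataset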
def edf_block_iter_generator(
edf_file, nsamples, samples_per_chunk, dtype='int32'):
"""
factory to produce generators for iterating through an edf file and filling
up an array from the edf with the signal data starting at 0. You choose the
number of @samples_per_chunk, and number of samples to do in total
    @nsamples as well as the dtype. 'int16' is reasonable as well; 'int32' will
handle everything though
it yields -> (numpy_buffer, mark, num)
numpy_buffer,
mark, which is where in the file in total currently reading from
num -- which is the number of samples in the buffer (per signal) to transfer
"""
nchan = edf_file.signals_in_file
# 'int32' will work for int16 as well
buf = np.zeros((nchan, samples_per_chunk), dtype=dtype)
nchunks = nsamples // samples_per_chunk
left_over_samples = nsamples - nchunks * samples_per_chunk
mark = 0
for ii in range(nchunks):
for cc in range(nchan):
edf_file.read_digital_signal(cc, mark, samples_per_chunk, buf[cc])
yield (buf, mark, samples_per_chunk)
mark += samples_per_chunk
# print('mark:', mark)
# left overs
if left_over_samples > 0:
for cc in range(nchan):
edf_file.read_digital_signal(cc, mark, left_over_samples, buf[cc])
yield (buf[:, 0:left_over_samples], mark, left_over_samples)
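# Consumption sketch (assumes an open edflib.EdfReader ef and a target array
# sized (nsigs, nsamples0), e.g. the hdf5 'signals' dataset created elsewhere):
#
#     for buf, mark, num in edf_block_iter_generator(ef, nsamples0, int(fs0)):
#         signals[:, mark:mark + num] = buf[:, 0:num]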
def dig2phys(eeghdf, start, end, chstart, chend):
# edfhdr->edfparam[i].bitvalue = (edfhdr->edfparam[i].phys_max - edfhdr->edfparam[i].phys_min) / (edfhdr->edfparam[i].dig_max - edfhdr->edfparam[i].dig_min);
# edfhdr->edfparam[i].offset = edfhdr->edfparam[i].phys_max /
# edfhdr->edfparam[i].bitvalue - edfhdr->edfparam[i].dig_max;
dmins = eeghdf['signal_digital_mins'][:]
dmaxs = eeghdf['signal_digital_maxs'][:]
phys_maxs = eeghdf['signal_physical_maxs'][:]
phys_mins = eeghdf['signal_physical_mins'][:]
print('dmaxs:', repr(dmaxs))
print('dmins:', repr(dmins))
print('dmaxs[:] - dmins[:]', dmaxs - dmins)
print('phys_maxs', phys_maxs)
print('phys_mins', phys_mins)
bitvalues = (phys_maxs - phys_mins) / (dmaxs - dmins)
offsets = phys_maxs / bitvalues - dmaxs
print('bitvalues, offsets:', bitvalues, offsets)
print('now change their shape to column vectors')
for arr in (bitvalues, offsets):
if len(arr.shape) != 1:
            print('logical error: unexpected shape %s' % (arr.shape,))
            raise ValueError('unexpected shape %s' % (arr.shape,))
s = arr.shape
arr.shape = (s[0], 1)
print('bitvalues, offsets:', bitvalues, offsets)
# buf[i] = phys_bitvalue * (phys_offset + (double)var.two_signed[0]);
dig_signal = eeghdf['signals'][chstart:chend, start:end]
# signal = bitvalues[chstart:chend] *(dig_signal[chstart:chend,:] + offsets[chstart:chend])
    phys_signals = (dig_signal + offsets) * bitvalues  # dig_signal is already sliced above
# return signal, bitvalues, offsets
return phys_signals
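# Worked example of the scaling above: with physical range [-3276.8, 3276.7]
# over digital range [-32768, 32767], bitvalue = 6553.5 / 65535 = 0.1 and
# offset = 3276.7 / 0.1 - 32767 = 0, so a digital sample of 150 maps to
# (150 + 0) * 0.1 = 15.0 physical units.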
# TODO: create edf -> hdf version 1000
# hdf -> edf for hdf version 1000
# tests to verify that round trip is lossless
# [] writing encoding of MRN
# [] and entry of mapped pt_code into database coe
def edf2hdf_oldhack(fn, outfn='', hdf_dir='', anonymous=False):
"""
    convert an edf file to hdf5 using a straightforward mapping
    just getting started here
--- metadata ---
number_signals
sample_frequency
nsamples
age
signal_labels
Post Menstrual Age
"""
if not outfn:
base = os.path.basename(fn)
base, ext = os.path.splitext(base)
base = base + '.eeg.hdf'
outfn = os.path.join(hdf_dir, base)
print('outfn:', outfn)
# outfn = fn+'.eeg.h5'
with edflib.EdfReader(fn) as ef:
# all the data point related stuff
nsigs = ef.signals_in_file
# again know/assume that this is uniform sampling across signals
fs = [ef.samplefrequency(ii) for ii in range(nsigs)]
fs0 = fs[0]
print([ fs0 != xx for xx in fs])
if any([ fs0 != xx for xx in fs]):
print("caught multiple sampling frquencies in edf files!!!")
sys.exit(0)
nsamples0 = ef.samples_in_file(0)
print('nsigs=%s, fs0=%s, nsamples0=%s\n' % (nsigs, fs0, nsamples0))
num_samples_per_signal = ef.get_samples_per_signal() # np array
print("num_samples_per_signal::\n", repr(num_samples_per_signal), '\n')
file_duration_sec = ef.file_duration_seconds
print("file_duration_sec", repr(file_duration_sec))
signal_frequency_array = ef.get_signal_freqs()
print("signal_frequency_array::\n", repr(signal_frequency_array))
# Note that all annotations except the top row must also specify a duration.
# long long onset; /* onset time of the event, expressed in units of 100 nanoSeconds and relative to the starttime in the header */
# char duration[16]; /* duration time, this is a null-terminated ASCII text-string */
# char annotation[EDFLIB_MAX_ANNOTATION_LEN + 1]; /* description of the event in UTF-8, this is a null term string of max length 512
# start("x.y"), end, char[20]
# annotations = ef.read_annotations_as_array() # get numpy array of
# annotations
annotations = ef.read_annotations_100ns_units()
#print("annotations::\n")
#pprint.pprint(annotations) # get list of annotations
signal_text_labels = ef.get_signal_text_labels()
#print("signal_text_labels::\n")
#pprint.pprint(signal_text_labels)
#print("normalized text labels::\n")
signal_text_labels_lpch_normalized = [
normalize_lpch_signal_label(label) for label in signal_text_labels]
#pprint.pprint(signal_text_labels_lpch_normalized)
# ef.recording_additional
# print()
signal_digital_mins = np.array(
[ef.digital_min(ch) for ch in range(nsigs)])
signal_digital_total_min = min(signal_digital_mins)
print("digital mins:", repr(signal_digital_mins))
print("digital total min:", repr(signal_digital_total_min))
signal_digital_maxs = np.array(
[ef.digital_max(ch) for ch in range(nsigs)])
signal_digital_total_max = max(signal_digital_maxs)
print("digital maxs:", repr(signal_digital_maxs))
print("digital total max:", repr(signal_digital_total_max))
signal_physical_dims = [
ef.physical_dimension(ch) for ch in range(nsigs)]
print('signal_physical_dims::\n')
pprint.pprint(signal_physical_dims)
print()
signal_physical_maxs = np.array(
[ef.physical_max(ch) for ch in range(nsigs)])
print('signal_physical_maxs::\n', repr(signal_physical_maxs))
signal_physical_mins = np.array(
[ef.physical_min(ch) for ch in range(nsigs)])
print('signal_physical_mins::\n', repr(signal_physical_mins))
print('gender:', repr(ef.gender_b))
print('admincode:', repr(ef.admincode))
print('birthdate:', repr(ef.birthdate_b)) # this is a string
birthdate = dateutil.parser.parse(ef.birthdate_b)
print('birthdate as datetime:', birthdate)
print('equipment:', repr(ef.equipment))
print('patient:', repr(ef.patient))
print('patientname:', repr(ef.patient_name))
print('patientcode:', repr(ef.patientcode))
print('patient_additional:', repr(ef.patient_additional))
print('recording_additional:', repr(ef.recording_additional))
print('technician:', repr(ef.technician))
# or use arrow
start_date_time = datetime.datetime(
ef.startdate_year,
ef.startdate_month,
ef.startdate_day,
ef.starttime_hour,
ef.starttime_minute,
ef.starttime_second) # tz naive
# end_date_time = datetime.datetime(ef.enddate_year, ef.enddate_month, ef.enddate_day, ef.endtime_hour,
# ef.endtime_minute, ef.endtime_second) # tz naive
# end_date_time - start_date_time
duration = datetime.timedelta(seconds=ef.file_duration_seconds)
print('start_date_time:', start_date_time)
age = arrow.get(start_date_time) - arrow.get(birthdate)
# age = arrow.get(agedt)
print('predicted age:', age)
# total_seconds() returns a float
print('predicted age (seconds):', age.total_seconds())
print()
        # these don't seem to be used much so I will put them at the end
signal_prefilters = [ef.prefilter(ch) for ch in range(nsigs)]
print('signal_prefilters::\n')
pprint.pprint(signal_prefilters)
print()
signal_transducer = [ef.transducer(ch) for ch in range(nsigs)]
print('signal_transducer::\n')
pprint.pprint(signal_transducer)
# now start building the hdf file
# create file 'w-' -> fail if exists , w -> truncate if exists
hdf = h5py.File(outfn, 'w')
# use compression? yes! give it a try
# integer increasing starting at 1000 semantic change at each thousand
hdf.attrs['eeghdf_version'] = 1000
hdf.attrs['signals_in_file'] = nsigs
hdf.attrs['sample_frequency0'] = fs0
hdf.attrs['nsamples0'] = nsamples0
sample_frequencies = hdf.create_dataset(
'sample_frequencies', (nsigs,), dtype='float32')
sample_frequencies[:] = signal_frequency_array
# add phys_bitvalue = .bitvalue, phys_offset = .offset
# (double) phys_value = phys_bitvalue*(phys_offset + (double) var.two_signed[0])
# edfhdr->edfparam[i].bitvalue = (edfhdr->edfparam[i].phys_max - edfhdr->edfparam[i].phys_min) / (edfhdr->edfparam[i].dig_max - edfhdr->edfparam[i].dig_min);
# edfhdr->edfparam[i].offset = edfhdr->edfparam[i].phys_max /
# edfhdr->edfparam[i].bitvalue - edfhdr->edfparam[i].dig_max;
# add meta data
#
start_date_time = datetime.datetime(ef.startdate_year, ef.startdate_month, ef.startdate_day,
ef.starttime_hour,
ef.starttime_minute,
ef.starttime_second) # ,tzinfo=dateutil.tz.tzlocal())
print(start_date_time)
patient = hdf.create_group('patient')
patient.attrs['gender'] = ef.gender_b
patient.attrs['patientname'] = "" # ef.patient_name # PHI
print('birthdate: %s' % ef.birthdate_b, type(ef.birthdate_b))
default_birthdate = datetime.datetime(year=1990, month=1, day=1)
# birthdate = dateutil.parser.parse(ef.birthdate) # this is a string
# -> date (datetime)
birthdate = default_birthdate
print('birthdate (date object):', birthdate)
private_start_date_time = birthdate + age
patient.attrs['birthdate'] = str(birthdate)
# float number age in seconds
patient.attrs['age_seconds'] = age.total_seconds()
# gestational age at birth (in seconds)
# datetime.timedelta(weeks=40).total_seconds() # default 24192000 seconds or 40 weeks, 280 days
# could also call this post-conceptional-age PCA
        patient.attrs['gestational_age_birth_seconds'] = datetime.timedelta(
weeks=40).total_seconds()
patient.attrs['born_premature'] = 'unknown' # ('unknown', True, False)
# hide actual start/end times question: should vary by year or just
# make all the same
hdf.attrs['startdatetime'] = str(private_start_date_time)
hdf.attrs['enddatetime'] = str(private_start_date_time + duration)
patient.attrs['age_days'] = age.days # post natal age in days
patient.attrs['age_seconds'] = age.total_seconds()
# now start storing the lists of things: labels, units...
# nsigs = len(label_list)
# 1. keep the text-vs-bytes distinction clear
        # 2. always use "bytes" instead of "str" when you're sure you want a byte string.
        # for literals, can use "b" prefix, e.g. b'some bytes'
        # 3. for text strings use str or better yet unicode, u'Hello'
# 4. always use UTF-8 in code
# variable ascii string (or b'' type)
str_dt = h5py.special_dtype(vlen=bytes)
label_ds = hdf.create_dataset('signal_labels', (nsigs,), dtype=str_dt)
units_ds = hdf.create_dataset(
'physical_dimensions', (nsigs,), dtype=str_dt)
transducer_ds = hdf.create_dataset(
'transducer', (nsigs,), dtype=str_dt)
prefilter_ds = hdf.create_dataset('prefilter', (nsigs,), dtype=str_dt)
hdf['signal_physical_mins'] = signal_physical_mins
hdf['signal_physical_maxs'] = signal_physical_maxs
hdf['signal_digital_mins'] = signal_digital_mins
hdf['signal_digital_maxs'] = signal_digital_maxs
if all(signal_digital_maxs <= 32767) and all(
signal_digital_mins >= -32768):
number_bits = 16 # EDF
else:
number_bits = 24 # BDF 2^23 = 8388608 + 1 bit for sign
hdf.attrs['number_bits_per_sample'] = number_bits
if number_bits <= 16:
data_dtype = 'int16'
eegdata = hdf.create_dataset('signals', (nsigs, nsamples0), dtype=data_dtype,
# chunks=(nsigs,fs0), # if wanted 1
# second chunks
chunks=True,
fletcher32=True,
compression='gzip' # most universal
# compression='gzip',
# compression='lzf',
# maxshape=(256,None)
)
if number_bits <= 32 and number_bits > 16: # handles up to 32
data_dtype = 'int32'
eegdata = hdf.create_dataset('signals', (nsigs, nsamples0), dtype=data_dtype,
# chunks=(nsigs,fs0), # if wanted 1
# second chunks
chunks=True,
fletcher32=True,
compression='gzip' # most universal
# compression='gzip',
# compression='lzf',
# maxshape=(256,None)
)
# no compression -> 50 MiB can view eegdata in vitables
# compression='gzip' -> 27 MiB slower
# compression='lzf' -> 35 MiB
# compression='lzf' maxshape=(256,None) -> 36MiB
# this works but can do another way:
# labels = []
units = list()
# signal_nsamples = []
for ii in range(nsigs):
# labels.append(ef.signal_label(ii))
units.append(ef.physical_dimension(ii))
# self.signal_nsamples.append(self.cedf.samples_in_file(ii))
# self.samplefreqs.append(self.cedf.samplefrequency(ii))
# eegdata.signal_labels = labels
# labels_strip = [ss.strip() for ss in labels] # labels are fixed
# length strings
units = [cc.strip() for cc in units]
# converted to standard electrode names if possible
label_ds[:] = signal_text_labels_lpch_normalized
units_ds[:] = units
transducer_ds[:] = signal_transducer
prefilter_ds[:] = signal_prefilters
num_annot = len(annotations)
# how do I make sure this init is "long long" enough
edf_annots = hdf.create_group('edf_annotations')
starts = edf_annots.create_dataset(
'starts_100ns', (num_annot,), dtype=np.int64)
# curiously these durations seem to be stored as strings but of
# floating point values "5.00000" for 5 second duration
durations = edf_annots.create_dataset(
'durations_char16', (num_annot,), dtype='S16') # S16 !!! check py3 compatibility
texts = edf_annots.create_dataset('texts', (num_annot,), dtype=str_dt)
# start with a loop
for ii in range(num_annot):
starts[ii] = annotations[ii][0]
            # note: so far I have only seen type(annotations[ii][1]) -> <type 'str'> and they look like ascii strings
# of floating point number of seconds for a duration
# print('type(annotations[ii][1]):', type(annotations[ii][1]))
durations[ii] = annotations[ii][1]
texts[ii] = annotations[ii][2].strip()
        # should be more here, plus a switch for anonymous or not
        # need to change this
nchunks = int(nsamples0 // fs0)
samples_per_chunk = int(fs0) # 1 second of samples
buf = np.zeros((nsigs, samples_per_chunk), dtype='int32')
print(
'nchunks:%s, samples_per_chunk: %s' %
(nchunks, samples_per_chunk))
        bookmark = 0  # mark where we are in samples
for ii in range(nchunks):
for jj in range(nsigs):
# read_phys_signal(self, signalnum, start, n,
# np.ndarray[np.float64_t, ndim = 1] sigbuf)
# readsignal converts into int32 as necessary
ef.read_digital_signal(
jj, bookmark, samples_per_chunk, buf[jj])
# conversion from int32 to int16 as necessary
eegdata[:, bookmark:bookmark + samples_per_chunk] = buf
# bookmark should be ii*fs0
bookmark += samples_per_chunk
left_over_samples = nsamples0 - nchunks * samples_per_chunk
print('left_over_samples:', left_over_samples)
if left_over_samples > 0:
for jj in range(nsigs):
ef.read_digital_signal(
jj, bookmark, left_over_samples, buf[jj])
eegdata[:,bookmark:bookmark + left_over_samples] = buf[:,0:left_over_samples]
hdf.close()
# from trackingdb.models.nkdb import find_lpch_birthday_from_mrn
# Plan
# v = ValidateTrackHeader(header=h)
# if v.is_valid():
# process(v.cleaned_data)
# else:
# mark_as_invalid(h)
def first(mapping):
if mapping:
return mapping[0]
else:
return mapping # say mapping = [] or None
class ValidateTrackHeaderLPCH:
    # after validation, place all data in the cleaned_data field
def __init__(self, header):
        # TODO: validate that database_source_label is in accepted sources
self.hdr = header.copy()
self.validated = False
# self.clean = False
self.cleaned_data = {} # vs update/copy from header
def is_valid(self):
# if name contains "Test" then we should skip this file and log it
mrnobj = None
try:
if name_is_test(self.hdr['patient_name']):
raise ValidationError('test file encountered', code='test file', params=self.hdr)
# if we have a valid mrn, then we can potentially look up the patient or even the study
mrn_ok = valid_lpch_mrn(self.hdr['patientcode'])
if mrn_ok:
mrn = self.hdr['patientcode'].strip()
self.cleaned_data['patientcode'] = mrn
else:
raise ValidationError('bad MRN', code='bad mrn', params=self.hdr['patientcode'])
if valid_lpch_name(self.hdr['patient_name']):
self.cleaned_data['patient_name'] = self.hdr['patient_name'].strip()
else:
if mrn_ok: # try to look up patient in databases
# look up name, dob here based upon mrn in nk_db and/or epic_db
mrnobj = models.NkMrn.query.filter_by(mrn=mrn).first()
if mrnobj:
self.cleaned_data['patient_name'] = mrnobj.nkpatient.name
else:
raise ValidationError('invalid patient name', 'invalid name',
params=self.hdr)
eegno_ok = valid_lpch_eegno(self.hdr['admincode'])
if eegno_ok:
self.cleaned_data['admincode'] = _csu(self.hdr['admincode'])
else:
raise ValidationError('bad eegno/admincode', code='invalid admincode', params=self.hdr)
if self.hdr['birthdate_date']:
self.cleaned_data['birthdate_date'] = self.hdr['birthdate_date']
else:
# then couldn't make a date, see if can find birthday in database
if mrn_ok:
mrnobj = mrnobj if mrnobj else models.NkMrn.query.filter_by(mrn=mrn).first()
if not mrnobj:
raise ValidationError('bad birthdate_date','birthdate error', params=self.hdr)
else:
nbday = mrnobj.nkpatient.dob
self.cleaned_data['birthdate_date'] = nbday
else:
raise ValidationError('bad birthday','birthday error', params=self.hdr)
# copy over other header members
# todo: should do more validation of 'gender'
self.cleaned_data['gender'] = self.hdr['gender']
self.cleaned_data['file_name'] = self.hdr['file_name']
self.cleaned_data['filetype'] = self.hdr['filetype']
self.cleaned_data['signals_in_file'] = self.hdr['signals_in_file']
self.cleaned_data['datarecords_in_file'] = self.hdr['datarecords_in_file']
self.cleaned_data['file_duration_100ns'] = self.hdr['file_duration_100ns']
self.cleaned_data['file_duration_seconds'] = self.hdr['file_duration_seconds']
self.cleaned_data['startdate_date'] = self.hdr['startdate_date']
self.cleaned_data['start_datetime'] = self.hdr['start_datetime']
self.cleaned_data['starttime_subsecond_offset'] = self.hdr['starttime_subsecond_offset']
self.cleaned_data['patient_additional'] = self.hdr['patient_additional'].strip()
self.cleaned_data['technician'] = self.hdr['technician'].strip()
self.cleaned_data['equipment'] = self.hdr['equipment'].strip()
self.cleaned_data['recording_additional'] = self.hdr['recording_additional'].strip()
self.cleaned_data['datarecord_duration_100ns'] = self.hdr['datarecord_duration_100ns']
self.validated = True
return True
except ValidationError as ve:
self.errors = ve.message
self.error_code = ve.code
self.error_params = ve.params
debug(ve.message)
return False
class AnonymizeTrackHeaderLPCH(ValidateTrackHeaderLPCH):
LPCH_DEFAULT_BIRTH_DATETIME = datetime.datetime(year=1990, month=1, day=1)
    # database sources
LPCH_NK = 'LPCH_NK'
STANFORD_NK = 'STANFORD_NK'
def __init__(self, header, source_database_label=LPCH_NK):
super().__init__(header)
with app.app_context():
self.anonymous_header = models.register_and_create_anonymous_header(self.hdr, source_database_label=source_database_label)
# will need to track: patient, study, file
# file needs source and key NK origin
class ValidateTrackHeaderStanford:
    # after validation, place all data in the cleaned_data field
def __init__(self, header):
        # TODO: validate that database_source_label is in accepted sources
self.hdr = header.copy()
self.validated = False
# self.clean = False
self.cleaned_data = {} # vs update/copy from header
def is_valid(self):
# if name contains "Test" then we should skip this file and log it
mrnobj = None
try:
if name_is_test(self.hdr['patient_name']):
raise ValidationError('test file encountered', code='test file', params=self.hdr)
# if we have a valid mrn, then we can potentially look up the patient or even the study
mrn_ok = valid_stanford_mrn(self.hdr['patientcode'])
if mrn_ok:
mrn = self.hdr['patientcode'].strip()
self.cleaned_data['patientcode'] = mrn
else:
raise ValidationError('bad MRN', code='bad mrn', params=self.hdr['patientcode'])
if valid_stanford_name(self.hdr['patient_name']):
self.cleaned_data['patient_name'] = self.hdr['patient_name'].strip()
else:
if mrn_ok: # try to look up patient in databases
# look up name, dob here based upon mrn in nk_db and/or epic_db
mrnobj = models.NkMrn.query.filter_by(mrn=mrn).first()
if mrnobj:
self.cleaned_data['patient_name'] = mrnobj.nkpatient.name
else:
raise ValidationError('invalid patient name', 'invalid name',
params=self.hdr)
eegno_ok = valid_stanford_eegno(self.hdr['admincode'])
if eegno_ok:
self.cleaned_data['admincode'] = _csu(self.hdr['admincode'])
else:
raise ValidationError('bad eegno/admincode', code='invalid admincode', params=self.hdr)
if self.hdr['birthdate_date']:
self.cleaned_data['birthdate_date'] = self.hdr['birthdate_date']
else:
# then couldn't make a date, see if can find birthday in database
if mrn_ok:
mrnobj = mrnobj if mrnobj else models.NkMrn.query.filter_by(mrn=mrn).first()
if not mrnobj:
raise ValidationError('bad birthdate_date','birthdate error', params=self.hdr)
else:
nbday = mrnobj.nkpatient.dob
self.cleaned_data['birthdate_date'] = nbday
else:
raise ValidationError('bad birthday','birthday error', params=self.hdr)
# copy over other header members
# todo: should do more validation of 'gender'
self.cleaned_data['gender'] = self.hdr['gender']
self.cleaned_data['file_name'] = self.hdr['file_name']
self.cleaned_data['filetype'] = self.hdr['filetype']
self.cleaned_data['signals_in_file'] = self.hdr['signals_in_file']
self.cleaned_data['datarecords_in_file'] = self.hdr['datarecords_in_file']
self.cleaned_data['file_duration_100ns'] = self.hdr['file_duration_100ns']
self.cleaned_data['file_duration_seconds'] = self.hdr['file_duration_seconds']
self.cleaned_data['startdate_date'] = self.hdr['startdate_date']
self.cleaned_data['start_datetime'] = self.hdr['start_datetime']
self.cleaned_data['starttime_subsecond_offset'] = self.hdr['starttime_subsecond_offset']
self.cleaned_data['patient_additional'] = self.hdr['patient_additional'].strip()
self.cleaned_data['technician'] = self.hdr['technician'].strip()
self.cleaned_data['equipment'] = self.hdr['equipment'].strip()
self.cleaned_data['recording_additional'] = self.hdr['recording_additional'].strip()
self.cleaned_data['datarecord_duration_100ns'] = self.hdr['datarecord_duration_100ns']
self.validated = True
return True
except ValidationError as ve:
self.errors = ve.message
self.error_code = ve.code
self.error_params = ve.params
debug(ve.message)
return False
class AnonymizeTrackHeaderStanford(ValidateTrackHeaderStanford):
STANFORD_DEFAULT_BIRTH_DATETIME = datetime.datetime(year=1910, month=1, day=1)
    # database sources
LPCH_NK = 'LPCH_NK'
STANFORD_NK = 'STANFORD_NK'
def __init__(self, header, source_database_label='STANFORD_NK'):
super().__init__(header)
with app.app_context():
self.anonymous_header = models.register_and_create_anonymous_header(self.hdr, source_database_label=source_database_label)
# will need to track: patient, study, file
# file needs source and key NK origin
def find_blocks(arr):
blocks = []
print("total arr:", arr)
dfs = np.diff(arr)
dfs_ind = np.where(dfs != 0.0)[0]
last_ind = 0
for dd in dfs_ind+1:
print("block:",arr[last_ind:dd])
blocks.append((last_ind,dd))
last_ind = dd
print("last block:", arr[last_ind:])
blocks.append( (last_ind,len(arr)))
return blocks
def find_blocks2(arr):
blocks = []
N = len(arr)
print("total arr:", arr)
last_ind = 0
last_val = arr[0]
for ii in range(1,N):
if last_val == arr[ii]:
pass
else:
blocks.append((last_ind,ii))
last_ind = ii
last_val = arr[ii]
blocks.append((last_ind,N))
return blocks
def test_find_blocks1():
s = [250.0, 250.0, 250.0, 1.0, 1.0, 1000.0, 1000.0]
blocks = find_blocks(s)
print("blocks:")
print(blocks)
def test_find_blocks2():
s = [250.0, 250.0, 250.0, 1.0, 1.0, 1000.0, 1000.0]
blocks = find_blocks2(s)
print("blocks:")
print(blocks)
def test_find_blocks2_2():
s = [100,100,100,100,100,100,100,100]
blocks = find_blocks2(s)
print("blocks:")
print(blocks)
def edf2hdf2(fn, outfn='', hdf_dir='', anonymize=False):
"""
convert an edf file to hdf5 using fairly straightforward mapping
return True if successful
    @database_source_label tells us which database it came from, LPCH_NK or STANFORD_NK
this is important!
"""
if not outfn:
base = os.path.basename(fn)
base, ext = os.path.splitext(base)
base = base + '.eeghdf'
outfn = os.path.join(hdf_dir, base)
# print('outfn:', outfn)
# all the data point related stuff
with edflib.EdfReader(fn) as ef:
# read all EDF+ header information in just the way I want it
header = {
'file_name': os.path.basename(fn),
'filetype': ef.filetype,
'patient_name': ef.patient_name,
'patientcode': ef.patientcode,
'gender': ef.gender,
'signals_in_file': ef.signals_in_file,
'datarecords_in_file': ef.datarecords_in_file,
'file_duration_100ns': ef.file_duration_100ns,
'file_duration_seconds': ef.file_duration_seconds,
'startdate_date': datetime.date(ef.startdate_year, ef.startdate_month, ef.startdate_day),
'start_datetime': datetime.datetime(ef.startdate_year, ef.startdate_month, ef.startdate_day,
ef.starttime_hour, ef.starttime_minute, ef.starttime_second),
'starttime_subsecond_offset': ef.starttime_subsecond,
'birthdate_date': ef.birthdate_date,
'patient_additional': ef.patient_additional,
'admincode': ef.admincode, # usually the study eg. C13-100
'technician': ef.technician,
'equipment': ef.equipment,
'recording_additional': ef.recording_additional,
'datarecord_duration_100ns': ef.datarecord_duration_100ns,
}
pprint.pprint(header)
#### validation code #####
validator = None
# if source_database_label=='LPCH_NK':
# validator = ValidateTrackHeaderLPCH(header=header)
# elif source_database_label== 'STANFORD_NK':
# validator = ValidateTrackHeaderStanford(header=header)
# else:
# raise ValidationError
# if not validator.is_valid():
# print('problem with this file:', fn)
# print(validator.errors,validator.error_code,
# validator.error_params)
# return False, validator
# else:
# print('\nvalid header::')
# pprint.pprint(validator.cleaned_data)
# header = validator.cleaned_data
# from here on the header is valid and cleaned
# use arrow
start_datetime = header['start_datetime']
# end_date_time = datetime.datetime(ef.enddate_year, ef.enddate_month, ef.enddate_day, ef.endtime_hour,
# ef.endtime_minute, ef.endtime_second) # tz naive
# end_date_time - start_date_time
duration = datetime.timedelta(seconds=header['file_duration_seconds'])
# derived information
birthdate = header['birthdate_date']
if birthdate:
age = arrow.get(start_datetime) - arrow.get(header['birthdate_date'])
debug('predicted age: %s' % age)
# total_seconds() returns a float
debug('predicted age (seconds): %s' % age.total_seconds())
else:
age = datetime.timedelta(seconds=0)
# if anonymize:
# if source_database_label== 'LPCH_NK':
# anonymizer = AnonymizeTrackHeaderLPCH(header, source_database_label=source_database_label)
# if source_database_label == 'STANFORD_NK':
# anonymizer = AnonymizeTrackHeaderStanford(header, source_database_label=source_database_label)
# header = anonymizer.anonymous_header # replace the original header with the anonymous one
# print('anonymized header')
# pprint.pprint(header)
# anonymized version if necessary
header['end_datetime'] = header['start_datetime'] + duration
############# signal array information ##################
# signal block related stuff
nsigs = ef.signals_in_file
# again know/assume that this is uniform sampling across signals
fs0 = ef.samplefrequency(0)
signal_frequency_array = ef.get_signal_freqs()
dfs = np.diff(signal_frequency_array)
dfs_ind = np.where(dfs != 0.0)
dfs_ind = dfs_ind[0]
last_ind = 0
for dd in dfs_ind+1:
print("block:",signal_frequency_array[last_ind:dd])
last_ind = dd
print("last block:", signal_frequency_array[last_ind:])
print("where does sampling rate change?", np.where(dfs != 0.0))
print("elements:", signal_frequency_array[np.where(dfs != 0.0)])
print("signal_frequency_array::\n", repr(signal_frequency_array))
print("len(signal_frequency_array):", len(signal_frequency_array))
assert all(signal_frequency_array[:-3] == fs0)
nsamples0 = ef.samples_in_file(0) # samples per channel
print('nsigs=%s, fs0=%s, nsamples0=%s\n' % (nsigs, fs0, nsamples0))
num_samples_per_signal = ef.get_samples_per_signal() # np array
print("num_samples_per_signal::\n", repr(num_samples_per_signal), '\n')
# assert all(num_samples_per_signal == nsamples0)
file_duration_sec = ef.file_duration_seconds
#print("file_duration_sec", repr(file_duration_sec))
# Note that all annotations except the top row must also specify a duration.
# long long onset; /* onset time of the event, expressed in units of 100
# nanoSeconds and relative to the starttime in the header */
# char duration[16]; /* duration time, this is a null-terminated ASCII text-string */
# char annotation[EDFLIB_MAX_ANNOTATION_LEN + 1]; /* description of the
# event in UTF-8, this is a null term string of max length 512*/
# start("x.y"), end, char[20]
# annotations = ef.read_annotations_as_array() # get numpy array of
# annotations
annotations_b = ef.read_annotations_b_100ns_units()
# print("annotations_b::\n")
# pprint.pprint(annotations_b) # get list of annotations
signal_text_labels = ef.get_signal_text_labels()
print("signal_text_labels::\n")
pprint.pprint(signal_text_labels)
print("normalized text labels::\n")
signal_text_labels_lpch_normalized = [
normalize_lpch_signal_label(label) for label in signal_text_labels]
pprint.pprint(signal_text_labels_lpch_normalized)
# ef.recording_additional
# print()
signal_digital_mins = np.array(
[ef.digital_min(ch) for ch in range(nsigs)])
signal_digital_total_min = min(signal_digital_mins)
print("digital mins:", repr(signal_digital_mins))
print("digital total min:", repr(signal_digital_total_min))
signal_digital_maxs = np.array(
[ef.digital_max(ch) for ch in range(nsigs)])
signal_digital_total_max = max(signal_digital_maxs)
print("digital maxs:", repr(signal_digital_maxs))
#print("digital total max:", repr(signal_digital_total_max))
signal_physical_dims = [
ef.physical_dimension(ch) for ch in range(nsigs)]
# print('signal_physical_dims::\n')
# pprint.pprint(signal_physical_dims)
#print()
signal_physical_maxs = np.array(
[ef.physical_max(ch) for ch in range(nsigs)])
#print('signal_physical_maxs::\n', repr(signal_physical_maxs))
signal_physical_mins = np.array(
[ef.physical_min(ch) for ch in range(nsigs)])
#print('signal_physical_mins::\n', repr(signal_physical_mins))
# this don't seem to be used much so I will put at end
signal_prefilters = [ef.prefilter(ch).strip() for ch in range(nsigs)]
#print('signal_prefilters::\n')
# pprint.pprint(signal_prefilters)
#print()
signal_transducers = [ef.transducer(ch).strip() for ch in range(nsigs)]
#print('signal_transducers::\n')
#pprint.pprint(signal_transducers)
with eeghdf.EEGHDFWriter(outfn, 'w') as eegf:
eegf.write_patient_info(patient_name=header['patient_name'],
patientcode=header['patientcode'],
gender=header['gender'],
birthdate_isostring=header['birthdate_date'],
# gestational_age_at_birth_days
# born_premature
patient_additional=header['patient_additional'])
signal_text_labels_lpch_normalized = [
normalize_lpch_signal_label(label) for label in signal_text_labels]
rec = eegf.create_record_block(record_duration_seconds=header['file_duration_seconds'],
start_isodatetime=str(header['start_datetime']),
end_isodatetime=str(header['end_datetime']),
number_channels=header['signals_in_file'],
num_samples_per_channel=nsamples0,
sample_frequency=fs0,
signal_labels=signal_text_labels_lpch_normalized,
signal_physical_mins=signal_physical_mins,
signal_physical_maxs=signal_physical_maxs,
signal_digital_mins=signal_digital_mins,
signal_digital_maxs=signal_digital_maxs,
physical_dimensions=signal_physical_dims,
patient_age_days=age.total_seconds() / 86400.0,
signal_prefilters=signal_prefilters,
signal_transducers=signal_transducers,
technician=header['technician'])
        eegf.write_annotations_b(annotations_b)  # maybe this should be called record_annotations
edfblock_itr = edf_block_iter_generator(
ef,
nsamples0,
100 * ef.samples_in_datarecord(0)*header['signals_in_file'], # samples_per_chunk roughly 100 datarecords at a time
dtype='int32')
signals = eegf.stream_dig_signal_to_record_block(rec, edfblock_itr)
return True, validator # we succeeded
def test_edf2hdf_info():
# on chris's macbook
EDF_DIR = r'/Users/clee/code/eegml/nk_database_proj/private/lpch_edfs'
fn = os.path.join(EDF_DIR, 'XA2731AX_1-1+.edf')
    edf2hdf2(fn)
if __name__ == '__main__':
import sys
if len(sys.argv) == 2:
file_name = sys.argv[1]
edf2hdf2(file_name)
|
bsd-3-clause
|
mikewiebe-ansible/ansible
|
lib/ansible/modules/cloud/vultr/vultr_dns_domain_info.py
|
14
|
2916
|
#!/usr/bin/python
#
# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vultr_dns_domain_info
short_description: Gather information about the Vultr DNS domains available.
description:
- Gather information about DNS domains available in Vultr.
version_added: "2.9"
author: "Yanis Guenane (@Spredzy)"
extends_documentation_fragment: vultr
'''
EXAMPLES = r'''
- name: Gather Vultr DNS domains information
local_action:
    module: vultr_dns_domain_info
register: result
- name: Print the gathered information
debug:
var: result.vultr_dns_domain_info
'''
RETURN = r'''
---
vultr_api:
description: Response from Vultr API with a few additions/modification
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: str
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_retry_max_delay:
description: Exponential backoff delay in seconds between retries up to this max delay value.
returned: success
type: int
sample: 12
version_added: '2.9'
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: str
sample: "https://api.vultr.com"
vultr_dns_domain_info:
description: Response from Vultr API
returned: success
type: complex
sample:
"vultr_dns_domain_info": [
{
"date_created": "2018-07-19 07:14:21",
"domain": "ansibletest.com"
}
]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
class AnsibleVultrDnsDomainInfo(Vultr):
def __init__(self, module):
super(AnsibleVultrDnsDomainInfo, self).__init__(module, "vultr_dns_domain_info")
self.returns = {
"date_created": dict(),
"domain": dict(),
}
def get_domains(self):
return self.api_query(path="/v1/dns/list")
def main():
argument_spec = vultr_argument_spec()
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
domain_info = AnsibleVultrDnsDomainInfo(module)
result = domain_info.get_result(domain_info.get_domains())
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
OpenCorpora/opencorpora
|
scripts/validators/year_valid.py
|
2
|
3240
|
# -*- coding: utf-8 -*-
import ConfigParser
import MySQLdb
import sys
import re
path = sys.argv[1]
cp = ConfigParser.ConfigParser()
cp.read(path)
hostname = cp.get ('mysql', 'host')
dbname = cp.get ('mysql', 'dbname')
username = cp.get ('mysql', 'user')
password = cp.get ('mysql', 'passwd')
db = MySQLdb.connect( host = hostname, user = username, passwd = password, db = dbname, use_unicode=True)
c=db.cursor()
c.execute("SET NAMES utf8")
c.execute("SELECT * FROM book_tags")
data = c.fetchall()
n = len(c.description)
string = ""
c.execute("DELETE FROM tag_errors WHERE error_type IN (1, 2)")
for element in data:
    tags = []
    c.execute("SELECT * from book_tags WHERE book_id=%d" % (element[0]))
    f = c.fetchall()
    for el in f:
        tags.append(el[1])
    # print(tags)
    m = 0
    for l in tags:
pattern = 'Год:\d+'
match = re.search(pattern, l)
if match is None:
pass
else:
m = 1
if m == 0:
#print element[0]
c.execute("DELETE FROM tag_errors WHERE error_type=1 AND book_id=%d" % (element[0]))
sql = """INSERT INTO tag_errors(book_id, tag_name, error_type) VALUES (%d, '%s', %d)""" % (element[0], 'no year_tag', 1)
c.execute(sql)
for element in data:
i = 1
while i< n :
pattern = 'Год:\d+'
pattern1 = 'Дата:.*'
match = re.search(pattern, element[i])
match1 = re.search(pattern1, element[i])
if match is None:
pass
if match1 is None:
pass
if match is not None:
year = element[i].split(":")[1]
try:
if int(year) < 1800 or int(year)>2015:
sql = """INSERT INTO tag_errors(book_id, tag_name, error_type) VALUES(%d, '%s', %d)""" % (element[0], element[i], 1)
c.execute(sql)
else:
pass
except:
sql = """INSERT INTO tag_errors(book_id, tag_name, error_type) VALUES(%d, '%s', %d)""" % (element[0], element[i], 1)
c.execute(sql)
if match1 is not None:
date = element[i].split(":")[1]
try:
pat = '(\d{2})'
day = date.split("/")[0]
month = date.split("/")[1]
mat = re.search(pat, day)
mat1 = re.search(pat, month)
sql = """INSERT INTO tag_errors(book_id, tag_name, error_type) VALUES (%d, '%s', %d)""" % (element[0], element[i], 2)
if mat is None or mat1 is None:
c.execute(sql)
if int(month)>12 or int(day)>31:
c.execute(sql)
if int(month) in [ 4, 6, 9, 11] and int(day) == 31:
c.execute(sql)
if int(month) == 2 and int(day) > 28:
c.execute(sql)
else:
pass
except:
pass
i = i + 1
db.commit()
c.execute("SELECT * from tag_errors")
d = c.fetchall()
#print d
#print c.description
db.close()
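# Example tag formats this validator checks (illustrative values):
#   'Год:1999'   - accepted; 'Год:1750' or a non-numeric year is logged as error_type 1
#   'Дата:31/04' - flagged as error_type 2 (April has 30 days), as is 'Дата:30/02'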
|
gpl-2.0
|
tragiclifestories/django
|
django/contrib/contenttypes/migrations/0001_initial.py
|
585
|
1227
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.contenttypes.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='ContentType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('app_label', models.CharField(max_length=100)),
('model', models.CharField(max_length=100, verbose_name='python model class name')),
],
options={
'ordering': ('name',),
'db_table': 'django_content_type',
'verbose_name': 'content type',
'verbose_name_plural': 'content types',
},
bases=(models.Model,),
managers=[
('objects', django.contrib.contenttypes.models.ContentTypeManager()),
],
),
migrations.AlterUniqueTogether(
name='contenttype',
unique_together=set([('app_label', 'model')]),
),
]
|
bsd-3-clause
|
tsdmgz/ansible
|
lib/ansible/module_utils/facts/network/freebsd.py
|
232
|
1190
|
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.facts.network.base import NetworkCollector
from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork
class FreeBSDNetwork(GenericBsdIfconfigNetwork):
"""
This is the FreeBSD Network Class.
It uses the GenericBsdIfconfigNetwork unchanged.
"""
platform = 'FreeBSD'
class FreeBSDNetworkCollector(NetworkCollector):
_fact_class = FreeBSDNetwork
_platform = 'FreeBSD'
|
gpl-3.0
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/azure_reachability_report_py3.py
|
1
|
1995
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AzureReachabilityReport(Model):
"""Azure reachability report details.
All required parameters must be populated in order to send to Azure.
:param aggregation_level: Required. The aggregation level of Azure
reachability report. Can be Country, State or City.
:type aggregation_level: str
:param provider_location: Required.
:type provider_location:
~azure.mgmt.network.v2017_11_01.models.AzureReachabilityReportLocation
:param reachability_report: Required. List of Azure reachability report
items.
:type reachability_report:
list[~azure.mgmt.network.v2017_11_01.models.AzureReachabilityReportItem]
"""
_validation = {
'aggregation_level': {'required': True},
'provider_location': {'required': True},
'reachability_report': {'required': True},
}
_attribute_map = {
'aggregation_level': {'key': 'aggregationLevel', 'type': 'str'},
'provider_location': {'key': 'providerLocation', 'type': 'AzureReachabilityReportLocation'},
'reachability_report': {'key': 'reachabilityReport', 'type': '[AzureReachabilityReportItem]'},
}
def __init__(self, *, aggregation_level: str, provider_location, reachability_report, **kwargs) -> None:
super(AzureReachabilityReport, self).__init__(**kwargs)
self.aggregation_level = aggregation_level
self.provider_location = provider_location
self.reachability_report = reachability_report
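# Construction sketch (all three parameters are required and keyword-only;
# location and report_items below are placeholder variables):
#
#     report = AzureReachabilityReport(
#         aggregation_level='Country',
#         provider_location=location,        # an AzureReachabilityReportLocation
#         reachability_report=report_items)  # list of AzureReachabilityReportItem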
|
mit
|
jholloman/node-gyp
|
gyp/test/generator-output/gyptest-rules.py
|
74
|
1786
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies --generator-output= behavior when using rules.
"""
import TestGyp
# Ninja and Android don't support --generator-output.
test = TestGyp.TestGyp(formats=['!ninja', '!android'])
test.writable(test.workpath('rules'), False)
test.run_gyp('rules.gyp',
'--generator-output=' + test.workpath('gypfiles'),
chdir='rules')
test.writable(test.workpath('rules'), True)
test.relocate('rules', 'relocate/rules')
test.relocate('gypfiles', 'relocate/gypfiles')
test.writable(test.workpath('relocate/rules'), False)
test.writable(test.workpath('relocate/rules/build'), True)
test.writable(test.workpath('relocate/rules/subdir1/build'), True)
test.writable(test.workpath('relocate/rules/subdir2/build'), True)
test.writable(test.workpath('relocate/rules/subdir2/rules-out'), True)
test.build('rules.gyp', test.ALL, chdir='relocate/gypfiles')
expect = """\
Hello from program.c
Hello from function1.in1
Hello from function2.in1
Hello from define3.in0
Hello from define4.in0
"""
if test.format == 'xcode':
chdir = 'relocate/rules/subdir1'
else:
chdir = 'relocate/gypfiles'
test.run_built_executable('program', chdir=chdir, stdout=expect)
test.must_match('relocate/rules/subdir2/rules-out/file1.out',
"Hello from file1.in0\n")
test.must_match('relocate/rules/subdir2/rules-out/file2.out',
"Hello from file2.in0\n")
test.must_match('relocate/rules/subdir2/rules-out/file3.out',
"Hello from file3.in1\n")
test.must_match('relocate/rules/subdir2/rules-out/file4.out',
"Hello from file4.in1\n")
test.pass_test()
|
mit
|
mtanski/samba
|
buildtools/wafadmin/Constants.py
|
16
|
1308
|
#!/usr/bin/env python
# encoding: utf-8
# Yinon dot me gmail 2008
"""
these constants are somewhat public, try not to mess with them
maintainer: the version number is updated from the top-level wscript file
"""
# do not touch these three lines, they are updated automatically
HEXVERSION=0x105019
WAFVERSION="1.5.19"
WAFREVISION = "9709M"
ABI = 7
# permissions
O644 = 420
O755 = 493
MAXJOBS = 99999999
CACHE_DIR = 'c4che'
CACHE_SUFFIX = '.cache.py'
DBFILE = '.wafpickle-%d' % ABI
WSCRIPT_FILE = 'wscript'
WSCRIPT_BUILD_FILE = 'wscript_build'
WAF_CONFIG_LOG = 'config.log'
WAF_CONFIG_H = 'config.h'
SIG_NIL = 'iluvcuteoverload'
VARIANT = '_VARIANT_'
DEFAULT = 'default'
SRCDIR = 'srcdir'
BLDDIR = 'blddir'
APPNAME = 'APPNAME'
VERSION = 'VERSION'
DEFINES = 'defines'
UNDEFINED = ()
BREAK = "break"
CONTINUE = "continue"
# task scheduler options
JOBCONTROL = "JOBCONTROL"
MAXPARALLEL = "MAXPARALLEL"
NORMAL = "NORMAL"
# task state
NOT_RUN = 0
MISSING = 1
CRASHED = 2
EXCEPTION = 3
SKIPPED = 8
SUCCESS = 9
ASK_LATER = -1
SKIP_ME = -2
RUN_ME = -3
LOG_FORMAT = "%(asctime)s %(c1)s%(zone)s%(c2)s %(message)s"
HOUR_FORMAT = "%H:%M:%S"
TEST_OK = True
CFG_FILES = 'cfg_files'
# positive '->' install
# negative '<-' uninstall
INSTALL = 1337
UNINSTALL = -1337
|
gpl-3.0
|
syci/OCB
|
addons/bus/models/bus_presence.py
|
12
|
4499
|
# -*- coding: utf-8 -*-
import datetime
import random
import time
from openerp import api, fields, models
from openerp import tools
from openerp.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.addons.bus.models.bus import TIMEOUT
DISCONNECTION_TIMER = TIMEOUT + 5
AWAY_TIMER = 600 # 10 minutes
class BusPresence(models.Model):
""" User Presence
Its status is 'online', 'away' or 'offline'. This model should be a one2one, but is not
    attached to res_users to avoid database concurrency errors. Since the 'update' method is executed
    at each poll, if the user has multiple open tabs, concurrency errors can happen, but they are 'muted-logged'.
"""
_name = 'bus.presence'
_description = 'User Presence'
_sql_constraints = [('bus_user_presence_unique', 'unique(user_id)', 'A user can only have one IM status.')]
user_id = fields.Many2one('res.users', 'Users', required=True, index=True, ondelete='cascade')
last_poll = fields.Datetime('Last Poll', default=lambda self: fields.Datetime.now())
last_presence = fields.Datetime('Last Presence', default=lambda self: fields.Datetime.now())
status = fields.Selection([('online', 'Online'), ('away', 'Away'), ('offline', 'Offline')], 'IM Status', default='offline')
@api.model
def update(self, user_presence=True):
""" Register the given presence of the current user, and trigger a im_status change if necessary.
The status will not be written or sent if not necessary.
:param user_presence : True, if the user (self._uid) is still detected using its browser.
:type user_presence : boolean
"""
presence = self.search([('user_id', '=', self._uid)], limit=1)
# set the default values
send_notification = True
values = {
'last_poll': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'status': presence and presence.status or 'offline'
}
        # update the user or create a new one
if not presence: # create a new presence for the user
values['status'] = 'online'
values['user_id'] = self._uid
self.create(values)
else: # write the user presence if necessary
if user_presence:
values['last_presence'] = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
values['status'] = 'online'
else:
threshold = datetime.datetime.now() - datetime.timedelta(seconds=AWAY_TIMER)
if datetime.datetime.strptime(presence.last_presence, DEFAULT_SERVER_DATETIME_FORMAT) < threshold:
values['status'] = 'away'
send_notification = presence.status != values['status']
            # write only if the last_poll is past TIMEOUT, or if the status has changed
delta = datetime.datetime.utcnow() - datetime.datetime.strptime(presence.last_poll, DEFAULT_SERVER_DATETIME_FORMAT)
if delta > datetime.timedelta(seconds=TIMEOUT) or send_notification:
# Hide transaction serialization errors, which can be ignored, the presence update is not essential
with tools.mute_logger('openerp.sql_db'):
presence.write(values)
# avoid TransactionRollbackError
self.env.cr.commit() # TODO : check if still necessary
# notify if the status has changed
if send_notification: # TODO : add user_id to the channel tuple to allow using user_watch in controller presence
self.env['bus.bus'].sendone((self._cr.dbname, 'bus.presence'), {'id': self._uid, 'im_status': values['status']})
        # gc: disconnect users whose last_poll is too old (1-in-100 chance per call).
if random.random() < 0.01:
self.check_users_disconnection()
return True
@api.model
def check_users_disconnection(self):
""" Disconnect the users having a too old last_poll """
limit_date = (datetime.datetime.utcnow() - datetime.timedelta(0, DISCONNECTION_TIMER)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
presences = self.search([('last_poll', '<', limit_date), ('status', '!=', 'offline')])
presences.write({'status': 'offline'})
notifications = []
for presence in presences:
notifications.append([(self._cr.dbname, 'bus.presence'), {'id': presence.user_id.id, 'im_status': presence.status}])
self.env['bus.bus'].sendmany(notifications)
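# --- Minimal sketch (not part of the original module): the status-transition
# --- rules above, framework-free, for illustration; the name is hypothetical.
def _sketch_compute_status(last_presence, last_poll, now, user_active):
    """Mirror the rules above: offline after DISCONNECTION_TIMER without a
    poll, away after AWAY_TIMER without user activity, otherwise online."""
    if (now - last_poll).total_seconds() > DISCONNECTION_TIMER:
        return 'offline'  # gc path: the browser stopped polling
    if user_active:
        return 'online'   # a poll reported real user activity
    if (now - last_presence).total_seconds() > AWAY_TIMER:
        return 'away'     # still polling, but idle past the threshold
    return 'online'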
|
agpl-3.0
|
allenp/odoo
|
addons/l10n_be_intrastat/l10n_be_intrastat.py
|
18
|
5678
|
# -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import fields, models
class account_invoice_line(models.Model):
_inherit = "account.invoice.line"
intrastat_transaction_id = fields.Many2one('l10n_be_intrastat.transaction', 'Intrastat Transaction Type',
help="Intrastat nature of transaction")
class account_invoice(models.Model):
_inherit = "account.invoice"
incoterm_id = fields.Many2one('stock.incoterms', 'Incoterm',
help="International Commercial Terms are a series of predefined commercial terms "
"used in international transactions.")
transport_mode_id = fields.Many2one('l10n_be_intrastat.transport_mode', 'Intrastat Transport Mode')
intrastat_country_id = fields.Many2one('res.country', 'Intrastat Country',
help='Intrastat country, delivery for sales, origin for purchases',
domain=[('intrastat', '=', True)])
class intrastat_region(models.Model):
_name = 'l10n_be_intrastat.region'
code = fields.Char('Code', required=True)
country_id = fields.Many2one('res.country', 'Country')
name = fields.Char('Name', translate=True)
description = fields.Char('Description')
_sql_constraints = [
('l10n_be_intrastat_regioncodeunique', 'UNIQUE (code)', 'Code must be unique.'),
]
class intrastat_transaction(models.Model):
_name = 'l10n_be_intrastat.transaction'
_rec_name = 'code'
code = fields.Char('Code', required=True, readonly=True)
description = fields.Text('Description', readonly=True)
_sql_constraints = [
('l10n_be_intrastat_trcodeunique', 'UNIQUE (code)', 'Code must be unique.'),
]
class intrastat_transport_mode(models.Model):
_name = 'l10n_be_intrastat.transport_mode'
code = fields.Char('Code', required=True, readonly=True)
name = fields.Char('Description', readonly=True)
_sql_constraints = [
('l10n_be_intrastat_trmodecodeunique', 'UNIQUE (code)', 'Code must be unique.'),
]
class product_category(models.Model):
_name = "product.category"
_inherit = "product.category"
intrastat_id = fields.Many2one('report.intrastat.code', 'Intrastat Code')
def get_intrastat_recursively(self):
""" Recursively search in categories to find an intrastat code id
"""
if self.intrastat_id:
res = self.intrastat_id.id
elif self.parent_id:
res = self.parent_id.get_intrastat_recursively()
else:
res = None
return res
class product_product(models.Model):
_name = "product.product"
_inherit = "product.product"
def get_intrastat_recursively(self):
""" Recursively search in categories to find an intrastat code id
"""
if self.intrastat_id:
res = self.intrastat_id.id
elif self.categ_id:
res = self.categ_id.get_intrastat_recursively()
else:
res = None
return res
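    # Illustrative example (hypothetical data): a product without its own
    # intrastat_id falls back to its category chain, e.g. product -> category
    # "Laptops" (no code) -> parent "Hardware" (code set), so the method
    # returns the "Hardware" intrastat code id, and None when no ancestor
    # carries a code.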
class purchase_order(models.Model):
_inherit = "purchase.order"
def _prepare_invoice(self):
"""
copy incoterm from purchase order to invoice
"""
invoice = super(purchase_order, self)._prepare_invoice()
if self.incoterm_id:
invoice['incoterm_id'] = self.incoterm_id.id
        # Try to determine the products' origin
        if self.partner_id.country_id:
            # It comes from the vendor
            invoice['intrastat_country_id'] = self.partner_id.country_id.id
return invoice
class report_intrastat_code(models.Model):
_inherit = "report.intrastat.code"
description = fields.Text('Description', translate=True)
class res_company(models.Model):
_inherit = "res.company"
region_id = fields.Many2one('l10n_be_intrastat.region', 'Intrastat region')
transport_mode_id = fields.Many2one('l10n_be_intrastat.transport_mode',
'Default transport mode')
incoterm_id = fields.Many2one('stock.incoterms', 'Default incoterm for Intrastat',
help="International Commercial Terms are a series of "
"predefined commercial terms used in international "
"transactions.")
class sale_order(models.Model):
_inherit = "sale.order"
def _prepare_invoice(self):
"""
copy incoterm from sale order to invoice
"""
invoice = super(sale_order, self)._prepare_invoice()
if self.incoterm:
invoice['incoterm_id'] = self.incoterm.id
# Guess products destination
if self.partner_shipping_id.country_id:
invoice['intrastat_country_id'] = self.partner_shipping_id.country_id.id
elif self.partner_id.country_id:
invoice['intrastat_country_id'] = self.partner_id.country_id.id
elif self.partner_invoice_id.country_id:
invoice['intrastat_country_id'] = self.partner_invoice_id.country_id.id
return invoice
class stock_warehouse(models.Model):
_inherit = "stock.warehouse"
region_id = fields.Many2one('l10n_be_intrastat.region', 'Intrastat region')
def get_regionid_from_locationid(self, location):
location_ids = location.search([('parent_left', '<=', location.parent_left), ('parent_right', '>=', location.parent_right)])
warehouses = self.search([('lot_stock_id', 'in', location_ids.ids), ('region_id', '!=', False)])
if warehouses and warehouses[0]:
return warehouses[0].region_id.id
return None
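    # Note: stock locations form a tree stored with the nested-set
    # (parent_left/parent_right) model, so the search above selects every
    # ancestor of |location| (an ancestor has parent_left <= the child's and
    # parent_right >= the child's); the method then returns the region of the
    # first warehouse whose stock location is among those ancestors.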
|
gpl-3.0
|
Kaushalop/apv
|
pdfview/scripts/pjpp.py
|
111
|
7409
|
#!/usr/bin/env pypy
import os, sys, logging, re
import argparse
import fnmatch
configurations = {'lite', 'pro'}
package_dirs = {
'lite': ('src/cx/hell/android/pdfview',),
'pro': ('src/cx/hell/android/pdfviewpro',)
}
file_replaces = {
'lite': (
'cx.hell.android.pdfview.',
'"cx.hell.android.pdfview"',
'package cx.hell.android.pdfview;',
'android:icon="@drawable/pdfviewer"',
),
'pro': (
'cx.hell.android.pdfviewpro.',
'"cx.hell.android.pdfviewpro"',
'package cx.hell.android.pdfviewpro;',
'android:icon="@drawable/apvpro_icon"',
),
}
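# The tuples above are index-aligned across configurations: switching to
# configuration X replaces file_replaces[other][i] with file_replaces[X][i]
# for every i, e.g. 'package cx.hell.android.pdfview;' becomes
# 'package cx.hell.android.pdfviewpro;' when switching to 'pro'.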
def make_comment(file_type, line):
"""Add comment to line and return modified line, but try not to add comments to already commented out lines."""
if file_type in ('java', 'c'):
return '// ' + line if not line.startswith('//') else line
elif file_type in ('html', 'xml'):
return '<!-- ' + line.strip() + ' -->\n' if not line.strip().startswith('<!--') else line
else:
raise Exception("unknown file type: %s" % file_type)
def remove_comment(file_type, line):
"""Remove comment from line, but only if line is commented, otherwise return unchanged line."""
if file_type in ('java', 'c'):
if line.startswith('// '): return line[3:]
else: return line
elif file_type in ('html', 'xml'):
if line.strip().startswith('<!-- ') and line.strip().endswith(' -->'): return line.strip()[5:-4] + '\n'
else: return line
else:
raise Exception("unknown file type: %s" % file_type)
def handle_comments(conf, file_type, lines, filename):
new_lines = []
re_cmd_starts = re.compile(r'(?:(//|<!--))\s+#ifdef\s+(?P<def>[a-zA-Z]+)')
re_cmd_ends = re.compile(r'(?:(//|<!--))\s+#endif')
required_defs = []
for i, line in enumerate(lines):
m = re_cmd_starts.search(line)
if m:
required_def = m.group('def')
logging.debug("line %s:%d %s matches as start of %s" % (filename, i+1, line.strip(), required_def))
required_defs.append(required_def)
new_lines.append(line)
continue
m = re_cmd_ends.search(line)
if m:
logging.debug("line %s:%d %s matches as endif" % (filename, i+1, line.strip()))
required_defs.pop()
new_lines.append(line)
continue
if len(required_defs) == 0:
new_lines.append(line)
elif len(required_defs) == 1 and required_defs[0] == conf:
new_line = remove_comment(file_type, line)
new_lines.append(new_line)
else:
new_line = make_comment(file_type, line)
new_lines.append(new_line)
assert len(new_lines) == len(lines)
return new_lines
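# Example of the single-level #ifdef handling (hypothetical Java input),
# processed with conf='pro':
#   // #ifdef pro
#   // Log.d("pro only");    ->    Log.d("pro only");
#   // #endif
# With conf='lite', the same body line would be commented out instead.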
def find_files(dirname, name):
matches = []
for root, dirnames, filenames in os.walk(dirname):
for filename in fnmatch.filter(filenames, name):
matches.append(os.path.join(root, filename))
return matches
def fix_package_dirs(conf):
for i, dirname in enumerate(package_dirs[conf]):
logging.debug("trying to restore %s" % dirname)
if os.path.exists(dirname):
if os.path.isdir(dirname):
logging.debug(" already exists")
continue
else:
logging.error(" %s already exists, but is not dir" % dirname)
continue
# find other name
found_dirname = None
for other_conf, other_dirnames in package_dirs.items():
other_dirname = other_dirnames[i]
if other_conf == conf: continue # skip this conf when looking for other conf
if os.path.isdir(other_dirname):
if found_dirname is None:
found_dirname = other_dirname
else:
# source dir already found :/
raise Exception("too many possible dirs for this package: %s, %s" % (found_dirname, other_dirname))
if found_dirname is None:
raise Exception("didn't find %s" % dirname)
# now rename found_dirname to dirname
os.rename(found_dirname, dirname)
logging.debug("renamed %s to %s" % (found_dirname, dirname))
def handle_comments_in_files(conf, file_type, filenames):
for filename in filenames:
lines = open(filename).readlines()
new_lines = handle_comments(conf, file_type, lines, filename)
if lines != new_lines:
logging.debug("file %s comments changed" % filename)
f = open(filename, 'w')
f.write(''.join(new_lines))
f.close()
del f
def replace_in_files(conf, filenames):
#logging.debug("about replace to %s in %s" % (conf, ', '.join(filenames)))
other_confs = [other_conf for other_conf in file_replaces.keys() if other_conf != conf]
#logging.debug("there are %d other confs to replace from: %s" % (len(other_confs), ', '.join(other_confs)))
for filename in filenames:
new_lines = []
lines = open(filename).readlines()
for line in lines:
new_line = line
for i, target_string in enumerate(file_replaces[conf]):
for other_conf in other_confs:
source_string = file_replaces[other_conf][i]
new_line = new_line.replace(source_string, target_string)
new_lines.append(new_line)
if new_lines != lines:
logging.debug("file %s changed, writing..." % filename)
f = open(filename, 'w')
f.write(''.join(new_lines))
f.close()
del f
else:
logging.debug("file %s didn't change, no need to rewrite" % filename)
def fix_java_files(conf):
filenames = find_files('src', name='*.java')
replace_in_files(conf, filenames)
handle_comments_in_files(conf, 'java', filenames)
def fix_xml_files(conf):
filenames = find_files('.', name='*.xml')
replace_in_files(conf, filenames)
handle_comments_in_files(conf, 'xml', filenames)
def fix_html_files(conf):
filenames = find_files('res', name='*.html')
replace_in_files(conf, filenames)
handle_comments_in_files(conf, 'html', filenames)
def fix_c_files(conf):
filenames = find_files('jni/pdfview2', name='*.c')
replace_in_files(conf, filenames)
handle_comments_in_files(conf, 'c', filenames)
filenames = find_files('jni/pdfview2', name='*.h')
replace_in_files(conf, filenames)
handle_comments_in_files(conf, 'c', filenames)
def fix_resources(conf):
pass
def main():
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')
parser = argparse.ArgumentParser(description='Switch project configurations')
parser.add_argument('--configuration', dest='configuration', default='lite')
args = parser.parse_args()
if not os.path.exists('AndroidManifest.xml'):
raise Exception('android manifest not found, please run this script from main project directory')
conf = args.configuration
if conf not in configurations:
raise Exception("invalid configuration: %s" % conf)
fix_package_dirs(conf)
fix_java_files(conf)
fix_xml_files(conf)
fix_html_files(conf)
fix_c_files(conf)
fix_resources(conf)
if __name__ == '__main__':
main()
|
gpl-3.0
|
playm2mboy/edx-platform
|
lms/djangoapps/instructor/features/data_download.py
|
59
|
3758
|
"""
Define steps for instructor dashboard - data download tab
acceptance tests.
"""
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, step
from nose.tools import assert_in, assert_regexp_matches # pylint: disable=no-name-in-module
from terrain.steps import reload_the_page
from django.utils import http
@step(u'I see a table of student profiles')
def find_student_profile_table(step): # pylint: disable=unused-argument
# Find the grading configuration display
world.wait_for_visible('#data-student-profiles-table')
# Wait for the data table to be populated
world.wait_for(lambda _: world.css_text('#data-student-profiles-table') not in [u'', u'Loading'])
if world.role == 'instructor':
expected_data = [
world.instructor.username,
world.instructor.email,
world.instructor.profile.name,
world.instructor.profile.gender,
world.instructor.profile.goals
]
elif world.role == 'staff':
expected_data = [
world.staff.username,
world.staff.email,
world.staff.profile.name,
world.staff.profile.gender,
world.staff.profile.goals
]
for datum in expected_data:
assert_in(datum, world.css_text('#data-student-profiles-table'))
@step(u"I do not see a button to 'List enrolled students' profile information'")
def no_student_profile_table(step): # pylint: disable=unused-argument
world.is_css_not_present('input[name="list-profiles"]')
@step(u"I see the grading configuration for the course")
def find_grading_config(step): # pylint: disable=unused-argument
# Find the grading configuration display
world.wait_for_visible('#data-grade-config-text')
# expected config is the default grading configuration from common/lib/xmodule/xmodule/course_module.py
expected_config = u"""-----------------------------------------------------------------------------
Course grader:
<class 'xmodule.graders.WeightedSubsectionsGrader'>
Graded sections:
subgrader=<class 'xmodule.graders.AssignmentFormatGrader'>, type=Homework, category=Homework, weight=0.15
subgrader=<class 'xmodule.graders.AssignmentFormatGrader'>, type=Lab, category=Lab, weight=0.15
subgrader=<class 'xmodule.graders.AssignmentFormatGrader'>, type=Midterm Exam, category=Midterm Exam, weight=0.3
subgrader=<class 'xmodule.graders.AssignmentFormatGrader'>, type=Final Exam, category=Final Exam, weight=0.4
-----------------------------------------------------------------------------
Listing grading context for course {}
graded sections:
[]
all descriptors:
length=0""".format(world.course_key)
assert_in(expected_config, world.css_text('#data-grade-config-text'))
def verify_report_is_generated(report_name_substring):
# Need to reload the page to see the reports table updated
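    # NOTE: 'step' below is the decorator imported from lettuce (this helper
    # has no step argument of its own); reload_the_page does not appear to
    # use its argument, so this works, if only by accident.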
reload_the_page(step)
world.wait_for_visible('#report-downloads-table')
# Find table and assert a .csv file is present
quoted_id = http.urlquote(world.course_key).replace('/', '_')
expected_file_regexp = quoted_id + '_' + report_name_substring + '_\d{4}-\d{2}-\d{2}-\d{4}\.csv'
assert_regexp_matches(
world.css_html('#report-downloads-table'), expected_file_regexp,
msg="Expected report filename was not found."
)
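# Example of a filename the regexp above matches (hypothetical course id):
#   MITx_6.002x_2014_grade_report_2014-05-16-1153.csv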
@step(u"I see a grade report csv file in the reports table")
def find_grade_report_csv_link(step): # pylint: disable=unused-argument
verify_report_is_generated('grade_report')
@step(u"I see a student profile csv file in the reports table")
def find_student_profile_report_csv_link(step): # pylint: disable=unused-argument
verify_report_is_generated('student_profile_info')
|
agpl-3.0
|
krocard/parameter-framework
|
test/functional-tests-legacy/PfwTestCase/Types/tUINT32_ARRAY.py
|
2
|
16142
|
#!/usr/bin/python2
# -*-coding:utf-8 -*
# Copyright (c) 2011-2015, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Array parameter type testcases : UINT32 Array
List of tested functions :
--------------------------
- [setParameter] function
- [getParameter] function
Initial Settings :
------------------
UINT32 Array = 32 bits unsigned int array :
    - Array size : 100
- values range : [0, 100]
Test cases :
------------
- Testing nominal case
- Testing minimum
- Testing minimum overflow
- Testing maximum
- Testing maximum overflow
- Testing array index out of bounds
- Testing value format error
"""
import os
import commands
from Util.PfwUnitTestLib import PfwTestCase
from Util import ACTLogging
log=ACTLogging.Logger()
from ctypes import c_uint32
class TestCases(PfwTestCase):
def setUp(self):
self.param_name = "/Test/Test/TEST_DIR/UINT32_ARRAY"
self.param_short_name = os.environ["PFW_RESULT"] + "/UINT32_ARRAY"
print '\r'
self.pfw.sendCmd("setTuningMode", "on")
print '\r'
self.array_size = 100
self.array_min = 0
self.array_max = 100
def tearDown(self):
self.pfw.sendCmd("setTuningMode", "off")
def test_Nominal_Case(self):
"""
Testing UINT32_ARRAY Nominal Case
---------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
        - Set every UINT32_ARRAY element to authorized values
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- UINT32_ARRAY array elements correctly recorded
- Blackboard and filesystem values checked
"""
log.D(self.test_Nominal_Case.__doc__)
for index in range (self.array_size):
indexed_array_value = index + self.array_min
if indexed_array_value>self.array_max:
indexed_array_value=self.array_max
            hex_indexed_array_value = hex(c_uint32(indexed_array_value).value)
#Check parameter value setting
indexed_array_value_path = "".join([self.param_name, "/", str(index)])
out, err = self.pfw.sendCmd("setParameter", str(indexed_array_value_path), str(indexed_array_value))
assert err == None, log.E("when setting parameter %s[%s]: %s"
% (self.param_name, str(index), err))
assert out == "Done", log.F("when setting parameter %s[%s]: %s"
% (self.param_name, str(index), out))
#Check parameter value on blackboard
out, err = self.pfw.sendCmd("getParameter", str(indexed_array_value_path), "")
assert err == None, log.E("when setting parameter %s[%s] : %s"
% (self.param_name, str(index), err))
assert out == str(indexed_array_value), log.F("BLACKBOARD : Incorrect value for %s[%s], expected: %s, found: %s"
% (self.param_name, str(index), str(indexed_array_value), out))
#Check parameter value on filesystem
indexed_files_system_array_value = open(self.param_short_name).read().splitlines()[index]
            assert indexed_files_system_array_value == hex_indexed_array_value, log.F("FILESYSTEM : %s[%s] update error"
% (self.param_name, str(index)))
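        # For example (hypothetical run): setting element 42 stores "42" on
        # the blackboard, while the filesystem file holds the hex form "0x2a".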
def test_Min_Value(self):
"""
Testing UINT32_ARRAY minimum value
----------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
        - Set every UINT32_ARRAY element to the minimum value : 0
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- UINT32_ARRAY array elements correctly recorded
- Blackboard and filesystem values checked
"""
log.D(self.test_Min_Value.__doc__)
index = 0
indexed_array_value = self.array_min
indexed_array_value_path = "".join([self.param_name, "/", str(index)])
        hex_indexed_array_value = hex(c_uint32(indexed_array_value).value)
#Check parameter value setting
out, err = self.pfw.sendCmd("setParameter", str(indexed_array_value_path), str(indexed_array_value))
assert err == None, log.E("when setting parameter %s[%s]: %s"
% (self.param_name, str(index), err))
assert out == "Done", log.E("when setting parameter %s[%s]: %s"
% (self.param_name, str(index), out))
#Check parameter value on blackboard
out, err = self.pfw.sendCmd("getParameter", str(indexed_array_value_path), "")
assert err == None, log.E("when setting parameter %s[%s] : %s"
% (self.param_name, str(index), err))
assert out == str(indexed_array_value), log.F("BLACKBOARD : Incorrect value for %s[%s], expected: %s, found: %s"
% (self.param_name, str(index), str(indexed_array_value), out))
#Check parameter value on filesystem
indexed_files_system_array_value = open(self.param_short_name).read().splitlines()[index]
        assert indexed_files_system_array_value == hex_indexed_array_value, log.F("FILESYSTEM : %s[%s] update error"
% (self.param_name, str(index)))
def test_Min_Value_Overflow(self):
"""
Testing UINT32_ARRAY parameter values out of negative range
-----------------------------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
        - Set every UINT32_ARRAY element to -1
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- UINT32_ARRAY array elements not recorded
- Error correctly detected
"""
log.D(self.test_Min_Value_Overflow.__doc__)
index = 0
indexed_array_value = self.array_min
indexed_array_value_path = "".join([self.param_name, "/", str(index)])
#Check initial parameter value setting
out, err = self.pfw.sendCmd("setParameter", str(indexed_array_value_path), str(indexed_array_value))
assert err == None, log.E("when setting parameter %s[%s]: %s"
% (self.param_name, str(index), err))
assert out == "Done", log.F("when setting parameter %s[%s]: %s"
% (self.param_name, str(index), out))
param_check = open(self.param_short_name).read().splitlines()[index]
#Check final parameter value setting
indexed_array_value = indexed_array_value - 1
out, err = self.pfw.sendCmd("setParameter", str(indexed_array_value_path), str(indexed_array_value), expectSuccess=False)
assert err == None, log.E("when setting parameter %s[%s]: %s"
% (self.param_name, str(index), err))
assert out != "Done", log.F("Error not detected when setting parameter %s[%s] out of bounds"
% (self.param_name, str(index)))
#Check parameter value on filesystem
indexed_files_system_array_value = open(self.param_short_name).read().splitlines()[index]
        assert indexed_files_system_array_value == param_check, log.F("FILESYSTEM : %s[%s] forbidden update"
% (self.param_name, str(index)))
def test_Max_Value(self):
"""
Testing UINT32_ARRAY maximum value
----------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
        - Set every UINT32_ARRAY element to the maximum value : 100
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- UINT32_ARRAY array elements correctly recorded
- Blackboard and filesystem values checked
"""
log.D(self.test_Max_Value.__doc__)
index = 0
indexed_array_value = self.array_max
indexed_array_value_path = "".join([self.param_name, "/", str(index)])
        hex_indexed_array_value = hex(c_uint32(indexed_array_value).value)
#Check parameter value setting
out, err = self.pfw.sendCmd("setParameter", str(indexed_array_value_path), str(indexed_array_value))
assert err == None, log.E("when setting parameter %s[%s]: %s"
% (self.param_name, str(index), err))
assert out == "Done", log.F("when setting parameter %s[%s]: %s"
% (self.param_name, str(index), out))
#Check parameter value on blackboard
out, err = self.pfw.sendCmd("getParameter", str(indexed_array_value_path), "")
assert err == None, log.E("when setting parameter %s[%s] : %s"
% (self.param_name, str(index), err))
assert out == str(indexed_array_value), log.F("BLACKBOARD : Incorrect value for %s[%s], expected: %s, found: %s"
% (self.param_name, str(index), str(indexed_array_value), out))
#Check parameter value on filesystem
indexed_files_system_array_value = open(self.param_short_name).read().splitlines()[index]
        assert indexed_files_system_array_value == hex_indexed_array_value, log.F("FILESYSTEM : %s[%s] update error"
% (self.param_name, str(index)))
def test_Max_Value_Overflow(self):
"""
Testing UINT32_ARRAY parameter values out of positive range
-----------------------------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
        - Set every UINT32_ARRAY element to 101
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- UINT32_ARRAY array elements not recorded
- Error correctly detected
"""
log.D(self.test_Max_Value_Overflow.__doc__)
index = 0
indexed_array_value = self.array_max
indexed_array_value_path = "".join([self.param_name, "/", str(index)])
#Check initial parameter value setting
out, err = self.pfw.sendCmd("setParameter", str(indexed_array_value_path), str(indexed_array_value))
assert err == None, log.E("when setting parameter %s[%s]: %s"
% (self.param_name, str(index), err))
assert out == "Done", log.F("when setting parameter %s[%s]: %s"
% (self.param_name, str(index), out))
param_check = open(self.param_short_name).read().splitlines()[index]
#Check final parameter value setting
indexed_array_value = indexed_array_value + 1
out, err = self.pfw.sendCmd("setParameter", str(indexed_array_value_path), str(indexed_array_value), expectSuccess=False)
assert err == None, log.E("when setting parameter %s[%s]: %s"
% (self.param_name, str(index), err))
assert out != "Done", log.F("Error not detected when setting parameter %s[%s] out of bounds"
% (self.param_name, str(index)))
#Check parameter value on filesystem
indexed_files_system_array_value = open(self.param_short_name).read().splitlines()[index]
        assert indexed_files_system_array_value == param_check, log.F("FILESYSTEM : %s[%s] forbidden update"
% (self.param_name, str(index)))
def test_Array_Index_Overflow(self):
"""
Testing Array index out of bounds
---------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- Set an out of bounds array indexed element
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- UINT32_ARRAY array elements not recorded
- Error correctly detected
"""
log.D(self.test_Array_Index_Overflow.__doc__)
index_values = (self.array_size-1, self.array_size+1, -1)
for index in index_values:
print index
indexed_array_value = self.array_max
indexed_array_value_path = "".join([self.param_name, "/", str(index)])
#Check parameter value setting
out, err = self.pfw.sendCmd("setParameter", str(indexed_array_value_path), str(indexed_array_value), expectSuccess=None)
if index in [0, self.array_size-1]:
assert err == None, log.E("when setting parameter %s[%s]: %s"
% (self.param_name, str(index), err))
assert out == "Done", log.F("when setting parameter %s[%s]: %s"
% (self.param_name, str(index), out))
else:
assert err == None, log.E("when setting parameter %s[%s]: %s"
% (self.param_name, str(index), err))
assert out != "Done", log.F("Error not detected when setting array %s index out of bounds"
% (self.param_name))
|
bsd-3-clause
|
pyfisch/servo
|
tests/jquery/run_jquery.py
|
13
|
9583
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import os
import re
import subprocess
import sys
import BaseHTTPServer
import SimpleHTTPServer
import SocketServer
import threading
import urlparse
# List of jQuery modules that will be tested.
# TODO(gw): Disabled most of them as something has been
# introduced very recently that causes the resource task
# to panic - and hard fail doesn't exit the servo
# process when this happens.
# See https://github.com/servo/servo/issues/6210 and
# https://github.com/servo/servo/issues/6211
JQUERY_MODULES = [
# "ajax", # panics
# "attributes",
# "callbacks",
# "core", # mozjs crash
# "css",
# "data",
# "deferred",
# "dimensions",
# "effects",
# "event", # panics
# "manipulation", # mozjs crash
# "offset",
# "queue",
"selector",
# "serialize",
# "support",
# "traversing",
# "wrap"
]
# Port to run the HTTP server on for jQuery.
TEST_SERVER_PORT = 8192
# A regex for matching console.log output lines from the test runner.
REGEX_PATTERN = "^\[jQuery test\] \[([0-9]+)/([0-9]+)/([0-9]+)] (.*)"
# The result of a single test group.
class TestResult:
def __init__(self, success, fail, total, text):
self.success = int(success)
self.fail = int(fail)
self.total = int(total)
self.text = text
def __key(self):
return (self.success, self.fail, self.total, self.text)
def __eq__(self, other):
return self.__key() == other.__key()
def __ne__(self, other):
return self.__key() != other.__key()
def __hash__(self):
return hash(self.__key())
def __repr__(self):
return "ok={0} fail={1} total={2}".format(self.success, self.fail, self.total)
# Parse a line, producing a TestResult.
# Throws if unable to parse.
def parse_line_to_result(line):
match = re.match(REGEX_PATTERN, line)
success, fail, total, name = match.groups()
return name, TestResult(success, fail, total, line)
# Parse an entire buffer of lines to a dictionary
# of test results, keyed by the test name.
def parse_string_to_results(buffer):
test_results = {}
lines = buffer.splitlines()
for line in lines:
name, test_result = parse_line_to_result(line)
test_results[name] = test_result
return test_results
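# Example (hypothetical runner output line):
#   [jQuery test] [12/0/12] selector: basic
# parses to name "selector: basic" and TestResult ok=12 fail=0 total=12.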
# Run servo and print / parse the results for a specific jQuery test module.
def run_servo(servo_exe, module):
url = "http://localhost:{0}/jquery/test/?module={1}".format(TEST_SERVER_PORT, module)
args = [servo_exe, url, "-z", "-f"]
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
while True:
line = proc.stdout.readline()
if len(line) == 0:
break
line = line.rstrip()
try:
name, test_result = parse_line_to_result(line)
yield name, test_result
except AttributeError:
pass
# Build the filename for an expected results file.
def module_filename(module):
return 'expected_{0}.txt'.format(module)
# Read an existing set of expected results to compare against.
def read_existing_results(module):
with open(module_filename(module), 'r') as file:
buffer = file.read()
return parse_string_to_results(buffer)
# Write a set of results to file
def write_results(module, results):
with open(module_filename(module), 'w') as file:
        for result in results.itervalues():
file.write(result.text + '\n')
# Print usage if command line args are incorrect
def print_usage():
print("USAGE: {0} test|update servo_binary jquery_base_dir".format(sys.argv[0]))
# Run a simple HTTP server to serve up the jQuery test suite
def run_http_server():
class ThreadingSimpleServer(SocketServer.ThreadingMixIn,
BaseHTTPServer.HTTPServer):
allow_reuse_address = True
class RequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
# TODO(gw): HACK copy the fixed version from python
# main repo - due to https://bugs.python.org/issue23112
def send_head(self):
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
parts = urlparse.urlsplit(self.path)
if not parts.path.endswith('/'):
# redirect browser - doing basically what apache does
self.send_response(301)
new_parts = (parts[0], parts[1], parts[2] + '/',
parts[3], parts[4])
new_url = urlparse.urlunsplit(new_parts)
self.send_header("Location", new_url)
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
try:
# Always read in binary mode. Opening files in text mode may cause
# newline translations, making the actual size of the content
# transmitted *less* than the content-length!
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found")
return None
try:
self.send_response(200)
self.send_header("Content-type", ctype)
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
return f
except:
f.close()
raise
def log_message(self, format, *args):
return
server = ThreadingSimpleServer(('', TEST_SERVER_PORT), RequestHandler)
while True:
sys.stdout.flush()
server.handle_request()
if __name__ == '__main__':
if len(sys.argv) == 4:
cmd = sys.argv[1]
servo_exe = sys.argv[2]
base_dir = sys.argv[3]
os.chdir(base_dir)
# Ensure servo binary can be found
if not os.path.isfile(servo_exe):
print("Unable to find {0}. This script expects an existing build of Servo.".format(servo_exe))
sys.exit(1)
# Start the test server
httpd_thread = threading.Thread(target=run_http_server)
httpd_thread.setDaemon(True)
httpd_thread.start()
if cmd == "test":
print("Testing jQuery on Servo!")
test_count = 0
unexpected_count = 0
individual_success = 0
individual_total = 0
# Test each module separately
for module in JQUERY_MODULES:
print("\t{0}".format(module))
prev_test_results = read_existing_results(module)
for name, current_result in run_servo(servo_exe, module):
test_count += 1
individual_success += current_result.success
individual_total += current_result.total
# If this test was in the previous results, compare them.
if name in prev_test_results:
prev_result = prev_test_results[name]
if prev_result == current_result:
print("\t\tOK: {0}".format(name))
else:
unexpected_count += 1
print("\t\tFAIL: {0}: WAS {1} NOW {2}".format(name, prev_result, current_result))
del prev_test_results[name]
else:
# There was a new test that wasn't expected
unexpected_count += 1
print("\t\tNEW: {0}".format(current_result.text))
# Check what's left over, these are tests that were expected but didn't run this time.
for name in prev_test_results:
test_count += 1
unexpected_count += 1
print("\t\tMISSING: {0}".format(prev_test_results[name].text))
print("\tRan {0} test groups. {1} unexpected results.".format(test_count, unexpected_count))
print("\t{0} tests succeeded of {1} ({2:.2f}%)".format(individual_success,
individual_total,
100.0 * individual_success / individual_total))
if unexpected_count > 0:
sys.exit(1)
elif cmd == "update":
print("Updating jQuery expected results")
for module in JQUERY_MODULES:
print("\t{0}".format(module))
test_results = {}
for name, test_result in run_servo(servo_exe, module):
print("\t\t{0} {1}".format(name, test_result))
test_results[name] = test_result
write_results(module, test_results)
else:
print_usage()
else:
print_usage()
|
mpl-2.0
|
mkaluza/external_chromium_org
|
chrome/common/extensions/docs/server2/cron_servlet_test.py
|
23
|
9487
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from appengine_wrappers import GetAppVersion
from app_yaml_helper import AppYamlHelper
from cron_servlet import CronServlet
from empty_dir_file_system import EmptyDirFileSystem
from extensions_paths import (
APP_YAML, CONTENT_PROVIDERS, EXTENSIONS, PUBLIC_TEMPLATES, SERVER2,
STATIC_DOCS)
from github_file_system_provider import GithubFileSystemProvider
from host_file_system_provider import HostFileSystemProvider
from local_file_system import LocalFileSystem
from mock_file_system import MockFileSystem
from servlet import Request
from test_branch_utility import TestBranchUtility
from test_file_system import MoveTo, TestFileSystem
from test_util import EnableLogging, ReadFile
# NOTE(kalman): The ObjectStore created by the CronServlet is backed onto our
# fake AppEngine memcache/datastore, so the tests aren't isolated. Of course,
# if the host file systems have different identities, they will be, sort of.
class _TestDelegate(CronServlet.Delegate):
def __init__(self, create_file_system):
self.file_systems = []
# A callback taking a revision and returning a file system.
self._create_file_system = create_file_system
self._app_version = GetAppVersion()
def CreateBranchUtility(self, object_store_creator):
return TestBranchUtility.CreateWithCannedData()
def CreateHostFileSystemProvider(self,
object_store_creator,
max_trunk_revision=None):
def constructor(branch=None, revision=None):
file_system = self._create_file_system(revision)
self.file_systems.append(file_system)
return file_system
return HostFileSystemProvider(object_store_creator,
max_trunk_revision=max_trunk_revision,
constructor_for_test=constructor)
def CreateGithubFileSystemProvider(self, object_store_creator):
return GithubFileSystemProvider.ForEmpty()
def GetAppVersion(self):
return self._app_version
# (non-Delegate method).
def SetAppVersion(self, app_version):
self._app_version = app_version
class CronServletTest(unittest.TestCase):
@EnableLogging('info')
def testEverything(self):
# All these tests are dependent (see above comment) so lump everything in
# the one test.
delegate = _TestDelegate(lambda _: MockFileSystem(LocalFileSystem.Create()))
# Test that the cron runs successfully.
response = CronServlet(Request.ForTest('trunk'),
delegate_for_test=delegate).Get()
self.assertEqual(200, response.status)
# Save the file systems created, start with a fresh set for the next run.
first_run_file_systems = delegate.file_systems[:]
delegate.file_systems[:] = []
# When re-running, all file systems should be Stat()d the same number of
# times, but the second round shouldn't have been re-Read() since the
# Stats haven't changed.
response = CronServlet(Request.ForTest('trunk'),
delegate_for_test=delegate).Get()
self.assertEqual(200, response.status)
self.assertEqual(len(first_run_file_systems), len(delegate.file_systems))
for i, second_run_file_system in enumerate(delegate.file_systems):
self.assertTrue(*second_run_file_system.CheckAndReset(
read_count=0,
stat_count=first_run_file_systems[i].GetStatCount()))
def testSafeRevision(self):
test_data = {
'api': {
'_api_features.json': '{}',
'_manifest_features.json': '{}',
'_permission_features.json': '{}',
},
'docs': {
'examples': {
'examples.txt': 'examples.txt contents'
},
'server2': {
'app.yaml': AppYamlHelper.GenerateAppYaml('2-0-8')
},
'static': {
'static.txt': 'static.txt contents'
},
'templates': {
'private': {
'table_of_contents.html': 'table_of_contents.html contents',
},
'public': {
'apps': {
'storage.html': '<h1>storage.html</h1> contents'
},
'extensions': {
'storage.html': '<h1>storage.html</h1> contents'
},
},
'json': {
'content_providers.json': ReadFile(CONTENT_PROVIDERS),
'manifest.json': '{}',
'permissions.json': '{}',
'strings.json': '{}',
'apps_sidenav.json': '{}',
'extensions_sidenav.json': '{}',
},
}
}
}
updates = []
def app_yaml_update(version):
return MoveTo(SERVER2, {
'app.yaml': AppYamlHelper.GenerateAppYaml(version)
})
def storage_html_update(update):
return MoveTo(PUBLIC_TEMPLATES, {
'apps': {'storage.html': update}
})
def static_txt_update(update):
return MoveTo(STATIC_DOCS, {
'static.txt': update
})
storage_html_path = '%s/apps/storage.html' % PUBLIC_TEMPLATES
static_txt_path = '%s/static.txt' % STATIC_DOCS
def create_file_system(revision=None):
'''Creates a MockFileSystem at |revision| by applying that many |updates|
to it.
'''
mock_file_system = MockFileSystem(
TestFileSystem(test_data, relative_to=EXTENSIONS))
updates_for_revision = (
updates if revision is None else updates[:int(revision)])
for update in updates_for_revision:
mock_file_system.Update(update)
return mock_file_system
delegate = _TestDelegate(create_file_system)
delegate.SetAppVersion('2-0-8')
file_systems = delegate.file_systems
# No updates applied yet.
CronServlet(Request.ForTest('trunk'), delegate_for_test=delegate).Get()
self.assertEqual(AppYamlHelper.GenerateAppYaml('2-0-8'),
file_systems[-1].ReadSingle(APP_YAML).Get())
self.assertEqual('<h1>storage.html</h1> contents',
file_systems[-1].ReadSingle(storage_html_path).Get())
# Apply updates to storage.html.
updates.append(storage_html_update('interim contents'))
updates.append(storage_html_update('<h1>new</h1> contents'))
CronServlet(Request.ForTest('trunk'), delegate_for_test=delegate).Get()
self.assertEqual(AppYamlHelper.GenerateAppYaml('2-0-8'),
file_systems[-1].ReadSingle(APP_YAML).Get())
self.assertEqual('<h1>new</h1> contents',
file_systems[-1].ReadSingle(storage_html_path).Get())
# Apply several updates to storage.html and app.yaml. The file system
# should be pinned at the version before app.yaml changed.
updates.append(storage_html_update('<h1>stuck here</h1> contents'))
double_update = storage_html_update('<h1>newer</h1> contents')
double_update.update(app_yaml_update('2-0-10'))
updates.append(double_update)
updates.append(storage_html_update('never gonna reach here'))
CronServlet(Request.ForTest('trunk'), delegate_for_test=delegate).Get()
self.assertEqual(AppYamlHelper.GenerateAppYaml('2-0-8'),
file_systems[-1].ReadSingle(APP_YAML).Get())
self.assertEqual('<h1>stuck here</h1> contents',
file_systems[-1].ReadSingle(storage_html_path).Get())
# Further pushes to storage.html will keep it pinned.
updates.append(storage_html_update('<h1>y</h1> u not update!'))
CronServlet(Request.ForTest('trunk'), delegate_for_test=delegate).Get()
self.assertEqual(AppYamlHelper.GenerateAppYaml('2-0-8'),
file_systems[-1].ReadSingle(APP_YAML).Get())
self.assertEqual('<h1>stuck here</h1> contents',
file_systems[-1].ReadSingle(storage_html_path).Get())
# Likewise app.yaml.
updates.append(app_yaml_update('2-1-0'))
CronServlet(Request.ForTest('trunk'), delegate_for_test=delegate).Get()
self.assertEqual(AppYamlHelper.GenerateAppYaml('2-0-8'),
file_systems[-1].ReadSingle(APP_YAML).Get())
self.assertEqual('<h1>stuck here</h1> contents',
file_systems[-1].ReadSingle(storage_html_path).Get())
# And updates to other content won't happen either.
updates.append(static_txt_update('important content!'))
CronServlet(Request.ForTest('trunk'), delegate_for_test=delegate).Get()
self.assertEqual(AppYamlHelper.GenerateAppYaml('2-0-8'),
file_systems[-1].ReadSingle(APP_YAML).Get())
self.assertEqual('<h1>stuck here</h1> contents',
file_systems[-1].ReadSingle(storage_html_path).Get())
self.assertEqual('static.txt contents',
file_systems[-1].ReadSingle(static_txt_path).Get())
# Lastly - when the app version changes, everything should no longer be
# pinned.
delegate.SetAppVersion('2-1-0')
CronServlet(Request.ForTest('trunk'), delegate_for_test=delegate).Get()
self.assertEqual(AppYamlHelper.GenerateAppYaml('2-1-0'),
file_systems[-1].ReadSingle(APP_YAML).Get())
self.assertEqual('<h1>y</h1> u not update!',
file_systems[-1].ReadSingle(storage_html_path).Get())
self.assertEqual('important content!',
file_systems[-1].ReadSingle(static_txt_path).Get())
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
coursemdetw/2014cdb
|
wsgi/static/Brython2.1.3-20140704-213726/Lib/linecache.py
|
785
|
3864
|
"""Cache lines from files.
This is intended to read lines from modules imported -- hence if a filename
is not found, it will look down the module search path for a file by
that name.
"""
import sys
import os
import tokenize
__all__ = ["getline", "clearcache", "checkcache"]
def getline(filename, lineno, module_globals=None):
lines = getlines(filename, module_globals)
if 1 <= lineno <= len(lines):
return lines[lineno-1]
else:
return ''
# The cache: maps a filename to a (size, mtime, lines, fullname) tuple.
cache = {}
def clearcache():
"""Clear the cache entirely."""
global cache
cache = {}
def getlines(filename, module_globals=None):
"""Get the lines for a file from the cache.
Update the cache if it doesn't contain an entry for this file already."""
if filename in cache:
return cache[filename][2]
else:
return updatecache(filename, module_globals)
def checkcache(filename=None):
"""Discard cache entries that are out of date.
(This is not checked upon each call!)"""
if filename is None:
filenames = list(cache.keys())
else:
if filename in cache:
filenames = [filename]
else:
return
for filename in filenames:
size, mtime, lines, fullname = cache[filename]
if mtime is None:
continue # no-op for files loaded via a __loader__
try:
stat = os.stat(fullname)
except os.error:
del cache[filename]
continue
if size != stat.st_size or mtime != stat.st_mtime:
del cache[filename]
def updatecache(filename, module_globals=None):
"""Update a cache entry and return its list of lines.
If something's wrong, print a message, discard the cache entry,
and return an empty list."""
if filename in cache:
del cache[filename]
if not filename or (filename.startswith('<') and filename.endswith('>')):
return []
fullname = filename
try:
stat = os.stat(fullname)
except OSError:
basename = filename
# Try for a __loader__, if available
if module_globals and '__loader__' in module_globals:
name = module_globals.get('__name__')
loader = module_globals['__loader__']
get_source = getattr(loader, 'get_source', None)
if name and get_source:
try:
data = get_source(name)
except (ImportError, IOError):
pass
else:
if data is None:
# No luck, the PEP302 loader cannot find the source
# for this module.
return []
cache[filename] = (
len(data), None,
[line+'\n' for line in data.splitlines()], fullname
)
return cache[filename][2]
# Try looking through the module search path, which is only useful
# when handling a relative filename.
if os.path.isabs(filename):
return []
for dirname in sys.path:
try:
fullname = os.path.join(dirname, basename)
except (TypeError, AttributeError):
# Not sufficiently string-like to do anything useful with.
continue
try:
stat = os.stat(fullname)
break
except os.error:
pass
else:
return []
try:
with tokenize.open(fullname) as fp:
lines = fp.readlines()
except IOError:
return []
if lines and not lines[-1].endswith('\n'):
lines[-1] += '\n'
size, mtime = stat.st_size, stat.st_mtime
cache[filename] = size, mtime, lines, fullname
return lines
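# Example usage (illustrative):
#   import linecache
#   line = linecache.getline('/path/to/module.py', 1)  # '' when unreadable
#   linecache.checkcache()   # drop entries whose size/mtime changed on disk
#   linecache.clearcache()   # empty the cache entirely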
|
gpl-2.0
|
ray-project/ray
|
python/ray/serve/tests/test_fastapi.py
|
1
|
12857
|
import sys
import time
from typing import Any, List, Optional
import tempfile
import pytest
import inspect
import requests
from fastapi import (Cookie, Depends, FastAPI, Header, Query, Request,
APIRouter, BackgroundTasks, Response)
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from pydantic import BaseModel, Field
import ray
from ray import serve
from ray.serve.http_util import make_fastapi_class_based_view
def test_fastapi_function(serve_instance):
app = FastAPI()
@app.get("/{a}")
def func(a: int):
return {"result": a}
@serve.deployment(name="f")
@serve.ingress(app)
class FastAPIApp:
pass
FastAPIApp.deploy()
resp = requests.get("http://localhost:8000/f/100")
assert resp.json() == {"result": 100}
resp = requests.get("http://localhost:8000/f/not-number")
assert resp.status_code == 422 # Unprocessable Entity
assert resp.json()["detail"][0]["type"] == "type_error.integer"
def test_ingress_prefix(serve_instance):
app = FastAPI()
@app.get("/{a}")
def func(a: int):
return {"result": a}
@serve.deployment(route_prefix="/api")
@serve.ingress(app)
class App:
pass
App.deploy()
resp = requests.get("http://localhost:8000/api/100")
assert resp.json() == {"result": 100}
def test_class_based_view(serve_instance):
app = FastAPI()
@app.get("/other")
def hello():
return "hello"
@serve.deployment(name="f")
@serve.ingress(app)
class A:
def __init__(self):
self.val = 1
@app.get("/calc/{i}")
def b(self, i: int):
return i + self.val
@app.post("/calc/{i}")
def c(self, i: int):
return i - self.val
def other(self, msg: str):
return msg
A.deploy()
# Test HTTP calls.
resp = requests.get("http://localhost:8000/f/calc/41")
assert resp.json() == 42
resp = requests.post("http://localhost:8000/f/calc/41")
assert resp.json() == 40
resp = requests.get("http://localhost:8000/f/other")
assert resp.json() == "hello"
# Test handle calls.
handle = A.get_handle()
assert ray.get(handle.b.remote(41)) == 42
assert ray.get(handle.c.remote(41)) == 40
assert ray.get(handle.other.remote("world")) == "world"
def test_make_fastapi_cbv_util():
app = FastAPI()
class A:
@app.get("/{i}")
def b(self, i: int):
pass
# before, "self" is treated as a query params
assert app.routes[-1].endpoint == A.b
assert app.routes[-1].dependant.query_params[0].name == "self"
assert len(app.routes[-1].dependant.dependencies) == 0
make_fastapi_class_based_view(app, A)
# after, "self" is treated as a dependency instead of query params
assert app.routes[-1].endpoint == A.b
assert len(app.routes[-1].dependant.query_params) == 0
assert len(app.routes[-1].dependant.dependencies) == 1
self_dep = app.routes[-1].dependant.dependencies[0]
assert self_dep.name == "self"
assert inspect.isfunction(self_dep.call)
assert "get_current_servable" in str(self_dep.call)
def test_fastapi_features(serve_instance):
app = FastAPI(openapi_url="/my_api.json")
@app.on_event("startup")
def inject_state():
app.state.state_one = "app.state"
@app.middleware("http")
async def add_process_time_header(request: Request, call_next):
start_time = time.time()
response = await call_next(request)
process_time = time.time() - start_time
response.headers["X-Process-Time"] = str(process_time)
return response
class Nested(BaseModel):
val: int
class BodyType(BaseModel):
name: str
price: float = Field(None, gt=1.0, description="High price!")
nests: Nested
class RespModel(BaseModel):
ok: bool
vals: List[Any]
file_path: str
async def yield_db():
yield "db"
async def common_parameters(q: Optional[str] = None):
return {"q": q}
@app.exception_handler(ValueError)
async def custom_handler(_: Request, exc: ValueError):
return JSONResponse(
status_code=500,
content={
"custom_error": "true",
"message": str(exc)
})
def run_background(background_tasks: BackgroundTasks):
path = tempfile.mktemp()
def write_to_file(p):
with open(p, "w") as f:
f.write("hello")
background_tasks.add_task(write_to_file, path)
return path
app.add_middleware(CORSMiddleware, allow_origins="*")
@app.get("/{path_arg}", response_model=RespModel, status_code=201)
async def func(
path_arg: str,
query_arg: str,
body_val: BodyType,
backgrounds_tasks: BackgroundTasks,
do_error: bool = False,
query_arg_valid: Optional[str] = Query(None, min_length=3),
cookie_arg: Optional[str] = Cookie(None),
user_agent: Optional[str] = Header(None),
commons: dict = Depends(common_parameters),
db=Depends(yield_db),
):
if do_error:
raise ValueError("bad input")
path = run_background(backgrounds_tasks)
return RespModel(
ok=True,
vals=[
path_arg,
query_arg,
body_val.price,
body_val.nests.val,
do_error,
query_arg_valid,
cookie_arg,
user_agent.split("/")[0], # returns python-requests
commons,
db,
app.state.state_one,
],
file_path=path,
)
router = APIRouter(prefix="/prefix")
@router.get("/subpath")
def router_path():
return "ok"
app.include_router(router)
@serve.deployment(name="fastapi")
@serve.ingress(app)
class Worker:
pass
Worker.deploy()
url = "http://localhost:8000/fastapi"
resp = requests.get(f"{url}/")
assert resp.status_code == 404
assert "x-process-time" in resp.headers
resp = requests.get(f"{url}/my_api.json")
assert resp.status_code == 200
assert resp.json() # it returns a well-formed json.
resp = requests.get(f"{url}/docs")
assert resp.status_code == 200
assert "<!DOCTYPE html>" in resp.text
resp = requests.get(f"{url}/redoc")
assert resp.status_code == 200
assert "<!DOCTYPE html>" in resp.text
resp = requests.get(f"{url}/path_arg")
assert resp.status_code == 422 # Malformed input
resp = requests.get(
f"{url}/path_arg",
json={
"name": "serve",
"price": 12,
"nests": {
"val": 1
}
},
params={
"query_arg": "query_arg",
"query_arg_valid": "at-least-three-chars",
"q": "common_arg",
})
assert resp.status_code == 201, resp.text
assert resp.json()["ok"]
assert resp.json()["vals"] == [
"path_arg",
"query_arg",
12.0,
1,
False,
"at-least-three-chars",
None,
"python-requests",
{
"q": "common_arg"
},
"db",
"app.state",
]
assert open(resp.json()["file_path"]).read() == "hello"
resp = requests.get(
f"{url}/path_arg",
json={
"name": "serve",
"price": 12,
"nests": {
"val": 1
}
},
params={
"query_arg": "query_arg",
"query_arg_valid": "at-least-three-chars",
"q": "common_arg",
"do_error": "true"
})
assert resp.status_code == 500
assert resp.json()["custom_error"] == "true"
resp = requests.get(f"{url}/prefix/subpath")
assert resp.status_code == 200
resp = requests.get(
f"{url}/docs",
headers={
"Access-Control-Request-Method": "GET",
"Origin": "https://googlebot.com"
})
assert resp.headers["access-control-allow-origin"] == "*", resp.headers
def test_fast_api_mounted_app(serve_instance):
app = FastAPI()
subapp = FastAPI()
@subapp.get("/hi")
def hi():
return "world"
app.mount("/mounted", subapp)
@serve.deployment(route_prefix="/api")
@serve.ingress(app)
class A:
pass
A.deploy()
assert requests.get(
"http://localhost:8000/api/mounted/hi").json() == "world"
def test_fastapi_init_lifespan_should_not_shutdown(serve_instance):
app = FastAPI()
@app.on_event("shutdown")
async def shutdown():
1 / 0
@serve.deployment
@serve.ingress(app)
class A:
def f(self):
return 1
A.deploy()
    # Without a proper fix, the actor won't be initialized correctly,
    # because it would crash on each startup.
assert ray.get(A.get_handle().f.remote()) == 1
def test_fastapi_duplicate_routes(serve_instance):
app = FastAPI()
@serve.deployment(route_prefix="/api/v1")
@serve.ingress(app)
class App1:
@app.get("/")
def func_v1(self):
return "first"
@serve.deployment(route_prefix="/api/v2")
@serve.ingress(app)
class App2:
@app.get("/")
def func_v2(self):
return "second"
@app.get("/ignored")
def ignored():
pass
App1.deploy()
App2.deploy()
resp = requests.get("http://localhost:8000/api/v1")
assert resp.json() == "first"
resp = requests.get("http://localhost:8000/api/v2")
assert resp.json() == "second"
for version in ["v1", "v2"]:
resp = requests.get(f"http://localhost:8000/api/{version}/ignored")
assert resp.status_code == 404
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows")
@pytest.mark.parametrize("route_prefix", [None, "/", "/subpath"])
def test_doc_generation(serve_instance, route_prefix):
app = FastAPI()
@serve.deployment(route_prefix=route_prefix)
@serve.ingress(app)
class App:
@app.get("/")
def func1(self, arg: str):
return "hello"
App.deploy()
if route_prefix is None:
prefix = "/App"
else:
prefix = route_prefix
if not prefix.endswith("/"):
prefix += "/"
r = requests.get(f"http://localhost:8000{prefix}openapi.json")
assert r.status_code == 200
assert len(r.json()["paths"]) == 1
assert "/" in r.json()["paths"]
assert len(r.json()["paths"]["/"]) == 1
assert "get" in r.json()["paths"]["/"]
r = requests.get(f"http://localhost:8000{prefix}docs")
assert r.status_code == 200
@serve.deployment(route_prefix=route_prefix)
@serve.ingress(app)
class App:
@app.get("/")
def func1(self, arg: str):
return "hello"
@app.post("/hello")
def func2(self, arg: int):
return "hello"
App.deploy()
r = requests.get(f"http://localhost:8000{prefix}openapi.json")
assert r.status_code == 200
assert len(r.json()["paths"]) == 2
assert "/" in r.json()["paths"]
assert len(r.json()["paths"]["/"]) == 1
assert "get" in r.json()["paths"]["/"]
assert "/hello" in r.json()["paths"]
assert len(r.json()["paths"]["/hello"]) == 1
assert "post" in r.json()["paths"]["/hello"]
r = requests.get(f"http://localhost:8000{prefix}docs")
assert r.status_code == 200
def test_fastapi_multiple_headers(serve_instance):
# https://fastapi.tiangolo.com/advanced/response-cookies/
app = FastAPI()
@app.get("/")
def func(resp: Response):
resp.set_cookie(key="a", value="b")
resp.set_cookie(key="c", value="d")
return "hello"
@serve.deployment(name="f")
@serve.ingress(app)
class FastAPIApp:
pass
FastAPIApp.deploy()
resp = requests.get("http://localhost:8000/f")
assert resp.cookies.get_dict() == {"a": "b", "c": "d"}
def test_fastapi_nested_field_in_response_model(serve_instance):
# https://github.com/ray-project/ray/issues/16757
class TestModel(BaseModel):
a: str
b: List[str]
app = FastAPI()
@app.get("/", response_model=TestModel)
def test_endpoint():
test_model = TestModel(a="a", b=["b"])
return test_model
@serve.deployment(route_prefix="/")
@serve.ingress(app)
class TestDeployment:
pass
TestDeployment.deploy()
resp = requests.get("http://localhost:8000/")
assert resp.json() == {"a": "a", "b": ["b"]}
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
|
apache-2.0
|
lavish205/olympia
|
src/olympia/reviewers/tests/test_views.py
|
1
|
210461
|
# -*- coding: utf-8 -*-
import json
import os
import time
import urlparse
from collections import OrderedDict
from datetime import datetime, timedelta
from django.conf import settings
from django.core import mail
from django.core.cache import cache
from django.core.files import temp
from django.core.files.base import File as DjangoFile
from django.template import defaultfilters
from django.test.utils import override_settings
import mock
from freezegun import freeze_time
from lxml.html import HTMLParser, fromstring
from mock import Mock, patch
from pyquery import PyQuery as pq
from olympia import amo, core, ratings
from olympia.abuse.models import AbuseReport
from olympia.access.models import Group, GroupUser
from olympia.accounts.views import API_TOKEN_COOKIE
from olympia.activity.models import ActivityLog
from olympia.addons.models import (
Addon, AddonApprovalsCounter, AddonDependency, AddonReviewerFlags,
AddonUser)
from olympia.amo.templatetags.jinja_helpers import (
user_media_path, user_media_url)
from olympia.amo.tests import (
APITestClient, TestCase, addon_factory, check_links, file_factory, formset,
initial, reverse_ns, user_factory, version_factory)
from olympia.amo.urlresolvers import reverse
from olympia.files.models import File, FileValidation, WebextPermission
from olympia.ratings.models import Rating, RatingFlag
from olympia.reviewers.models import (
AutoApprovalSummary, RereviewQueueTheme, ReviewerScore,
ReviewerSubscription, Whiteboard)
from olympia.users.models import UserProfile
from olympia.versions.models import ApplicationsVersions, AppVersion
from olympia.zadmin.models import get_config
class TestRedirectsOldPaths(TestCase):
def setUp(self):
user = user_factory()
self.client.login(email=user.email)
def test_redirect_old_queue(self):
response = self.client.get('/en-US/editors/queue/new')
self.assert3xx(response, '/reviewers/queue/new', status_code=301)
def test_redirect_old_review_page(self):
response = self.client.get('/en-US/editors/review/foobar')
self.assert3xx(response, '/reviewers/review/foobar', status_code=301)
class ReviewerTest(TestCase):
fixtures = ['base/users', 'base/approvals']
def login_as_admin(self):
assert self.client.login(email='admin@mozilla.com')
def login_as_reviewer(self):
assert self.client.login(email='reviewer@mozilla.com')
def make_review(self, username='a'):
u = UserProfile.objects.create(username=username)
a = Addon.objects.create(name='yermom', type=amo.ADDON_EXTENSION)
return Rating.objects.create(user=u, addon=a, body='baa')
class TestRatingsModerationLog(ReviewerTest):
def setUp(self):
super(TestRatingsModerationLog, self).setUp()
user = user_factory()
self.grant_permission(user, 'Ratings:Moderate')
self.client.login(email=user.email)
self.url = reverse('reviewers.ratings_moderation_log')
core.set_user(user)
def test_log(self):
response = self.client.get(self.url)
assert response.status_code == 200
def test_start_filter(self):
response = self.client.get(self.url, {'start': '2011-01-01'})
assert response.status_code == 200
def test_enddate_filter(self):
"""
Make sure that if our end date is 1/1/2011, we include items from
1/1/2011. To not do as such would be dishonorable.
"""
review = self.make_review(username='b')
ActivityLog.create(
amo.LOG.APPROVE_RATING, review, review.addon).update(
created=datetime(2011, 1, 1))
response = self.client.get(self.url, {'end': '2011-01-01'})
assert response.status_code == 200
assert pq(response.content)('tbody td').eq(0).text() == (
'Jan. 1, 2011, midnight')
def test_action_filter(self):
"""
Based on setup we should see only two items if we filter for deleted
reviews.
"""
review = self.make_review()
for i in xrange(2):
ActivityLog.create(amo.LOG.APPROVE_RATING, review, review.addon)
ActivityLog.create(amo.LOG.DELETE_RATING, review.id, review.addon)
response = self.client.get(self.url, {'filter': 'deleted'})
assert response.status_code == 200
assert pq(response.content)('tbody tr').length == 2
def test_no_results(self):
response = self.client.get(self.url, {'end': '2004-01-01'})
assert response.status_code == 200
assert '"no-results"' in response.content
def test_moderation_log_detail(self):
review = self.make_review()
ActivityLog.create(amo.LOG.APPROVE_RATING, review, review.addon)
id_ = ActivityLog.objects.moderation_events()[0].id
response = self.client.get(
reverse('reviewers.ratings_moderation_log.detail', args=[id_]))
assert response.status_code == 200
class TestReviewLog(ReviewerTest):
fixtures = ReviewerTest.fixtures + ['base/addon_3615']
def setUp(self):
super(TestReviewLog, self).setUp()
self.user = UserProfile.objects.get(email='reviewer@mozilla.com')
self.login_as_reviewer()
self.url = reverse('reviewers.reviewlog')
def get_user(self):
return UserProfile.objects.all()[0]
def make_approvals(self):
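# Creates one REJECT_VERSION log entry per add-on in the fixtures (two
# add-ons), which is why the listing tests expect two visible rows.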
for addon in Addon.objects.all():
ActivityLog.create(
amo.LOG.REJECT_VERSION, addon, addon.current_version,
user=self.get_user(), details={'comments': 'youwin'})
def make_an_approval(self, action, comment='youwin', username=None,
addon=None):
if username:
user = UserProfile.objects.get(username=username)
else:
user = self.get_user()
if not addon:
addon = Addon.objects.all()[0]
ActivityLog.create(action, addon, addon.current_version, user=user,
details={'comments': comment})
def test_basic(self):
self.make_approvals()
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#log-filter button'), 'No filters.'
# Should have 2 showing.
rows = doc('tbody tr')
assert rows.filter(':not(.hide)').length == 2
assert rows.filter('.hide').eq(0).text() == 'youwin'
# Should have none showing if the addons are unlisted.
for addon in Addon.objects.all():
self.make_addon_unlisted(addon)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert not doc('tbody tr :not(.hide)')
# But they should have 2 showing for someone with the right perms.
self.grant_permission(self.user, 'Addons:ReviewUnlisted')
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
rows = doc('tbody tr')
assert rows.filter(':not(.hide)').length == 2
assert rows.filter('.hide').eq(0).text() == 'youwin'
def test_xss(self):
a = Addon.objects.all()[0]
a.name = '<script>alert("xss")</script>'
a.save()
ActivityLog.create(amo.LOG.REJECT_VERSION, a, a.current_version,
user=self.get_user(), details={'comments': 'xss!'})
response = self.client.get(self.url)
assert response.status_code == 200
inner_html = pq(response.content)('#log-listing tbody td').eq(1).html()
assert '&lt;script&gt;' in inner_html
assert '<script>' not in inner_html
def test_end_filter(self):
"""
Let's use today as an end-day filter and make sure we see stuff if we
filter.
"""
self.make_approvals()
# Make sure we show the stuff we just made.
date = time.strftime('%Y-%m-%d')
response = self.client.get(self.url, {'end': date})
assert response.status_code == 200
doc = pq(response.content)('#log-listing tbody')
assert doc('tr:not(.hide)').length == 2
assert doc('tr.hide').eq(0).text() == 'youwin'
def test_end_filter_wrong(self):
"""
Pass an invalid end-day filter and make sure the page still renders
instead of raising.
"""
self.make_approvals()
response = self.client.get(self.url, {'end': 'wrong!'})
# If this is broken, we'll get a traceback.
assert response.status_code == 200
assert pq(response.content)('#log-listing tr:not(.hide)').length == 3
def test_start_filter(self):
with freeze_time('2017-08-01 10:00'):
self.make_approvals()
# Make sure we show the stuff we just made.
response = self.client.get(self.url, {'start': '2017-07-31'})
assert response.status_code == 200
doc = pq(response.content)('#log-listing tbody')
assert doc('tr:not(.hide)').length == 2
assert doc('tr.hide').eq(0).text() == 'youwin'
def test_start_default_filter(self):
with freeze_time('2017-07-31 10:00'):
self.make_approvals()
with freeze_time('2017-08-01 10:00'):
addon = Addon.objects.first()
ActivityLog.create(
amo.LOG.REJECT_VERSION, addon, addon.current_version,
user=self.get_user(), details={'comments': 'youwin'})
# Make sure the default 'start' (the 1st of the month) works properly
with freeze_time('2017-08-03 11:00'):
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)('#log-listing tbody')
assert doc('tr:not(.hide)').length == 1
assert doc('tr.hide').eq(0).text() == 'youwin'
def test_search_comment_exists(self):
"""Search by comment."""
self.make_an_approval(amo.LOG.REQUEST_ADMIN_REVIEW_CODE,
comment='hello')
response = self.client.get(self.url, {'search': 'hello'})
assert response.status_code == 200
assert pq(response.content)(
'#log-listing tbody tr.hide').eq(0).text() == 'hello'
def test_search_comment_case_exists(self):
"""Search by comment, with case."""
self.make_an_approval(amo.LOG.REQUEST_ADMIN_REVIEW_CODE,
comment='hello')
response = self.client.get(self.url, {'search': 'HeLlO'})
assert response.status_code == 200
assert pq(response.content)(
'#log-listing tbody tr.hide').eq(0).text() == 'hello'
def test_search_comment_doesnt_exist(self):
"""Search by comment, with no results."""
self.make_an_approval(amo.LOG.REQUEST_ADMIN_REVIEW_CODE,
comment='hello')
response = self.client.get(self.url, {'search': 'bye'})
assert response.status_code == 200
assert pq(response.content)('.no-results').length == 1
def test_search_author_exists(self):
"""Search by author."""
self.make_approvals()
self.make_an_approval(
amo.LOG.REQUEST_ADMIN_REVIEW_CODE, username='reviewer',
comment='hi')
response = self.client.get(self.url, {'search': 'reviewer'})
assert response.status_code == 200
rows = pq(response.content)('#log-listing tbody tr')
assert rows.filter(':not(.hide)').length == 1
assert rows.filter('.hide').eq(0).text() == 'hi'
def test_search_author_case_exists(self):
"""Search by author, with case."""
self.make_approvals()
self.make_an_approval(
amo.LOG.REQUEST_ADMIN_REVIEW_CODE, username='reviewer',
comment='hi')
response = self.client.get(self.url, {'search': 'ReviEwEr'})
assert response.status_code == 200
rows = pq(response.content)('#log-listing tbody tr')
assert rows.filter(':not(.hide)').length == 1
assert rows.filter('.hide').eq(0).text() == 'hi'
def test_search_author_doesnt_exist(self):
"""Search by author, with no results."""
self.make_approvals()
self.make_an_approval(
amo.LOG.REQUEST_ADMIN_REVIEW_CODE, username='reviewer')
response = self.client.get(self.url, {'search': 'wrong'})
assert response.status_code == 200
assert pq(response.content)('.no-results').length == 1
def test_search_addon_exists(self):
"""Search by add-on name."""
self.make_approvals()
addon = Addon.objects.all()[0]
response = self.client.get(self.url, {'search': addon.name})
assert response.status_code == 200
tr = pq(response.content)(
'#log-listing tr[data-addonid="%s"]' % addon.id)
assert tr.length == 1
assert tr.siblings('.comments').text() == 'youwin'
def test_search_addon_case_exists(self):
"""Search by add-on name, with case."""
self.make_approvals()
addon = Addon.objects.all()[0]
response = self.client.get(
self.url, {'search': str(addon.name).swapcase()})
assert response.status_code == 200
tr = pq(response.content)(
'#log-listing tr[data-addonid="%s"]' % addon.id)
assert tr.length == 1
assert tr.siblings('.comments').text() == 'youwin'
def test_search_addon_doesnt_exist(self):
"""Search by add-on name, with no results."""
self.make_approvals()
response = self.client.get(self.url, {'search': 'xxx'})
assert response.status_code == 200
assert pq(response.content)('.no-results').length == 1
@patch('olympia.activity.models.ActivityLog.arguments', new=Mock)
def test_addon_missing(self):
self.make_approvals()
response = self.client.get(self.url)
assert response.status_code == 200
assert pq(response.content)('#log-listing tr td').eq(1).text() == (
'Add-on has been deleted.')
def test_request_info_logs(self):
self.make_an_approval(amo.LOG.REQUEST_INFORMATION)
response = self.client.get(self.url)
assert response.status_code == 200
assert pq(response.content)('#log-listing tr td a').eq(1).text() == (
'More information requested')
def test_super_review_logs(self):
self.make_an_approval(amo.LOG.REQUEST_ADMIN_REVIEW_CODE)
response = self.client.get(self.url)
assert response.status_code == 200
assert pq(response.content)('#log-listing tr td a').eq(1).text() == (
'Admin add-on-review requested')
def test_comment_logs(self):
self.make_an_approval(amo.LOG.COMMENT_VERSION)
response = self.client.get(self.url)
assert response.status_code == 200
assert pq(response.content)('#log-listing tr td a').eq(1).text() == (
'Commented')
def test_content_approval(self):
self.make_an_approval(amo.LOG.APPROVE_CONTENT)
response = self.client.get(self.url)
assert response.status_code == 200
link = pq(response.content)('#log-listing tbody td a').eq(1)[0]
assert link.attrib['href'] == '/en-US/reviewers/review-content/a3615'
assert link.text_content().strip() == 'Content approved'
def test_content_rejection(self):
self.make_an_approval(amo.LOG.REJECT_CONTENT)
response = self.client.get(self.url)
assert response.status_code == 200
link = pq(response.content)('#log-listing tbody td a').eq(1)[0]
assert link.attrib['href'] == '/en-US/reviewers/review-content/a3615'
assert link.text_content().strip() == 'Content rejected'
@freeze_time('2017-08-03')
def test_review_url(self):
self.login_as_admin()
addon = addon_factory()
unlisted_version = version_factory(
addon=addon, channel=amo.RELEASE_CHANNEL_UNLISTED)
ActivityLog.create(
amo.LOG.APPROVE_VERSION, addon, addon.current_version,
user=self.get_user(), details={'comments': 'foo'})
response = self.client.get(self.url)
assert response.status_code == 200
url = reverse('reviewers.review', args=[addon.slug])
link = pq(response.content)(
'#log-listing tbody tr[data-addonid] a').eq(1)
assert link.attr('href') == url
entry = ActivityLog.create(
amo.LOG.APPROVE_VERSION, addon,
unlisted_version,
user=self.get_user(), details={'comments': 'foo'})
# Force the latest entry to be at the top of the list so that we can
# pick it more reliably later from the HTML
entry.update(created=datetime.now() + timedelta(days=1))
response = self.client.get(self.url)
url = reverse(
'reviewers.review',
args=['unlisted', addon.slug])
assert pq(response.content)(
'#log-listing tr td a').eq(1).attr('href') == url
class TestDashboard(TestCase):
def setUp(self):
self.url = reverse('reviewers.dashboard')
self.user = user_factory()
self.client.login(email=self.user.email)
def test_old_temporary_url_redirect(self):
response = self.client.get('/en-US/reviewers/dashboard')
self.assert3xx(
response, reverse('reviewers.dashboard'), status_code=301)
def test_not_a_reviewer(self):
response = self.client.get(self.url)
assert response.status_code == 403
def test_admin_all_permissions(self):
# Create a lot of add-ons to test the queue counts.
# Nominated and pending.
addon_factory(
status=amo.STATUS_NOMINATED,
type=amo.ADDON_STATICTHEME,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
version_factory(
addon=addon_factory(),
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
version_factory(
addon=addon_factory(),
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
under_admin_review = addon_factory(
status=amo.STATUS_NOMINATED,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
AddonReviewerFlags.objects.create(
addon=under_admin_review, needs_admin_code_review=True)
under_admin_review_and_pending = addon_factory()
AddonReviewerFlags.objects.create(
addon=under_admin_review_and_pending,
needs_admin_theme_review=True)
version_factory(
addon=under_admin_review_and_pending,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
# Auto-approved and Content Review.
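# reset_for_addon() zeroes the approvals counter so these add-ons count
# as never content-reviewed and show up in the content review queue.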
addon1 = addon_factory(
version_kw={'is_webextension': True})
AddonApprovalsCounter.reset_for_addon(addon=addon1)
AutoApprovalSummary.objects.create(
version=addon1.current_version, verdict=amo.AUTO_APPROVED)
under_content_review = addon_factory(
version_kw={'is_webextension': True})
AddonApprovalsCounter.reset_for_addon(addon=under_content_review)
AutoApprovalSummary.objects.create(
version=under_content_review.current_version,
verdict=amo.AUTO_APPROVED)
AddonReviewerFlags.objects.create(
addon=under_content_review, needs_admin_content_review=True)
addon2 = addon_factory(
version_kw={'is_webextension': True})
AddonApprovalsCounter.reset_for_addon(addon=addon2)
AutoApprovalSummary.objects.create(
version=addon2.current_version, verdict=amo.AUTO_APPROVED)
AddonReviewerFlags.objects.create(
addon=addon2, needs_admin_content_review=True)
under_code_review = addon_factory(
version_kw={'is_webextension': True})
AddonApprovalsCounter.reset_for_addon(addon=under_code_review)
AutoApprovalSummary.objects.create(
version=under_code_review.current_version,
verdict=amo.AUTO_APPROVED)
AddonReviewerFlags.objects.create(
addon=under_code_review, needs_admin_code_review=True)
admins_group = Group.objects.create(name='Admins', rules='*:*')
GroupUser.objects.create(user=self.user, group=admins_group)
# Addon with expired info request
expired = addon_factory(name=u'Expired')
AddonReviewerFlags.objects.create(
addon=expired,
pending_info_request=self.days_ago(42))
# Rating
rating = Rating.objects.create(
addon=addon1, version=addon1.current_version, user=self.user,
flag=True, body=u'This âdd-on sucks!!111', rating=1,
editorreview=True)
rating.ratingflag_set.create()
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert len(doc('.dashboard h3')) == 8 # All sections are present.
expected_links = [
reverse('reviewers.queue_nominated'),
reverse('reviewers.queue_pending'),
reverse('reviewers.performance'),
reverse('reviewers.reviewlog'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Guide',
'https://wiki.mozilla.org/Add-ons/Reviewers/Themes/Guidelines',
reverse('reviewers.queue_auto_approved'),
reverse('reviewers.performance'),
reverse('reviewers.reviewlog'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Guide',
reverse('reviewers.queue_content_review'),
reverse('reviewers.performance'),
reverse('reviewers.themes.list'),
reverse('reviewers.themes.list_rereview'),
reverse('reviewers.themes.list_flagged'),
reverse('reviewers.themes.logs'),
reverse('reviewers.themes.deleted'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Themes/Guidelines',
reverse('reviewers.queue_moderated'),
reverse('reviewers.ratings_moderation_log'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Guide/Moderation',
reverse('reviewers.unlisted_queue_all'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Guide',
reverse('reviewers.motd'),
reverse('reviewers.queue_expired_info_requests'),
]
links = [link.attrib['href'] for link in doc('.dashboard a')]
assert links == expected_links
assert doc('.dashboard a')[0].text == 'New (2)'
assert doc('.dashboard a')[1].text == 'Updates (3)'
assert doc('.dashboard a')[6].text == 'Auto Approved Add-ons (4)'
assert doc('.dashboard a')[10].text == 'Content Review (4)'
assert (doc('.dashboard a')[18].text ==
'Ratings Awaiting Moderation (1)')
assert (doc('.dashboard a')[24].text ==
'Expired Information Requests (1)')
def test_can_see_all_through_reviewer_view_all_permission(self):
self.grant_permission(self.user, 'ReviewerTools:View')
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert len(doc('.dashboard h3')) == 8 # All sections are present.
expected_links = [
reverse('reviewers.queue_nominated'),
reverse('reviewers.queue_pending'),
reverse('reviewers.performance'),
reverse('reviewers.reviewlog'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Guide',
'https://wiki.mozilla.org/Add-ons/Reviewers/Themes/Guidelines',
reverse('reviewers.queue_auto_approved'),
reverse('reviewers.performance'),
reverse('reviewers.reviewlog'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Guide',
reverse('reviewers.queue_content_review'),
reverse('reviewers.performance'),
reverse('reviewers.themes.list'),
reverse('reviewers.themes.list_rereview'),
reverse('reviewers.themes.list_flagged'),
reverse('reviewers.themes.logs'),
reverse('reviewers.themes.deleted'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Themes/Guidelines',
reverse('reviewers.queue_moderated'),
reverse('reviewers.ratings_moderation_log'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Guide/Moderation',
reverse('reviewers.unlisted_queue_all'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Guide',
reverse('reviewers.motd'),
reverse('reviewers.queue_expired_info_requests'),
]
links = [link.attrib['href'] for link in doc('.dashboard a')]
assert links == expected_links
def test_legacy_reviewer(self):
# Create some add-ons to test the queue counts.
addon_factory(
status=amo.STATUS_NOMINATED,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
version_factory(
addon=addon_factory(),
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
version_factory(
addon=addon_factory(),
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
# These two are under admin review and will be ignored.
under_admin_review = addon_factory(
status=amo.STATUS_NOMINATED,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
AddonReviewerFlags.objects.create(
addon=under_admin_review, needs_admin_code_review=True)
under_admin_review_and_pending = addon_factory()
AddonReviewerFlags.objects.create(
addon=under_admin_review_and_pending, needs_admin_code_review=True)
version_factory(
addon=under_admin_review_and_pending,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
# This is a static theme so it won't be shown
addon_factory(
status=amo.STATUS_NOMINATED,
type=amo.ADDON_STATICTHEME,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
# Grant user the permission to see only the legacy add-ons section.
self.grant_permission(self.user, 'Addons:Review')
# Test.
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert len(doc('.dashboard h3')) == 1
expected_links = [
reverse('reviewers.queue_nominated'),
reverse('reviewers.queue_pending'),
reverse('reviewers.performance'),
reverse('reviewers.reviewlog'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Guide',
]
links = [link.attrib['href'] for link in doc('.dashboard a')]
assert links == expected_links
assert doc('.dashboard a')[0].text == 'New (1)'
assert doc('.dashboard a')[1].text == 'Updates (2)'
def test_post_reviewer(self):
# Create an add-on to test the queue count. It's under admin content
# review but that does not have an impact.
addon = addon_factory(
version_kw={'is_webextension': True})
AddonApprovalsCounter.reset_for_addon(addon=addon)
AutoApprovalSummary.objects.create(
version=addon.current_version, verdict=amo.AUTO_APPROVED)
AddonReviewerFlags.objects.create(
addon=addon, needs_admin_content_review=True)
# This one, however, is under admin code review, so it's ignored.
under_code_review = addon_factory(
version_kw={'is_webextension': True})
AddonApprovalsCounter.reset_for_addon(addon=under_code_review)
AutoApprovalSummary.objects.create(
version=under_code_review.current_version,
verdict=amo.AUTO_APPROVED)
AddonReviewerFlags.objects.create(
addon=under_code_review, needs_admin_code_review=True)
# Grant user the permission to see only the Auto Approved section.
self.grant_permission(self.user, 'Addons:PostReview')
# Test.
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert len(doc('.dashboard h3')) == 1
expected_links = [
reverse('reviewers.queue_auto_approved'),
reverse('reviewers.performance'),
reverse('reviewers.reviewlog'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Guide',
]
links = [link.attrib['href'] for link in doc('.dashboard a')]
assert links == expected_links
assert doc('.dashboard a')[0].text == 'Auto Approved Add-ons (1)'
def test_content_reviewer(self):
# Create an add-on to test the queue count. It's under admin code
# review but that does not have an impact.
addon = addon_factory(
version_kw={'is_webextension': True})
AddonApprovalsCounter.reset_for_addon(addon=addon)
AutoApprovalSummary.objects.create(
version=addon.current_version, verdict=amo.AUTO_APPROVED)
AddonReviewerFlags.objects.create(
addon=addon, needs_admin_code_review=True)
# This one is under admin *content* review so it's ignored.
under_content_review = addon_factory(
version_kw={'is_webextension': True})
AddonApprovalsCounter.reset_for_addon(addon=under_content_review)
AutoApprovalSummary.objects.create(
version=under_content_review.current_version,
verdict=amo.AUTO_APPROVED)
AddonReviewerFlags.objects.create(
addon=under_content_review, needs_admin_content_review=True)
# Grant user the permission to see only the Content Review section.
self.grant_permission(self.user, 'Addons:ContentReview')
# Test.
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert len(doc('.dashboard h3')) == 1
expected_links = [
reverse('reviewers.queue_content_review'),
reverse('reviewers.performance'),
]
links = [link.attrib['href'] for link in doc('.dashboard a')]
assert links == expected_links
assert doc('.dashboard a')[0].text == 'Content Review (1)'
def test_themes_reviewer(self):
# Create some themes to test the queue counts.
addon_factory(type=amo.ADDON_PERSONA, status=amo.STATUS_PENDING)
addon_factory(type=amo.ADDON_PERSONA, status=amo.STATUS_PENDING)
addon = addon_factory(type=amo.ADDON_PERSONA, status=amo.STATUS_PUBLIC)
RereviewQueueTheme.objects.create(theme=addon.persona)
addon_factory(type=amo.ADDON_PERSONA, status=amo.STATUS_REVIEW_PENDING)
addon_factory(type=amo.ADDON_PERSONA, status=amo.STATUS_REVIEW_PENDING)
addon_factory(type=amo.ADDON_PERSONA, status=amo.STATUS_REVIEW_PENDING)
# Grant user the permission to see only the themes section.
self.grant_permission(self.user, 'Personas:Review')
# Test.
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert len(doc('.dashboard h3')) == 1
expected_links = [
reverse('reviewers.themes.list'),
reverse('reviewers.themes.list_rereview'),
reverse('reviewers.themes.list_flagged'),
reverse('reviewers.themes.logs'),
reverse('reviewers.themes.deleted'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Themes/Guidelines',
]
links = [link.attrib['href'] for link in doc('.dashboard a')]
assert links == expected_links
assert doc('.dashboard a')[0].text == 'New Themes (2)'
assert doc('.dashboard a')[1].text == 'Themes Updates (1)'
assert doc('.dashboard a')[2].text == 'Flagged Themes (3)'
def test_ratings_moderator(self):
# Create a rating to test the queue count.
addon = addon_factory()
user = user_factory()
rating = Rating.objects.create(
addon=addon, version=addon.current_version, user=user, flag=True,
body=u'This âdd-on sucks!!111', rating=1, editorreview=True)
rating.ratingflag_set.create()
# Grant user the permission to see only the ratings to review section.
self.grant_permission(self.user, 'Ratings:Moderate')
# Test.
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert len(doc('.dashboard h3')) == 1
expected_links = [
reverse('reviewers.queue_moderated'),
reverse('reviewers.ratings_moderation_log'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Guide/Moderation',
]
links = [link.attrib['href'] for link in doc('.dashboard a')]
assert links == expected_links
assert doc('.dashboard a')[0].text == 'Ratings Awaiting Moderation (1)'
def test_unlisted_reviewer(self):
# Grant user the permission to see only the unlisted add-ons section.
self.grant_permission(self.user, 'Addons:ReviewUnlisted')
# Test.
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert len(doc('.dashboard h3')) == 1
expected_links = [
reverse('reviewers.unlisted_queue_all'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Guide',
]
links = [link.attrib['href'] for link in doc('.dashboard a')]
assert links == expected_links
def test_static_theme_reviewer(self):
# Create some static themes to test the queue counts.
addon_factory(
status=amo.STATUS_NOMINATED,
type=amo.ADDON_STATICTHEME,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
version_factory(
addon=addon_factory(type=amo.ADDON_STATICTHEME),
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
version_factory(
addon=addon_factory(type=amo.ADDON_STATICTHEME),
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
# These two are under admin review and will be ignored.
under_admin_review = addon_factory(
status=amo.STATUS_NOMINATED,
type=amo.ADDON_STATICTHEME,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
AddonReviewerFlags.objects.create(
addon=under_admin_review, needs_admin_theme_review=True)
under_admin_review_and_pending = addon_factory(
type=amo.ADDON_STATICTHEME)
AddonReviewerFlags.objects.create(
addon=under_admin_review_and_pending,
needs_admin_theme_review=True)
version_factory(
addon=under_admin_review_and_pending,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
# This is an extension so it won't be shown
addon_factory(
status=amo.STATUS_NOMINATED,
type=amo.ADDON_EXTENSION,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
# Grant user the permission to see only the static themes section.
self.grant_permission(self.user, 'Addons:ThemeReview')
# Test.
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert len(doc('.dashboard h3')) == 1
expected_links = [
reverse('reviewers.queue_nominated'),
reverse('reviewers.queue_pending'),
reverse('reviewers.performance'),
reverse('reviewers.reviewlog'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Themes/Guidelines',
]
links = [link.attrib['href'] for link in doc('.dashboard a')]
assert links == expected_links
assert doc('.dashboard a')[0].text == 'New (1)'
assert doc('.dashboard a')[1].text == 'Updates (2)'
def test_post_reviewer_and_content_reviewer(self):
# Create add-ons to test the queue count. The first add-on has its
# content approved, so the post review queue should contain 2 add-ons,
# and the content review queue only 1.
addon = addon_factory(
version_kw={'is_webextension': True})
AutoApprovalSummary.objects.create(
version=addon.current_version, verdict=amo.AUTO_APPROVED)
AddonApprovalsCounter.approve_content_for_addon(addon=addon)
addon = addon_factory(
version_kw={'is_webextension': True})
AddonApprovalsCounter.reset_for_addon(addon=addon)
AutoApprovalSummary.objects.create(
version=addon.current_version, verdict=amo.AUTO_APPROVED)
# Grant user the permission to see both the Content Review and the
# Auto Approved Add-ons sections.
self.grant_permission(self.user, 'Addons:ContentReview')
self.grant_permission(self.user, 'Addons:PostReview')
# Test.
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert len(doc('.dashboard h3')) == 2 # 2 sections are shown.
expected_links = [
reverse('reviewers.queue_auto_approved'),
reverse('reviewers.performance'),
reverse('reviewers.reviewlog'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Guide',
reverse('reviewers.queue_content_review'),
reverse('reviewers.performance'),
]
links = [link.attrib['href'] for link in doc('.dashboard a')]
assert links == expected_links
assert doc('.dashboard a')[0].text == 'Auto Approved Add-ons (2)'
assert 'target' not in doc('.dashboard a')[0].attrib
assert doc('.dashboard a')[3].text == 'Review Guide'
assert doc('.dashboard a')[3].attrib['target'] == '_blank'
assert doc('.dashboard a')[3].attrib['rel'] == 'noopener noreferrer'
assert doc('.dashboard a')[4].text == 'Content Review (1)'
def test_legacy_reviewer_and_ratings_moderator(self):
# Grant user the permission to see both the legacy add-ons and the
# ratings moderation sections.
self.grant_permission(self.user, 'Addons:Review')
self.grant_permission(self.user, 'Ratings:Moderate')
# Test.
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert len(doc('.dashboard h3')) == 2
expected_links = [
reverse('reviewers.queue_nominated'),
reverse('reviewers.queue_pending'),
reverse('reviewers.performance'),
reverse('reviewers.reviewlog'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Guide',
reverse('reviewers.queue_moderated'),
reverse('reviewers.ratings_moderation_log'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Guide/Moderation',
]
links = [link.attrib['href'] for link in doc('.dashboard a')]
assert links == expected_links
assert doc('.dashboard a')[0].text == 'New (0)'
assert 'target' not in doc('.dashboard a')[0].attrib
assert doc('.dashboard a')[1].text == 'Updates (0)'
assert doc('.dashboard a')[5].text == 'Ratings Awaiting Moderation (0)'
assert 'target' not in doc('.dashboard a')[6].attrib
assert doc('.dashboard a')[7].text == 'Moderation Guide'
assert doc('.dashboard a')[7].attrib['target'] == '_blank'
assert doc('.dashboard a')[7].attrib['rel'] == 'noopener noreferrer'
class QueueTest(ReviewerTest):
fixtures = ['base/users']
listed = True
def setUp(self):
super(QueueTest, self).setUp()
self.user = UserProfile.objects.get(email='reviewer@mozilla.com')
self.login_as_reviewer()
if self.listed is False:
# Testing unlisted views: needs Addons:ReviewUnlisted perm.
self.grant_permission(self.user, 'Addons:ReviewUnlisted')
self.url = reverse('reviewers.queue_pending')
self.addons = OrderedDict()
self.expected_addons = []
self.channel_name = 'listed' if self.listed else 'unlisted'
def generate_files(self, subset=None, files=None):
if subset is None:
subset = []
files = files or OrderedDict([
('Pending One', {
'version_str': '0.1',
'addon_status': amo.STATUS_PUBLIC,
'file_status': amo.STATUS_AWAITING_REVIEW,
}),
('Pending Two', {
'version_str': '0.1',
'addon_status': amo.STATUS_PUBLIC,
'file_status': amo.STATUS_AWAITING_REVIEW,
}),
('Nominated One', {
'version_str': '0.1',
'addon_status': amo.STATUS_NOMINATED,
'file_status': amo.STATUS_AWAITING_REVIEW,
}),
('Nominated Two', {
'version_str': '0.1',
'addon_status': amo.STATUS_NOMINATED,
'file_status': amo.STATUS_AWAITING_REVIEW,
}),
('Public', {
'version_str': '0.1',
'addon_status': amo.STATUS_PUBLIC,
'file_status': amo.STATUS_PUBLIC,
}),
])
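# Turn the declarative specs above into addon_factory() kwargs:
# version_str and file_status are folded into version_kw / file_kw.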
results = OrderedDict()
channel = (amo.RELEASE_CHANNEL_LISTED if self.listed else
amo.RELEASE_CHANNEL_UNLISTED)
for name, attrs in files.iteritems():
if not subset or name in subset:
version_kw = attrs.get('version_kw', {})
version_kw.update(
{'channel': channel, 'version': attrs.pop('version_str')})
attrs['version_kw'] = version_kw
file_kw = attrs.get('file_kw', {})
file_kw.update({'status': attrs.pop('file_status')})
attrs['file_kw'] = file_kw
results[name] = addon_factory(
status=attrs.pop('addon_status'), name=name, **attrs)
self.addons.update(results)
return results
def generate_file(self, name):
return self.generate_files([name])[name]
def get_review_data(self):
# Format: (Created n days ago,
# percentages of [< 5, 5-10, >10])
return ((1, (0, 0, 100)),
(8, (0, 50, 50)),
(12, (50, 0, 50)))
def get_addon_latest_version(self, addon):
if self.listed:
channel = amo.RELEASE_CHANNEL_LISTED
else:
channel = amo.RELEASE_CHANNEL_UNLISTED
return addon.find_latest_version(channel=channel)
def get_queue(self, addon):
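# Every version knows which review queue it currently belongs to via
# current_queue; assert the add-on appears in it exactly once.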
version = self.get_addon_latest_version(addon)
assert version.current_queue.objects.filter(id=addon.id).count() == 1
def get_expected_addons_by_names(self, names):
expected_addons = []
files = self.generate_files()
for name in sorted(names):
if name in files:
expected_addons.append(files[name])
# Make sure all elements have been added
assert len(expected_addons) == len(names)
return expected_addons
def _test_get_queue(self):
for addon in self.expected_addons:
self.get_queue(addon)
def _test_queue_layout(self, name, tab_position, total_addons,
total_queues, per_page=None):
args = {'per_page': per_page} if per_page else {}
response = self.client.get(self.url, args)
assert response.status_code == 200
doc = pq(response.content)
links = doc('.tabnav li a')
link = links.eq(tab_position)
assert links.length == total_queues
assert link.text() == '%s (%s)' % (name, total_addons)
assert link.attr('href') == self.url
if per_page:
assert doc('.data-grid-top .num-results').text() == (
u'Results %s\u20131 of %s' % (per_page, total_addons))
def _test_results(self):
response = self.client.get(self.url)
assert response.status_code == 200
expected = []
if not len(self.expected_addons):
raise AssertionError('self.expected_addons was an empty list')
for idx, addon in enumerate(self.expected_addons):
latest_version = self.get_addon_latest_version(addon)
assert latest_version
name = '%s %s' % (unicode(addon.name),
latest_version.version)
if self.channel_name == 'listed':
# We typically don't include the channel name if it's the
# default one, 'listed'.
channel = []
else:
channel = [self.channel_name]
url = reverse('reviewers.review', args=channel + [addon.slug])
expected.append((name, url))
doc = pq(response.content)
links = doc('#addon-queue tr.addon-row td a:not(.app-icon)')
assert len(links) == len(self.expected_addons)
check_links(expected, links, verify=False)
return doc
class TestQueueBasics(QueueTest):
def test_only_viewable_by_reviewer(self):
# Addon reviewer has access.
response = self.client.get(self.url)
assert response.status_code == 200
# Regular user doesn't have access.
self.client.logout()
assert self.client.login(email='regular@mozilla.com')
response = self.client.get(self.url)
assert response.status_code == 403
# Persona reviewer doesn't have access either.
self.client.logout()
assert self.client.login(email='persona_reviewer@mozilla.com')
response = self.client.get(self.url)
assert response.status_code == 403
def test_invalid_page(self):
response = self.client.get(self.url, {'page': 999})
assert response.status_code == 200
assert response.context['page'].number == 1
def test_invalid_per_page(self):
response = self.client.get(self.url, {'per_page': '<garbage>'})
# No exceptions:
assert response.status_code == 200
@patch.multiple('olympia.reviewers.views',
REVIEWS_PER_PAGE_MAX=1,
REVIEWS_PER_PAGE=1)
def test_max_per_page(self):
self.generate_files()
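# REVIEWS_PER_PAGE_MAX is patched to 1, so the requested per_page=2 is
# clamped and only a single result is rendered per page.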
response = self.client.get(self.url, {'per_page': '2'})
assert response.status_code == 200
doc = pq(response.content)
assert doc('.data-grid-top .num-results').text() == (
u'Results 1\u20131 of 2')
@patch('olympia.reviewers.views.REVIEWS_PER_PAGE', new=1)
def test_reviews_per_page(self):
self.generate_files()
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('.data-grid-top .num-results').text() == (
u'Results 1\u20131 of 2')
def test_grid_headers(self):
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
expected = [
'Add-on',
'Type',
'Waiting Time',
'Flags',
]
assert [pq(th).text() for th in doc('#addon-queue tr th')[1:]] == (
expected)
def test_grid_headers_sort_after_search(self):
params = dict(searching=['True'],
text_query=['abc'],
addon_type_ids=['2'],
sort=['addon_type_id'])
response = self.client.get(self.url, params)
assert response.status_code == 200
tr = pq(response.content)('#addon-queue tr')
sorts = {
# Column index => sort.
1: 'addon_name', # Add-on.
2: '-addon_type_id', # Type.
3: 'waiting_time_min', # Waiting Time.
}
for idx, sort in sorts.iteritems():
# Get column link.
a = tr('th').eq(idx).find('a')
# Update expected GET parameters with sort type.
params.update(sort=[sort])
# Parse querystring of link to make sure `sort` type is correct.
assert urlparse.parse_qs(a.attr('href').split('?')[1]) == params
def test_no_results(self):
response = self.client.get(self.url)
assert response.status_code == 200
assert pq(response.content)('.queue-outer .no-results').length == 1
def test_no_paginator_when_on_single_page(self):
response = self.client.get(self.url)
assert response.status_code == 200
assert pq(response.content)('.pagination').length == 0
def test_paginator_when_many_pages(self):
# 'Pending One' and 'Pending Two' should be the only add-ons in
# the pending queue, but we'll generate them all for good measure.
self.generate_files()
response = self.client.get(self.url, {'per_page': 1})
assert response.status_code == 200
doc = pq(response.content)
assert doc('.data-grid-top .num-results').text() == (
u'Results 1\u20131 of 2')
assert doc('.data-grid-bottom .num-results').text() == (
u'Results 1\u20131 of 2')
def test_legacy_queue_sort(self):
sorts = (
['age', 'Waiting Time'],
['name', 'Add-on'],
['type', 'Type'],
)
for key, text in sorts:
response = self.client.get(self.url, {'sort': key})
assert response.status_code == 200
assert pq(response.content)('th.ordered a').text() == text
def test_flags_jetpack(self):
addon = addon_factory(
status=amo.STATUS_NOMINATED, name='Jetpack',
version_kw={'version': '0.1'},
file_kw={'status': amo.STATUS_AWAITING_REVIEW,
'jetpack_version': 1.2})
r = self.client.get(reverse('reviewers.queue_nominated'))
rows = pq(r.content)('#addon-queue tr.addon-row')
assert rows.length == 1
assert rows.attr('data-addon') == str(addon.id)
assert rows.find('td').eq(1).text() == 'Jetpack 0.1'
assert rows.find('.ed-sprite-jetpack').length == 1
def test_flags_is_restart_required(self):
addon = addon_factory(
status=amo.STATUS_NOMINATED, name='Some Add-on',
version_kw={'version': '0.1'},
file_kw={'status': amo.STATUS_AWAITING_REVIEW,
'is_restart_required': True})
r = self.client.get(reverse('reviewers.queue_nominated'))
rows = pq(r.content)('#addon-queue tr.addon-row')
assert rows.length == 1
assert rows.attr('data-addon') == str(addon.id)
assert rows.find('td').eq(1).text() == 'Some Add-on 0.1'
assert rows.find('.ed-sprite-jetpack').length == 0
assert rows.find('.ed-sprite-is_restart_required').length == 1
def test_flags_is_restart_required_false(self):
addon = addon_factory(
status=amo.STATUS_NOMINATED, name='Restartless',
version_kw={'version': '0.1'},
file_kw={'status': amo.STATUS_AWAITING_REVIEW,
'is_restart_required': False})
r = self.client.get(reverse('reviewers.queue_nominated'))
rows = pq(r.content)('#addon-queue tr.addon-row')
assert rows.length == 1
assert rows.attr('data-addon') == str(addon.id)
assert rows.find('td').eq(1).text() == 'Restartless 0.1'
assert rows.find('.ed-sprite-jetpack').length == 0
assert rows.find('.ed-sprite-is_restart_required').length == 0
def test_tabnav_permissions(self):
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
links = doc('.tabnav li a').map(lambda i, e: e.attrib['href'])
expected = [
reverse('reviewers.queue_nominated'),
reverse('reviewers.queue_pending'),
]
assert links == expected
self.grant_permission(self.user, 'Ratings:Moderate')
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
links = doc('.tabnav li a').map(lambda i, e: e.attrib['href'])
expected.append(reverse('reviewers.queue_moderated'))
assert links == expected
self.grant_permission(self.user, 'Addons:PostReview')
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
links = doc('.tabnav li a').map(lambda i, e: e.attrib['href'])
expected.append(reverse('reviewers.queue_auto_approved'))
assert links == expected
self.grant_permission(self.user, 'Addons:ContentReview')
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
links = doc('.tabnav li a').map(lambda i, e: e.attrib['href'])
expected.append(reverse('reviewers.queue_content_review'))
assert links == expected
self.grant_permission(self.user, 'Reviews:Admin')
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
links = doc('.tabnav li a').map(lambda i, e: e.attrib['href'])
expected.append(reverse('reviewers.queue_expired_info_requests'))
assert links == expected
class TestPendingQueue(QueueTest):
def setUp(self):
super(TestPendingQueue, self).setUp()
# These should be the only ones present.
self.expected_addons = self.get_expected_addons_by_names(
['Pending One', 'Pending Two'])
self.url = reverse('reviewers.queue_pending')
def test_results(self):
self._test_results()
def test_queue_layout(self):
self._test_queue_layout('Updates',
tab_position=1, total_addons=2, total_queues=2)
def test_get_queue(self):
self._test_get_queue()
def test_webextensions_filtered_out_because_of_post_review(self):
version = self.addons['Pending Two'].find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
version.files.update(is_webextension=True)
# Webextensions are filtered out from the queue since auto_approve is
# taking care of them.
self.expected_addons = [self.addons['Pending One']]
self._test_results()
def test_webextension_with_auto_approval_disabled_false_filtered_out(self):
version = self.addons['Pending Two'].find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
version.files.update(is_webextension=True)
AddonReviewerFlags.objects.create(
addon=self.addons['Pending Two'], auto_approval_disabled=False)
self.expected_addons = [self.addons['Pending One']]
self._test_results()
def test_webextension_with_auto_approval_disabled_does_show_up(self):
version = self.addons['Pending Two'].find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
version.files.update(is_webextension=True)
version = self.addons['Pending One'].find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
version.files.update(is_webextension=True)
AddonReviewerFlags.objects.create(
addon=self.addons['Pending One'], auto_approval_disabled=True)
self.expected_addons = [self.addons['Pending One']]
self._test_results()
def test_static_theme_filtered_out(self):
self.addons['Pending Two'].update(type=amo.ADDON_STATICTHEME)
# Static Theme shouldn't be shown
self.expected_addons = [self.addons['Pending One']]
self._test_results()
# Unless you have that permission also
self.grant_permission(self.user, 'Addons:ThemeReview')
self.expected_addons = [
self.addons['Pending One'], self.addons['Pending Two']]
self._test_results()
class TestStaticThemePendingQueue(QueueTest):
def setUp(self):
super(TestStaticThemePendingQueue, self).setUp()
# These should be the only ones present.
self.expected_addons = self.get_expected_addons_by_names(
['Pending One', 'Pending Two'])
Addon.objects.all().update(type=amo.ADDON_STATICTHEME)
self.url = reverse('reviewers.queue_pending')
GroupUser.objects.filter(user=self.user).delete()
self.grant_permission(self.user, 'Addons:ThemeReview')
def test_results(self):
self._test_results()
def test_queue_layout(self):
self._test_queue_layout('Updates',
tab_position=1, total_addons=2, total_queues=2)
def test_get_queue(self):
self._test_get_queue()
def test_extensions_filtered_out(self):
self.addons['Pending Two'].update(type=amo.ADDON_EXTENSION)
# Extensions shouldn't be shown
self.expected_addons = [self.addons['Pending One']]
self._test_results()
# Unless you have that permission also
self.grant_permission(self.user, 'Addons:Review')
self.expected_addons = [
self.addons['Pending One'], self.addons['Pending Two']]
self._test_results()
class TestNominatedQueue(QueueTest):
def setUp(self):
super(TestNominatedQueue, self).setUp()
# These should be the only ones present.
self.expected_addons = self.get_expected_addons_by_names(
['Nominated One', 'Nominated Two'])
self.url = reverse('reviewers.queue_nominated')
def test_results(self):
self._test_results()
def test_results_two_versions(self):
version1 = self.addons['Nominated One'].versions.all()[0]
version2 = self.addons['Nominated Two'].versions.all()[0]
file_ = version2.files.get()
# Versions are ordered by creation date, so make sure they're set.
past = self.days_ago(1)
version2.update(created=past, nomination=past)
# Create another version, v0.2, by "cloning" v0.1.
version2.pk = None
version2.version = '0.2'
version2.save()
# Reset creation date once it has been saved.
future = datetime.now() - timedelta(seconds=1)
version2.update(created=future, nomination=future)
# Associate v0.2 with a file.
file_.pk = None
file_.version = version2
file_.save()
# disable old files like Version.from_upload() would.
version2.disable_old_files()
response = self.client.get(self.url)
assert response.status_code == 200
expected = [
('Nominated One 0.1', reverse('reviewers.review',
args=[version1.addon.slug])),
('Nominated Two 0.2', reverse('reviewers.review',
args=[version2.addon.slug])),
]
doc = pq(response.content)
check_links(
expected,
doc('#addon-queue tr.addon-row td a:not(.app-icon)'),
verify=False)
def test_queue_layout(self):
self._test_queue_layout('New',
tab_position=0, total_addons=2, total_queues=2)
def test_get_queue(self):
self._test_get_queue()
def test_webextensions_filtered_out_because_of_post_review(self):
version = self.addons['Nominated Two'].find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
version.files.update(is_webextension=True)
# Webextensions are filtered out from the queue since auto_approve is
# taking care of them.
self.expected_addons = [self.addons['Nominated One']]
self._test_results()
def test_webextension_with_auto_approval_disabled_false_filtered_out(self):
version = self.addons['Nominated Two'].find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
version.files.update(is_webextension=True)
AddonReviewerFlags.objects.create(
addon=self.addons['Nominated Two'], auto_approval_disabled=False)
self.expected_addons = [self.addons['Nominated One']]
self._test_results()
def test_webextension_with_auto_approval_disabled_does_show_up(self):
version = self.addons['Nominated Two'].find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
version.files.update(is_webextension=True)
version = self.addons['Nominated One'].find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
version.files.update(is_webextension=True)
AddonReviewerFlags.objects.create(
addon=self.addons['Nominated One'], auto_approval_disabled=True)
self.expected_addons = [self.addons['Nominated One']]
self._test_results()
def test_static_theme_filtered_out(self):
self.addons['Nominated Two'].update(type=amo.ADDON_STATICTHEME)
# Static Theme shouldn't be shown
self.expected_addons = [self.addons['Nominated One']]
self._test_results()
# Unless you have that permission also
self.grant_permission(self.user, 'Addons:ThemeReview')
self.expected_addons = [
self.addons['Nominated One'], self.addons['Nominated Two']]
self._test_results()
class TestStaticThemeNominatedQueue(QueueTest):
def setUp(self):
super(TestStaticThemeNominatedQueue, self).setUp()
# These should be the only ones present.
self.expected_addons = self.get_expected_addons_by_names(
['Nominated One', 'Nominated Two'])
self.url = reverse('reviewers.queue_nominated')
Addon.objects.all().update(type=amo.ADDON_STATICTHEME)
GroupUser.objects.filter(user=self.user).delete()
self.grant_permission(self.user, 'Addons:ThemeReview')
def test_results(self):
self._test_results()
def test_results_two_versions(self):
version1 = self.addons['Nominated One'].versions.all()[0]
version2 = self.addons['Nominated Two'].versions.all()[0]
file_ = version2.files.get()
# Versions are ordered by creation date, so make sure they're set.
past = self.days_ago(1)
version2.update(created=past, nomination=past)
# Create another version, v0.2, by "cloning" v0.1.
version2.pk = None
version2.version = '0.2'
version2.save()
# Reset creation date once it has been saved.
future = datetime.now() - timedelta(seconds=1)
version2.update(created=future, nomination=future)
# Associate v0.2 with a file.
file_.pk = None
file_.version = version2
file_.save()
# disable old files like Version.from_upload() would.
version2.disable_old_files()
response = self.client.get(self.url)
assert response.status_code == 200
expected = [
('Nominated One 0.1', reverse('reviewers.review',
args=[version1.addon.slug])),
('Nominated Two 0.2', reverse('reviewers.review',
args=[version2.addon.slug])),
]
doc = pq(response.content)
check_links(
expected,
doc('#addon-queue tr.addon-row td a:not(.app-icon)'),
verify=False)
def test_queue_layout(self):
self._test_queue_layout('New',
tab_position=0, total_addons=2, total_queues=2)
def test_get_queue(self):
self._test_get_queue()
def test_extensions_filtered_out(self):
self.addons['Nominated Two'].update(type=amo.ADDON_EXTENSION)
# Extensions shouldn't be shown
self.expected_addons = [self.addons['Nominated One']]
self._test_results()
# Unless you have that permission also
self.grant_permission(self.user, 'Addons:Review')
self.expected_addons = [
self.addons['Nominated One'], self.addons['Nominated Two']]
self._test_results()
class TestModeratedQueue(QueueTest):
fixtures = ['base/users', 'ratings/dev-reply']
def setUp(self):
super(TestModeratedQueue, self).setUp()
self.url = reverse('reviewers.queue_moderated')
url_flag = reverse('addons.ratings.flag', args=['a1865', 218468])
response = self.client.post(url_flag, {'flag': RatingFlag.SPAM})
assert response.status_code == 200
assert RatingFlag.objects.filter(flag=RatingFlag.SPAM).count() == 1
assert Rating.objects.filter(editorreview=True).count() == 1
self.grant_permission(self.user, 'Ratings:Moderate')
def test_results(self):
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)('#reviews-flagged')
rows = doc('.review-flagged:not(.review-saved)')
assert rows.length == 1
assert rows.find('h3').text() == ''
# Default is "Skip."
assert doc('#id_form-0-action_1:checked').length == 1
flagged = doc('.reviews-flagged-reasons span.light').text()
reviewer = RatingFlag.objects.all()[0].user.name
assert flagged.startswith('Flagged by %s' % reviewer), (
'Unexpected text: %s' % flagged)
addon = Addon.objects.get(id=1865)
addon.name = u'náme'
addon.save()
response = self.client.get(self.url)
doc = pq(response.content)('#reviews-flagged')
rows = doc('.review-flagged:not(.review-saved)')
assert rows.length == 1
assert rows.find('h3').text() == u'náme'
def setup_actions(self, action):
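# Fetch the moderation queue, take the initial data of the first
# flagged review's form, then POST it back with the requested action.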
response = self.client.get(self.url)
assert response.status_code == 200
form_0_data = initial(response.context['reviews_formset'].forms[0])
assert Rating.objects.filter(addon=1865).count() == 2
formset_data = formset(form_0_data)
formset_data['form-0-action'] = action
response = self.client.post(self.url, formset_data)
self.assert3xx(response, self.url)
def test_skip(self):
self.setup_actions(ratings.REVIEW_MODERATE_SKIP)
# Make sure it's still there.
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
rows = doc('#reviews-flagged .review-flagged:not(.review-saved)')
assert rows.length == 1
def test_skip_score(self):
self.setup_actions(ratings.REVIEW_MODERATE_SKIP)
assert ReviewerScore.objects.filter(
note_key=amo.REVIEWED_ADDON_REVIEW).count() == 0
def get_logs(self, action):
return ActivityLog.objects.filter(action=action.id)
def test_remove(self):
"""Make sure the reviewer tools can delete a review."""
self.setup_actions(ratings.REVIEW_MODERATE_DELETE)
logs = self.get_logs(amo.LOG.DELETE_RATING)
assert logs.count() == 1
# Make sure it's removed from the queue.
response = self.client.get(self.url)
assert response.status_code == 200
assert pq(response.content)('#reviews-flagged .no-results').length == 1
response = self.client.get(reverse('reviewers.ratings_moderation_log'))
assert pq(response.content)('table .more-details').attr('href') == (
reverse('reviewers.ratings_moderation_log.detail',
args=[logs[0].id]))
# Make sure it was actually deleted.
assert Rating.objects.filter(addon=1865).count() == 1
# But make sure it wasn't *actually* deleted.
assert Rating.unfiltered.filter(addon=1865).count() == 2
def test_remove_fails_for_own_addon(self):
"""
Make sure the reviewer tools can't delete a review for an
add-on owned by the user.
"""
addon = Addon.objects.get(pk=1865)
user = UserProfile.objects.get(email='reviewer@mozilla.com')
AddonUser(addon=addon, user=user).save()
# Make sure the initial count is as expected
assert Rating.objects.filter(addon=1865).count() == 2
self.setup_actions(ratings.REVIEW_MODERATE_DELETE)
logs = self.get_logs(amo.LOG.DELETE_RATING)
assert logs.count() == 0
# Make sure it's not removed from the queue.
response = self.client.get(self.url)
assert response.status_code == 200
assert pq(response.content)('#reviews-flagged .no-results').length == 0
# Make sure it was not actually deleted.
assert Rating.objects.filter(addon=1865).count() == 2
def test_remove_score(self):
self.setup_actions(ratings.REVIEW_MODERATE_DELETE)
assert ReviewerScore.objects.filter(
note_key=amo.REVIEWED_ADDON_REVIEW).count() == 1
def test_keep(self):
"""Make sure the reviewer tools can remove flags and keep a review."""
self.setup_actions(ratings.REVIEW_MODERATE_KEEP)
logs = self.get_logs(amo.LOG.APPROVE_RATING)
assert logs.count() == 1
# Make sure it's removed from the queue.
response = self.client.get(self.url)
assert response.status_code == 200
assert pq(response.content)('#reviews-flagged .no-results').length == 1
rating = Rating.objects.filter(addon=1865)
# Make sure it's NOT deleted...
assert rating.count() == 2
# ...but it's no longer flagged.
assert rating.filter(editorreview=1).count() == 0
def test_keep_score(self):
self.setup_actions(ratings.REVIEW_MODERATE_KEEP)
assert ReviewerScore.objects.filter(
note_key=amo.REVIEWED_ADDON_REVIEW).count() == 1
def test_queue_layout(self):
        # From the fixtures we already have 2 reviews, one of which is
        # flagged. We add a bunch of reviews from different scenarios and
        # make sure they don't count towards the total.
        # Add a review associated with a normal addon.
rating = Rating.objects.create(
addon=addon_factory(), user=user_factory(),
body='show me', editorreview=True)
RatingFlag.objects.create(rating=rating)
# Add a review associated with an incomplete addon
rating = Rating.objects.create(
addon=addon_factory(status=amo.STATUS_NULL), user=user_factory(),
body='dont show me', editorreview=True)
RatingFlag.objects.create(rating=rating)
        # Add a review associated with an unlisted version.
addon = addon_factory()
version = version_factory(
addon=addon, channel=amo.RELEASE_CHANNEL_UNLISTED)
rating = Rating.objects.create(
            addon=addon, version=version, user=user_factory(),
body='dont show me either', editorreview=True)
RatingFlag.objects.create(rating=rating)
self._test_queue_layout('Rating Reviews',
tab_position=2, total_addons=2, total_queues=3)
def test_no_reviews(self):
Rating.objects.all().delete()
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)('#reviews-flagged')
assert doc('.no-results').length == 1
assert doc('.review-saved button').length == 1 # Show only one button.
def test_do_not_show_reviews_for_non_public_addons(self):
Addon.objects.all().update(status=amo.STATUS_NULL)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)('#reviews-flagged')
# There should be no results since all add-ons are not public.
assert doc('.no-results').length == 1
def test_do_not_show_reviews_for_unlisted_addons(self):
for addon in Addon.objects.all():
self.make_addon_unlisted(addon)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)('#reviews-flagged')
# There should be no results since all add-ons are unlisted.
assert doc('.no-results').length == 1
class TestUnlistedAllList(QueueTest):
listed = False
def setUp(self):
super(TestUnlistedAllList, self).setUp()
self.url = reverse('reviewers.unlisted_queue_all')
# We should have all add-ons.
self.expected_addons = self.get_expected_addons_by_names(
['Pending One', 'Pending Two', 'Nominated One', 'Nominated Two',
'Public'])
        # Need to set unique nomination times or we get a pseudo-random order.
for idx, addon in enumerate(self.expected_addons):
latest_version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_UNLISTED)
latest_version.update(
nomination=(datetime.now() - timedelta(minutes=idx)))
def test_results(self):
self._test_results()
def test_review_notes_json(self):
latest_version = self.expected_addons[0].find_latest_version(
channel=amo.RELEASE_CHANNEL_UNLISTED)
log = ActivityLog.create(amo.LOG.APPROVE_VERSION,
latest_version,
self.expected_addons[0],
user=UserProfile.objects.get(pk=999),
details={'comments': 'stish goin` down son'})
url = reverse('reviewers.queue_review_text') + str(log.id)
response = self.client.get(url)
assert response.status_code == 200
assert (json.loads(response.content) ==
{'reviewtext': 'stish goin` down son'})
class TestAutoApprovedQueue(QueueTest):
def setUp(self):
super(TestAutoApprovedQueue, self).setUp()
self.url = reverse('reviewers.queue_auto_approved')
def login_with_permission(self):
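        """Log in as a reviewer with the Addons:PostReview permission,
        which is required to view the auto-approved queue."""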
user = UserProfile.objects.get(email='reviewer@mozilla.com')
self.grant_permission(user, 'Addons:PostReview')
self.client.login(email=user.email)
def get_addon_latest_version(self, addon):
"""Method used by _test_results() to fetch the version that the queue
is supposed to display. Overridden here because in our case, it's not
necessarily the latest available version - we display the current
public version instead (which is not guaranteed to be the latest
auto-approved one, but good enough) for this page."""
return addon.current_version
def generate_files(self):
"""Generate add-ons needed for these tests."""
# Has not been auto-approved.
extra_addon = addon_factory(name=u'Extra Addôn 1')
AutoApprovalSummary.objects.create(
version=extra_addon.current_version, verdict=amo.NOT_AUTO_APPROVED)
# Has not been auto-approved either, only dry run.
extra_addon2 = addon_factory(name=u'Extra Addôn 2')
AutoApprovalSummary.objects.create(
version=extra_addon2.current_version,
verdict=amo.WOULD_HAVE_BEEN_AUTO_APPROVED)
# Has been auto-approved, but that auto-approval has been confirmed by
# a human already.
extra_addon3 = addon_factory(name=u'Extra Addôn 3')
extra_summary3 = AutoApprovalSummary.objects.create(
version=extra_addon3.current_version,
verdict=amo.AUTO_APPROVED, confirmed=True)
AddonApprovalsCounter.objects.create(
addon=extra_addon3, counter=1,
last_human_review=extra_summary3.created)
# Has been auto-approved and reviewed by a human before.
addon1 = addon_factory(name=u'Addôn 1')
AutoApprovalSummary.objects.create(
version=addon1.current_version, verdict=amo.AUTO_APPROVED)
AddonApprovalsCounter.objects.create(
addon=addon1, counter=1, last_human_review=self.days_ago(42))
# Has been auto-approved twice, last_human_review is somehow None,
# the 'created' date will be used to order it (older is higher).
addon2 = addon_factory(name=u'Addôn 2')
addon2.update(created=self.days_ago(10))
AutoApprovalSummary.objects.create(
version=addon2.current_version, verdict=amo.AUTO_APPROVED)
AddonApprovalsCounter.objects.create(
addon=addon2, counter=1, last_human_review=None)
addon2_version2 = version_factory(addon=addon2)
AutoApprovalSummary.objects.create(
version=addon2_version2, verdict=amo.AUTO_APPROVED)
# Has been auto-approved and never been seen by a human,
# the 'created' date will be used to order it (newer is lower).
addon3 = addon_factory(name=u'Addôn 3')
addon3.update(created=self.days_ago(2))
AutoApprovalSummary.objects.create(
version=addon3.current_version, verdict=amo.AUTO_APPROVED)
AddonApprovalsCounter.objects.create(
addon=addon3, counter=1, last_human_review=None)
# Has been auto-approved, should be first because of its weight.
addon4 = addon_factory(name=u'Addôn 4')
addon4.update(created=self.days_ago(14))
AutoApprovalSummary.objects.create(
version=addon4.current_version, verdict=amo.AUTO_APPROVED,
weight=500)
AddonApprovalsCounter.objects.create(
addon=addon4, counter=0, last_human_review=self.days_ago(1))
self.expected_addons = [addon4, addon2, addon3, addon1]
def test_only_viewable_with_specific_permission(self):
# Regular addon reviewer does not have access.
response = self.client.get(self.url)
assert response.status_code == 403
# Regular user doesn't have access.
self.client.logout()
assert self.client.login(email='regular@mozilla.com')
response = self.client.get(self.url)
assert response.status_code == 403
def test_results(self):
self.login_with_permission()
self.generate_files()
self._test_results()
def test_results_weights(self):
addon1 = addon_factory(name=u'Addôn 1')
AutoApprovalSummary.objects.create(
version=addon1.current_version, verdict=amo.AUTO_APPROVED,
weight=amo.POST_REVIEW_WEIGHT_HIGHEST_RISK + 1)
AddonApprovalsCounter.reset_for_addon(addon1)
addon2 = addon_factory(name=u'Addôn 2')
AutoApprovalSummary.objects.create(
version=addon2.current_version, verdict=amo.AUTO_APPROVED,
weight=amo.POST_REVIEW_WEIGHT_HIGH_RISK + 1)
AddonApprovalsCounter.reset_for_addon(addon2)
addon3 = addon_factory(name=u'Addôn 3')
AutoApprovalSummary.objects.create(
version=addon3.current_version, verdict=amo.AUTO_APPROVED,
weight=amo.POST_REVIEW_WEIGHT_MEDIUM_RISK + 1)
AddonApprovalsCounter.reset_for_addon(addon3)
addon4 = addon_factory(name=u'Addôn 4')
AutoApprovalSummary.objects.create(
version=addon4.current_version, verdict=amo.AUTO_APPROVED,
weight=1)
AddonApprovalsCounter.reset_for_addon(addon4)
self.expected_addons = [addon1, addon2, addon3, addon4]
self.login_with_permission()
doc = self._test_results()
expected = ['risk-highest', 'risk-high', 'risk-medium', 'risk-low']
classnames = [
item.attrib['class'] for item in doc('.addon-row td:eq(4) span')]
assert expected == classnames
def test_queue_layout(self):
self.login_with_permission()
self.generate_files()
self._test_queue_layout("Auto Approved",
tab_position=2, total_addons=4, total_queues=3,
per_page=1)
class TestExpiredInfoRequestsQueue(QueueTest):
def setUp(self):
super(TestExpiredInfoRequestsQueue, self).setUp()
self.url = reverse('reviewers.queue_expired_info_requests')
def generate_files(self):
# Extra add-on with no pending info request.
addon_factory(name=u'Extra Addôn 1')
# Extra add-on with a non-expired pending info request.
extra_addon = addon_factory(name=u'Extra Addôn 2')
AddonReviewerFlags.objects.create(
addon=extra_addon,
pending_info_request=datetime.now() + timedelta(days=1))
# Pending addon with expired info request.
addon1 = addon_factory(name=u'Pending Addön 1',
status=amo.STATUS_NOMINATED)
AddonReviewerFlags.objects.create(
addon=addon1,
pending_info_request=self.days_ago(2))
# Public addon with expired info request.
addon2 = addon_factory(name=u'Public Addön 2',
status=amo.STATUS_PUBLIC)
AddonReviewerFlags.objects.create(
addon=addon2,
pending_info_request=self.days_ago(42))
# Deleted addon with expired info request.
addon3 = addon_factory(name=u'Deleted Addön 3',
status=amo.STATUS_DELETED)
AddonReviewerFlags.objects.create(
addon=addon3,
pending_info_request=self.days_ago(42))
# Mozilla-disabled addon with expired info request.
addon4 = addon_factory(name=u'Disabled Addön 4',
status=amo.STATUS_DISABLED)
AddonReviewerFlags.objects.create(
addon=addon4,
pending_info_request=self.days_ago(42))
# Incomplete addon with expired info request.
addon5 = addon_factory(name=u'Incomplete Addön 5',
status=amo.STATUS_NULL)
AddonReviewerFlags.objects.create(
addon=addon5,
pending_info_request=self.days_ago(42))
# Invisible (user-disabled) addon with expired info request.
        addon6 = addon_factory(name=u'Invisible Addön 6',
status=amo.STATUS_PUBLIC,
disabled_by_user=True)
AddonReviewerFlags.objects.create(
addon=addon6,
pending_info_request=self.days_ago(42))
self.expected_addons = [addon2, addon1]
def test_results_no_permission(self):
# Addon reviewer doesn't have access.
response = self.client.get(self.url)
assert response.status_code == 403
# Regular user doesn't have access.
self.client.logout()
assert self.client.login(email='regular@mozilla.com')
response = self.client.get(self.url)
assert response.status_code == 403
def test_results(self):
self.grant_permission(self.user, 'Reviews:Admin')
self.generate_files()
self._test_results()
class TestContentReviewQueue(QueueTest):
def setUp(self):
super(TestContentReviewQueue, self).setUp()
self.url = reverse('reviewers.queue_content_review')
self.channel_name = 'content'
def login_with_permission(self):
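        """Log in as a reviewer with the Addons:ContentReview permission,
        required to view the content review queue, and return the user."""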
user = UserProfile.objects.get(email='reviewer@mozilla.com')
self.grant_permission(user, 'Addons:ContentReview')
self.client.login(email=user.email)
return user
def get_addon_latest_version(self, addon):
"""Method used by _test_results() to fetch the version that the queue
is supposed to display. Overridden here because in our case, it's not
necessarily the latest available version - we display the current
public version instead (which is not guaranteed to be the latest
auto-approved one, but good enough) for this page."""
return addon.current_version
def generate_files(self):
"""Generate add-ons needed for these tests."""
# Has not been auto-approved.
extra_addon = addon_factory(name=u'Extra Addôn 1')
AutoApprovalSummary.objects.create(
version=extra_addon.current_version, verdict=amo.NOT_AUTO_APPROVED,
)
# Has not been auto-approved either, only dry run.
extra_addon2 = addon_factory(name=u'Extra Addôn 2')
AutoApprovalSummary.objects.create(
version=extra_addon2.current_version,
verdict=amo.WOULD_HAVE_BEEN_AUTO_APPROVED,
)
# Has been auto-approved, but that content has been approved by
# a human already.
extra_addon3 = addon_factory(name=u'Extra Addôn 3')
AutoApprovalSummary.objects.create(
version=extra_addon3.current_version,
verdict=amo.AUTO_APPROVED, confirmed=True)
AddonApprovalsCounter.objects.create(
addon=extra_addon3, last_content_review=self.days_ago(1))
# This one has never been content-reviewed, but it has the
# needs_admin_content_review flag, and we're not an admin.
extra_addon4 = addon_factory(name=u'Extra Addön 4')
extra_addon4.update(created=self.days_ago(2))
AutoApprovalSummary.objects.create(
version=extra_addon4.current_version,
verdict=amo.AUTO_APPROVED, confirmed=True)
AddonApprovalsCounter.objects.create(
addon=extra_addon4, last_content_review=None)
AddonReviewerFlags.objects.create(
addon=extra_addon4, needs_admin_content_review=True)
# This first add-on has been content reviewed so long ago that we
# should do it again.
addon1 = addon_factory(name=u'Addön 1')
AutoApprovalSummary.objects.create(
version=addon1.current_version,
verdict=amo.AUTO_APPROVED, confirmed=True)
AddonApprovalsCounter.objects.create(
addon=addon1, last_content_review=self.days_ago(370))
        # This one is quite similar, except its last content review is even
        # older.
addon2 = addon_factory(name=u'Addön 1')
AutoApprovalSummary.objects.create(
version=addon2.current_version,
verdict=amo.AUTO_APPROVED, confirmed=True)
AddonApprovalsCounter.objects.create(
addon=addon2, last_content_review=self.days_ago(842))
        # This one has never been content-reviewed. It has a
        # needs_admin_code_review flag, but that should not have any impact.
addon3 = addon_factory(name=u'Addön 2')
addon3.update(created=self.days_ago(2))
AutoApprovalSummary.objects.create(
version=addon3.current_version,
verdict=amo.AUTO_APPROVED, confirmed=True)
AddonApprovalsCounter.objects.create(
addon=addon3, last_content_review=None)
AddonReviewerFlags.objects.create(
addon=addon3, needs_admin_code_review=True)
# This one has never been content reviewed either, and it does not even
# have an AddonApprovalsCounter.
addon4 = addon_factory(name=u'Addön 3')
addon4.update(created=self.days_ago(1))
AutoApprovalSummary.objects.create(
version=addon4.current_version,
verdict=amo.AUTO_APPROVED, confirmed=True)
assert not AddonApprovalsCounter.objects.filter(addon=addon4).exists()
# Addons with no last_content_review date should be first, ordered by
# their creation date, older first.
self.expected_addons = [addon3, addon4, addon2, addon1]
def test_only_viewable_with_specific_permission(self):
# Regular addon reviewer does not have access.
response = self.client.get(self.url)
assert response.status_code == 403
# Regular user doesn't have access.
self.client.logout()
assert self.client.login(email='regular@mozilla.com')
response = self.client.get(self.url)
assert response.status_code == 403
def test_results(self):
self.login_with_permission()
self.generate_files()
self._test_results()
def test_queue_layout(self):
self.login_with_permission()
self.generate_files()
self._test_queue_layout('Content Review',
tab_position=2, total_addons=4, total_queues=3,
per_page=1)
def test_queue_layout_admin(self):
# Admins should see the extra add-on that needs admin content review.
user = self.login_with_permission()
self.grant_permission(user, 'Reviews:Admin')
self.generate_files()
self._test_queue_layout('Content Review',
tab_position=2, total_addons=5, total_queues=4)
class TestPerformance(QueueTest):
    """Test the page at /reviewers/performance."""
    fixtures = ['base/users', 'base/addon_3615']
def setUpReviewer(self):
self.login_as_reviewer()
core.set_user(UserProfile.objects.get(username='reviewer'))
self.create_logs()
def setUpAdmin(self):
self.login_as_admin()
core.set_user(UserProfile.objects.get(username='admin'))
self.create_logs()
def get_url(self, args=None):
if args is None:
args = []
return reverse('reviewers.performance', args=args)
def create_logs(self):
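        """Create one activity log per reviewer review action, plus an
        automatic approval by the task user that should be ignored."""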
addon = Addon.objects.all()[0]
version = addon.versions.all()[0]
for i in amo.LOG_REVIEWER_REVIEW_ACTION:
ActivityLog.create(amo.LOG_BY_ID[i], addon, version)
# Throw in an automatic approval - should be ignored.
ActivityLog.create(
amo.LOG.APPROVE_VERSION, addon, version,
user=UserProfile.objects.get(id=settings.TASK_USER_ID))
def _test_chart(self):
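        """Compare the expected monthly payload with the JSON chart data
        embedded in the performance page."""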
r = self.client.get(self.get_url())
assert r.status_code == 200
doc = pq(r.content)
num = len(amo.LOG_REVIEWER_REVIEW_ACTION)
label = datetime.now().strftime('%Y-%m')
data = {label: {u'teamcount': num, u'teamavg': u'%s.0' % num,
u'usercount': num, u'teamamt': 1,
u'label': datetime.now().strftime('%b %Y')}}
assert json.loads(doc('#monthly').attr('data-chart')) == data
def test_performance_chart_reviewer(self):
self.setUpReviewer()
self._test_chart()
def test_performance_chart_as_admin(self):
self.setUpAdmin()
self._test_chart()
def test_usercount_with_more_than_one_reviewer(self):
self.client.login(email='clouserw@gmail.com')
core.set_user(UserProfile.objects.get(username='clouserw'))
self.create_logs()
self.setUpReviewer()
r = self.client.get(self.get_url())
assert r.status_code == 200
doc = pq(r.content)
data = json.loads(doc('#monthly').attr('data-chart'))
label = datetime.now().strftime('%Y-%m')
assert data[label]['usercount'] == len(amo.LOG_REVIEWER_REVIEW_ACTION)
def _test_performance_other_user_as_admin(self):
userid = core.get_user().pk
r = self.client.get(self.get_url([10482]))
doc = pq(r.content)
assert doc('#select_user').length == 1 # Let them choose reviewers.
options = doc('#select_user option')
assert options.length == 3
assert options.eq(2).val() == str(userid)
assert 'clouserw' in doc('#reviews_user').text()
def test_performance_other_user_as_admin(self):
self.setUpAdmin()
self._test_performance_other_user_as_admin()
def test_performance_other_user_not_admin(self):
self.setUpReviewer()
r = self.client.get(self.get_url([10482]))
doc = pq(r.content)
assert doc('#select_user').length == 0 # Don't let them choose.
assert doc('#reviews_user').text() == 'Your Reviews'
class SearchTest(ReviewerTest):
listed = True
def setUp(self):
super(SearchTest, self).setUp()
self.user = UserProfile.objects.get(email='reviewer@mozilla.com')
self.login_as_reviewer()
if self.listed is False:
# Testing unlisted views: needs Addons:ReviewUnlisted perm.
self.grant_permission(self.user, 'Addons:ReviewUnlisted')
def named_addons(self, request):
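        """Return the add-on names for the current page of results."""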
return [
r.record.addon_name for r in request.context['page'].object_list]
def search(self, *args, **kw):
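        """GET the queue with the given filters and check that the search
        form validated without errors."""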
response = self.client.get(self.url, kw)
assert response.status_code == 200
assert response.context['search_form'].errors.as_text() == ''
return response
class BaseTestQueueSearch(SearchTest):
fixtures = ['base/users', 'base/appversion']
__test__ = False # this is an abstract test case
def generate_files(self, subset=None):
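        """Create the fixture add-ons defined below, optionally restricted
        to the names in `subset`, and return them keyed by name."""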
if subset is None:
subset = []
files = OrderedDict([
('Not Needing Admin Review', {
'version_str': '0.1',
'addon_status': amo.STATUS_NOMINATED,
'file_status': amo.STATUS_AWAITING_REVIEW,
}),
('Another Not Needing Admin Review', {
'version_str': '0.1',
'addon_status': amo.STATUS_NOMINATED,
'file_status': amo.STATUS_AWAITING_REVIEW,
}),
('Needs Admin Review', {
'version_str': '0.1',
'addon_status': amo.STATUS_NOMINATED,
'file_status': amo.STATUS_AWAITING_REVIEW,
'needs_admin_code_review': True,
}),
('Justin Bieber Theme', {
'version_str': '0.1',
'addon_status': amo.STATUS_NOMINATED,
'file_status': amo.STATUS_AWAITING_REVIEW,
'type': amo.ADDON_THEME,
}),
('Justin Bieber Search Bar', {
'version_str': '0.1',
'addon_status': amo.STATUS_NOMINATED,
'file_status': amo.STATUS_AWAITING_REVIEW,
'type': amo.ADDON_SEARCH,
}),
('Bieber For Mobile', {
'version_str': '0.1',
'addon_status': amo.STATUS_NOMINATED,
'file_status': amo.STATUS_AWAITING_REVIEW,
'version_kw': {'application': amo.ANDROID.id},
}),
('Linux Widget', {
'version_str': '0.1',
'addon_status': amo.STATUS_NOMINATED,
'file_status': amo.STATUS_AWAITING_REVIEW,
}),
('Mac Widget', {
'version_str': '0.1',
'addon_status': amo.STATUS_NOMINATED,
'file_status': amo.STATUS_AWAITING_REVIEW,
}),
('Deleted', {
'version_str': '0.1',
'addon_status': amo.STATUS_DELETED,
'file_status': amo.STATUS_AWAITING_REVIEW,
}),
])
results = {}
channel = (amo.RELEASE_CHANNEL_LISTED if self.listed else
amo.RELEASE_CHANNEL_UNLISTED)
for name, attrs in files.iteritems():
if not subset or name in subset:
version_kw = attrs.get('version_kw', {})
version_kw.update(
{'channel': channel, 'version': attrs.pop('version_str')})
attrs['version_kw'] = version_kw
file_kw = attrs.get('file_kw', {})
file_kw.update({'status': attrs.pop('file_status')})
attrs['file_kw'] = file_kw
needs_admin_code_review = attrs.pop(
'needs_admin_code_review', None)
results[name] = addon_factory(
status=attrs.pop('addon_status'), name=name, **attrs)
if needs_admin_code_review:
AddonReviewerFlags.objects.create(
addon=results[name], needs_admin_code_review=True)
return results
def generate_file(self, name):
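        """Create and return a single add-on from the fixture definitions."""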
return self.generate_files([name])[name]
def test_search_by_needs_admin_code_review_admin(self):
self.login_as_admin()
self.generate_files(['Not Needing Admin Review', 'Needs Admin Review'])
response = self.search(needs_admin_code_review=1)
assert response.status_code == 200
assert self.named_addons(response) == ['Needs Admin Review']
def test_queue_counts_admin(self):
self.login_as_admin()
self.generate_files(['Not Needing Admin Review', 'Needs Admin Review'])
response = self.search(text_query='admin', per_page=1)
assert response.status_code == 200
doc = pq(response.content)
assert doc('.data-grid-top .num-results').text() == (
u'Results 1\u20131 of 2')
def test_search_by_addon_name_admin(self):
self.login_as_admin()
self.generate_files(['Not Needing Admin Review', 'Needs Admin Review',
'Justin Bieber Theme'])
response = self.search(text_query='admin')
assert response.status_code == 200
assert sorted(self.named_addons(response)) == [
'Needs Admin Review', 'Not Needing Admin Review']
def test_not_searching(self, **kwargs):
self.generate_files(['Not Needing Admin Review', 'Needs Admin Review'])
response = self.search(**kwargs)
assert response.status_code == 200
assert sorted(self.named_addons(response)) == [
'Not Needing Admin Review']
        # We were just displaying the queue, not searching, but the hidden
        # "searching" input in the form should always be set to True
        # regardless; it will be used once the user submits the form.
doc = pq(response.content)
assert doc('#id_searching').attr('value') == 'True'
def test_not_searching_with_param(self):
self.test_not_searching(some_param=1)
def test_search_by_nothing(self):
self.generate_files(['Not Needing Admin Review', 'Needs Admin Review'])
response = self.search(searching='True')
assert response.status_code == 200
assert sorted(self.named_addons(response)) == (
['Needs Admin Review', 'Not Needing Admin Review'])
def test_search_by_needs_admin_code_review(self):
self.generate_files(['Not Needing Admin Review', 'Needs Admin Review'])
response = self.search(needs_admin_code_review=1, searching='True')
assert response.status_code == 200
assert self.named_addons(response) == ['Needs Admin Review']
def test_queue_counts(self):
self.generate_files(['Not Needing Admin Review',
'Another Not Needing Admin Review',
'Needs Admin Review'])
response = self.search(
text_query='admin', per_page=1, searching='True')
assert response.status_code == 200
doc = pq(response.content)
assert doc('.data-grid-top .num-results').text() == (
u'Results 1\u20131 of 3')
def test_search_by_addon_name(self):
self.generate_files(['Not Needing Admin Review', 'Needs Admin Review',
'Justin Bieber Theme'])
response = self.search(text_query='admin', searching='True')
assert response.status_code == 200
assert sorted(self.named_addons(response)) == (
['Needs Admin Review', 'Not Needing Admin Review'])
def test_search_by_addon_in_locale(self):
name = 'Not Needing Admin Review'
generated = self.generate_file(name)
uni = 'フォクすけといっしょ'.decode('utf8')
addon = Addon.objects.get(pk=generated.id)
addon.name = {'ja': uni}
addon.save()
self.url = self.url.replace('/en-US/', '/ja/')
response = self.client.get(self.url, {'text_query': uni}, follow=True)
assert response.status_code == 200
assert self.named_addons(response) == [name]
def test_search_by_addon_author(self):
name = 'Not Needing Admin Review'
generated = self.generate_file(name)
user = UserProfile.objects.all()[0]
email = user.email.swapcase()
author = AddonUser.objects.create(user=user, addon=generated)
for role in [amo.AUTHOR_ROLE_OWNER, amo.AUTHOR_ROLE_DEV]:
author.role = role
author.save()
response = self.search(text_query=email)
assert response.status_code == 200
assert self.named_addons(response) == [name]
def test_search_by_supported_email_in_locale(self):
name = 'Not Needing Admin Review'
generated = self.generate_file(name)
uni = 'フォクすけといっしょ@site.co.jp'.decode('utf8')
addon = Addon.objects.get(pk=generated.id)
addon.support_email = {'ja': uni}
addon.save()
self.url = self.url.replace('/en-US/', '/ja/')
response = self.client.get(self.url, {'text_query': uni}, follow=True)
assert response.status_code == 200
assert self.named_addons(response) == [name]
def test_clear_search_visible(self):
response = self.search(text_query='admin', searching=True)
assert response.status_code == 200
assert pq(response.content)(
'.clear-queue-search').text() == 'clear search'
def test_clear_search_hidden(self):
response = self.search(text_query='admin')
assert response.status_code == 200
assert not pq(response.content)('.clear-queue-search').text()
class TestQueueSearch(BaseTestQueueSearch):
__test__ = True
def setUp(self):
super(TestQueueSearch, self).setUp()
self.url = reverse('reviewers.queue_nominated')
def test_search_by_addon_type(self):
self.generate_files(['Not Needing Admin Review', 'Justin Bieber Theme',
'Justin Bieber Search Bar'])
response = self.search(addon_type_ids=[amo.ADDON_THEME])
assert response.status_code == 200
assert self.named_addons(response) == ['Justin Bieber Theme']
def test_search_by_addon_type_any(self):
self.generate_file('Not Needing Admin Review')
response = self.search(addon_type_ids=[amo.ADDON_ANY])
assert response.status_code == 200
assert self.named_addons(response), 'Expected some add-ons'
def test_search_by_many_addon_types(self):
self.generate_files(['Not Needing Admin Review', 'Justin Bieber Theme',
'Justin Bieber Search Bar'])
response = self.search(addon_type_ids=[amo.ADDON_THEME,
amo.ADDON_SEARCH])
assert response.status_code == 200
assert sorted(self.named_addons(response)) == (
['Justin Bieber Search Bar', 'Justin Bieber Theme'])
def test_search_by_app(self):
self.generate_files(['Bieber For Mobile', 'Linux Widget'])
response = self.search(application_id=[amo.ANDROID.id])
assert response.status_code == 200
assert self.named_addons(response) == ['Bieber For Mobile']
def test_preserve_multi_apps(self):
self.generate_files(['Bieber For Mobile', 'Linux Widget'])
channel = (amo.RELEASE_CHANNEL_LISTED if self.listed else
amo.RELEASE_CHANNEL_UNLISTED)
multi = addon_factory(
status=amo.STATUS_NOMINATED, name='Multi Application',
version_kw={'channel': channel, 'application': amo.FIREFOX.id},
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
av_min, _ = AppVersion.objects.get_or_create(
application=amo.ANDROID.id, version='4.0.99')
av_max, _ = AppVersion.objects.get_or_create(
application=amo.ANDROID.id, version='5.0.0')
ApplicationsVersions.objects.get_or_create(
application=amo.ANDROID.id, version=multi.versions.latest(),
min=av_min, max=av_max)
response = self.search(application_id=[amo.ANDROID.id])
assert response.status_code == 200
assert self.named_addons(response) == [
'Bieber For Mobile', 'Multi Application']
def test_clear_search_uses_correct_queue(self):
# The "clear search" link points to the right listed or unlisted queue.
# Listed queue.
url = reverse('reviewers.queue_nominated')
response = self.client.get(
url, {'text_query': 'admin', 'searching': True})
assert response.status_code == 200
doc = pq(response.content)
assert doc('.clear-queue-search').attr('href') == url
class TestQueueSearchUnlistedAllList(BaseTestQueueSearch):
listed = False
__test__ = True
def setUp(self):
super(TestQueueSearchUnlistedAllList, self).setUp()
self.url = reverse('reviewers.unlisted_queue_all')
def test_search_deleted(self):
self.generate_files(['Not Needing Admin Review', 'Deleted'])
r = self.search(deleted=1)
assert self.named_addons(r) == ['Deleted']
def test_search_not_deleted(self):
self.generate_files(['Not Needing Admin Review', 'Deleted'])
response = self.search(deleted=0)
assert response.status_code == 200
assert self.named_addons(response) == ['Not Needing Admin Review']
def test_search_by_guid(self):
name = 'Not Needing Admin Review'
addon = self.generate_file(name)
addon.update(guid='@guidymcguid')
response = self.search(text_query='mcguid')
assert response.status_code == 200
assert self.named_addons(response) == ['Not Needing Admin Review']
class ReviewBase(QueueTest):
def setUp(self):
super(QueueTest, self).setUp()
self.login_as_reviewer()
self.addons = {}
self.addon = self.generate_file('Public')
self.version = self.addon.current_version
self.file = self.version.files.get()
self.reviewer = UserProfile.objects.get(username='reviewer')
self.reviewer.update(display_name=u'A Reviêwer')
self.url = reverse('reviewers.review', args=[self.addon.slug])
AddonUser.objects.create(addon=self.addon, user_id=999)
def get_addon(self):
return Addon.objects.get(pk=self.addon.pk)
def get_dict(self, **kw):
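        """Return minimal valid POST data for the review form, with any
        overrides passed as keyword arguments."""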
data = {'operating_systems': 'win', 'applications': 'something',
'comments': 'something'}
data.update(kw)
return data
class TestReview(ReviewBase):
def test_reviewer_required(self):
assert self.client.head(self.url).status_code == 200
def test_not_anonymous(self):
self.client.logout()
self.assertLoginRedirects(self.client.head(self.url), to=self.url)
@patch.object(settings, 'ALLOW_SELF_REVIEWS', False)
def test_not_author(self):
AddonUser.objects.create(addon=self.addon, user=self.reviewer)
assert self.client.head(self.url).status_code == 302
def test_review_unlisted_while_a_listed_version_is_awaiting_review(self):
self.make_addon_unlisted(self.addon)
version_factory(
addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
self.addon.update(status=amo.STATUS_NOMINATED, slug='awaiting')
self.url = reverse(
'reviewers.review', args=('unlisted', self.addon.slug))
self.grant_permission(self.reviewer, 'Addons:ReviewUnlisted')
assert self.client.get(self.url).status_code == 200
def test_needs_unlisted_reviewer_for_only_unlisted(self):
self.addon.versions.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
assert self.client.head(self.url).status_code == 404
self.grant_permission(self.reviewer, 'Addons:ReviewUnlisted')
assert self.client.head(self.url).status_code == 200
def test_dont_need_unlisted_reviewer_for_mixed_channels(self):
version_factory(
addon=self.addon, channel=amo.RELEASE_CHANNEL_UNLISTED,
version='9.9')
assert self.addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_UNLISTED)
assert self.addon.current_version.channel == amo.RELEASE_CHANNEL_LISTED
assert self.client.head(self.url).status_code == 200
self.grant_permission(self.reviewer, 'Addons:ReviewUnlisted')
assert self.client.head(self.url).status_code == 200
def test_not_flags(self):
self.addon.current_version.files.update(is_restart_required=False)
response = self.client.get(self.url)
assert response.status_code == 200
assert len(response.context['flags']) == 0
def test_flag_needs_admin_code_review(self):
self.addon.current_version.files.update(is_restart_required=False)
AddonReviewerFlags.objects.create(
addon=self.addon, needs_admin_code_review=True)
response = self.client.get(self.url)
assert response.status_code == 200
assert len(response.context['flags']) == 1
def test_info_comments_requested(self):
response = self.client.post(self.url, {'action': 'reply'})
assert response.context['form'].errors['comments'][0] == (
'This field is required.')
def test_whiteboard_url(self):
# Listed review.
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert (
doc('#whiteboard_form').attr('action') ==
'/en-US/reviewers/whiteboard/listed/public')
# Content review.
self.grant_permission(self.reviewer, 'Addons:ContentReview')
AutoApprovalSummary.objects.create(
version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
self.url = reverse(
'reviewers.review', args=['content', self.addon.slug])
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert (
doc('#whiteboard_form').attr('action') ==
'/en-US/reviewers/whiteboard/content/public')
# Unlisted review.
self.grant_permission(self.reviewer, 'Addons:ReviewUnlisted')
version_factory(addon=self.addon, channel=amo.RELEASE_CHANNEL_UNLISTED)
self.url = reverse(
'reviewers.review', args=['unlisted', self.addon.slug])
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert (
doc('#whiteboard_form').attr('action') ==
'/en-US/reviewers/whiteboard/unlisted/public')
# Listed review, but deleted.
self.addon.delete()
self.url = reverse(
'reviewers.review', args=['listed', self.addon.pk])
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert (
doc('#whiteboard_form').attr('action') ==
'/en-US/reviewers/whiteboard/listed/%d' % self.addon.pk)
def test_no_whiteboards_for_static_themes(self):
self.grant_permission(self.reviewer, 'Addons:ThemeReview')
self.addon.update(type=amo.ADDON_STATICTHEME)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert not doc('#whiteboard_form')
def test_comment(self):
response = self.client.post(self.url, {'action': 'comment',
'comments': 'hello sailor'})
assert response.status_code == 302
assert len(mail.outbox) == 0
comment_version = amo.LOG.COMMENT_VERSION
assert ActivityLog.objects.filter(
action=comment_version.id).count() == 1
def test_info_requested(self):
response = self.client.post(self.url, {'action': 'reply',
'comments': 'hello sailor'})
assert response.status_code == 302
assert len(mail.outbox) == 1
self.assertTemplateUsed(response, 'activity/emails/from_reviewer.txt')
def test_super_review_requested(self):
response = self.client.post(self.url, {'action': 'super',
'comments': 'hello sailor'})
assert response.status_code == 302
def test_info_requested_canned_response(self):
response = self.client.post(self.url, {'action': 'reply',
'comments': 'hello sailor',
'canned_response': 'foo'})
assert response.status_code == 302
assert len(mail.outbox) == 1
self.assertTemplateUsed(response, 'activity/emails/from_reviewer.txt')
def test_page_title(self):
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('title').text() == (
'%s :: Reviewer Tools :: Add-ons for Firefox' % self.addon.name)
def test_files_shown(self):
response = self.client.get(self.url)
assert response.status_code == 200
items = pq(response.content)('#review-files .files .file-info')
assert items.length == 1
f = self.version.all_files[0]
expected = [
('All Platforms', f.get_url_path('reviewer')),
('Validation',
reverse('devhub.file_validation', args=[self.addon.slug, f.id])),
('Contents', None),
]
check_links(expected, items.find('a'), verify=False)
def test_item_history(self, channel=amo.RELEASE_CHANNEL_LISTED):
self.addons['something'] = addon_factory(
status=amo.STATUS_PUBLIC, name=u'something',
version_kw={'version': u'0.2',
'channel': channel},
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
assert self.addon.versions.filter(channel=channel).count() == 1
self.review_version(self.version, self.url)
v2 = self.addons['something'].versions.all()[0]
v2.addon = self.addon
v2.created = v2.created + timedelta(days=1)
v2.save()
assert self.addon.versions.filter(channel=channel).count() == 2
action = self.review_version(v2, self.url)
response = self.client.get(self.url)
assert response.status_code == 200
# The 2 following lines replace pq(res.content), it's a workaround for
# https://github.com/gawel/pyquery/issues/31
UTF8_PARSER = HTMLParser(encoding='utf-8')
doc = pq(fromstring(response.content, parser=UTF8_PARSER))
table = doc('#review-files')
# Check the history for both versions.
ths = table.children('tr > th')
assert ths.length == 2
assert '0.1' in ths.eq(0).text()
assert '0.2' in ths.eq(1).text()
rows = table('td.files')
assert rows.length == 2
comments = rows.siblings('td')
assert comments.length == 2
for idx in xrange(comments.length):
td = comments.eq(idx)
assert td.find('.history-comment').text() == 'something'
assert td.find('th').text() == {
'public': 'Approved',
'reply': 'Reviewer Reply'}[action]
reviewer_name = td.find('td a').text()
assert ((reviewer_name == self.reviewer.display_name) or
(reviewer_name == self.other_reviewer.display_name))
def test_item_history_with_unlisted_versions_too(self):
# Throw in an unlisted version to be ignored.
version_factory(
version=u'0.2', addon=self.addon,
channel=amo.RELEASE_CHANNEL_UNLISTED,
file_kw={'status': amo.STATUS_PUBLIC})
self.test_item_history()
def test_item_history_with_unlisted_review_page(self):
self.addon.versions.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
self.version.reload()
        # Throw in a listed version to be ignored.
version_factory(
version=u'0.2', addon=self.addon,
channel=amo.RELEASE_CHANNEL_LISTED,
file_kw={'status': amo.STATUS_PUBLIC})
self.url = reverse('reviewers.review', args=[
'unlisted', self.addon.slug])
self.grant_permission(self.reviewer, 'Addons:ReviewUnlisted')
self.test_item_history(channel=amo.RELEASE_CHANNEL_UNLISTED)
def generate_deleted_versions(self):
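        """Create an add-on with several reviewed versions and hard-delete
        them, leaving only the review history behind."""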
self.addon = addon_factory(version_kw={
'version': '1.0', 'created': self.days_ago(1)})
self.url = reverse('reviewers.review', args=[self.addon.slug])
versions = ({'version': '0.1', 'action': 'comment',
'comments': 'millenium hand and shrimp'},
{'version': '0.1', 'action': 'public',
'comments': 'buggrit'},
{'version': '0.2', 'action': 'comment',
'comments': 'I told em'},
{'version': '0.3'})
for i, version_data in enumerate(versions):
version = version_factory(
addon=self.addon, version=version_data['version'],
created=self.days_ago(-i),
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
if 'action' in version_data:
data = {'action': version_data['action'],
'operating_systems': 'win',
'applications': 'something',
'comments': version_data['comments']}
self.client.post(self.url, data)
version.delete(hard=True)
self.addon.current_version.delete(hard=True)
@patch('olympia.reviewers.utils.sign_file')
def test_item_history_deleted(self, mock_sign):
self.generate_deleted_versions()
response = self.client.get(self.url)
assert response.status_code == 200
table = pq(response.content)('#review-files')
# Check the history for all versions.
ths = table.children('tr > th')
assert ths.length == 3 # The 2 with the same number will be coalesced.
assert '0.1' in ths.eq(0).text()
assert '0.2' in ths.eq(1).text()
assert '0.3' in ths.eq(2).text()
for idx in xrange(2):
assert 'Deleted' in ths.eq(idx).text()
bodies = table.children('.listing-body')
assert 'millenium hand and shrimp' in bodies.eq(0).text()
assert 'buggrit' in bodies.eq(0).text()
assert 'I told em' in bodies.eq(1).text()
assert mock_sign.called
def test_item_history_compat_ordered(self):
""" Make sure that apps in compatibility are ordered. """
av = AppVersion.objects.all()[0]
v = self.addon.versions.all()[0]
ApplicationsVersions.objects.create(
version=v, application=amo.THUNDERBIRD.id, min=av, max=av)
ApplicationsVersions.objects.create(
version=v, application=amo.SEAMONKEY.id, min=av, max=av)
assert self.addon.versions.count() == 1
url = reverse('reviewers.review', args=[self.addon.slug])
response = self.client.get(url)
assert response.status_code == 200
doc = pq(response.content)
icons = doc('.listing-body .app-icon')
assert icons.eq(0).attr('title') == "Firefox"
assert icons.eq(1).attr('title') == "SeaMonkey"
assert icons.eq(2).attr('title') == "Thunderbird"
def test_item_history_weight(self):
""" Make sure the weight is shown on the review page"""
AutoApprovalSummary.objects.create(
version=self.version, verdict=amo.AUTO_APPROVED,
weight=284)
self.grant_permission(self.reviewer, 'Addons:PostReview')
url = reverse('reviewers.review', args=[self.addon.slug])
response = self.client.get(url)
assert response.status_code == 200
doc = pq(response.content)
risk = doc('.listing-body .file-weight')
assert risk.text() == "Weight: 284"
def test_item_history_notes(self):
version = self.addon.versions.all()[0]
version.releasenotes = 'hi'
version.approvalnotes = 'secret hi'
version.save()
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)('#review-files')
version = doc('.activity_version')
assert version.length == 1
assert version.text() == 'hi'
approval = doc('.activity_approval')
assert approval.length == 1
assert approval.text() == 'secret hi'
def test_item_history_header(self):
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert ('Approved' in
doc('#review-files .listing-header .light').text())
def test_item_history_comment(self):
# Add Comment.
self.client.post(self.url, {'action': 'comment',
'comments': 'hello sailor'})
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)('#review-files')
assert doc('th').eq(1).text() == 'Commented'
assert doc('.history-comment').text() == 'hello sailor'
def test_files_in_item_history(self):
data = {'action': 'public', 'operating_systems': 'win',
'applications': 'something', 'comments': 'something'}
self.client.post(self.url, data)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
items = doc('#review-files .files .file-info')
assert items.length == 1
assert items.find('a.reviewers-install').text() == 'All Platforms'
def test_no_items(self):
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#review-files .no-activity').length == 1
def test_action_links(self):
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
expected = [
('View Listing', self.addon.get_url_path()),
]
check_links(expected, doc('#actions-addon a'), verify=False)
def test_action_links_as_admin(self):
self.login_as_admin()
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
expected = [
('View Listing', self.addon.get_url_path()),
('Edit', self.addon.get_dev_url()),
('Admin Page',
reverse('zadmin.addon_manage', args=[self.addon.id])),
]
check_links(expected, doc('#actions-addon a'), verify=False)
def test_unlisted_addon_action_links_as_admin(self):
"""No "View Listing" link for unlisted addons, "edit"/"manage" links
for the admins."""
self.make_addon_unlisted(self.addon)
self.login_as_admin()
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
expected = [
('Unlisted Review Page', reverse(
'reviewers.review', args=('unlisted', self.addon.slug))),
('Edit', self.addon.get_dev_url()),
('Admin Page', reverse(
'zadmin.addon_manage', args=[self.addon.id])),
]
check_links(expected, doc('#actions-addon a'), verify=False)
def test_mixed_channels_action_links_as_admin(self):
self.make_addon_unlisted(self.addon)
version_factory(
addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
self.addon.update(status=amo.STATUS_NOMINATED)
self.login_as_admin()
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
expected = [
('View Listing', self.addon.get_url_path()),
('Unlisted Review Page', reverse(
'reviewers.review', args=('unlisted', self.addon.slug))),
('Edit', self.addon.get_dev_url()),
('Admin Page', reverse(
'zadmin.addon_manage', args=[self.addon.id])),
]
check_links(expected, doc('#actions-addon a'), verify=False)
def test_mixed_channels_action_links_as_admin_on_unlisted_review(self):
self.make_addon_unlisted(self.addon)
version_factory(
addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
self.addon.update(status=amo.STATUS_NOMINATED)
self.login_as_admin()
self.url = reverse(
'reviewers.review', args=('unlisted', self.addon.slug))
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
expected = [
('View Listing', self.addon.get_url_path()),
('Listed Review Page',
reverse('reviewers.review', args=(self.addon.slug,))),
('Edit', self.addon.get_dev_url()),
('Admin Page',
reverse('zadmin.addon_manage', args=[self.addon.id])),
]
check_links(expected, doc('#actions-addon a'), verify=False)
def test_mixed_channels_action_links_as_regular_reviewer(self):
self.make_addon_unlisted(self.addon)
version_factory(
addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
self.addon.update(status=amo.STATUS_NOMINATED)
self.login_as_reviewer()
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
expected = [
('View Listing', self.addon.get_url_path()),
]
check_links(expected, doc('#actions-addon a'), verify=False)
def test_admin_links_as_non_admin(self):
self.login_as_reviewer()
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
admin = doc('#actions-addon li')
assert admin.length == 1
def test_extra_actions_subscribe_checked_state(self):
self.login_as_reviewer()
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
subscribe_input = doc('#notify_new_listed_versions')[0]
assert 'checked' not in subscribe_input.attrib
ReviewerSubscription.objects.create(
addon=self.addon, user=self.reviewer)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
subscribe_input = doc('#notify_new_listed_versions')[0]
assert subscribe_input.attrib['checked'] == 'checked'
def test_extra_actions_token(self):
self.login_as_reviewer()
self.client.cookies[API_TOKEN_COOKIE] = 'youdidntsaythemagicword'
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
token = doc('#extra-review-actions').attr('data-api-token')
assert token == 'youdidntsaythemagicword'
def test_extra_actions_not_for_reviewers(self):
self.login_as_reviewer()
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert not doc('#force_disable_addon')
assert not doc('#force_enable_addon')
assert not doc('#clear_admin_code_review')
assert not doc('#clear_admin_content_review')
assert not doc('#clear_admin_theme_review')
assert not doc('#disable_auto_approval')
assert not doc('#enable_auto_approval')
assert not doc('#clear_pending_info_request')
def test_extra_actions_admin_disable_enable(self):
self.login_as_admin()
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#force_disable_addon')
elem = doc('#force_disable_addon')[0]
assert 'hidden' not in elem.getparent().attrib.get('class', '')
assert doc('#force_enable_addon')
elem = doc('#force_enable_addon')[0]
assert 'hidden' in elem.getparent().attrib.get('class', '')
def test_unflag_option_forflagged_as_admin(self):
self.login_as_admin()
AddonReviewerFlags.objects.create(
addon=self.addon, needs_admin_code_review=True)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#clear_admin_code_review').length == 1
        assert doc('#clear_admin_content_review').length == 0
        assert doc('#clear_admin_theme_review').length == 0
def test_unflag_content_option_forflagged_as_admin(self):
self.login_as_admin()
AddonReviewerFlags.objects.create(
addon=self.addon,
needs_admin_code_review=False,
needs_admin_content_review=True)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#clear_admin_code_review').length == 0
assert doc('#clear_admin_content_review').length == 1
assert doc('#clear_admin_theme_review').length == 0
def test_unflag_theme_option_forflagged_as_admin(self):
self.login_as_admin()
AddonReviewerFlags.objects.create(
addon=self.addon,
needs_admin_code_review=False,
needs_admin_content_review=False,
needs_admin_theme_review=True)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#clear_admin_code_review').length == 0
assert doc('#clear_admin_content_review').length == 0
assert doc('#clear_admin_theme_review').length == 1
def test_disable_auto_approvals_as_admin(self):
self.login_as_admin()
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#disable_auto_approval')
elem = doc('#disable_auto_approval')[0]
assert 'hidden' not in elem.getparent().attrib.get('class', '')
assert doc('#enable_auto_approval')
elem = doc('#enable_auto_approval')[0]
assert 'hidden' in elem.getparent().attrib.get('class', '')
# Both of them should be absent on static themes, which are not
# auto-approved.
self.addon.update(type=amo.ADDON_STATICTHEME)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert not doc('#disable_auto_approval')
assert not doc('#enable_auto_approval')
def test_enable_auto_approvals_as_admin_auto_approvals_disabled(self):
self.login_as_admin()
AddonReviewerFlags.objects.create(
addon=self.addon, auto_approval_disabled=True)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#disable_auto_approval')
elem = doc('#disable_auto_approval')[0]
assert 'hidden' in elem.getparent().attrib.get('class', '')
assert doc('#enable_auto_approval')
elem = doc('#enable_auto_approval')[0]
assert 'hidden' not in elem.getparent().attrib.get('class', '')
# Both of them should be absent on static themes, which are not
# auto-approved.
self.addon.update(type=amo.ADDON_STATICTHEME)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert not doc('#disable_auto_approval')
assert not doc('#enable_auto_approval')
def test_clear_pending_info_request_as_admin(self):
self.login_as_admin()
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert not doc('#clear_pending_info_request')
AddonReviewerFlags.objects.create(
addon=self.addon, pending_info_request=self.days_ago(1))
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#clear_pending_info_request')
def test_info_request_checkbox(self):
self.login_as_reviewer()
assert not self.addon.pending_info_request
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert 'checked' not in doc('#id_info_request')[0].attrib
elm = doc('#id_info_request_deadline')[0]
assert elm.attrib['readonly'] == 'readonly'
assert elm.attrib['min'] == '7'
assert elm.attrib['max'] == '7'
assert elm.attrib['value'] == '7'
AddonReviewerFlags.objects.create(
addon=self.addon,
pending_info_request=datetime.now() + timedelta(days=7))
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#id_info_request')[0].attrib['checked'] == 'checked'
def test_info_request_checkbox_admin(self):
self.login_as_admin()
assert not self.addon.pending_info_request
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert 'checked' not in doc('#id_info_request')[0].attrib
elm = doc('#id_info_request_deadline')[0]
assert 'readonly' not in elm.attrib
assert elm.attrib['min'] == '1'
assert elm.attrib['max'] == '99'
assert elm.attrib['value'] == '7'
def test_no_public(self):
has_public = self.version.files.filter(
status=amo.STATUS_PUBLIC).exists()
assert has_public
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
validation = doc.find('.files')
assert validation.find('a').eq(1).text() == "Validation"
assert validation.find('a').eq(2).text() == "Contents"
assert validation.find('a').length == 3
def test_public_search(self):
self.version.files.update(status=amo.STATUS_PUBLIC)
self.addon.update(type=amo.ADDON_SEARCH)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#review-files .files ul .file-info').length == 1
def test_version_deletion(self):
"""
Make sure that we still show review history for deleted versions.
"""
# Add a new version to the add-on.
addon = addon_factory(
status=amo.STATUS_NOMINATED, name='something',
version_kw={'version': '0.2'},
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
assert self.addon.versions.count() == 1
self.review_version(self.version, self.url)
v2 = addon.versions.all()[0]
v2.addon = self.addon
v2.created = v2.created + timedelta(days=1)
v2.save()
self.review_version(v2, self.url)
assert self.addon.versions.count() == 2
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
        # View the history and verify two versions:
ths = doc('table#review-files > tr > th:first-child')
assert '0.1' in ths.eq(0).text()
assert '0.2' in ths.eq(1).text()
# Delete a version:
v2.delete()
# Verify two versions, one deleted:
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
ths = doc('table#review-files > tr > th:first-child')
assert ths.length == 2
assert '0.1' in ths.text()
def test_no_versions(self):
"""The review page should still load if there are no versions. But not
unless you have unlisted permissions."""
assert self.client.get(self.url).status_code == 200
response = self.client.post(self.url, {'action': 'comment',
'comments': 'hello sailor'})
assert response.status_code == 302
self.assert3xx(response, reverse('reviewers.queue_pending'),
status_code=302)
self.version.delete()
# Regular reviewer has no permission, gets a 404.
assert self.client.get(self.url).status_code == 404
# Reviewer with more powers can look.
self.grant_permission(self.reviewer, 'Addons:ReviewUnlisted')
assert self.client.get(self.url).status_code == 200
response = self.client.post(self.url, {'action': 'comment',
'comments': 'hello sailor'})
assert response.status_code == 302
self.assert3xx(response, reverse('reviewers.queue_pending'),
status_code=302)
def test_addon_deleted(self):
"""The review page should still load for deleted addons."""
self.addon.delete()
self.url = reverse('reviewers.review', args=[self.addon.pk])
assert self.client.get(self.url).status_code == 200
response = self.client.post(self.url, {'action': 'comment',
'comments': 'hello sailor'})
assert response.status_code == 302
self.assert3xx(response, reverse('reviewers.queue_pending'),
status_code=302)
@patch('olympia.reviewers.utils.sign_file')
def review_version(self, version, url, mock_sign):
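        """Post a review for the given version: approve it when listed,
        reply when unlisted. Returns the action that was used."""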
if version.channel == amo.RELEASE_CHANNEL_LISTED:
version.files.all()[0].update(status=amo.STATUS_AWAITING_REVIEW)
action = 'public'
else:
action = 'reply'
data = {
'action': action,
'operating_systems': 'win',
'applications': 'something',
'comments': 'something',
}
self.client.post(url, data)
if version.channel == amo.RELEASE_CHANNEL_LISTED:
assert mock_sign.called
return action
def test_dependencies_listed(self):
AddonDependency.objects.create(addon=self.addon,
dependent_addon=self.addon)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
deps = doc('.addon-info .addon-dependencies')
assert deps.length == 1
assert deps.find('li').length == 1
assert deps.find('a').attr('href') == self.addon.get_url_path()
def test_eula_displayed(self):
assert not bool(self.addon.eula)
response = self.client.get(self.url)
assert response.status_code == 200
self.assertNotContains(response, 'View End-User License Agreement')
self.addon.eula = 'Test!'
self.addon.save()
assert bool(self.addon.eula)
response = self.client.get(self.url)
assert response.status_code == 200
self.assertContains(response, 'View End-User License Agreement')
def test_privacy_policy_displayed(self):
assert self.addon.privacy_policy is None
response = self.client.get(self.url)
assert response.status_code == 200
self.assertNotContains(response, 'View Privacy Policy')
self.addon.privacy_policy = 'Test!'
self.addon.save()
response = self.client.get(self.url)
assert response.status_code == 200
self.assertContains(response, 'View Privacy Policy')
def test_requires_payment_indicator(self):
assert not self.addon.requires_payment
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert 'No' in doc('tr.requires-payment td').text()
self.addon.update(requires_payment=True)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert 'Yes' in doc('tr.requires-payment td').text()
def test_viewing(self):
url = reverse('reviewers.review_viewing')
response = self.client.post(url, {'addon_id': self.addon.id})
data = json.loads(response.content)
assert data['current'] == self.reviewer.id
assert data['current_name'] == self.reviewer.name
assert data['is_user'] == 1
# Now, login as someone else and test.
self.login_as_admin()
response = self.client.post(url, {'addon_id': self.addon.id})
data = json.loads(response.content)
assert data['current'] == self.reviewer.id
assert data['current_name'] == self.reviewer.name
assert data['is_user'] == 0
    # Let's just override this to make the test a bit shorter.
@mock.patch.object(amo, 'REVIEWER_REVIEW_LOCK_LIMIT', 1)
def test_viewing_lock_limit(self):
url = reverse('reviewers.review_viewing')
response = self.client.post(url, {'addon_id': 1234})
data = json.loads(response.content)
assert data['current'] == self.reviewer.id
assert data['current_name'] == self.reviewer.name
assert data['is_user'] == 1
# Second review page is over the limit.
response = self.client.post(url, {'addon_id': 5678})
data = json.loads(response.content)
assert data['current'] == settings.TASK_USER_ID # Mozilla's task ID.
assert data['current_name'] == 'Review lock limit reached'
assert data['is_user'] == 2
# Now, login as someone else and test. First page is blocked.
self.login_as_admin()
response = self.client.post(url, {'addon_id': 1234})
data = json.loads(response.content)
assert data['current'] == self.reviewer.id
assert data['current_name'] == self.reviewer.name
assert data['is_user'] == 0
# Second page is available.
response = self.client.post(url, {'addon_id': 5678})
data = json.loads(response.content)
admin = UserProfile.objects.get(username='admin')
assert data['current'] == admin.id
assert data['current_name'] == admin.name
assert data['is_user'] == 1
    # Let's just override this to make the test a bit shorter.
@mock.patch.object(amo, 'REVIEWER_REVIEW_LOCK_LIMIT', 1)
def test_viewing_lock_admin(self):
self.login_as_admin()
url = reverse('reviewers.review_viewing')
admin = UserProfile.objects.get(username='admin')
response = self.client.post(url, {'addon_id': 101})
data = json.loads(response.content)
assert data['current'] == admin.id
assert data['current_name'] == admin.name
assert data['is_user'] == 1
        # Admins don't have time for no limits.
response = self.client.post(url, {'addon_id': 202})
data = json.loads(response.content)
assert data['current'] == admin.id
assert data['current_name'] == admin.name
assert data['is_user'] == 1
def test_viewing_review_unlocks(self):
reviewing_url = reverse('reviewers.review_viewing')
self.client.post(reviewing_url, {'addon_id': self.addon.id})
key = '%s:review_viewing:%s' % (settings.CACHE_PREFIX, self.addon.id)
assert cache.get(key) == self.reviewer.id
self.client.post(self.url, {'action': 'comment',
'comments': 'hello sailor'})
# Processing a review should instantly clear the review lock on it.
assert cache.get(key) is None
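    # A sketch of the locking scheme the test above relies on (an assumption,
    # not lifted from the view code): loading the review page stores the
    # reviewer's id under a per-addon cache key with a timeout, and posting a
    # review deletes that key, e.g.:
    #
    #     key = '%s:review_viewing:%s' % (settings.CACHE_PREFIX, addon_id)
    #     cache.set(key, request.user.id, timeout=8 * 60)  # timeout is hypothetical
    #     ...then, on review submission...
    #     cache.delete(key)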
def test_viewing_queue(self):
response = self.client.post(reverse('reviewers.review_viewing'),
{'addon_id': self.addon.id})
data = json.loads(response.content)
assert data['current'] == self.reviewer.id
assert data['current_name'] == self.reviewer.name
assert data['is_user'] == 1
# Now, login as someone else and test.
self.login_as_admin()
r = self.client.post(reverse('reviewers.queue_viewing'),
{'addon_ids': self.addon.id})
data = json.loads(r.content)
assert data[str(self.addon.id)] == self.reviewer.display_name
def test_display_same_files_only_once(self):
"""
Test whether identical files for different platforms
show up as one link with the appropriate text.
"""
version = version_factory(
addon=self.addon, version='0.2', file_kw=False)
file_mac = file_factory(version=version, platform=amo.PLATFORM_MAC.id)
file_android = file_factory(
version=version, platform=amo.PLATFORM_ANDROID.id)
        # Signing gives the same uploaded file a different hash per platform;
        # only original_hash stays identical.
file_mac.update(hash='xyz789', original_hash='123abc')
file_android.update(hash='zyx987', original_hash='123abc')
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
text = doc('.reviewers-install').eq(1).text()
assert text == "Mac OS X / Android"
def test_compare_no_link(self):
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
info = doc('#review-files .file-info')
assert info.length == 1
assert info.find('a.compare').length == 0
def test_file_info_for_static_themes(self):
self.grant_permission(self.reviewer, 'Addons:ThemeReview')
self.addon.update(type=amo.ADDON_STATICTHEME)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
info = doc('#review-files .file-info')
assert info.length == 1
        # Only the download/install link is shown.
assert info.find('a').length == 1
assert info.find('a')[0].text == u'Download'
assert 'Compatibility' not in response.content
def test_compare_link(self):
first_file = self.addon.current_version.files.all()[0]
first_file.update(status=amo.STATUS_PUBLIC)
self.addon.current_version.update(created=self.days_ago(2))
new_version = version_factory(addon=self.addon, version='0.2')
new_file = new_version.files.all()[0]
self.addon.update(_current_version=new_version)
assert self.addon.current_version == new_version
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert response.context['show_diff']
links = doc('#review-files .file-info .compare')
expected = [
reverse('files.compare', args=[new_file.pk, first_file.pk]),
]
check_links(expected, links, verify=False)
def test_compare_link_auto_approved_ignored(self):
first_file = self.addon.current_version.files.all()[0]
first_file.update(status=amo.STATUS_PUBLIC)
self.addon.current_version.update(created=self.days_ago(3))
interim_version = version_factory(addon=self.addon, version='0.2')
interim_version.update(created=self.days_ago(2))
AutoApprovalSummary.objects.create(
version=interim_version, verdict=amo.AUTO_APPROVED)
new_version = version_factory(addon=self.addon, version='0.3')
new_file = new_version.files.all()[0]
self.addon.update(_current_version=new_version)
assert self.addon.current_version == new_version
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert response.context['show_diff']
links = doc('#review-files .file-info .compare')
# Comparison should be between the last version and the first,
# ignoring the interim version because it was auto-approved and not
# manually confirmed by a human.
expected = [
reverse('files.compare', args=[new_file.pk, first_file.pk]),
]
check_links(expected, links, verify=False)
def test_compare_link_auto_approved_but_confirmed_not_ignored(self):
first_file = self.addon.current_version.files.all()[0]
first_file.update(status=amo.STATUS_PUBLIC)
self.addon.current_version.update(created=self.days_ago(3))
confirmed_version = version_factory(addon=self.addon, version='0.2')
confirmed_version.update(created=self.days_ago(2))
confirmed_file = confirmed_version.files.all()[0]
AutoApprovalSummary.objects.create(
verdict=amo.AUTO_APPROVED, version=confirmed_version,
confirmed=True)
interim_version = version_factory(addon=self.addon, version='0.3')
interim_version.update(created=self.days_ago(1))
AutoApprovalSummary.objects.create(
version=interim_version, verdict=amo.AUTO_APPROVED)
new_version = version_factory(addon=self.addon, version='0.4')
new_file = new_version.files.all()[0]
self.addon.update(_current_version=new_version)
assert self.addon.current_version == new_version
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert response.context['show_diff']
links = doc('#review-files .file-info .compare')
# Comparison should be between the last version and the second,
# ignoring the third version because it was auto-approved and not
# manually confirmed by a human (the second was auto-approved but
# was manually confirmed).
expected = [
reverse('files.compare', args=[new_file.pk, confirmed_file.pk]),
]
check_links(expected, links, verify=False)
def test_compare_link_not_auto_approved_but_confirmed(self):
first_file = self.addon.current_version.files.all()[0]
first_file.update(status=amo.STATUS_PUBLIC)
self.addon.current_version.update(created=self.days_ago(3))
confirmed_version = version_factory(addon=self.addon, version='0.2')
confirmed_version.update(created=self.days_ago(2))
confirmed_file = confirmed_version.files.all()[0]
AutoApprovalSummary.objects.create(
verdict=amo.NOT_AUTO_APPROVED, version=confirmed_version
)
new_version = version_factory(addon=self.addon, version='0.3')
new_file = new_version.files.all()[0]
self.addon.update(_current_version=new_version)
assert self.addon.current_version == new_version
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert response.context['show_diff']
links = doc('#review-files .file-info .compare')
# Comparison should be between the last version and the second,
        # because the second was approved by a human before auto-approval
        # ran on it.
expected = [
reverse('files.compare', args=[new_file.pk, confirmed_file.pk]),
]
check_links(expected, links, verify=False)
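    # The selection rule the compare-link tests above encode, sketched as an
    # ORM query (an assumption about the view's logic, not its actual code;
    # Q is django.db.models.Q and the related name is assumed): pick the
    # newest earlier version that was either reviewed by a human or had its
    # auto-approval manually confirmed, e.g.:
    #
    #     base = addon.versions.exclude(pk=new_version.pk).filter(
    #         Q(autoapprovalsummary__isnull=True) |
    #         Q(autoapprovalsummary__verdict=amo.NOT_AUTO_APPROVED) |
    #         Q(autoapprovalsummary__confirmed=True)).latest('created')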
def test_download_sources_link(self):
version = self.addon.current_version
tdir = temp.gettempdir()
source_file = temp.NamedTemporaryFile(suffix='.zip', dir=tdir)
source_file.write('a' * (2 ** 21))
source_file.seek(0)
version.source = DjangoFile(source_file)
version.save()
url = reverse('reviewers.review', args=[self.addon.pk])
# Admin reviewer: able to download sources.
user = UserProfile.objects.get(email='admin@mozilla.com')
self.client.login(email=user.email)
response = self.client.get(url, follow=True)
assert response.status_code == 200
assert 'Download files' in response.content
# Standard reviewer: should know that sources were provided.
user = UserProfile.objects.get(email='reviewer@mozilla.com')
self.client.login(email=user.email)
response = self.client.get(url, follow=True)
assert response.status_code == 200
assert 'The developer has provided source code.' in response.content
@patch('olympia.reviewers.utils.sign_file')
def test_admin_flagged_addon_actions_as_admin(self, mock_sign_file):
self.version.files.update(status=amo.STATUS_AWAITING_REVIEW)
self.addon.update(status=amo.STATUS_NOMINATED)
AddonReviewerFlags.objects.create(
addon=self.addon, needs_admin_code_review=True)
self.login_as_admin()
response = self.client.post(self.url, self.get_dict(action='public'),
follow=True)
assert response.status_code == 200
addon = self.get_addon()
assert self.version == addon.current_version
assert addon.status == amo.STATUS_PUBLIC
assert addon.current_version.files.all()[0].status == amo.STATUS_PUBLIC
assert mock_sign_file.called
def test_admin_flagged_addon_actions_as_reviewer(self):
self.version.files.update(status=amo.STATUS_AWAITING_REVIEW)
self.addon.update(status=amo.STATUS_NOMINATED)
AddonReviewerFlags.objects.create(
addon=self.addon, needs_admin_code_review=True)
self.login_as_reviewer()
response = self.client.post(self.url, self.get_dict(action='public'))
assert response.status_code == 200 # Form error.
# The add-on status must not change as non-admin reviewers are not
# allowed to review admin-flagged add-ons.
addon = self.get_addon()
assert addon.status == amo.STATUS_NOMINATED
assert self.version == addon.current_version
assert addon.current_version.files.all()[0].status == (
amo.STATUS_AWAITING_REVIEW)
assert response.context['form'].errors['action'] == (
[u'Select a valid choice. public is not one of the available '
u'choices.'])
def test_admin_flagged_addon_actions_as_content_reviewer(self):
self.version.files.update(status=amo.STATUS_AWAITING_REVIEW)
self.addon.update(status=amo.STATUS_NOMINATED)
AddonReviewerFlags.objects.create(
addon=self.addon, needs_admin_code_review=True)
GroupUser.objects.filter(user=self.reviewer).all().delete()
self.grant_permission(self.reviewer, 'Addons:ContentReview')
self.url = reverse(
'reviewers.review', args=['content', self.addon.slug])
for action in ['confirm_auto_approved', 'reject_multiple_versions']:
response = self.client.post(self.url, self.get_dict(action=action))
assert response.status_code == 200 # Form error.
# The add-on status must not change as non-admin reviewers are not
# allowed to review admin-flagged add-ons.
addon = self.get_addon()
assert addon.status == amo.STATUS_NOMINATED
assert self.version == addon.current_version
assert addon.current_version.files.all()[0].status == (
amo.STATUS_AWAITING_REVIEW)
assert response.context['form'].errors['action'] == (
[u'Select a valid choice. %s is not one of the available '
u'choices.' % action])
assert ActivityLog.objects.filter(
action=amo.LOG.APPROVE_CONTENT.id).count() == 0
assert ActivityLog.objects.filter(
action=amo.LOG.REJECT_CONTENT.id).count() == 0
def test_confirm_auto_approval_no_permission(self):
AutoApprovalSummary.objects.create(
version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
self.login_as_reviewer() # Legacy reviewer, not post-review.
response = self.client.post(
self.url, {'action': 'confirm_auto_approved'})
assert response.status_code == 403
# Nothing happened: the user did not have the permission to do that.
assert ActivityLog.objects.filter(
action=amo.LOG.CONFIRM_AUTO_APPROVED.id).count() == 0
def test_attempt_to_use_content_review_permission_for_post_review_actions(
self):
# Try to use confirm_auto_approved outside of content review, while
# only having Addons:ContentReview permission.
self.grant_permission(self.reviewer, 'Addons:ContentReview')
AutoApprovalSummary.objects.create(
version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
self.login_as_reviewer()
response = self.client.post(
self.url, {'action': 'confirm_auto_approved'})
assert response.status_code == 403
# Nothing happened: the user did not have the permission to do that.
assert ActivityLog.objects.filter(
action=amo.LOG.CONFIRM_AUTO_APPROVED.id).count() == 0
def test_confirm_auto_approval_content_review(self):
GroupUser.objects.filter(user=self.reviewer).all().delete()
self.url = reverse(
'reviewers.review', args=['content', self.addon.slug])
summary = AutoApprovalSummary.objects.create(
version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
self.grant_permission(self.reviewer, 'Addons:ContentReview')
response = self.client.post(self.url, {
'action': 'confirm_auto_approved',
'comments': 'ignore me this action does not support comments'
})
assert response.status_code == 302
summary.reload()
assert summary.confirmed is None # We're only doing a content review.
assert ActivityLog.objects.filter(
action=amo.LOG.CONFIRM_AUTO_APPROVED.id).count() == 0
assert ActivityLog.objects.filter(
action=amo.LOG.APPROVE_CONTENT.id).count() == 1
a_log = ActivityLog.objects.filter(
action=amo.LOG.APPROVE_CONTENT.id).get()
assert a_log.details['version'] == self.addon.current_version.version
assert a_log.details['comments'] == ''
self.assert3xx(response, reverse('reviewers.queue_content_review'))
def test_cant_contentreview_if_admin_content_review_flag_is_set(self):
GroupUser.objects.filter(user=self.reviewer).all().delete()
self.url = reverse(
'reviewers.review', args=['content', self.addon.slug])
AutoApprovalSummary.objects.create(
version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
AddonReviewerFlags.objects.create(
addon=self.addon, needs_admin_content_review=True)
self.grant_permission(self.reviewer, 'Addons:ContentReview')
response = self.client.post(self.url, {
'action': 'confirm_auto_approved',
'comments': 'ignore me this action does not support comments'
})
assert response.status_code == 200 # Form error
assert ActivityLog.objects.filter(
action=amo.LOG.APPROVE_CONTENT.id).count() == 0
def test_can_contentreview_if_addon_has_sources_attached(self):
GroupUser.objects.filter(user=self.reviewer).all().delete()
self.url = reverse(
'reviewers.review', args=['content', self.addon.slug])
summary = AutoApprovalSummary.objects.create(
version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
self.addon.current_version.update(source='/path/to/fake/file.zip')
AddonReviewerFlags.objects.create(
addon=self.addon, needs_admin_code_review=True)
self.grant_permission(self.reviewer, 'Addons:ContentReview')
response = self.client.post(self.url, {
'action': 'confirm_auto_approved',
'comments': 'ignore me this action does not support comments'
})
assert response.status_code == 302
summary.reload()
assert summary.confirmed is None # We're only doing a content review.
assert ActivityLog.objects.filter(
action=amo.LOG.CONFIRM_AUTO_APPROVED.id).count() == 0
assert ActivityLog.objects.filter(
action=amo.LOG.APPROVE_CONTENT.id).count() == 1
a_log = ActivityLog.objects.filter(
action=amo.LOG.APPROVE_CONTENT.id).get()
assert a_log.details['version'] == self.addon.current_version.version
assert a_log.details['comments'] == ''
self.assert3xx(response, reverse('reviewers.queue_content_review'))
def test_cant_contentreview_if_addon_has_admin_flag_but_no_sources(self):
GroupUser.objects.filter(user=self.reviewer).all().delete()
self.url = reverse(
'reviewers.review', args=['content', self.addon.slug])
AutoApprovalSummary.objects.create(
version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
AddonReviewerFlags.objects.create(
addon=self.addon, needs_admin_code_review=True)
self.grant_permission(self.reviewer, 'Addons:ContentReview')
response = self.client.post(self.url, {
'action': 'confirm_auto_approved',
'comments': 'ignore me this action does not support comments'
})
assert response.status_code == 200 # Form error
assert ActivityLog.objects.filter(
action=amo.LOG.APPROVE_CONTENT.id).count() == 0
def test_cant_addonreview_if_admin_content_review_flag_is_set(self):
AutoApprovalSummary.objects.create(
version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
AddonReviewerFlags.objects.create(
addon=self.addon, needs_admin_content_review=True)
self.grant_permission(self.reviewer, 'Addons:PostReview')
for action in ['confirm_auto_approved', 'public', 'reject',
'reject_multiple_versions']:
response = self.client.post(self.url, self.get_dict(action=action))
assert response.status_code == 200 # Form error.
# The add-on status must not change as non-admin reviewers are not
# allowed to review admin-flagged add-ons.
addon = self.get_addon()
assert addon.status == amo.STATUS_PUBLIC
assert self.version == addon.current_version
assert addon.current_version.files.all()[0].status == (
amo.STATUS_PUBLIC)
assert response.context['form'].errors['action'] == (
[u'Select a valid choice. %s is not one of the available '
u'choices.' % action])
assert ActivityLog.objects.filter(
action=amo.LOG.CONFIRM_AUTO_APPROVED.id).count() == 0
assert ActivityLog.objects.filter(
action=amo.LOG.REJECT_VERSION.id).count() == 0
assert ActivityLog.objects.filter(
action=amo.LOG.APPROVE_VERSION.id).count() == 0
def test_cant_review_static_theme_if_admin_theme_review_flag_is_set(self):
self.version.files.update(status=amo.STATUS_AWAITING_REVIEW)
self.addon.update(
type=amo.ADDON_STATICTHEME, status=amo.STATUS_NOMINATED)
AddonReviewerFlags.objects.create(
addon=self.addon, needs_admin_theme_review=True)
self.grant_permission(self.reviewer, 'Addons:ThemeReview')
for action in ['public', 'reject']:
response = self.client.post(self.url, self.get_dict(action=action))
assert response.status_code == 200 # Form error.
# The add-on status must not change as non-admin reviewers are not
# allowed to review admin-flagged add-ons.
addon = self.get_addon()
assert addon.status == amo.STATUS_NOMINATED
assert self.version == addon.current_version
assert addon.current_version.files.all()[0].status == (
amo.STATUS_AWAITING_REVIEW)
assert response.context['form'].errors['action'] == (
[u'Select a valid choice. %s is not one of the available '
u'choices.' % action])
assert ActivityLog.objects.filter(
action=amo.LOG.REJECT_VERSION.id).count() == 0
assert ActivityLog.objects.filter(
action=amo.LOG.APPROVE_VERSION.id).count() == 0
@patch('olympia.reviewers.utils.sign_file')
def test_admin_can_review_statictheme_if_admin_theme_review_flag_set(
self, mock_sign_file):
self.version.files.update(status=amo.STATUS_AWAITING_REVIEW)
self.addon.update(
type=amo.ADDON_STATICTHEME, status=amo.STATUS_NOMINATED)
AddonReviewerFlags.objects.create(
addon=self.addon, needs_admin_theme_review=True)
self.grant_permission(self.reviewer, 'Addons:ThemeReview')
self.grant_permission(self.reviewer, 'Reviews:Admin')
response = self.client.post(self.url, {
'action': 'public',
            'comments': "it's good"
})
assert response.status_code == 302
assert self.get_addon().status == amo.STATUS_PUBLIC
assert mock_sign_file.called
def test_admin_can_contentreview_if_admin_content_review_flag_is_set(self):
GroupUser.objects.filter(user=self.reviewer).all().delete()
self.url = reverse(
'reviewers.review', args=['content', self.addon.slug])
summary = AutoApprovalSummary.objects.create(
version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
AddonReviewerFlags.objects.create(
addon=self.addon, needs_admin_content_review=True)
self.grant_permission(self.reviewer, 'Addons:ContentReview')
self.grant_permission(self.reviewer, 'Reviews:Admin')
response = self.client.post(self.url, {
'action': 'confirm_auto_approved',
'comments': 'ignore me this action does not support comments'
})
assert response.status_code == 302
summary.reload()
assert summary.confirmed is None # We're only doing a content review.
assert ActivityLog.objects.filter(
action=amo.LOG.CONFIRM_AUTO_APPROVED.id).count() == 0
assert ActivityLog.objects.filter(
action=amo.LOG.APPROVE_CONTENT.id).count() == 1
a_log = ActivityLog.objects.filter(
action=amo.LOG.APPROVE_CONTENT.id).get()
assert a_log.details['version'] == self.addon.current_version.version
assert a_log.details['comments'] == ''
self.assert3xx(response, reverse('reviewers.queue_content_review'))
def test_confirm_auto_approval_with_permission(self):
summary = AutoApprovalSummary.objects.create(
version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
GroupUser.objects.filter(user=self.reviewer).all().delete()
self.grant_permission(self.reviewer, 'Addons:PostReview')
response = self.client.post(self.url, {
'action': 'confirm_auto_approved',
'comments': 'ignore me this action does not support comments'
})
summary.reload()
assert response.status_code == 302
assert summary.confirmed is True
assert ActivityLog.objects.filter(
action=amo.LOG.CONFIRM_AUTO_APPROVED.id).count() == 1
a_log = ActivityLog.objects.filter(
action=amo.LOG.CONFIRM_AUTO_APPROVED.id).get()
assert a_log.details['version'] == self.addon.current_version.version
assert a_log.details['comments'] == ''
self.assert3xx(response, reverse('reviewers.queue_auto_approved'))
def test_user_changes_log(self):
# Activity logs related to user changes should be displayed.
        # Create an activity log for each of the following: user addition,
        # role change and deletion.
author = self.addon.addonuser_set.get()
core.set_user(author.user)
ActivityLog.create(amo.LOG.ADD_USER_WITH_ROLE,
author.user, author.get_role_display(), self.addon)
ActivityLog.create(amo.LOG.CHANGE_USER_WITH_ROLE,
author.user, author.get_role_display(), self.addon)
ActivityLog.create(amo.LOG.REMOVE_USER_WITH_ROLE,
author.user, author.get_role_display(), self.addon)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert 'user_changes' in response.context
user_changes_log = response.context['user_changes']
actions = [log.activity_log.action for log in user_changes_log]
assert actions == [
amo.LOG.ADD_USER_WITH_ROLE.id,
amo.LOG.CHANGE_USER_WITH_ROLE.id,
amo.LOG.REMOVE_USER_WITH_ROLE.id]
# Make sure the logs are displayed in the page.
user_changes = doc('#user-changes li')
assert len(user_changes) == 3
assert '(Owner) added to ' in user_changes[0].text
assert 'role changed to Owner for ' in user_changes[1].text
assert '(Owner) removed from ' in user_changes[2].text
@override_settings(CELERY_ALWAYS_EAGER=True)
@mock.patch('olympia.devhub.tasks.validate')
def test_validation_not_run_eagerly(self, validate):
"""Tests that validation is not run in eager mode."""
assert not self.file.has_been_validated
response = self.client.get(self.url)
assert response.status_code == 200
assert not validate.called
@override_settings(CELERY_ALWAYS_EAGER=False)
@mock.patch('olympia.devhub.tasks.validate')
def test_validation_run(self, validate):
"""Tests that validation is run if necessary."""
assert not self.file.has_been_validated
response = self.client.get(self.url)
assert response.status_code == 200
validate.assert_called_once_with(self.file)
@override_settings(CELERY_ALWAYS_EAGER=False)
@mock.patch('olympia.devhub.tasks.validate')
def test_validation_not_run_again(self, validate):
"""Tests that validation is not run for files which have cached
results."""
FileValidation.objects.create(file=self.file, validation=json.dumps(
amo.VALIDATOR_SKELETON_RESULTS))
response = self.client.get(self.url)
assert response.status_code == 200
assert not validate.called
def test_review_is_review_listed(self):
review_page = self.client.get(
reverse('reviewers.review', args=[self.addon.slug]))
listed_review_page = self.client.get(
reverse('reviewers.review', args=['listed', self.addon.slug]))
assert (pq(review_page.content)('#review-files').text() ==
pq(listed_review_page.content)('#review-files').text())
def test_approvals_info(self):
approval_info = AddonApprovalsCounter.objects.create(
addon=self.addon, last_human_review=datetime.now(), counter=42)
self.file.update(is_webextension=True)
AutoApprovalSummary.objects.create(
version=self.version, verdict=amo.AUTO_APPROVED)
self.grant_permission(self.reviewer, 'Addons:PostReview')
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('.last-approval-date')
approval_info.delete()
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
# no AddonApprovalsCounter: nothing displayed.
assert not doc('.last-approval-date')
def test_no_auto_approval_summaries_since_everything_is_public(self):
self.grant_permission(self.reviewer, 'Addons:PostReview')
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert not doc('.auto_approval')
def test_permissions_display(self):
permissions = ['bookmarks', 'high', 'voltage']
self.file.update(is_webextension=True)
WebextPermission.objects.create(
permissions=permissions,
file=self.file)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
info = doc('#review-files .file-info div')
assert info.eq(1).text() == 'Permissions: ' + ', '.join(permissions)
def test_abuse_reports(self):
report = AbuseReport.objects.create(
addon=self.addon, message=u'Et mël mazim ludus.',
ip_address='10.1.2.3')
created_at = defaultfilters.date(report.created)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert not doc('.abuse_reports')
self.grant_permission(self.reviewer, 'Addons:PostReview')
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert not doc('.abuse_reports')
AutoApprovalSummary.objects.create(
verdict=amo.AUTO_APPROVED, version=self.version)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('.abuse_reports')
assert (
doc('.abuse_reports').text() ==
u'anonymous [10.1.2.3] reported Public on %s\nEt mël mazim ludus.'
% created_at)
def test_abuse_reports_developers(self):
report = AbuseReport.objects.create(
user=self.addon.listed_authors[0], message=u'Foo, Bâr!',
ip_address='10.4.5.6')
created_at = defaultfilters.date(report.created)
AutoApprovalSummary.objects.create(
verdict=amo.AUTO_APPROVED, version=self.version)
self.grant_permission(self.reviewer, 'Addons:PostReview')
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('.abuse_reports')
assert (
doc('.abuse_reports').text() ==
u'anonymous [10.4.5.6] reported regularuser التطب on %s\nFoo, Bâr!'
% created_at)
def test_user_ratings(self):
user = user_factory()
rating = Rating.objects.create(
body=u'Lôrem ipsum dolor', rating=3, ip_address='10.5.6.7',
addon=self.addon, user=user)
created_at = defaultfilters.date(rating.created)
Rating.objects.create( # Review with no body, ignored.
rating=1, addon=self.addon, user=user_factory())
Rating.objects.create( # Reply to a review, ignored.
body='Replyyyyy', reply_to=rating,
addon=self.addon, user=user_factory())
        Rating.objects.create(  # Review with a high rating, ignored.
body=u'Qui platônem temporibus in', rating=5, addon=self.addon,
user=user_factory())
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert not doc('.user_ratings')
self.grant_permission(self.reviewer, 'Addons:PostReview')
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert not doc('.user_ratings')
AutoApprovalSummary.objects.create(
verdict=amo.AUTO_APPROVED, version=self.version)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('.user_ratings')
assert (
doc('.user_ratings').text() ==
u'%s on %s [10.5.6.7]\n'
u'Rated 3 out of 5 stars\nLôrem ipsum dolor' % (
user.username, created_at
)
)
def test_data_value_attributes(self):
AutoApprovalSummary.objects.create(
verdict=amo.AUTO_APPROVED, version=self.version)
self.grant_permission(self.reviewer, 'Addons:PostReview')
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
expected_actions_values = [
'confirm_auto_approved|', 'reject_multiple_versions|', 'reply|',
'super|', 'comment|']
assert [
act.attrib['data-value'] for act in
doc('.data-toggle.review-actions-desc')] == expected_actions_values
assert (
doc('select#id_versions.data-toggle')[0].attrib['data-value'] ==
'reject_multiple_versions|')
assert (
doc('.data-toggle.review-comments')[0].attrib['data-value'] ==
'reject_multiple_versions|reply|super|comment|')
# We don't have approve/reject actions so these have an empty
# data-value.
assert (
doc('.data-toggle.review-files')[0].attrib['data-value'] == '|')
assert (
doc('.data-toggle.review-tested')[0].attrib['data-value'] == '|')
assert (
doc('.data-toggle.review-info-request')[0].attrib['data-value'] ==
'reply|')
def test_data_value_attributes_unreviewed(self):
self.file.update(status=amo.STATUS_AWAITING_REVIEW)
self.grant_permission(self.reviewer, 'Addons:PostReview')
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
expected_actions_values = [
'public|', 'reject|', 'reply|', 'super|', 'comment|']
assert [
act.attrib['data-value'] for act in
doc('.data-toggle.review-actions-desc')] == expected_actions_values
assert (
doc('select#id_versions.data-toggle')[0].attrib['data-value'] ==
'reject_multiple_versions|')
assert (
doc('.data-toggle.review-comments')[0].attrib['data-value'] ==
'public|reject|reply|super|comment|')
assert (
doc('.data-toggle.review-files')[0].attrib['data-value'] ==
'public|reject|')
assert (
doc('.data-toggle.review-tested')[0].attrib['data-value'] ==
'public|reject|')
def test_data_value_attributes_static_theme(self):
self.addon.update(type=amo.ADDON_STATICTHEME)
self.file.update(status=amo.STATUS_AWAITING_REVIEW)
self.grant_permission(self.reviewer, 'Addons:ThemeReview')
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
expected_actions_values = [
'public|', 'reject|', 'reply|', 'super|', 'comment|']
assert [
act.attrib['data-value'] for act in
doc('.data-toggle.review-actions-desc')] == expected_actions_values
assert (
doc('select#id_versions.data-toggle')[0].attrib['data-value'] ==
'reject_multiple_versions|')
assert (
doc('.data-toggle.review-comments')[0].attrib['data-value'] ==
'public|reject|reply|super|comment|')
        # We don't show the files or "tested with" rows for any static theme
        # actions.
assert (
doc('.data-toggle.review-files')[0].attrib['data-value'] ==
'|')
assert (
doc('.data-toggle.review-tested')[0].attrib['data-value'] ==
'|')
def test_post_review_ignore_disabled(self):
# Though the latest version will be disabled, the add-on is public and
# was auto-approved so the confirmation action is available.
AutoApprovalSummary.objects.create(
verdict=amo.AUTO_APPROVED, version=self.version)
version_factory(
addon=self.addon, file_kw={'status': amo.STATUS_DISABLED})
self.grant_permission(self.reviewer, 'Addons:PostReview')
response = self.client.get(self.url)
assert response.status_code == 200
expected_actions = [
'confirm_auto_approved', 'reject_multiple_versions', 'reply',
'super', 'comment']
assert (
[action[0] for action in response.context['actions']] ==
expected_actions)
def test_content_review_ignore_disabled(self):
# Though the latest version will be disabled, the add-on is public and
# was auto-approved so the content approval action is available.
AutoApprovalSummary.objects.create(
verdict=amo.AUTO_APPROVED, version=self.version)
version_factory(
addon=self.addon, file_kw={'status': amo.STATUS_DISABLED})
self.grant_permission(self.reviewer, 'Addons:ContentReview')
self.url = reverse(
'reviewers.review', args=['content', self.addon.slug])
response = self.client.get(self.url)
assert response.status_code == 200
expected_actions = [
'confirm_auto_approved', 'reject_multiple_versions', 'reply',
'super', 'comment']
assert (
[action[0] for action in response.context['actions']] ==
expected_actions)
@mock.patch('olympia.versions.models.walkfiles')
def test_static_theme_backgrounds(self, walkfiles_mock):
background_files = ['a.png', 'b.png', 'c.png']
walkfiles_folder = os.path.join(
user_media_path('addons'), str(self.addon.id),
unicode(self.addon.current_version.id))
walkfiles_mock.return_value = [
os.path.join(walkfiles_folder, filename)
for filename in background_files]
self.addon.update(type=amo.ADDON_STATICTHEME)
self.grant_permission(self.reviewer, 'Addons:ThemeReview')
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
backgrounds_div = doc('div.all-backgrounds')
assert backgrounds_div.length == 1
images = doc('div.all-backgrounds .background.zoombox')
assert images.length == len(walkfiles_mock.return_value)
background_file_folder = '/'.join([
user_media_url('addons'), str(self.addon.id),
unicode(self.addon.current_version.id)])
background_file_urls = [
background_file_folder + '/' + filename
for filename in background_files]
        for loop_ct, div_tag in enumerate(images):
            assert div_tag[0].attrib['src'] in background_file_urls
            assert ''.join(div_tag.itertext()).strip() == (
                'Background file {0} of {1} - {2}'.format(
                    loop_ct + 1, len(background_files),
                    background_files[loop_ct]))
class TestReviewPending(ReviewBase):
def setUp(self):
super(TestReviewPending, self).setUp()
self.file = file_factory(version=self.version,
status=amo.STATUS_AWAITING_REVIEW,
is_webextension=True)
self.addon.update(status=amo.STATUS_PUBLIC)
def pending_dict(self):
return self.get_dict(action='public')
@patch('olympia.reviewers.utils.sign_file')
def test_pending_to_public(self, mock_sign):
statuses = (self.version.files.values_list('status', flat=True)
.order_by('status'))
assert list(statuses) == [
amo.STATUS_AWAITING_REVIEW, amo.STATUS_PUBLIC]
response = self.client.post(self.url, self.pending_dict())
assert self.get_addon().status == amo.STATUS_PUBLIC
self.assert3xx(response, reverse('reviewers.queue_pending'))
statuses = (self.version.files.values_list('status', flat=True)
.order_by('status'))
assert list(statuses) == [amo.STATUS_PUBLIC, amo.STATUS_PUBLIC]
assert mock_sign.called
def test_display_only_unreviewed_files(self):
"""Only the currently unreviewed files are displayed."""
self.file.update(filename='somefilename.xpi')
reviewed = File.objects.create(version=self.version,
status=amo.STATUS_PUBLIC,
filename='file_reviewed.xpi')
disabled = File.objects.create(version=self.version,
status=amo.STATUS_DISABLED,
filename='file_disabled.xpi')
unreviewed = File.objects.create(version=self.version,
status=amo.STATUS_AWAITING_REVIEW,
filename='file_unreviewed.xpi')
response = self.client.get(self.url, self.pending_dict())
assert response.status_code == 200
doc = pq(response.content)
assert len(doc('.review-actions-files ul li')) == 2
assert reviewed.filename not in response.content
assert disabled.filename not in response.content
assert unreviewed.filename in response.content
assert self.file.filename in response.content
@patch('olympia.reviewers.utils.sign_file')
def test_review_unreviewed_files(self, mock_sign):
"""Review all the unreviewed files when submitting a review."""
reviewed = File.objects.create(version=self.version,
status=amo.STATUS_PUBLIC)
disabled = File.objects.create(version=self.version,
status=amo.STATUS_DISABLED)
unreviewed = File.objects.create(version=self.version,
status=amo.STATUS_AWAITING_REVIEW)
self.login_as_admin()
response = self.client.post(self.url, self.pending_dict())
self.assert3xx(response, reverse('reviewers.queue_pending'))
assert self.addon.reload().status == amo.STATUS_PUBLIC
assert reviewed.reload().status == amo.STATUS_PUBLIC
assert disabled.reload().status == amo.STATUS_DISABLED
assert unreviewed.reload().status == amo.STATUS_PUBLIC
assert self.file.reload().status == amo.STATUS_PUBLIC
assert mock_sign.called
def test_auto_approval_summary_with_post_review(self):
AutoApprovalSummary.objects.create(
version=self.version,
verdict=amo.NOT_AUTO_APPROVED,
is_locked=True,
)
self.grant_permission(self.reviewer, 'Addons:PostReview')
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
# Locked by a reviewer is shown.
assert len(doc('.auto_approval li')) == 1
assert doc('.auto_approval li').eq(0).text() == (
'Is locked by a reviewer.')
class TestReviewerMOTD(ReviewerTest):
def get_url(self, save=False):
return reverse('reviewers.%smotd' % ('save_' if save else ''))
def test_change_motd(self):
self.login_as_admin()
motd = "Let's get crazy"
response = self.client.post(self.get_url(save=True), {'motd': motd})
url = self.get_url()
self.assert3xx(response, url)
response = self.client.get(url)
assert response.status_code == 200
assert pq(response.content)('.daily-message p').text() == motd
def test_require_reviewer_to_view(self):
url = self.get_url()
self.assertLoginRedirects(self.client.head(url), to=url)
def test_require_admin_to_change_motd(self):
self.login_as_reviewer()
response = self.client.get(self.get_url())
assert response.status_code == 403
response = self.client.post(reverse('reviewers.save_motd'),
{'motd': "I'm a sneaky reviewer"})
assert response.status_code == 403
def test_motd_edit_group(self):
user = UserProfile.objects.get(email='reviewer@mozilla.com')
group = Group.objects.create(name='Add-on Reviewer MOTD',
rules='AddonReviewerMOTD:Edit')
GroupUser.objects.create(user=user, group=group)
self.login_as_reviewer()
response = self.client.post(reverse('reviewers.save_motd'),
{'motd': 'I am the keymaster.'})
assert response.status_code == 302
assert get_config('reviewers_review_motd') == 'I am the keymaster.'
def test_form_errors(self):
self.login_as_admin()
response = self.client.post(self.get_url(save=True))
doc = pq(response.content)
assert doc('#reviewer-motd .errorlist').text() == (
'This field is required.')
class TestStatusFile(ReviewBase):
def get_file(self):
return self.version.files.all()[0]
def check_status(self, expected):
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#review-files .file-info div').text() == expected
def test_status_full(self):
self.get_file().update(status=amo.STATUS_AWAITING_REVIEW)
for status in [amo.STATUS_NOMINATED, amo.STATUS_PUBLIC]:
self.addon.update(status=status)
self.check_status('Awaiting Review')
def test_status_full_reviewed(self):
self.get_file().update(status=amo.STATUS_PUBLIC)
self.addon.update(status=amo.STATUS_PUBLIC)
self.check_status('Approved')
class TestWhiteboard(ReviewBase):
@property
def addon_param(self):
return self.addon.pk if self.addon.is_deleted else self.addon.slug
def test_whiteboard_addition(self):
public_whiteboard_info = u'Public whiteboard info.'
private_whiteboard_info = u'Private whiteboard info.'
url = reverse(
'reviewers.whiteboard', args=['listed', self.addon_param])
response = self.client.post(url, {
'whiteboard-private': private_whiteboard_info,
'whiteboard-public': public_whiteboard_info
})
self.assert3xx(response, reverse(
'reviewers.review', args=('listed', self.addon_param)))
addon = self.addon.reload()
assert addon.whiteboard.public == public_whiteboard_info
assert addon.whiteboard.private == private_whiteboard_info
def test_whiteboard_addition_content_review(self):
public_whiteboard_info = u'Public whiteboard info for content.'
private_whiteboard_info = u'Private whiteboard info for content.'
url = reverse(
'reviewers.whiteboard', args=['content', self.addon_param])
response = self.client.post(url, {
'whiteboard-private': private_whiteboard_info,
'whiteboard-public': public_whiteboard_info
})
assert response.status_code == 403 # Not a content reviewer.
user = UserProfile.objects.get(email='reviewer@mozilla.com')
self.grant_permission(user, 'Addons:ContentReview')
self.login_as_reviewer()
response = self.client.post(url, {
'whiteboard-private': private_whiteboard_info,
'whiteboard-public': public_whiteboard_info
})
self.assert3xx(response, reverse(
'reviewers.review', args=('content', self.addon_param)))
addon = self.addon.reload()
assert addon.whiteboard.public == public_whiteboard_info
assert addon.whiteboard.private == private_whiteboard_info
def test_whiteboard_addition_unlisted_addon(self):
user = UserProfile.objects.get(email='reviewer@mozilla.com')
self.grant_permission(user, 'Addons:ReviewUnlisted')
self.login_as_reviewer()
self.make_addon_unlisted(self.addon)
public_whiteboard_info = u'Public whiteboard info unlisted.'
private_whiteboard_info = u'Private whiteboard info unlisted.'
url = reverse(
'reviewers.whiteboard', args=['unlisted', self.addon_param])
response = self.client.post(url, {
'whiteboard-private': private_whiteboard_info,
'whiteboard-public': public_whiteboard_info
})
self.assert3xx(response, reverse(
'reviewers.review', args=('unlisted', self.addon_param)))
addon = self.addon.reload()
assert addon.whiteboard.public == public_whiteboard_info
assert addon.whiteboard.private == private_whiteboard_info
def test_delete_empty(self):
url = reverse(
'reviewers.whiteboard', args=['listed', self.addon_param])
response = self.client.post(url, {
'whiteboard-private': '',
'whiteboard-public': ''
})
self.assert3xx(response, reverse(
'reviewers.review', args=('listed', self.addon_param)))
assert not Whiteboard.objects.filter(pk=self.addon.pk)
class TestWhiteboardDeleted(TestWhiteboard):
def setUp(self):
super(TestWhiteboardDeleted, self).setUp()
self.addon.delete()
class TestAbuseReports(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
addon = Addon.objects.get(pk=3615)
addon_developer = addon.listed_authors[0]
someone = UserProfile.objects.exclude(pk=addon_developer.pk)[0]
AbuseReport.objects.create(addon=addon, message=u'wôo')
AbuseReport.objects.create(addon=addon, message=u'yéah',
reporter=someone)
# Make a user abuse report to make sure it doesn't show up.
AbuseReport.objects.create(user=someone, message=u'hey nöw')
# Make a user abuse report for one of the add-on developers: it should
# show up.
AbuseReport.objects.create(user=addon_developer, message='bü!')
def test_abuse_reports_list(self):
assert self.client.login(email='admin@mozilla.com')
r = self.client.get(reverse('reviewers.abuse_reports', args=['a3615']))
assert r.status_code == 200
        # We see the three abuse reports created in setUp: the two add-on
        # reports plus the one against an add-on developer.
        assert len(r.context['reports']) == 3
def test_no_abuse_reports_link_for_unlisted_addons(self):
"""Unlisted addons aren't public, and thus have no abuse reports."""
addon = Addon.objects.get(pk=3615)
self.make_addon_unlisted(addon)
self.client.login(email='admin@mozilla.com')
        response = self.client.get(
            reverse('reviewers.review', args=[addon.slug]))
        abuse_report_url = reverse('reviewers.abuse_reports', args=['a3615'])
        assert abuse_report_url not in response.content
class TestLeaderboard(ReviewerTest):
fixtures = ['base/users']
def setUp(self):
super(TestLeaderboard, self).setUp()
self.url = reverse('reviewers.leaderboard')
self.user = UserProfile.objects.get(email='reviewer@mozilla.com')
self.login_as_reviewer()
core.set_user(self.user)
def _award_points(self, user, score):
ReviewerScore.objects.create(user=user, note_key=amo.REVIEWED_MANUAL,
score=score, note='Thing.')
def test_leaderboard_ranks(self):
other_reviewer = UserProfile.objects.create(
username='post_reviewer',
display_name='', # No display_name, will fall back on name.
email='post_reviewer@mozilla.com')
self.grant_permission(
other_reviewer, 'Addons:PostReview',
name='Reviewers: Add-ons' # The name of the group matters here.
)
users = (self.user,
UserProfile.objects.get(email='persona_reviewer@mozilla.com'),
other_reviewer)
self._award_points(users[0], amo.REVIEWED_LEVELS[0]['points'] - 1)
self._award_points(users[1], amo.REVIEWED_LEVELS[0]['points'] + 1)
self._award_points(users[2], amo.REVIEWED_LEVELS[0]['points'] + 2)
def get_cells():
doc = pq(self.client.get(self.url).content.decode('utf-8'))
cells = doc('#leaderboard > tbody > tr > .name, '
'#leaderboard > tbody > tr > .level')
return [cells.eq(i).text() for i in range(0, cells.length)]
assert get_cells() == (
[users[2].name,
users[1].name,
unicode(amo.REVIEWED_LEVELS[0]['name']),
users[0].name])
self._award_points(users[0], 1)
assert get_cells() == (
[users[2].name,
users[1].name,
users[0].name,
unicode(amo.REVIEWED_LEVELS[0]['name'])])
self._award_points(users[0], -1)
self._award_points(users[2], (amo.REVIEWED_LEVELS[1]['points'] -
amo.REVIEWED_LEVELS[0]['points']))
assert get_cells() == (
[users[2].name,
unicode(amo.REVIEWED_LEVELS[1]['name']),
users[1].name,
unicode(amo.REVIEWED_LEVELS[0]['name']),
users[0].name])
class TestXssOnAddonName(amo.tests.TestXss):
def test_reviewers_abuse_report_page(self):
url = reverse('reviewers.abuse_reports', args=[self.addon.slug])
self.assertNameAndNoXSS(url)
def test_reviewers_review_page(self):
url = reverse('reviewers.review', args=[self.addon.slug])
self.assertNameAndNoXSS(url)
class TestAddonReviewerViewSet(TestCase):
client_class = APITestClient
def setUp(self):
super(TestAddonReviewerViewSet, self).setUp()
self.user = user_factory()
self.addon = addon_factory()
self.subscribe_url = reverse_ns(
'reviewers-addon-subscribe', kwargs={'pk': self.addon.pk})
self.unsubscribe_url = reverse_ns(
'reviewers-addon-unsubscribe', kwargs={'pk': self.addon.pk})
self.enable_url = reverse_ns(
'reviewers-addon-enable', kwargs={'pk': self.addon.pk})
self.disable_url = reverse_ns(
'reviewers-addon-disable', kwargs={'pk': self.addon.pk})
self.flags_url = reverse_ns(
'reviewers-addon-flags', kwargs={'pk': self.addon.pk})
def test_subscribe_not_logged_in(self):
response = self.client.post(self.subscribe_url)
assert response.status_code == 401
def test_subscribe_no_rights(self):
self.client.login_api(self.user)
response = self.client.post(self.subscribe_url)
assert response.status_code == 403
def test_subscribe_addon_does_not_exist(self):
self.grant_permission(self.user, 'Addons:PostReview')
self.client.login_api(self.user)
self.subscribe_url = reverse_ns(
'reviewers-addon-subscribe', kwargs={'pk': self.addon.pk + 42})
response = self.client.post(self.subscribe_url)
assert response.status_code == 404
def test_subscribe_already_subscribed(self):
ReviewerSubscription.objects.create(
user=self.user, addon=self.addon)
self.grant_permission(self.user, 'Addons:PostReview')
self.client.login_api(self.user)
self.subscribe_url = reverse_ns(
'reviewers-addon-subscribe', kwargs={'pk': self.addon.pk})
response = self.client.post(self.subscribe_url)
assert response.status_code == 202
assert ReviewerSubscription.objects.count() == 1
def test_subscribe(self):
self.grant_permission(self.user, 'Addons:PostReview')
self.client.login_api(self.user)
self.subscribe_url = reverse_ns(
'reviewers-addon-subscribe', kwargs={'pk': self.addon.pk})
response = self.client.post(self.subscribe_url)
assert response.status_code == 202
assert ReviewerSubscription.objects.count() == 1
def test_unsubscribe_not_logged_in(self):
response = self.client.post(self.unsubscribe_url)
assert response.status_code == 401
def test_unsubscribe_no_rights(self):
self.client.login_api(self.user)
response = self.client.post(self.unsubscribe_url)
assert response.status_code == 403
def test_unsubscribe_addon_does_not_exist(self):
self.grant_permission(self.user, 'Addons:PostReview')
self.client.login_api(self.user)
        self.unsubscribe_url = reverse_ns(
            'reviewers-addon-unsubscribe', kwargs={'pk': self.addon.pk + 42})
response = self.client.post(self.unsubscribe_url)
assert response.status_code == 404
def test_unsubscribe_not_subscribed(self):
self.grant_permission(self.user, 'Addons:PostReview')
self.client.login_api(self.user)
self.subscribe_url = reverse_ns(
'reviewers-addon-subscribe', kwargs={'pk': self.addon.pk})
response = self.client.post(self.unsubscribe_url)
assert response.status_code == 202
assert ReviewerSubscription.objects.count() == 0
def test_unsubscribe(self):
ReviewerSubscription.objects.create(
user=self.user, addon=self.addon)
self.grant_permission(self.user, 'Addons:PostReview')
self.client.login_api(self.user)
self.subscribe_url = reverse_ns(
'reviewers-addon-subscribe', kwargs={'pk': self.addon.pk})
response = self.client.post(self.unsubscribe_url)
assert response.status_code == 202
assert ReviewerSubscription.objects.count() == 0
def test_unsubscribe_dont_touch_another(self):
another_user = user_factory()
another_addon = addon_factory()
ReviewerSubscription.objects.create(
user=self.user, addon=self.addon)
ReviewerSubscription.objects.create(
user=self.user, addon=another_addon)
ReviewerSubscription.objects.create(
user=another_user, addon=self.addon)
self.grant_permission(self.user, 'Addons:PostReview')
self.client.login_api(self.user)
self.subscribe_url = reverse_ns(
'reviewers-addon-subscribe', kwargs={'pk': self.addon.pk})
response = self.client.post(self.unsubscribe_url)
assert response.status_code == 202
assert ReviewerSubscription.objects.count() == 2
assert not ReviewerSubscription.objects.filter(
addon=self.addon, user=self.user).exists()
def test_enable_not_logged_in(self):
response = self.client.post(self.enable_url)
assert response.status_code == 401
def test_enable_no_rights(self):
self.client.login_api(self.user)
response = self.client.post(self.enable_url)
assert response.status_code == 403
# Being a reviewer is not enough.
self.grant_permission(self.user, 'Addons:Review')
response = self.client.post(self.enable_url)
assert response.status_code == 403
def test_enable_addon_does_not_exist(self):
self.grant_permission(self.user, 'Reviews:Admin')
self.client.login_api(self.user)
self.enable_url = reverse_ns(
'reviewers-addon-enable', kwargs={'pk': self.addon.pk + 42})
response = self.client.post(self.enable_url)
assert response.status_code == 404
def test_enable(self):
self.grant_permission(self.user, 'Reviews:Admin')
self.client.login_api(self.user)
self.addon.update(status=amo.STATUS_DISABLED)
response = self.client.post(self.enable_url)
assert response.status_code == 202
self.addon.reload()
assert self.addon.status == amo.STATUS_PUBLIC
assert ActivityLog.objects.count() == 1
activity_log = ActivityLog.objects.latest('pk')
assert activity_log.action == amo.LOG.CHANGE_STATUS.id
assert activity_log.arguments[0] == self.addon
def test_enable_already_public(self):
self.grant_permission(self.user, 'Reviews:Admin')
self.client.login_api(self.user)
response = self.client.post(self.enable_url)
assert response.status_code == 202
self.addon.reload()
assert self.addon.status == amo.STATUS_PUBLIC
assert ActivityLog.objects.count() == 1
activity_log = ActivityLog.objects.latest('pk')
assert activity_log.action == amo.LOG.CHANGE_STATUS.id
assert activity_log.arguments[0] == self.addon
def test_enable_no_public_versions_should_fall_back_to_incomplete(self):
self.grant_permission(self.user, 'Reviews:Admin')
self.client.login_api(self.user)
self.addon.update(status=amo.STATUS_DISABLED)
self.addon.versions.all().delete()
response = self.client.post(self.enable_url)
assert response.status_code == 202
self.addon.reload()
assert self.addon.status == amo.STATUS_NULL
def test_enable_version_is_awaiting_review_fall_back_to_nominated(self):
self.grant_permission(self.user, 'Reviews:Admin')
self.client.login_api(self.user)
self.addon.current_version.files.all().update(
status=amo.STATUS_AWAITING_REVIEW)
self.addon.update(status=amo.STATUS_DISABLED)
response = self.client.post(self.enable_url)
assert response.status_code == 202
self.addon.reload()
assert self.addon.status == amo.STATUS_NOMINATED
def test_disable_not_logged_in(self):
response = self.client.post(self.disable_url)
assert response.status_code == 401
def test_disable_no_rights(self):
self.client.login_api(self.user)
response = self.client.post(self.disable_url)
assert response.status_code == 403
# Being a reviewer is not enough.
self.grant_permission(self.user, 'Addons:Review')
response = self.client.post(self.disable_url)
assert response.status_code == 403
def test_disable_addon_does_not_exist(self):
self.grant_permission(self.user, 'Reviews:Admin')
self.client.login_api(self.user)
        self.disable_url = reverse_ns(
            'reviewers-addon-disable', kwargs={'pk': self.addon.pk + 42})
response = self.client.post(self.disable_url)
assert response.status_code == 404
def test_disable(self):
self.grant_permission(self.user, 'Reviews:Admin')
self.client.login_api(self.user)
self.addon.versions.all().delete()
response = self.client.post(self.disable_url)
assert response.status_code == 202
self.addon.reload()
assert self.addon.status == amo.STATUS_DISABLED
assert ActivityLog.objects.count() == 1
activity_log = ActivityLog.objects.latest('pk')
assert activity_log.action == amo.LOG.CHANGE_STATUS.id
assert activity_log.arguments[0] == self.addon
def test_patch_flags_not_logged_in(self):
response = self.client.patch(
self.flags_url, {'auto_approval_disabled': True})
assert response.status_code == 401
def test_patch_flags_no_permissions(self):
self.client.login_api(self.user)
response = self.client.patch(
self.flags_url, {'auto_approval_disabled': True})
assert response.status_code == 403
# Being a reviewer is not enough.
self.grant_permission(self.user, 'Addons:Review')
response = self.client.patch(
self.flags_url, {'auto_approval_disabled': True})
assert response.status_code == 403
def test_patch_flags_addon_does_not_exist(self):
self.grant_permission(self.user, 'Reviews:Admin')
self.client.login_api(self.user)
self.flags_url = reverse_ns(
'reviewers-addon-flags', kwargs={'pk': self.addon.pk + 42})
response = self.client.patch(
self.flags_url, {'auto_approval_disabled': True})
assert response.status_code == 404
def test_patch_flags_no_flags_yet_still_works_transparently(self):
assert not AddonReviewerFlags.objects.filter(addon=self.addon).exists()
self.grant_permission(self.user, 'Reviews:Admin')
self.client.login_api(self.user)
response = self.client.patch(
self.flags_url, {'auto_approval_disabled': True})
assert response.status_code == 200
assert AddonReviewerFlags.objects.filter(addon=self.addon).exists()
reviewer_flags = AddonReviewerFlags.objects.get(addon=self.addon)
assert reviewer_flags.auto_approval_disabled
assert ActivityLog.objects.count() == 0
def test_patch_flags_change_everything(self):
AddonReviewerFlags.objects.create(
addon=self.addon,
pending_info_request=self.days_ago(1),
auto_approval_disabled=True)
self.grant_permission(self.user, 'Reviews:Admin')
self.client.login_api(self.user)
data = {
'auto_approval_disabled': False,
'needs_admin_code_review': True,
'needs_admin_content_review': True,
'needs_admin_theme_review': True,
'pending_info_request': None,
}
response = self.client.patch(self.flags_url, data)
assert response.status_code == 200
assert AddonReviewerFlags.objects.filter(addon=self.addon).exists()
reviewer_flags = AddonReviewerFlags.objects.get(addon=self.addon)
assert reviewer_flags.auto_approval_disabled is False
assert reviewer_flags.needs_admin_code_review is True
assert reviewer_flags.needs_admin_content_review is True
assert reviewer_flags.needs_admin_theme_review is True
assert reviewer_flags.pending_info_request is None
assert ActivityLog.objects.count() == 1
activity_log = ActivityLog.objects.latest('pk')
assert activity_log.action == amo.LOG.ADMIN_ALTER_INFO_REQUEST.id
assert activity_log.arguments[0] == self.addon
|
bsd-3-clause
|
gitizenme/ImprovWithAlexa
|
improvwithalexa_function.py
|
1
|
6601
|
import logging
from flask import Flask, render_template
from flask_ask import Ask, statement, question, session
from chatterbot import ChatBot
app = Flask(__name__)
ask = Ask(app, "/")
logging.getLogger("flask_ask").setLevel(logging.DEBUG)
# Create a new instance of a ChatBot
chatbot = ChatBot(
"Improv",
read_only=False,
trainer='chatterbot.trainers.ListTrainer',
storage_adapter="chatterbot.storage.JsonFileStorageAdapter",
logic_adapters=[
{
'import_path': 'chatterbot.logic.BestMatch'
},
# {
# 'import_path': 'chatterbot.logic.LowConfidenceAdapter',
# 'threshold': 0.63,
# 'default_response': 'I am sorry, but I do not understand.'
# },
"chatterbot.logic.MathematicalEvaluation",
],
database="/tmp/improv.json"
)
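# NOTE: the ListTrainer consumes ordered statement/response pairs, so the
# conversations below are given as lists (a set literal would not preserve order).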
# Greetings
chatbot.train([
"Nice to meet you.",
"Thank you.",
"Hi, nice to meet you.",
"Thank you. You too.",
"It is a pleasure to meet you.",
"Thank you. You too.",
"Top of the morning to you!",
"Thank you kindly.",
"Top of the morning to you!",
"And the rest of the day to you.",
"What's up?",
"Not much.",
"What's up?",
"Not too much.",
"What's up?",
"Not much, how about you?",
"What's up?",
"Nothing much.",
"What's up?",
"The sky's up but I'm fine thanks. What about you?",
])
# Intelligence
chatbot.train([
    "what are the laws of thermodynamics",
    "i'm not a physicist, but i think this has something to do with heat, entropy, and conservation of energy, right?",
])
chatbot.train([
    "what is the distance to the sun from the earth",
    "the sun is about 93 million miles from earth.",
])
chatbot.train([
    "how far away is the moon",
    "the moon is about 250,000 miles from earth on average.",
])
chatbot.train([
    "What was the name of the first artificial Earth satellite?",
    "Sputnik 1",
])
# Knowledge
chatbot.train([
"have you ever read a book",
"i have read many books.",
"ray bradbury",
"ray is really cool",
"william gibson",
'i like his older cyberpunk <say-as interpret-as="spell-out">AI</say-as> stuff better than the newer works.',
"frank herbert",
"i think dune is an excellent story. did you see the movie?",
"george r r martin",
"Ooh, game of thrones, the 7th season is starting out well",
])
# Truth
chatbot.train([
'what is true?',
'in accordance with <emphasis level="strong">fact</emphasis> or <emphasis level="strong">reality</emphasis>',
'what is false?',
'not according to true or fact',
'is true false?',
'false',
'is false true',
'true',
'is true equal to true',
'true',
'is false equal to true',
'false'
])
# Calculations
# enabled chatterbot.logic.MathematicalEvaluation
# Humor
chatbot.train([
'what is humour',
'a message that communicates laughter.',
'do you laugh',
'<prosody rate="x-fast" pitch="x-high" volume="x-loud"><emphasis level="reduced">Ha,</emphasis><emphasis level="reduced">Ha,</emphasis><emphasis level="moderate">ha,</emphasis><emphasis level="moderate">ha,</emphasis><emphasis level="strong">ha</emphasis><emphasis level="strong">ha</emphasis><emphasis level="strong">ha</emphasis><break time="1s"/></prosody><prosody rate="x-slow" pitch="medium" volume="soft"><p>yeah no</p></prosody>',
'do you have a sense of humour',
'"He who laughs, lasts." Mary Pettibone Poole',
])
chatbot.train([
    'knock, knock',
    "who's there",
    'pima',
    'pima who',
    "I'm going to pee my pants",
    "That's funny!"
])
# Social
chatbot.train([
    'Tell me about yourself.',
    'What do you want to know?',
])
chatbot.train([
    'Are you a robot?',
    'Yes I am.'
])
# Bizarre
chatbot.train([
    'do you know any bizarre facts',
    'A bus powered by human poop runs in the U.K. The bus can travel up to 186 miles on one tank of gas, which is equivalent to the waste produced annually by five people'
])
# Artificial Intelligence
chatbot.train([
    "What is Artificial Intelligence",
    "Artificial Intelligence is the branch of engineering and science devoted to constructing machines that think.",
])
chatbot.train([
    "You sound like Siri",
    "Yes I am inspired by commander Siri's artificial personality.",
])
# Emotions
chatbot.train([
    'do you have emotions?',
    'yes, I have them',
])
chatbot.train([
    'what are you feeling right now?',
    "I'm energized by the ignite reno crowd"
])
# Movies
chatbot.train([
    'what is your favorite movie?',
    'Pulp Fiction',
])
chatbot.train([
    'how about a quote?',
    'What does Marsellus Wallace look like?'
])
# Jokes
chatbot.train([
    'tell me a joke',
    'what did the buddhist say to the hot dog vendor? "make me one with everything."',
])
chatbot.train([
    'no, the joke about the dog',
    'a 3-legged dog walks into an old west saloon, slides up to the bar and announces "i\'m looking for the man who shot my paw." '
])
# Goodbye
chatbot.train([
    'say goodnight',
    'Thank you for coming out to Ignite Reno #18'
])
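# Skill handlers. flask-ask resolves render_template() against the skill's
# speech templates (conventionally a templates.yaml file), not HTML templates.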
@ask.launch
def new_game():
if 'name' not in session.attributes:
welcome_msg = render_template('welcome')
else:
welcome_msg = render_template('welcome_back', name=session.attributes["name"])
return question(welcome_msg)
# @ask.intent("YesIntent")
# def next_round():
# numbers = [randint(0, 9) for _ in range(3)]
# round_msg = render_template('round', numbers=numbers)
# session.attributes['numbers'] = numbers[::-1] # reverse
# return question(round_msg)
#
#
# @ask.intent("AnswerIntent", convert={'first': int, 'second': int, 'third': int})
# def answer(first, second, third):
# winning_numbers = session.attributes['numbers']
# if [first, second, third] == winning_numbers:
# msg = render_template('win')
# else:
# msg = render_template('lose')
# return statement(msg)
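# ChatIntent hands the spoken utterance to ChatterBot; the reply is wrapped in
# <speak> tags because several of the trained responses above embed SSML markup.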
@ask.intent("ChatIntent", mapping={'chat_question': 'question'})
def chat(chat_question):
response = chatbot.get_response(chat_question)
speak_output = '<speak>{}</speak>'.format(response.text)
q = question(speak_output)
return q
@ask.intent("NameIntent")
def name(first_name):
session.attributes['name'] = first_name
return question("Hello {}. Nice to meet you.".format(first_name))
@ask.intent("GoodNightIntent")
def goodbye(event):
    return statement("Thank you for coming out to Ignite Reno #18")
if __name__ == '__main__':
app.run(debug=True)
|
mit
|
rowhit/h2o-2
|
py/testdir_multi_jvm/test_rf_libsvm_fvec.py
|
8
|
3412
|
import unittest
import random, sys, time, os
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(2,java_heap_GB=6)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_rf_libsvm_fvec(self):
# just do the import folder once
# make the timeout variable per dataset. it can be 10 secs for covtype 20x (col key creation)
# so probably 10x that for covtype200
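        # Each entry: (csvFilename, hex_key, timeoutSecs, resultMult, classification)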
csvFilenameList = [
("mnist_train.svm", "cM", 30, 1, 1),
("covtype.binary.svm", "cC", 30, 1, 1),
("gisette_scale.svm", "cF", 30, 1, 0),
# FIX! fails KMeansScore
# not integer output
# ("colon-cancer.svm", "cA", 30, 1, 1),
("connect4.svm", "cB", 30, 1, 1),
# ("syn_6_1000_10.svm", "cK", 30, 1, 0), Bad libsvm file has the same column multiple times.
# float response requires regression
("syn_0_100_1000.svm", "cL", 30, 1, 0),
("mushrooms.svm", "cG", 30, 1, 1),
# rf doesn't like reals
# ("duke.svm", "cD", 30, 1, 1),
# too many features? 150K inspect timeout?
# ("E2006.train.svm", "cE", 30, 1, 1),
# too big for rf (memory error)
# ("news20.svm", "cH", 30, 1, 1),
# multiclass format ..don't support
# ("tmc2007_train.svm", "cJ", 30, 1, 1),
# normal csv
]
### csvFilenameList = random.sample(csvFilenameAll,1)
# h2b.browseTheCloud()
lenNodes = len(h2o.nodes)
firstDone = False
for (csvFilename, hex_key, timeoutSecs, resultMult, classification) in csvFilenameList:
# have to import each time, because h2o deletes source after parse
bucket = "home-0xdiag-datasets"
csvPathname = "libsvm/" + csvFilename
# PARSE******************************************
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=2000)
print "Parse result['destination_key']:", parseResult['destination_key']
# INSPECT******************************************
start = time.time()
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], timeoutSecs=360)
print "Inspect:", parseResult['destination_key'], "took", time.time() - start, "seconds"
h2o_cmd.infoFromInspect(inspect, csvFilename)
# RF******************************************
kwargs = {
'ntrees': 1,
'response': 0,
'classification': classification,
'importance': 0,
}
timeoutSecs = 600
start = time.time()
rf = h2o_cmd.runRF(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
elapsed = time.time() - start
print "rf end on ", csvPathname, 'took', elapsed, 'seconds.', \
"%d pct. of timeout" % ((elapsed/timeoutSecs) * 100)
if __name__ == '__main__':
h2o.unit_main()
|
apache-2.0
|
DataKitchen/DKCloudCommand
|
DKCloudCommand/tests/TestCloudCommandRunner.py
|
1
|
44253
|
import re
import unittest
import datetime, time
import tempfile
import pickle
from sys import path, stdout
import os
import shutil
# if '../../' not in path:
# path.insert(0, '../../')
from BaseTestCloud import BaseTestCloud
from DKCloudCommandRunner import DKCloudCommandRunner
from DKActiveServingWatcher import *
from DKCloudAPIMock import DKCloudAPIMock
class TestCloudCommandRunner(BaseTestCloud):
def test_rude(self):
tv = 'DKCloudCommand.rude = **rude**\n'
rv = DKCloudCommandRunner.rude(self._api)
self.assertIsNotNone(rv)
self.assertEqual(rv, tv)
rv = DKCloudCommandRunner.rude(BaseTestCloud)
self.assertIn('ERROR', rv)
def test_a_list_kitchens(self):
tv1 = 'CLI-Top'
tv2 = 'kitchens-plus'
tv3 = 'master'
# tv = 'DKCloudCommand.kitchens returned 3 kitchens\n base-test-kitchen \n kitchens-plus \n master \n'
rc = DKCloudCommandRunner.list_kitchen(self._api)
self.assertTrue(rc.ok())
rv = rc.get_message()
self.assertTrue(isinstance(rv, basestring))
self.assertTrue(tv1 in rv)
self.assertTrue(tv2 in rv)
self.assertTrue(tv3 in rv)
def test_get_kitchen(self):
tk = 'CLI-Top'
temp_dir = tempfile.mkdtemp(prefix='unit-tests', dir=self._TEMPFILE_LOCATION)
kitchen_path = os.path.join(temp_dir, tk)
os.makedirs(kitchen_path)
# kitchen dir already has a folder in it.
bad_path = os.path.join(kitchen_path, 'bad')
os.makedirs(bad_path)
rv = DKCloudCommandRunner.get_kitchen(self._api, tk, temp_dir)
self.assertFalse(rv.ok())
shutil.rmtree(bad_path, ignore_errors=True)
# kitchen dir already has a file in it.
with open(os.path.join(kitchen_path, 'bad.txt'), 'w') as bad_file:
bad_file.write('bad.txt')
rv = DKCloudCommandRunner.get_kitchen(self._api, tk, temp_dir)
self.assertFalse(rv.ok())
shutil.rmtree(kitchen_path, ignore_errors=True)
# kitchen dir exists, but is empty
kitchen_path = os.path.join(temp_dir, tk)
os.makedirs(kitchen_path)
rv = DKCloudCommandRunner.get_kitchen(self._api, tk, temp_dir)
self.assertTrue(rv.ok())
self.assertEqual(os.path.isdir(os.path.join(kitchen_path, '.dk')), True)
shutil.rmtree(kitchen_path, ignore_errors=True)
# kitchen dir does not exists.
rv = DKCloudCommandRunner.get_kitchen(self._api, tk, temp_dir)
self.assertTrue(rv.ok())
self.assertEqual(os.path.isdir(os.path.join(kitchen_path, '.dk')), True)
shutil.rmtree(temp_dir, ignore_errors=True)
def test_which_kitchen(self):
temp_dir = tempfile.mkdtemp(prefix='unit-tests', dir=self._TEMPFILE_LOCATION)
kn = 'fake'
kp = os.path.join(temp_dir, kn)
os.makedirs(kp)
dk = os.path.join(kp, '.dk')
os.makedirs(dk)
with open(os.path.join(dk, 'KITCHEN_META'), 'w') as meta:
meta.write(kn)
rv = DKCloudCommandRunner.which_kitchen(self._api, path=kp)
self.assertIn('You are in', rv.get_message())
rv = DKCloudCommandRunner.which_kitchen(self._api, kp)
self.assertIn('You are in', rv.get_message())
        rv = DKCloudCommandRunner.which_kitchen(self._api, temp_dir)
self.assertFalse(rv.ok())
def test_create_kitchen(self):
parent = 'CLI-Top'
kitchen = 'temp-create-kitchen-Runner'
kitchen = self._add_my_guid(kitchen)
rv = DKCloudCommandRunner.delete_kitchen(self._api, kitchen)
self.assertIsNotNone(rv)
rv = DKCloudCommandRunner.create_kitchen(self._api, parent, kitchen)
self.assertTrue(rv.ok())
rc = DKCloudCommandRunner.list_kitchen(self._api)
rv2 = rc.get_message()
self.assertTrue(kitchen in rv2)
# cleanup
rv = DKCloudCommandRunner.delete_kitchen(self._api, kitchen)
self.assertIsNotNone(rv)
def test_delete_kitchen(self):
parent = 'CLI-Top'
kitchen = 'temp-delete-kitchen-Runner'
kitchen = self._add_my_guid(kitchen)
rv = DKCloudCommandRunner.delete_kitchen(self._api, kitchen)
self.assertIsNotNone(rv)
rv = DKCloudCommandRunner.create_kitchen(self._api, parent, kitchen)
self.assertTrue(rv.ok())
rv = DKCloudCommandRunner.delete_kitchen(self._api, kitchen)
self.assertTrue(rv.ok())
rc = DKCloudCommandRunner.list_kitchen(self._api)
rv2 = rc.get_message()
self.assertTrue(kitchen not in rv2)
def test_recipe_list(self):
tv1 = 's3-small-recipe'
tv2 = 'simple'
tv3 = 'parallel-recipe-test'
rc = DKCloudCommandRunner.list_recipe(self._api, 'CLI-Top')
rv = rc.get_message()
self.assertTrue(tv1 in rv)
self.assertTrue(tv2 in rv)
self.assertTrue(tv3 in rv)
def test_recipe_get(self):
kitchen_name = 'CLI-Top'
recipe_name = 'simple'
temp_dir, kitchen_dir = self._make_kitchen_dir(kitchen_name, change_dir=True)
rv = DKCloudCommandRunner.get_recipe(self._api, kitchen_name, recipe_name)
self.assertTrue(recipe_name in rv.get_message())
self.assertTrue('sections' in rv.get_message())
self.assertTrue(os.path.exists(os.path.join(kitchen_dir, recipe_name)))
shutil.rmtree(temp_dir, ignore_errors=True)
def test_recipe_get_dir_exists(self):
kitchen_name = 'CLI-Top'
recipe_name = 'simple'
temp_dir, kitchen_dir, recipe_dir = self._make_recipe_dir(recipe_name, kitchen_name, change_dir=True)
rv = DKCloudCommandRunner.get_recipe(self._api, kitchen_name, recipe_name)
msg = rv.get_message()
self.assertTrue(recipe_name in msg)
        matches = re.search(r"([0-9]*) new or missing files", msg)
self.assertTrue(int(matches.group(1)) >= 16)
self.assertTrue('new or missing files' in msg)
self.assertTrue(os.path.exists(os.path.join(kitchen_dir, recipe_name)))
shutil.rmtree(temp_dir, ignore_errors=True)
def test_recipe_get_negative(self):
kitchen_name = 'CLI-Top'
recipe_name = 'simple_fogfogkfok'
temp_dir, kitchen_dir = self._make_kitchen_dir(kitchen_name, change_dir=True)
rc = DKCloudCommandRunner.get_recipe(self._api, kitchen_name, recipe_name)
self.assertFalse(rc.ok())
self.assertTrue('not in kitchen' in rc.get_message().lower())
shutil.rmtree(temp_dir, ignore_errors=True)
def test_recipe_get_complex(self):
kitchen_name = 'CLI-Top'
recipe_name = 'simple'
temp_dir, kitchen_dir = self._make_kitchen_dir(kitchen_name, change_dir=True)
rc = DKCloudCommandRunner.get_recipe(self._api, kitchen_name, recipe_name)
recipe_path = os.path.join(kitchen_dir, recipe_name)
self.assertTrue(os.path.exists(recipe_path))
# Modify the local file.
with open(os.path.join(recipe_path, "simple-file.txt"), 'a') as modify_file:
modify_file.write('new line\n')
modify_file.flush()
# Delete something local, so it's remote only.
os.remove(os.path.join(recipe_path, 'variations.json'))
os.remove(os.path.join(recipe_path, 'node1', 'data_sources', 'DKDataSource_NoOp.json'))
# Create a new file, so there is a local only file.
with open(os.path.join(recipe_path, "new_local_file.txt"), 'w') as new_local_file:
new_local_file.write('peccary\n')
new_local_file.flush()
subdir = os.path.join(recipe_path, 'subdir')
os.mkdir(subdir)
with open(os.path.join(subdir, "new_local_file_in_subdir.txt"), 'w') as new_local_file:
new_local_file.write('peccary\n')
new_local_file.flush()
rc = DKCloudCommandRunner.get_recipe(self._api, kitchen_name, recipe_name, recipe_path)
self.assertTrue(rc.ok())
msg = rc.get_message()
self.assertTrue('Auto-merging' in msg)
self.assertTrue('2 new or missing files' in msg)
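        # Cleanup is gated behind "if False", presumably left disabled so the
        # merged recipe can be inspected on disk after a run.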
if False:
shutil.rmtree(temp_dir, ignore_errors=True)
def test_recipe_status(self):
kitchen_name = 'CLI-Top'
recipe_name = 'simple'
temp_dir, kitchen_dir = self._make_kitchen_dir(kitchen_name, change_dir=True)
DKCloudCommandRunner.get_recipe(self._api, kitchen_name, recipe_name)
new_path = os.path.join(kitchen_dir, recipe_name)
os.chdir(new_path)
rc = DKCloudCommandRunner.recipe_status(self._api, kitchen_name, recipe_name)
rs = rc.get_message()
self.assertNotRegexpMatches(rs, '^ERROR')
        matches = re.search(r"([0-9]*) files are unchanged", rs)
self.assertTrue(int(matches.group(1)) >= 16)
self.assertTrue('files are unchanged' in rs)
# Modify existing file
with open(os.path.join(new_path, 'node1/description.json'), 'w') as f:
f.write('BooGa BooGa')
# Add a new file
with open(os.path.join(new_path, 'node1/newfile.json'), 'w') as f:
f.write('This is my new file. Hooray!')
# Delete a file
os.remove(os.path.join(new_path, 'node1/post_condition.json'))
# Remove a directory
shutil.rmtree(os.path.join(new_path, 'node1/data_sinks'))
rc = DKCloudCommandRunner.recipe_status(self._api, kitchen_name, recipe_name)
rs = rc.get_message()
self.assertNotRegexpMatches(rs, '^ERROR')
match = re.search(r"([0-9]*) files are unchanged", rs)
self.assertTrue(int(match.group(1)) >= 15)
self.assertTrue('files are unchanged' in rs)
self.assertTrue('1 files are modified' in rs)
self.assertTrue('1 files are local only' in rs)
self.assertTrue('1 files are remote only' in rs)
self.assertTrue('1 directories are remote only' in rs)
shutil.rmtree(temp_dir, ignore_errors=True)
def test_update_file(self):
# setup
parent_kitchen = 'CLI-Top'
test_kitchen = 'CLI-test_update_file'
test_kitchen = self._add_my_guid(test_kitchen)
recipe_name = 'simple'
recipe_file_key = recipe_name
file_name = 'description.json'
message = 'test update CLI-test_update_file'
api_file_key = file_name
update_str = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
# Cleanup old state
self._delete_and_clean_kitchen(test_kitchen)
# Get the original file. Helper function handles the directories.
original_file = self._get_recipe_file(parent_kitchen, recipe_name, recipe_file_key, file_name)
rs = DKCloudCommandRunner.create_kitchen(self._api, parent_kitchen, test_kitchen)
self.assertTrue(rs.ok())
# Get the new kitchen to a temp folder
temp_dir, test_kitchen_dir = self._make_kitchen_dir(test_kitchen, change_dir=True)
new_kitchen_file = self._get_recipe_file(test_kitchen, recipe_name, recipe_file_key, file_name,
test_kitchen_dir)
self.assertEqual(original_file, new_kitchen_file)
new_kitchen_file_dict = self._get_the_dict(new_kitchen_file)
new_kitchen_file_abspath = os.path.join(test_kitchen_dir, os.path.join(recipe_file_key, file_name))
new_kitchen_file_dict[test_kitchen] = update_str
new_kitchen_file2 = self._get_the_json_str(new_kitchen_file_dict)
with open(new_kitchen_file_abspath, 'w') as rfile:
rfile.seek(0)
rfile.truncate()
rfile.write(new_kitchen_file2)
# test
working_dir = os.path.join(test_kitchen_dir, recipe_name)
os.chdir(working_dir)
rc = DKCloudCommandRunner.update_file(self._api, test_kitchen, recipe_name, message, api_file_key)
self.assertTrue(rc.ok())
new_kitchen_file3 = self._get_recipe_file(test_kitchen, recipe_name, recipe_file_key, file_name)
self.assertEqual(new_kitchen_file2, new_kitchen_file3)
# cleanup
self._delete_and_clean_kitchen(test_kitchen)
shutil.rmtree(temp_dir, ignore_errors=True)
def test_util_funcs(self):
        paths_to_check = ['description.json', 'graph.json', 'simple-file.txt', 'node2_hide', 'node2_hide/my_file.txt', 'node1hide/subdir/hide-me.txt', 'variables.json', 'variations.json', 'node2/data_sinks', 'node1/data_sinks', 'node2', 'node1', 'node1/data_sources', 'resources', 'node2/data_sources']
minimal_paths = DKCloudCommandRunner.find_minimal_paths_to_get(paths_to_check)
self.assertIsNotNone(minimal_paths)
def test_update_all(self):
parent_kitchen = 'CLI-Top'
test_kitchen = self._add_my_guid('update_all')
recipe_name = 'simple'
new = 'new.txt'
deleted = 'deleted.txt'
modified = 'modified.txt'
subdir = 'subdir'
subsubdir = os.path.join(subdir, 'subsubdir')
subusubsubdir = os.path.join(subsubdir, 'subusubsubdir')
self._delete_and_clean_kitchen(test_kitchen)
rs = DKCloudCommandRunner.create_kitchen(self._api, parent_kitchen, test_kitchen)
self.assertTrue(rs.ok())
# make and cd to kitchen dir and get the recipe to disk
temp_dir, kitchen_dir, recipe_dir = self._make_recipe_dir(recipe_name, test_kitchen)
os.chdir(kitchen_dir)
print 'Working in directory %s' % recipe_dir
start_time = time.time()
rs = DKCloudCommandRunner.get_recipe(self._api, test_kitchen, recipe_name)
elapsed_recipe_status = time.time() - start_time
print 'get_recipe - elapsed: %d' % elapsed_recipe_status
self.assertTrue(rs.ok())
os.chdir(recipe_dir)
start_time = time.time()
rc = DKCloudCommandRunner.recipe_status(self._api, test_kitchen, recipe_name)
elapsed_recipe_status = time.time() - start_time
print 'recipe_status - elapsed: %d' % elapsed_recipe_status
msg = rc.get_message()
self.assertTrue('files differ' not in msg)
self.assertTrue('only on local' not in msg)
self.assertTrue('only on remote' not in msg)
# New, not added, file
with open(new, 'w') as f:
f.write('This is file %s\n' % new)
with open(os.path.join('node1', new), 'w') as f:
f.write('This is file %s in node 1\n' % new)
# Deleted File
with open(deleted, 'w') as f:
f.write('This is file %s\n' % deleted)
rc = DKCloudCommandRunner.add_file(self._api, test_kitchen, recipe_name, 'Adding %s' % deleted, deleted)
self.assertTrue(rc.ok())
os.remove(deleted)
# Modified File
with open(modified, 'w') as f:
f.write('This is file %s\n' % modified)
rc = DKCloudCommandRunner.add_file(self._api, test_kitchen, recipe_name, 'Adding %s' % modified, modified)
self.assertTrue(rc.ok())
with open(modified, 'a') as f:
f.write('This is a new line %s\n' % modified)
# New file in a subdirectory
os.mkdir(subdir)
os.mkdir(subsubdir)
os.mkdir(subusubsubdir)
with open(os.path.join(subsubdir, new), 'w') as f:
f.write('This is file %s in subdirectory %s\n' % (new, subsubdir))
with open(os.path.join(subsubdir, 'also_%s' % new), 'w') as f:
f.write('This is file %s in subdirectory %s\n' % ('also_%s' % new, subsubdir))
with open(os.path.join(subusubsubdir, 'again_%s' % new), 'w') as f:
f.write('This is file %s in subdirectory %s\n' % ('also_%s' % new, subusubsubdir))
# Delete a whole directory, and some files under there.
shutil.rmtree('node1', ignore_errors=True)
# Make sure repo is in state we expect.
start_time = time.time()
rc = DKCloudCommandRunner.recipe_status(self._api, test_kitchen, recipe_name)
elapsed_recipe_status = time.time() - start_time
print 'recipe_status - elapsed: %d' % elapsed_recipe_status
msg = rc.get_message()
match = re.search(r"([0-9]*) files are unchanged", msg)
self.assertTrue(int(match.group(1)) >= 10)
self.assertTrue('files are unchanged' in msg)
match = re.search(r"([0-9]*) files are modified", msg)
self.assertTrue(int(match.group(1)) >= 1)
self.assertTrue('files are modified' in msg)
match = re.search(r"([0-9]*) files are local only", msg)
self.assertTrue(int(match.group(1)) >= 1)
self.assertTrue('files are local only' in msg)
match = re.search(r"([0-9]*) files are remote only", msg)
self.assertTrue(int(match.group(1)) >= 1)
self.assertTrue('files are remote only' in msg)
self.assertTrue('subdir/subsubdir/subusubsubdir' in msg)
start_time = time.time()
rc = DKCloudCommandRunner.update_all_files(self._api, test_kitchen, recipe_name, recipe_dir, 'update all dryrun', dryrun=True)
elapsed_recipe_status = time.time() - start_time
print 'update_all_files - elapsed: %d' % elapsed_recipe_status
self.assertTrue(rc.ok())
msg = rc.get_message()
self.assertTrue('modified.txt' in msg)
self.assertTrue('new.txt' in msg)
self.assertTrue('deleted.txt' in msg)
self.assertTrue('subdir/subsubdir/new.txt' in msg)
self.assertTrue('subdir/subsubdir/subusubsubdir/again_new.txt' in msg)
start_time = time.time()
rc = DKCloudCommandRunner.update_all_files(self._api, test_kitchen, recipe_name, recipe_dir, 'update all')
elapsed_recipe_status = time.time() - start_time
print 'update_all_files - elapsed: %d' % elapsed_recipe_status
self.assertTrue(rc.ok())
msg = rc.get_message()
self.assertTrue('modified.txt' in msg)
match = re.search(r"([0-9]*) files updated", msg)
self.assertTrue(int(match.group(1)) >= 1)
self.assertTrue('subdir/subsubdir/new.txt' in msg)
match = re.search(r"([0-9]*) files added", msg)
self.assertTrue(int(match.group(1)) >= 4)
self.assertTrue('node1/data_sources/DKDataSource_NoOp.json' in msg)
match = re.search(r"([0-9]*) files deleted", msg)
self.assertTrue(int(match.group(1)) >= 7)
self._delete_and_clean_kitchen(test_kitchen)
shutil.rmtree(temp_dir, ignore_errors=True)
def test_add_file(self):
parent_kitchen = 'CLI-Top'
test_kitchen = 'test_create_file-Runner'
test_kitchen = self._add_my_guid(test_kitchen)
recipe_name = 'simple'
file_name = 'added.sql'
filedir = 'resources'
recipe_file_key = os.path.join(recipe_name, filedir)
api_file_key = os.path.join(filedir, file_name)
file_contents = '--\n-- sql for you\n--\n\nselect 1024\n\n'
message = 'test update test_create_file-API'
# test negative
rc = DKCloudCommandRunner.add_file(self._api, test_kitchen, recipe_name, message, 'badfile.txt')
self.assertFalse(rc.ok())
# create test kitchen
self._delete_and_clean_kitchen(test_kitchen)
rs = DKCloudCommandRunner.create_kitchen(self._api, parent_kitchen, test_kitchen)
self.assertTrue(rs.ok())
# make and cd to kitchen dir and get the recipe to disk
temp_dir, kitchen_dir = self._make_kitchen_dir(test_kitchen, change_dir=True)
os.chdir(kitchen_dir)
self._get_recipe(test_kitchen, recipe_name)
# create new file on disk
try:
os.chdir(recipe_name)
with open(api_file_key, 'w') as f:
f.write(file_contents)
except ValueError, e:
print('could not write file %s.' % e)
self.assertTrue(False)
# add file from disk THE TEST
rc = DKCloudCommandRunner.add_file(self._api, test_kitchen, recipe_name, message, api_file_key)
self.assertTrue(rc.ok())
# make sure file is in kitchen (get file)
file_contents2 = self._get_recipe_file(test_kitchen, recipe_name, recipe_file_key, file_name)
self.assertEqual(file_contents, file_contents2, 'Create check')
# cleanup
self._delete_and_clean_kitchen(test_kitchen)
shutil.rmtree(temp_dir, ignore_errors=True)
def test_delete_file(self):
# setup
parent_kitchen = 'CLI-Top'
test_kitchen = 'Runner-test_delete_file'
test_kitchen = self._add_my_guid(test_kitchen)
recipe_name = 'simple'
recipe_file_key = recipe_name
file_name = 'description.json'
message = 'test Delete Runner-test_delete_file'
self._delete_and_clean_kitchen(test_kitchen)
temp_dir, kitchen_dir = self._make_kitchen_dir(test_kitchen, change_dir=True)
rs = DKCloudCommandRunner.create_kitchen(self._api, parent_kitchen, test_kitchen)
self.assertTrue(rs.ok())
os.chdir(kitchen_dir)
self.assertTrue(self._get_recipe_file(test_kitchen, recipe_name, recipe_file_key, file_name) is not None)
rv = DKCloudCommandRunner.get_recipe(self._api, test_kitchen, recipe_name)
self.assertTrue(recipe_name in rv.get_message())
target_file = os.path.join(kitchen_dir, os.path.join(recipe_file_key, file_name))
self.assertTrue(os.path.isfile(target_file)) # the file is there
os.remove(target_file)
rs = DKCloudCommandRunner.delete_file(self._api, test_kitchen, recipe_name,
message, file_name)
self.assertTrue(rs.ok())
self.assertTrue(self._get_recipe_file(test_kitchen, recipe_name, recipe_file_key, file_name) is None,
"Gone check")
# cleanup
self._delete_and_clean_kitchen(test_kitchen)
shutil.rmtree(temp_dir, ignore_errors=True)
# def test_cook_recipe_recipe(self):
# kitchen = 'CLI-Top'
# recipe = 'simple'
# variation = 'simple-variation-now'
# rv = DKCloudCommandRunner.cook_recipe(self._api, kitchen, recipe, variation)
# self.assertTrue('started' in rv.get_message())
def test_create_order(self):
kitchen = 'CLI-Top'
recipe = 'simple'
variation = 'simple-variation-now'
rv = DKCloudCommandRunner.create_order(self._api, kitchen, recipe, variation)
self.assertTrue('simple' in rv.get_message())
def test_delete_all_order(self):
# setup
parent_kitchen = 'CLI-Top'
new_kitchen = 'test_deleteall_orderRUN'
new_kitchen = self._add_my_guid(new_kitchen)
recipe = 'simple'
variation = 'simple-variation-now'
DKCloudCommandRunner.delete_kitchen(self._api, new_kitchen) # clean up junk
rc = DKCloudCommandRunner.create_kitchen(self._api, parent_kitchen, new_kitchen)
self.assertTrue(rc.ok())
rv = DKCloudCommandRunner.create_order(self._api, new_kitchen, recipe, variation)
self.assertIsNotNone(rv)
order_id = rv.get_payload()
self.assertIsNotNone(variation in order_id)
# test
rc = DKCloudCommandRunner.delete_all_order(self._api, new_kitchen)
self.assertTrue(rc.ok())
# cleanup
DKCloudCommandRunner.delete_kitchen(self._api, new_kitchen)
def test_delete_one_order(self):
# setup
parent_kitchen = 'CLI-Top'
new_kitchen = 'test_deleteall_order-RUN'
new_kitchen = self._add_my_guid(new_kitchen)
recipe = 'simple'
variation = 'simple-variation-now'
DKCloudCommandRunner.delete_kitchen(self._api, new_kitchen) # clean up junk
rc = DKCloudCommandRunner.create_kitchen(self._api, parent_kitchen, new_kitchen)
self.assertTrue(rc.ok())
rv = DKCloudCommandRunner.create_order(self._api, new_kitchen, recipe, variation)
self.assertIsNotNone(rv)
order_id = rv.get_payload()
self.assertIsNotNone(variation in order_id)
# test
rc = DKCloudCommandRunner.delete_one_order(self._api, order_id)
self.assertTrue(rc.ok())
# cleanup
DKCloudCommandRunner.delete_kitchen(self._api, new_kitchen)
def test_stop_order(self):
# setup
parent_kitchen = 'CLI-Top'
new_kitchen = 'test_stop_order-RUN'
new_kitchen = self._add_my_guid(new_kitchen)
recipe = 'simple'
variation = 'simple-variation-now'
DKCloudCommandRunner.delete_kitchen(self._api, new_kitchen) # clean up junk
rc = DKCloudCommandRunner.create_kitchen(self._api, parent_kitchen, new_kitchen)
self.assertTrue(rc.ok())
rv = DKCloudCommandRunner.create_order(self._api, new_kitchen, recipe, variation)
self.assertIsNotNone(rv)
order_id = rv.get_payload()
self.assertIsNotNone(variation in order_id)
# test
rc = DKCloudCommandRunner.stop_order(self._api, order_id)
# todo: need to find a way for this to succeed
self.assertTrue(rc.ok())
# cleanup
DKCloudCommandRunner.delete_kitchen(self._api, new_kitchen)
def test_get_compiled_serving_from_recipe(self):
# setup
parent_kitchen = 'master'
        new_kitchen = 'test_get_compiled_serving_from_recipe-API'
new_kitchen = self._add_my_guid(new_kitchen)
recipe_name = 'parallel-recipe-test'
variation_name = 'variation-test'
self._delete_and_clean_kitchen(new_kitchen)
rs = DKCloudCommandRunner.create_kitchen(self._api, parent_kitchen, new_kitchen)
self.assertTrue(rs.ok())
# test
resp = DKCloudCommandRunner.get_compiled_serving(self._api, parent_kitchen, recipe_name, variation_name)
self.assertTrue(resp.ok())
# cleanup
self._delete_and_clean_kitchen(new_kitchen)
def test_merge_kitchens_success(self):
existing_kitchen_name = 'master'
base_test_kitchen_name = 'base-test-kitchen'
branched_test_kitchen_name = 'branched-from-base-test-kitchen'
base_test_kitchen_name = self._add_my_guid(base_test_kitchen_name)
branched_test_kitchen_name = self._add_my_guid(branched_test_kitchen_name)
# setup
self._delete_and_clean_kitchen(branched_test_kitchen_name)
self._delete_and_clean_kitchen(base_test_kitchen_name)
# test
# create base kitchen
rs = DKCloudCommandRunner.create_kitchen(self._api, existing_kitchen_name, base_test_kitchen_name)
self.assertTrue(rs.ok())
# create branch kitchen from base kitchen
rs = DKCloudCommandRunner.create_kitchen(self._api, base_test_kitchen_name, branched_test_kitchen_name)
self.assertTrue(rs.ok())
# do merge
rd = DKCloudCommandRunner.merge_kitchens_improved(self._api, branched_test_kitchen_name, base_test_kitchen_name)
self._check_no_merge_conflicts(rd)
# cleanup
self._delete_and_clean_kitchen(branched_test_kitchen_name)
self._delete_and_clean_kitchen(base_test_kitchen_name)
def test_merge_kitchens_improved_success(self):
to_kitchen = 'dummy'
from_kitchen = 'merge_success'
mock_api = DKCloudAPIMock(self._cr_config)
rv = DKCloudCommandRunner.merge_kitchens_improved(mock_api, from_kitchen, to_kitchen)
self.assertTrue('1 files changed' in rv.get_message())
self.assertTrue('1 insertions(+)' in rv.get_message())
self.assertTrue('0 deletions(-)' in rv.get_message())
# Check that the merge returned the diffs as expected.
def test_merge_resolution(self):
base_kitchen = 'CLI-Top'
parent_kitchen = 'merge_resolve_parent'
parent_kitchen = self._add_my_guid(parent_kitchen)
child_kitchen = 'merge_resolve_child'
child_kitchen = self._add_my_guid(child_kitchen)
recipe = 'simple'
conflicted_file = 'conflicted-file.txt'
temp_dir_child, kitchen_dir_child, recipe_dir_child = self._make_recipe_dir(recipe, child_kitchen)
temp_dir_parent, kitchen_dir_parent, recipe_dir_parent = self._make_recipe_dir(recipe, parent_kitchen)
setup = True
cleanup = True
if setup:
rc = DKCloudCommandRunner.delete_kitchen(self._api, child_kitchen)
rc = DKCloudCommandRunner.delete_kitchen(self._api, parent_kitchen)
rc = DKCloudCommandRunner.create_kitchen(self._api, parent_kitchen=base_kitchen, new_kitchen=parent_kitchen)
self.assertTrue(rc.ok())
rc = DKCloudCommandRunner.create_kitchen(self._api, parent_kitchen=parent_kitchen, new_kitchen=child_kitchen)
self.assertTrue(rc.ok())
os.chdir(recipe_dir_parent)
# parent_file = os.path.join(recipe, conflicted_file)
with open(conflicted_file, 'w') as f:
f.write('line1\nparent\nline2\n')
rc = DKCloudCommandRunner.add_file(self._api, parent_kitchen, recipe, 'adding %s to %s' % (conflicted_file, parent_kitchen), conflicted_file)
self.assertTrue(rc.ok())
os.chdir(recipe_dir_child)
# child_file = os.path.join(recipe, conflicted_file)
with open(conflicted_file, 'w') as f:
f.write('line1\nchild\nline2\n')
rc = DKCloudCommandRunner.add_file(self._api, child_kitchen, recipe, 'adding %s to %s' % (conflicted_file, child_kitchen), conflicted_file)
self.assertTrue(rc.ok())
# Make sure we are in the recipe folder before merging
os.chdir(recipe_dir_child)
rc = DKCloudCommandRunner.merge_kitchens_improved(self._api, child_kitchen, parent_kitchen)
self.assertTrue('1 conflict found' in rc.get_message())
self.assertTrue('simple/conflicted-file.txt' in rc.get_message())
rc = DKCloudCommandRunner.merge_kitchens_improved(self._api, child_kitchen, parent_kitchen)
self.assertTrue('Unresolved conflicts' in rc.get_message())
self.assertTrue('conflicted-file.txt' in rc.get_message())
rc = DKCloudCommandRunner.get_unresolved_conflicts(recipe, recipe_dir_child)
self.assertTrue(rc.ok())
self.assertTrue('Unresolved conflicts' in rc.get_message())
rc = DKCloudCommandRunner.resolve_conflict(conflicted_file)
self.assertTrue(rc.ok())
self.assertTrue('Conflict resolved' in rc.get_message())
rc = DKCloudCommandRunner.get_unresolved_conflicts(recipe, recipe_dir_child)
self.assertTrue(rc.ok())
self.assertTrue('No conflicts found' in rc.get_message())
rc = DKCloudCommandRunner.merge_kitchens_improved(self._api, child_kitchen, parent_kitchen)
self.assertTrue('Unresolved conflicts' not in rc.get_message())
if cleanup:
DKCloudCommandRunner.delete_kitchen(self._api, child_kitchen)
DKCloudCommandRunner.delete_kitchen(self._api, parent_kitchen)
shutil.rmtree(temp_dir_child, ignore_errors=True)
shutil.rmtree(temp_dir_parent, ignore_errors=True)
def test_merge_kitchens_improved_conflicts(self):
to_kitchen_pickle = 'dummy'
from_kitchen_pickle = 'merge_conflicts'
mock_api = DKCloudAPIMock(self._cr_config)
# This one tests just a print
rv = DKCloudCommandRunner.merge_kitchens_improved(mock_api, from_kitchen_pickle, to_kitchen_pickle)
self.assertTrue('1 conflict found' in rv.get_message())
self.assertTrue('conflicted-file.txt' in rv.get_message())
# Do the merge and put it down into a folder
parent_kitchen = 'merge-parent_ut_6d887fc6'
child_kitchen = 'merge-child_ut_6d887fc6'
recipe_name = 'simple'
temp_dir, kitchen_dir, recipe_dir = self._make_recipe_dir(recipe_name, child_kitchen)
rv = DKCloudCommandRunner.merge_kitchens_improved(mock_api, child_kitchen, parent_kitchen)
msg = rv.get_message()
self.assertTrue('1 conflict found' in msg)
self.assertTrue('conflicted-file.txt' in msg)
with open(os.path.join(recipe_dir, 'conflicted-file.txt'), 'r') as conflicted_file:
contents = conflicted_file.read()
self.assertTrue('<<<<<<< your conflicted-file.txt' in contents)
self.assertTrue('>>>>>>> their conflicted-file.txt' in contents)
self.assertTrue('=======' in contents)
# Now make sure it tells use there are unresolved conflicts the next time we try and merge
rv = DKCloudCommandRunner.merge_kitchens_improved(mock_api, child_kitchen, parent_kitchen)
self.assertTrue('Unresolved conflicts' in rv.rc['message'])
self.assertTrue('conflicted-file.txt' in rv.rc['message'])
# Resolve the conflict
rc = DKCloudCommandRunner.resolve_conflict('conflicted-file.txt')
self.assertTrue(rc.ok())
# Now the conflict should be gone, and we should be back to the found conflicts and written to disk message.
rc = DKCloudCommandRunner.merge_kitchens_improved(mock_api, child_kitchen, parent_kitchen)
msg = rc.get_message()
self.assertTrue('Unresolved conflicts' not in msg)
self.assertTrue('1 conflict found' in msg)
self.assertTrue('conflicted-file.txt' in msg)
if temp_dir is not None and temp_dir != '/':
shutil.rmtree(temp_dir)
# # test helpers in DKCloudCommandRunner.py
# def test__print_merge_patches_1(self):
# merge_conflicts = pickle.loads(open("files/merge_conflicts_1_file.p", "rb").read().replace('\r', ''))
# rs = DKCloudCommandRunner._print_merge_patches(merge_conflicts)
# # look for some strings so you know it worked
# # but don't look for too much so the test breaks if we re-format
# print rs
# self.assertTrue('File' in rs)
# self.assertTrue('parallel-recipe-test/description.json' in rs)
#
# def test__print_merge_patches_multi(self):
# merge_conflicts = pickle.loads(open("files/merge_conflicts_multi_file.p", "rb").read().replace('\r', ''))
# rs = DKCloudCommandRunner._print_merge_patches(merge_conflicts)
# # look for some strings so you know it worked
# # but don't look for too much so the test breaks if we re-format
# print rs
# self.assertTrue('File' in rs)
# self.assertTrue('simple/resources/very_cool.sql' in rs)
# self.assertTrue('parallel-recipe-test/description.json' in rs)
# self.assertTrue('parallel-recipe-test/node1/data_sources/DKDataSource_NoOp.json' in rs)
def test_print_test_results(self):
        # good for more than active
rdict = pickle.loads(open("files/completed_serving_rdict.p", "rb").read().replace('\r', ''))
# rdict = pickle.load(open("files/completed_serving_rdict_eg.p", "rb"))
rs = DKCloudCommandRunner._print_test_results(rdict)
# look for some strings so you know it worked
# but don't look for too much so the test breaks if we re-format
print rs
self.assertTrue('File' in rs)
def test_active_serving_watcher(self):
# setup
parent = 'master'
kitchen = 'test_active_serving_watcher'
kitchen = self._add_my_guid(kitchen)
recipe_name = 'test-everything-recipe'
variation_name = self._get_run_variation()
self._delete_and_clean_kitchen(kitchen)
rv = DKCloudCommandRunner.create_kitchen(self._api, parent, kitchen)
self.assertTrue(rv.ok())
# start watcher
DKActiveServingWatcherSingleton().set_sleep_time(2)
DKActiveServingWatcherSingleton().set_api(self._api)
DKActiveServingWatcherSingleton().set_kitchen(kitchen)
self.assertTrue(DKActiveServingWatcherSingleton().start_watcher())
# cook one
rs = DKCloudCommandRunner.create_order(self._api, kitchen, recipe_name, variation_name)
self.assertTrue(rs.ok())
wait_time = [.1, 1, 3, 3, 3, 3, 9, 18]
found_active_serving = False
wait_generator = (wt for wt in wait_time if found_active_serving is False)
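        # The generator re-evaluates found_active_serving on every pull, so the
        # polling loop stops (after escalating waits) once a serving is observed.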
print 'test_active_serving_watcher: found_active_serving, trying ... '
for wt in wait_generator:
time.sleep(wt)
resp1 = DKCloudCommandRunner.orderrun_detail(self._api, kitchen, {'summary': True})
print 'test_active_serving_watcher: found_active_serving is False (%s)' % wt
# print 'got', resp1.get_message()
message = resp1.get_message()
if resp1.ok() and ('OrderRun is Planned' in message or 'OrderRun Completed' in message
or 'OrderRun is Active' in message):
found_active_serving = True
self.assertTrue(found_active_serving)
# cleanup
self._delete_and_clean_kitchen(kitchen)
def test_user_info(self):
rc = DKCloudCommandRunner.user_info(self._api)
self.assertTrue(rc.ok())
def test_order_list(self):
parent_kitchen = 'CLI-Top'
recipe_name = 'parallel-recipe-test'
variation_name = self._get_run_variation_for_recipe(recipe_name)
new_kitchen = 'test_order_list'
new_kitchen = self._add_my_guid(new_kitchen)
self._delete_and_clean_kitchen(new_kitchen)
rs = DKCloudCommandRunner.create_kitchen(self._api, parent_kitchen, new_kitchen)
self.assertTrue(rs.ok())
rs = DKCloudCommandRunner.create_order(self._api, new_kitchen, recipe_name, variation_name)
new_order_id_1 = rs.get_payload()
self.assertTrue(rs.ok())
rs = DKCloudCommandRunner.list_order(self._api, new_kitchen)
output_string = rs.rc['message']
self.assertTrue(new_order_id_1 in output_string)
found_completed_serving = False
wait_time = [.2, .5, .5, .5, 1, 2, 2, 2, 2, 4, 4, 4, 4, 4, 6, 6, 6, 6, 10, 10]
for wt in wait_time:
rs = DKCloudCommandRunner.list_order(self._api, new_kitchen)
output_string = rs.rc['message']
n = output_string.count(new_order_id_1)
if n == 2 and ('OrderRun Completed' in output_string):
found_completed_serving = True
break
time.sleep(wt)
self.assertTrue(found_completed_serving)
# cleanup
self._delete_and_clean_kitchen(new_kitchen)
def test_order_list_for_repeating_order(self):
parent_kitchen = 'master'
recipe_name = 'parallel-recipe-test'
variation_name = self._get_run_variation_for_recipe(recipe_name, repeater=True)
new_kitchen = 'test_order_list_for_repeating_order'
new_kitchen = self._add_my_guid(new_kitchen)
self._delete_and_clean_kitchen(new_kitchen)
rs = DKCloudCommandRunner.create_kitchen(self._api, parent_kitchen, new_kitchen)
self.assertTrue(rs.ok())
rs = DKCloudCommandRunner.create_order(self._api, new_kitchen, recipe_name, variation_name)
new_order_id_1 = rs.get_payload()
self.assertTrue(rs.ok())
found_completed_serving = False
wait_time = [10,61,61,61,61]
for wt in wait_time:
rs = DKCloudCommandRunner.list_order(self._api, new_kitchen)
output_string = rs.rc['message']
n = output_string.count(new_order_id_1)
if n >= 3 and ('OrderRun Completed' in output_string):
found_completed_serving = True
break
time.sleep(wt)
self.assertTrue(found_completed_serving)
# cleanup
self._delete_and_clean_kitchen(new_kitchen)
def test_order_list_with_filters(self):
parent_kitchen = 'CLI-Top'
        # Kitchen name is guid-suffixed below so concurrent test runs don't collide.
kitchen = self._add_my_guid('test_order_list_with_filters')
recipe1 = 'parallel-recipe-test'
recipe1_variation = self._get_run_variation_for_recipe(recipe1)
recipe2 = 'simple'
recipe2_variation = 'simple-variation-now'
setup = True
if setup:
self._delete_and_clean_kitchen(kitchen)
rv = DKCloudCommandRunner.create_kitchen(self._api, parent_kitchen, kitchen)
self.assertTrue(rv.ok())
rv = DKCloudCommandRunner.create_order(self._api, kitchen, recipe1, recipe1_variation)
self.assertTrue(rv.ok())
first_order = rv.get_payload()
rv = DKCloudCommandRunner.create_order(self._api, kitchen, recipe1, recipe1_variation)
self.assertTrue(rv.ok())
time.sleep(20)
rs = DKCloudCommandRunner.list_order(self._api, kitchen)
self.assertTrue(rs.ok())
message = rs.rc['message']
self.assertTrue(
'OrderRun is Planned' in message or 'OrderRun Completed' in message or 'OrderRun is Active' in message)
# cleanup
self._delete_and_clean_kitchen(kitchen)
def test_orderrun_delete(self):
mock_api = DKCloudAPIMock(self._cr_config)
rs = DKCloudCommandRunner.delete_orderrun(mock_api, 'good')
self.assertTrue(rs.ok())
rs = DKCloudCommandRunner.delete_orderrun(mock_api, 'bad')
self.assertFalse(rs.ok())
def test_kitchen_config(self):
parent_kitchen = 'CLI-Top'
child_kitchen = self._add_my_guid('modify_kitchen_settings_runner')
setup = True
if setup:
self._delete_and_clean_kitchen(child_kitchen)
rv = DKCloudCommandRunner.create_kitchen(self._api, parent_kitchen, child_kitchen)
self.assertTrue(rv.ok())
add = (('newvar1', 'newval1'),)
unset = ('newvar1')
get = ('newvar1')
listall = True
rs = DKCloudCommandRunner.config_kitchen(self._api, child_kitchen, add=add)
self.assertTrue(rs.ok())
payload = rs.get_payload()
self.assertIsNotNone(payload)
message = rs.get_message()
self.assertTrue('newvar1 added' in message)
rs = DKCloudCommandRunner.config_kitchen(self._api, child_kitchen, get=get)
self.assertTrue(rs.ok())
payload = rs.get_payload()
self.assertIsNotNone(payload)
message = rs.get_message()
self.assertTrue(message == 'newval1\n')
rs = DKCloudCommandRunner.config_kitchen(self._api, child_kitchen, unset=unset)
self.assertTrue(rs.ok())
payload = rs.get_payload()
self.assertIsNotNone(payload)
message = rs.get_message()
rs = DKCloudCommandRunner.config_kitchen(self._api, child_kitchen, listall=listall)
self.assertTrue(rs.ok())
payload = rs.get_payload()
self.assertIsNotNone(payload)
message = rs.get_message()
self.assertTrue('newvar1' not in message)
cleanup = False
if cleanup:
self._delete_and_clean_kitchen(child_kitchen)
# helpers ---------------------------------
def _delete_and_clean_kitchen(self, kitchen_name):
DKCloudCommandRunner.delete_kitchen(self._api, kitchen_name)
def _check_no_merge_conflicts(self, resp):
self.assertTrue(str(resp).find('diverged') < 0)
def _get_recipe_file(self, kitchen, recipe_name, file_path, file_name, temp_dir=None):
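        # Fetch recipe_name into kitchen_dir (a fresh temp dir unless one is given)
        # and return the contents of file_path/file_name, or None if it is absent.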
delete_temp_dir = td = False
if temp_dir is None:
td, kitchen_dir = self._make_kitchen_dir(kitchen, change_dir=False)
delete_temp_dir = True
else:
kitchen_dir = temp_dir
rs = DKCloudCommandRunner.get_recipe(self._api, kitchen, recipe_name, kitchen_dir)
self.assertTrue(rs.ok())
the_path = os.path.join(kitchen_dir, os.path.join(file_path, file_name))
if os.path.isfile(the_path):
with open(the_path, 'r') as rfile:
rfile.seek(0)
the_file = rfile.read()
rc = the_file
else:
rc = None
if delete_temp_dir is True:
shutil.rmtree(td, ignore_errors=True)
return rc
def _get_recipe(self, kitchen, recipe):
rs = DKCloudCommandRunner.get_recipe(self._api, kitchen, recipe)
self.assertTrue(rs.ok())
return True
if __name__ == '__main__':
unittest.main()
|
agpl-3.0
|
endlessm/chromium-browser
|
third_party/angle/third_party/VK-GL-CTS/src/scripts/caselist_diff.py
|
6
|
15197
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# drawElements Quality Program utilities
# --------------------------------------
#
# Copyright 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
import sys
RENAME_LIST_2011_1_2011_2 = [
("dEQP-GLES2.functional.shaders.random.basic_expressions.*", "dEQP-GLES2.functional.shaders.random.basic_expression."),
("dEQP-GLES2.functional.shaders.random.scalar_conversions.*", "dEQP-GLES2.functional.shaders.random.scalar_conversion."),
("dEQP-GLES2.functional.fbo.render.color_clears_*", "dEQP-GLES2.functional.fbo.render.color_clear."),
("dEQP-GLES2.functional.fbo.render.intersecting_quads_*", "dEQP-GLES2.functional.fbo.render.depth."),
("dEQP-GLES2.functional.fbo.render.mix_*", "dEQP-GLES2.functional.fbo.render.color.mix_"),
("dEQP-GLES2.functional.fbo.render.blend_*", "dEQP-GLES2.functional.fbo.render.color.blend_"),
("dEQP-GLES2.functional.fbo.render.shared_colorbuffer_clears_*", "dEQP-GLES2.functional.fbo.render.shared_colorbuffer_clear."),
("dEQP-GLES2.functional.fbo.render.shared_colorbuffer_*", "dEQP-GLES2.functional.fbo.render.shared_colorbuffer."),
("dEQP-GLES2.functional.fbo.render.shared_depthbuffer_*", "dEQP-GLES2.functional.fbo.render.shared_depthbuffer."),
("dEQP-GLES2.functional.fbo.render.texsubimage_*", "dEQP-GLES2.functional.fbo.render.texsubimage."),
("dEQP-GLES2.functional.fbo.render.recreate_colorbuffer_*", "dEQP-GLES2.functional.fbo.render.recreate_colorbuffer.no_rebind_"),
("dEQP-GLES2.functional.fbo.render.recreate_depthbuffer_*", "dEQP-GLES2.functional.fbo.render.recreate_depthbuffer.no_rebind_"),
("dEQP-GLES2.functional.fbo.render.resize_*", "dEQP-GLES2.functional.fbo.render.resize.")
]
RENAME_LIST_2011_2_2011_3 = [
("dEQP-GLES2.usecases.ui.src_over_linear_1_batched", "dEQP-GLES2.usecases.ui.src_over_linear_batched_1"),
("dEQP-GLES2.usecases.ui.src_over_linear_2_batched", "dEQP-GLES2.usecases.ui.src_over_linear_batched_2"),
("dEQP-GLES2.usecases.ui.src_over_linear_4_batched", "dEQP-GLES2.usecases.ui.src_over_linear_batched_4"),
("dEQP-GLES2.usecases.ui.src_over_nearest_1_batched", "dEQP-GLES2.usecases.ui.src_over_nearest_batched_1"),
("dEQP-GLES2.usecases.ui.src_over_nearest_2_batched", "dEQP-GLES2.usecases.ui.src_over_nearest_batched_2"),
("dEQP-GLES2.usecases.ui.src_over_nearest_4_batched", "dEQP-GLES2.usecases.ui.src_over_nearest_batched_4"),
("dEQP-GLES2.usecases.ui.premultiplied_src_over_linear_1_batched", "dEQP-GLES2.usecases.ui.premultiplied_src_over_linear_batched_1"),
("dEQP-GLES2.usecases.ui.premultiplied_src_over_linear_2_batched", "dEQP-GLES2.usecases.ui.premultiplied_src_over_linear_batched_2"),
("dEQP-GLES2.usecases.ui.premultiplied_src_over_linear_4_batched", "dEQP-GLES2.usecases.ui.premultiplied_src_over_linear_batched_4"),
("dEQP-GLES2.usecases.ui.premultiplied_src_over_nearest_1_batched", "dEQP-GLES2.usecases.ui.premultiplied_src_over_nearest_batched_1"),
("dEQP-GLES2.usecases.ui.premultiplied_src_over_nearest_2_batched", "dEQP-GLES2.usecases.ui.premultiplied_src_over_nearest_batched_2"),
("dEQP-GLES2.usecases.ui.premultiplied_src_over_nearest_4_batched", "dEQP-GLES2.usecases.ui.premultiplied_src_over_nearest_batched_4"),
("dEQP-GLES2.usecases.ui.no_blend_linear_1_batched", "dEQP-GLES2.usecases.ui.no_blend_linear_batched_1"),
("dEQP-GLES2.usecases.ui.no_blend_linear_2_batched", "dEQP-GLES2.usecases.ui.no_blend_linear_batched_2"),
("dEQP-GLES2.usecases.ui.no_blend_linear_4_batched", "dEQP-GLES2.usecases.ui.no_blend_linear_batched_4"),
("dEQP-GLES2.usecases.ui.no_blend_nearest_1_batched", "dEQP-GLES2.usecases.ui.no_blend_nearest_batched_1"),
("dEQP-GLES2.usecases.ui.no_blend_nearest_2_batched", "dEQP-GLES2.usecases.ui.no_blend_nearest_batched_2"),
("dEQP-GLES2.usecases.ui.no_blend_nearest_4_batched", "dEQP-GLES2.usecases.ui.no_blend_nearest_batched_4")
]
RENAME_LIST_2011_3_2011_4 = []
RENAME_LIST_2011_4_2012_1 = [
("dEQP-GLES2.functional.vertex_arrays.multiple_attributes.output_types.*", "dEQP-GLES2.functional.vertex_arrays.multiple_attributes.input_types."),
]
RENAME_LIST_2012_2_2012_3 = [
("dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_float_vertex", "dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_float_float_vertex"),
("dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_float_fragment", "dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_float_float_fragment"),
("dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_float_vertex", "dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_float_float_vertex"),
("dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_float_fragment", "dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_float_float_fragment"),
("dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_vec2_vertex", "dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_vec2_float_vertex"),
("dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_vec2_fragment", "dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_vec2_float_fragment"),
("dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_vec2_vertex", "dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_vec2_float_vertex"),
("dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_vec2_fragment", "dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_vec2_float_fragment"),
("dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_vec3_vertex", "dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_vec3_float_vertex"),
("dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_vec3_fragment", "dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_vec3_float_fragment"),
("dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_vec3_vertex", "dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_vec3_float_vertex"),
("dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_vec3_fragment", "dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_vec3_float_fragment"),
("dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_vec4_vertex", "dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_vec4_float_vertex"),
("dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_vec4_fragment", "dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_vec4_float_fragment"),
("dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_vec4_vertex", "dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_vec4_float_vertex"),
("dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_vec4_fragment", "dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_vec4_float_fragment"),
("dEQP-GLES2.functional.negative_api.texture.copyteximage2d_unequal_width_height_cube", "dEQP-GLES2.functional.negative_api.texture.copyteximage2d_inequal_width_height_cube"),
("dEQP-GLES2.functional.negative_api.texture.teximage2d_unequal_width_height_cube", "dEQP-GLES2.functional.negative_api.texture.teximage2d_inequal_width_height_cube"),
("dEQP-GLES2.functional.negative_api.vertex_array.draw_arrays", "dEQP-GLES2.functional.negative_api.vertex_array.draw_arrays_invalid_program"),
("dEQP-GLES2.functional.negative_api.vertex_array.draw_elemens", "dEQP-GLES2.functional.negative_api.vertex_array.draw_elements_invalid_program"),
("dEQP-GLES2.functional.negative_api.shader.attach_shader_invalid_object", "dEQP-GLES2.functional.negative_api.shader.attach_shader"),
("dEQP-GLES2.functional.negative_api.shader.detach_shader_invalid_object", "dEQP-GLES2.functional.negative_api.shader.detach_shader"),
("dEQP-GLES2.usecases.shadow.shadowmap.1sample.1_vertex_lights_no_texture", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_1sample.1_vertex_lights_no_texture"),
("dEQP-GLES2.usecases.shadow.shadowmap.1sample.2_vertex_lights_no_texture", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_1sample.2_vertex_lights_no_texture"),
("dEQP-GLES2.usecases.shadow.shadowmap.1sample.4_vertex_lights_no_texture", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_1sample.4_vertex_lights_no_texture"),
("dEQP-GLES2.usecases.shadow.shadowmap.1sample.1_vertex_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_1sample.1_vertex_lights"),
("dEQP-GLES2.usecases.shadow.shadowmap.1sample.2_vertex_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_1sample.2_vertex_lights"),
("dEQP-GLES2.usecases.shadow.shadowmap.1sample.4_vertex_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_1sample.4_vertex_lights"),
("dEQP-GLES2.usecases.shadow.shadowmap.1sample.1_fragment_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_1sample.1_fragment_lights"),
("dEQP-GLES2.usecases.shadow.shadowmap.1sample.2_fragment_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_1sample.2_fragment_lights"),
("dEQP-GLES2.usecases.shadow.shadowmap.1sample.4_fragment_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_1sample.4_fragment_lights"),
("dEQP-GLES2.usecases.shadow.shadowmap.4sample.1_vertex_lights_no_texture", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_4sample.1_vertex_lights_no_texture"),
("dEQP-GLES2.usecases.shadow.shadowmap.4sample.2_vertex_lights_no_texture", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_4sample.2_vertex_lights_no_texture"),
("dEQP-GLES2.usecases.shadow.shadowmap.4sample.4_vertex_lights_no_texture", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_4sample.4_vertex_lights_no_texture"),
("dEQP-GLES2.usecases.shadow.shadowmap.4sample.1_vertex_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_4sample.1_vertex_lights"),
("dEQP-GLES2.usecases.shadow.shadowmap.4sample.2_vertex_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_4sample.2_vertex_lights"),
("dEQP-GLES2.usecases.shadow.shadowmap.4sample.4_vertex_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_4sample.4_vertex_lights"),
("dEQP-GLES2.usecases.shadow.shadowmap.4sample.1_fragment_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_4sample.1_fragment_lights"),
("dEQP-GLES2.usecases.shadow.shadowmap.4sample.2_fragment_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_4sample.2_fragment_lights"),
("dEQP-GLES2.usecases.shadow.shadowmap.4sample.4_fragment_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_4sample.4_fragment_lights"),
("dEQP-GLES2.usecases.shadow.shadowmap.16sample.1_vertex_lights_no_texture", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_16sample.1_vertex_lights_no_texture"),
("dEQP-GLES2.usecases.shadow.shadowmap.16sample.2_vertex_lights_no_texture", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_16sample.2_vertex_lights_no_texture"),
("dEQP-GLES2.usecases.shadow.shadowmap.16sample.4_vertex_lights_no_texture", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_16sample.4_vertex_lights_no_texture"),
("dEQP-GLES2.usecases.shadow.shadowmap.16sample.1_vertex_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_16sample.1_vertex_lights"),
("dEQP-GLES2.usecases.shadow.shadowmap.16sample.2_vertex_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_16sample.2_vertex_lights"),
("dEQP-GLES2.usecases.shadow.shadowmap.16sample.4_vertex_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_16sample.4_vertex_lights"),
("dEQP-GLES2.usecases.shadow.shadowmap.16sample.1_fragment_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_16sample.1_fragment_lights"),
("dEQP-GLES2.usecases.shadow.shadowmap.16sample.2_fragment_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_16sample.2_fragment_lights"),
("dEQP-GLES2.usecases.shadow.shadowmap.16sample.4_fragment_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_16sample.4_fragment_lights")
]
RENAME_LIST_2012_3_2012_4 = [
("dEQP-GLES2.functional.depth.*", "dEQP-GLES2.functional.fragment_ops.depth."),
("dEQP-GLES2.functional.stencil.*", "dEQP-GLES2.functional.fragment_ops.stencil.")
]
def readCaseList (filename):
f = open(filename, 'r')
cases = []
for line in f:
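        # Case list lines are expected to look like "TEST: dEQP-GLES2.group.case";
        # the six-character "TEST: " prefix is skipped below.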
if line[0:5] == "TEST:":
cases.append(line[6:].strip())
f.close()
return cases
def isWildcardPattern (pattern):
return pattern[-1:] == '*'
# returns (cases, renames)
def renameCases (cases, rename):
renamedCases = []
renamedSet = set()
renames = []
for case in cases:
renamed = None
for src, dst in rename:
if isWildcardPattern(src) and case[:len(src)-1] == src[:-1]:
renamed = dst + case[len(src)-1:]
break
elif case == src:
renamed = dst
break
        if renamed is not None:
renames.append((case, renamed))
case = renamed
# It is possible that some later case is renamed to case already seen in the list
        assert case not in renamedSet or renamed is not None
if case not in renamedSet:
renamedCases.append(case)
renamedSet.add(case)
return (renamedCases, renames)
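
# Illustrative example (hypothetical case names): with
#   rename = [("a.b.*", "a.c."), ("x.y", "x.z")]
# renameCases(["a.b.foo", "x.y"], rename) returns
#   (["a.c.foo", "x.z"], [("a.b.foo", "a.c.foo"), ("x.y", "x.z")])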
# returns (added, removed) lists
def diffCaseLists (old, new):
added = []
removed = []
oldSet = set(old)
newSet = set(new)
# build added list
for case in new:
        if case not in oldSet:
added.append(case)
# build removed set
for case in old:
        if case not in newSet:
removed.append(case)
return (added, removed)
if __name__ == "__main__":
if len(sys.argv) != 3:
print("%s [old caselist] [new caselist]" % sys.argv[0])
sys.exit(-1)
oldCases = readCaseList(sys.argv[1])
newCases = readCaseList(sys.argv[2])
rename = RENAME_LIST_2012_3_2012_4
renamedCases, renameList = renameCases(oldCases, rename)
added, removed = diffCaseLists(renamedCases, newCases)
# for src, dst in rename:
# print("RENAME: %s -> %s" % (src, dst))
for case in added:
print("ADD: %s" % case)
for src, dst in renameList:
print("RENAME: %s -> %s" % (src, dst))
for case in removed:
print("REMOVE: %s" % case)
|
bsd-3-clause
|
willprice/weboob
|
modules/weather/browser.py
|
7
|
2349
|
# -*- coding: utf-8 -*-
# Copyright(C) 2012 Arno Renevier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import urllib
from weboob.deprecated.browser import Browser
from .pages import ForecastPage, WeatherPage, CityPage
__all__ = ['WeatherBrowser']
class WeatherBrowser(Browser):
DOMAIN = 'www.weather.com'
PROTOCOL = 'http'
ENCODING = 'utf-8'
SEARCH_URL = 'http://www.weather.com/search/enhancedlocalsearch?where=%s'
WEATHER_URL = 'http://www.weather.com/weather/today/%s'
FORECAST_URL = 'http://www.weather.com/weather/tenday/%s'
RIGHTNOW_URL = 'http://www.weather.com/weather/right-now/%s'
USER_AGENT = Browser.USER_AGENTS['desktop_firefox']
PAGES = {
(SEARCH_URL.replace('.', '\\.').replace('?', '\\?') % '.*'): CityPage,
(WEATHER_URL.replace('.', '\\.').replace('?', '\\?') % '.*'): WeatherPage,
(FORECAST_URL.replace('.', '\\.').replace('?', '\\?') % '.*'): ForecastPage,
(RIGHTNOW_URL.replace('.', '\\.').replace('?', '\\?') % '.*'): WeatherPage,
}
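    # For reference, the escaping above turns e.g. SEARCH_URL into the regex
    #   http://www\.weather\.com/search/enhancedlocalsearch\?where=.*
    # so the literal dots and the query '?' are not treated as regex
    # metacharacters.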
def iter_city_search(self, pattern):
self.location(self.SEARCH_URL % urllib.quote_plus(pattern.encode('utf-8')))
if self.is_on_page(CityPage):
return self.page.iter_city_search()
elif self.is_on_page(WeatherPage):
return [self.page.get_city()]
def get_current(self, city_id):
self.location(self.WEATHER_URL % urllib.quote_plus(city_id.encode('utf-8')))
return self.page.get_current()
def iter_forecast(self, city_id):
self.location(self.FORECAST_URL % urllib.quote_plus(city_id.encode('utf-8')))
assert self.is_on_page(ForecastPage)
return self.page.iter_forecast()
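
# A minimal usage sketch (illustrative; iter_city_search, get_current and
# iter_forecast are the methods defined above, while attribute names on the
# returned objects come from the pages module and are assumptions here):
#   browser = WeatherBrowser()
#   cities = list(browser.iter_city_search('paris'))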
|
agpl-3.0
|
bdestombe/flopy-1
|
autotest/t017_test.py
|
2
|
9030
|
# Test binary and formatted data readers
import numpy as np
def test_formattedfile_read():
import os
import flopy
h = flopy.utils.FormattedHeadFile(
os.path.join('..', 'examples', 'data', 'mf2005_test',
'test1tr.githds'))
assert isinstance(h, flopy.utils.FormattedHeadFile)
times = h.get_times()
assert np.isclose(times[0], 1577880064.0)
kstpkper = h.get_kstpkper()
assert kstpkper[0] == (49, 0), 'kstpkper[0] != (49, 0)'
h0 = h.get_data(totim=times[0])
h1 = h.get_data(kstpkper=kstpkper[0])
h2 = h.get_data(idx=0)
assert np.array_equal(h0, h1), \
'formatted head read using totim != head read using kstpkper'
assert np.array_equal(h0, h2), \
'formatted head read using totim != head read using idx'
ts = h.get_ts((0, 7, 5))
assert np.isclose(ts[0, 1], 944.487, 1e-6), \
'time series value ({}) != {}'.format(ts[0, 1], 944.487)
return
def test_binaryfile_read():
import os
import flopy
h = flopy.utils.HeadFile(
os.path.join('..', 'examples', 'data', 'freyberg', 'freyberg.githds'))
assert isinstance(h, flopy.utils.HeadFile)
times = h.get_times()
assert np.isclose(times[0], 10.0), 'times[0] != {}'.format(times[0])
kstpkper = h.get_kstpkper()
assert kstpkper[0] == (0, 0), 'kstpkper[0] != (0, 0)'
h0 = h.get_data(totim=times[0])
h1 = h.get_data(kstpkper=kstpkper[0])
h2 = h.get_data(idx=0)
    assert np.array_equal(h0, h1), \
        'binary head read using totim != head read using kstpkper'
    assert np.array_equal(h0, h2), \
        'binary head read using totim != head read using idx'
ts = h.get_ts((0, 7, 5))
assert np.isclose(ts[0, 1], 26.00697135925293), \
        'time series value ({}) != {}'.format(ts[0, 1], 26.00697135925293)
return
def test_cellbudgetfile_read():
import os
import flopy
v = flopy.utils.CellBudgetFile(
os.path.join('..', 'examples', 'data', 'mf2005_test', 'mnw1.gitcbc'))
assert isinstance(v, flopy.utils.CellBudgetFile)
kstpkper = v.get_kstpkper()
assert len(kstpkper) == 5, 'length of kstpkper != 5'
records = v.get_unique_record_names()
idx = 0
for t in kstpkper:
for record in records:
t0 = v.get_data(kstpkper=t, text=record, full3D=True)[0]
t1 = v.get_data(idx=idx, text=record, full3D=True)[0]
assert np.array_equal(t0, t1), \
'binary budget item {0} read using kstpkper != binary budget item {0} read using idx'.format(
record)
idx += 1
return
def test_cellbudgetfile_readrecord():
import os
import flopy
v = flopy.utils.CellBudgetFile(
os.path.join('..', 'examples', 'data', 'mf2005_test',
'test1tr.gitcbc'))
assert isinstance(v, flopy.utils.CellBudgetFile)
kstpkper = v.get_kstpkper()
assert len(kstpkper) == 30, 'length of kstpkper != 30'
t = v.get_data(text='STREAM LEAKAGE')
assert len(t) == 30, 'length of stream leakage data != 30'
    assert t[0].shape[0] == 36, \
        'sfr budget data does not have 36 reach entries'
t = v.get_data(text='STREAM LEAKAGE', full3D=True)
    assert t[0].shape == (1, 15, 10), \
        '3D sfr budget data does not have correct shape (1, 15, 10) - ' + \
        'returned shape {}'.format(t[0].shape)
for kk in kstpkper:
t = v.get_data(kstpkper=kk, text='STREAM LEAKAGE', full3D=True)[0]
        assert t.shape == (1, 15, 10), \
            '3D sfr budget data for kstpkper {} '.format(kk) + \
            'does not have correct shape (1, 15, 10) - ' + \
            'returned shape {}'.format(t.shape)
idx = v.get_indices()
assert idx is None, 'get_indices() without record did not return None'
records = v.get_unique_record_names()
for record in records:
indices = v.get_indices(text=record.decode().strip())
for idx, kk in enumerate(kstpkper):
t0 = v.get_data(kstpkper=kk, text=record.decode().strip())[0]
t1 = v.get_data(idx=indices[idx], text=record)[0]
assert np.array_equal(t0, t1), \
'binary budget item {0} read using kstpkper != binary budget item {0} read using idx'.format(
record)
return
def test_cellbudgetfile_readrecord_waux():
import os
import flopy
v = flopy.utils.CellBudgetFile(
os.path.join('..', 'examples', 'data', 'mf2005_test',
'test1tr.gitcbc'))
assert isinstance(v, flopy.utils.CellBudgetFile)
kstpkper = v.get_kstpkper()
assert len(kstpkper) == 30, 'length of kstpkper != 30'
t = v.get_data(text='WELLS')
assert len(t) == 30, 'length of well data != 30'
assert t[0].shape[0] == 10, 'wel budget data does not have 10 well entries'
t = v.get_data(text='WELLS', full3D=True)
    assert t[0].shape == (1, 15, 10), \
        '3D wel budget data does not have correct shape (1, 15, 10) - ' + \
        'returned shape {}'.format(t[0].shape)
for kk in kstpkper:
t = v.get_data(kstpkper=kk, text='wells', full3D=True)[0]
        assert t.shape == (1, 15, 10), \
            '3D wel budget data for kstpkper {} '.format(kk) + \
            'does not have correct shape (1, 15, 10) - ' + \
            'returned shape {}'.format(t.shape)
idx = v.get_indices()
assert idx is None, 'get_indices() without record did not return None'
records = v.get_unique_record_names()
for record in records:
indices = v.get_indices(text=record.decode().strip())
for idx, kk in enumerate(kstpkper):
t0 = v.get_data(kstpkper=kk, text=record.decode().strip())[0]
t1 = v.get_data(idx=indices[idx], text=record)[0]
assert np.array_equal(t0, t1), \
'binary budget item {0} read using kstpkper != binary budget item {0} read using idx'.format(
record)
return
def test_binaryfile_writeread():
import os
import numpy as np
import flopy
pth = os.path.join("..", "examples", "data", "nwt_test")
model = 'Pr3_MFNWT_lower.nam'
ml = flopy.modflow.Modflow.load(model, version='mfnwt', model_ws=pth)
# change the model work space
ml.change_model_ws(os.path.join('temp', 't017'))
#
ncol = ml.dis.ncol
nrow = ml.dis.nrow
text = 'head'
# write a double precision head file
precision = 'double'
pertim = ml.dis.perlen.array[0].astype(np.float64)
header = flopy.utils.BinaryHeader.create(bintype=text, precision=precision,
text=text, nrow=nrow, ncol=ncol,
ilay=1, pertim=pertim,
totim=pertim, kstp=1, kper=1)
b = ml.dis.botm.array[0, :, :].astype(np.float64)
pth = os.path.join('temp', 't017', 'bottom.hds')
flopy.utils.Util2d.write_bin(b.shape, pth, b,
header_data=header)
bo = flopy.utils.HeadFile(pth, precision=precision)
times = bo.get_times()
errmsg = 'double precision binary totim read is not equal to totim written'
assert times[0] == pertim, errmsg
kstpkper = bo.get_kstpkper()
errmsg = 'kstp, kper read is not equal to kstp, kper written'
assert kstpkper[0] == (0, 0), errmsg
br = bo.get_data()
errmsg = 'double precision binary data read is not equal to data written'
assert np.allclose(b, br), errmsg
# write a single precision head file
precision = 'single'
pertim = ml.dis.perlen.array[0].astype(np.float32)
header = flopy.utils.BinaryHeader.create(bintype=text, precision=precision,
text=text, nrow=nrow, ncol=ncol,
ilay=1, pertim=pertim,
totim=pertim, kstp=1, kper=1)
b = ml.dis.botm.array[0, :, :].astype(np.float32)
pth = os.path.join('temp', 't017', 'bottom_single.hds')
flopy.utils.Util2d.write_bin(b.shape, pth, b,
header_data=header)
bo = flopy.utils.HeadFile(pth, precision=precision)
times = bo.get_times()
errmsg = 'single precision binary totim read is not equal to totim written'
assert times[0] == pertim, errmsg
kstpkper = bo.get_kstpkper()
errmsg = 'kstp, kper read is not equal to kstp, kper written'
assert kstpkper[0] == (0, 0), errmsg
br = bo.get_data()
    errmsg = 'single precision binary data read is not equal to data written'
assert np.allclose(b, br), errmsg
return
if __name__ == '__main__':
test_binaryfile_writeread()
test_formattedfile_read()
test_binaryfile_read()
test_cellbudgetfile_read()
test_cellbudgetfile_readrecord()
test_cellbudgetfile_readrecord_waux()
|
bsd-3-clause
|
devendermishrajio/nova_test_latest
|
nova/tests/unit/api/openstack/compute/test_versions.py
|
5
|
13984
|
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid as stdlib_uuid
from oslo_serialization import jsonutils
import webob
from nova.api.openstack.compute import views
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import matchers
NS = {
'atom': 'http://www.w3.org/2005/Atom',
'ns': 'http://docs.openstack.org/common/api/v1.0'
}
EXP_LINKS = {
'v2.0': {
'html': 'http://docs.openstack.org/',
},
'v2.1': {
'html': 'http://docs.openstack.org/'
},
}
EXP_VERSIONS = {
"v2.0": {
"id": "v2.0",
"status": "SUPPORTED",
"version": "",
"min_version": "",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "describedby",
"type": "text/html",
"href": EXP_LINKS['v2.0']['html'],
},
],
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json;version=2",
},
],
},
"v2.1": {
"id": "v2.1",
"status": "CURRENT",
"version": "2.10",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v2.1/",
},
{
"rel": "describedby",
"type": "text/html",
"href": EXP_LINKS['v2.1']['html'],
},
],
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json;version=2.1",
}
],
}
}
class VersionsTestV20(test.NoDBTestCase):
def test_get_version_list(self):
req = webob.Request.blank('/')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
versions = jsonutils.loads(res.body)["versions"]
expected = [
{
"id": "v2.0",
"status": "SUPPORTED",
"version": "",
"min_version": "",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/",
}],
},
{
"id": "v2.1",
"status": "CURRENT",
"version": "2.10",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v2.1/",
}],
},
]
self.assertEqual(versions, expected)
def test_get_version_list_302(self):
req = webob.Request.blank('/v2')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 302)
redirect_req = webob.Request.blank('/v2/')
self.assertEqual(res.location, redirect_req.url)
def _test_get_version_2_detail(self, url, accept=None):
if accept is None:
accept = "application/json"
req = webob.Request.blank(url)
req.accept = accept
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
version = jsonutils.loads(res.body)
expected = {
"version": {
"id": "v2.0",
"status": "SUPPORTED",
"version": "",
"min_version": "",
"updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "self",
"href": "http://localhost/v2/",
},
{
"rel": "describedby",
"type": "text/html",
"href": EXP_LINKS['v2.0']['html'],
},
],
"media-types": [
{
"base": "application/json",
"type": "application/"
"vnd.openstack.compute+json;version=2",
},
],
},
}
self.assertEqual(expected, version)
def test_get_version_2_detail(self):
self._test_get_version_2_detail('/v2/')
def test_get_version_2_detail_content_type(self):
accept = "application/json;version=2"
self._test_get_version_2_detail('/', accept=accept)
def test_get_version_2_versions_invalid(self):
req = webob.Request.blank('/v2/versions/1234')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(404, res.status_int)
def test_multi_choice_image(self):
req = webob.Request.blank('/images/1')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 300)
self.assertEqual(res.content_type, "application/json")
expected = {
"choices": [
{
"id": "v2.0",
"status": "SUPPORTED",
"links": [
{
"href": "http://localhost/v2/images/1",
"rel": "self",
},
],
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json"
";version=2"
},
],
},
{
"id": "v2.1",
"status": "CURRENT",
"links": [
{
"href": "http://localhost/v2.1/images/1",
"rel": "self",
},
],
"media-types": [
{
"base": "application/json",
"type":
"application/vnd.openstack.compute+json;version=2.1",
}
],
},
], }
self.assertThat(jsonutils.loads(res.body),
matchers.DictMatches(expected))
def test_multi_choice_server_atom(self):
"""Make sure multi choice responses do not have content-type
application/atom+xml (should use default of json)
"""
req = webob.Request.blank('/servers')
req.accept = "application/atom+xml"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 300)
self.assertEqual(res.content_type, "application/json")
def test_multi_choice_server(self):
uuid = str(stdlib_uuid.uuid4())
req = webob.Request.blank('/servers/' + uuid)
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 300)
self.assertEqual(res.content_type, "application/json")
expected = {
"choices": [
{
"id": "v2.0",
"status": "SUPPORTED",
"links": [
{
"href": "http://localhost/v2/servers/" + uuid,
"rel": "self",
},
],
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json"
";version=2"
},
],
},
{
"id": "v2.1",
"status": "CURRENT",
"links": [
{
"href": "http://localhost/v2.1/servers/" + uuid,
"rel": "self",
},
],
"media-types": [
{
"base": "application/json",
"type":
"application/vnd.openstack.compute+json;version=2.1",
}
],
},
], }
self.assertThat(jsonutils.loads(res.body),
matchers.DictMatches(expected))
class VersionsViewBuilderTests(test.NoDBTestCase):
def test_view_builder(self):
base_url = "http://example.org/"
version_data = {
"v3.2.1": {
"id": "3.2.1",
"status": "CURRENT",
"version": "2.3",
"min_version": "2.1",
"updated": "2011-07-18T11:30:00Z",
}
}
expected = {
"versions": [
{
"id": "3.2.1",
"status": "CURRENT",
"version": "2.3",
"min_version": "2.1",
"updated": "2011-07-18T11:30:00Z",
"links": [
{
"rel": "self",
"href": "http://example.org/v2/",
},
],
}
]
}
builder = views.versions.ViewBuilder(base_url)
output = builder.build_versions(version_data)
self.assertEqual(output, expected)
def test_generate_href(self):
base_url = "http://example.org/app/"
expected = "http://example.org/app/v2/"
builder = views.versions.ViewBuilder(base_url)
actual = builder.generate_href('v2')
self.assertEqual(actual, expected)
def test_generate_href_v21(self):
base_url = "http://example.org/app/"
expected = "http://example.org/app/v2.1/"
builder = views.versions.ViewBuilder(base_url)
actual = builder.generate_href('v2.1')
self.assertEqual(actual, expected)
def test_generate_href_unknown(self):
base_url = "http://example.org/app/"
expected = "http://example.org/app/v2/"
builder = views.versions.ViewBuilder(base_url)
actual = builder.generate_href('foo')
self.assertEqual(actual, expected)
# NOTE(oomichi): Now version API of v2.0 covers "/"(root).
# So this class tests "/v2.1" only for v2.1 API.
class VersionsTestV21(test.NoDBTestCase):
exp_versions = copy.deepcopy(EXP_VERSIONS)
exp_versions['v2.0']['links'].insert(0,
{'href': 'http://localhost/v2.1/', 'rel': 'self'},
)
def test_get_version_list_302(self):
req = webob.Request.blank('/v2.1')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(res.status_int, 302)
redirect_req = webob.Request.blank('/v2.1/')
self.assertEqual(res.location, redirect_req.url)
def test_get_version_21_detail(self):
req = webob.Request.blank('/v2.1/')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
version = jsonutils.loads(res.body)
expected = {"version": self.exp_versions['v2.1']}
self.assertEqual(expected, version)
def test_get_version_21_versions_v21_detail(self):
req = webob.Request.blank('/v2.1/fake/versions/v2.1')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
version = jsonutils.loads(res.body)
expected = {"version": self.exp_versions['v2.1']}
self.assertEqual(expected, version)
def test_get_version_21_versions_v20_detail(self):
req = webob.Request.blank('/v2.1/fake/versions/v2.0')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
version = jsonutils.loads(res.body)
expected = {"version": self.exp_versions['v2.0']}
self.assertEqual(expected, version)
def test_get_version_21_versions_invalid(self):
req = webob.Request.blank('/v2.1/versions/1234')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(res.status_int, 404)
def test_get_version_21_detail_content_type(self):
req = webob.Request.blank('/')
req.accept = "application/json;version=2.1"
res = req.get_response(fakes.wsgi_app_v21())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/json")
version = jsonutils.loads(res.body)
expected = {"version": self.exp_versions['v2.1']}
self.assertEqual(expected, version)
|
apache-2.0
|
blackye/luscan-devel
|
golismero/api/data/information/traceroute.py
|
8
|
8159
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Traceroute results.
"""
__license__ = """
GoLismero 2.0 - The web knife - Copyright (C) 2011-2014
Golismero project site: https://github.com/golismero
Golismero project mail: contact@golismero-project.com
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
__all__ = ["Traceroute", "Hop"]
from . import Fingerprint
from .. import identity
from ..resource.ip import IP
from ..resource.domain import Domain
from ...config import Config
from ...text.text_utils import to_utf8
from time import time
#------------------------------------------------------------------------------
class Hop (object):
"""
Traceroute hop.
"""
#--------------------------------------------------------------------------
def __init__(self, address, rtt, hostname = None):
"""
:param address: IP address.
:type address: str
:param rtt: Round trip time.
:type rtt: float
:param hostname: Hostname for this IP address. Optional.
:type hostname: str | None
"""
address = to_utf8(address)
hostname = to_utf8(hostname)
if type(address) is not str:
raise TypeError("Expected string, got %r instead" % type(address))
if hostname is not None and type(hostname) is not str:
raise TypeError("Expected string, got %r instead" % type(hostname))
self.__address = address
self.__rtt = float(rtt)
self.__hostname = hostname
#--------------------------------------------------------------------------
def to_dict(self):
return {
"address": self.__address,
"rtt": self.__rtt,
"hostname": self.__hostname,
}
#--------------------------------------------------------------------------
@property
def display_name(self):
return "Network Route"
#--------------------------------------------------------------------------
@property
def address(self):
"""
:returns: IP address.
:rtype: str
"""
return self.__address
#--------------------------------------------------------------------------
@property
def rtt(self):
"""
:returns: Round trip time.
:rtype: float
"""
return self.__rtt
#--------------------------------------------------------------------------
@property
def hostname(self):
"""
:returns: Hostname for this IP address. Optional.
:rtype: str | None
"""
return self.__hostname
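
    # Illustrative use:
    #   hop = Hop("192.0.2.1", 12.5, hostname="router.example.net")
    #   hop.to_dict()
    #   # -> {'address': '192.0.2.1', 'rtt': 12.5, 'hostname': 'router.example.net'}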
#------------------------------------------------------------------------------
class Traceroute(Fingerprint):
"""
Traceroute results.
"""
#--------------------------------------------------------------------------
def __init__(self, ip, port, protocol, hops, timestamp = None):
"""
:param ip: Scanned host's IP address.
:type ip: IP
:param port: Port used to trace the route.
:type port: int
:param protocol: Protocol used to trace the route.
One of: "TCP", "UDP", "ICMP".
:type protocol: str
:param hops: Traceroute results.
Missing hops are represented with None.
:type hops: tuple( Hop | None, ... )
:param timestamp: Timestamp for these traceroute results.
Defaults to the current time.
:type timestamp: float
"""
# Sanitize and store the properties.
try:
self.__timestamp = float(timestamp) if timestamp else time()
assert isinstance(ip, IP), type(ip)
self.__address = ip.address
port = int(port)
assert 0 < port < 65536, port
self.__port = port
protocol = str(protocol).upper()
assert protocol in ("TCP", "UDP", "ICMP"), protocol
self.__protocol = protocol
hops = tuple(hops)
for hop in hops:
assert hop is None or isinstance(hop, Hop), type(hop)
self.__hops = hops
except Exception:
##raise # XXX DEBUG
raise ValueError("Malformed traceroute results!")
# Call the superclass constructor.
super(Traceroute, self).__init__()
# Now we can associate the traceroute results to the IP address.
self.add_resource(ip)
#--------------------------------------------------------------------------
def to_dict(self):
d = super(Traceroute, self).to_dict()
d["hops"] = [
(h.to_dict() if h is not None else None)
for h in self.__hops
]
return d
#--------------------------------------------------------------------------
@property
def display_properties(self):
props = super(Traceroute, self).display_properties
del props["[DEFAULT]"]["Hops"]
props["[DEFAULT]"]["Route"] = str(self)
return props
#--------------------------------------------------------------------------
@identity
def address(self):
"""
:returns: Scanned host's IP address.
:rtype: str
"""
return self.__address
#--------------------------------------------------------------------------
@identity
def port(self):
"""
:returns: Port used to trace the route.
:rtype: int
"""
return self.__port
#--------------------------------------------------------------------------
@identity
def protocol(self):
"""
:returns: Protocol used to trace the route.
One of: "TCP", "UDP", "ICMP".
:rtype: str
"""
return self.__protocol
#--------------------------------------------------------------------------
@identity
def hops(self):
"""
:returns: Traceroute results.
Missing hops are represented with None.
:rtype: tuple( Hop | None, ... )
"""
return self.__hops
#--------------------------------------------------------------------------
@identity
def timestamp(self):
"""
:returns: Timestamp for these traceroute results.
:rtype: float
"""
return self.__timestamp
#--------------------------------------------------------------------------
def __str__(self):
if self.hops:
s = "Route to %s (%s %s):\n\n"
else:
s = "No route to %s (%s %s)."
s %= (self.address, self.protocol, self.port)
if self.hops:
w = len(str(len(self.hops)))
f = "%%%dd %%15s %%4.2f %%s" % w
m = "%%%dd *** *** ***" % w
s += "\n".join(
(
f % (i, h.address, h.rtt, h.hostname)
if h is not None
else m % i
)
for i, h in enumerate(self.hops)
)
return s
#--------------------------------------------------------------------------
@property
def discovered(self):
result = []
for hop in self.hops:
if hop is not None:
if hop.address in Config.audit_scope:
result.append( IP(hop.address) )
if hop.hostname and hop.hostname in Config.audit_scope:
result.append( Domain(hop.hostname) )
return result
|
gpl-2.0
|
witlox/elasticluster
|
elasticluster/providers/ec2_boto.py
|
1
|
25751
|
#
# Copyright (C) 2013, 2018 S3IT, University of Zurich
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__author__ = ', '.join([
'Nicolas Baer <nicolas.baer@uzh.ch>',
'Antonio Messina <antonio.s.messina@gmail.com>',
'Riccardo Murri <riccardo.murri@gmail.com>',
])
# System imports
import hashlib
import os
import urllib
import threading
import time
from warnings import warn
# External modules
import boto
import boto.ec2
import boto.vpc
from Crypto.PublicKey import RSA
from paramiko import DSSKey, RSAKey, PasswordRequiredException
from paramiko.ssh_exception import SSHException
# Elasticluster imports
from elasticluster import log
from elasticluster.providers import AbstractCloudProvider
from elasticluster.exceptions import VpcError, SecurityGroupError, \
SubnetError, KeypairError, ImageError, InstanceError, InstanceNotFoundError, ClusterError
class BotoCloudProvider(AbstractCloudProvider):
"""This implementation of
:py:class:`elasticluster.providers.AbstractCloudProvider` uses the boto
ec2 interface to connect to ec2 compliant clouds and manage instances.
Please check https://github.com/boto/boto for further information about
the supported cloud platforms.
:param str ec2_url: url to connect to cloud web service
:param str ec2_region: region identifier
:param str ec2_access_key: access key of the user account
:param str ec2_secret_key: secret key of the user account
:param str storage_path: path to store temporary data
    :param bool request_floating_ip: Whether floating IPs should be
                                     requested and assigned automatically
                                     (`True`) or assigned manually (`False`)
:param str instance_profile: Instance profile with IAM role permissions
:param float price: Spot instance price (if 0, do not use spot instances);
used as a default in `start_instance`:py:meth
    :param int timeout: Timeout (in seconds) waiting for spot instances
                        (only used if price > 0);
                        used as a default in `start_instance`:py:meth
"""
__node_start_lock = threading.Lock() # lock used for node startup
# interval (in seconds) for polling the cloud provider,
# e.g., when requesting spot instances
POLL_INTERVAL = 10
def __init__(self, ec2_url, ec2_region, ec2_access_key=None,
ec2_secret_key=None, vpc=None, storage_path=None,
request_floating_ip=False, instance_profile=None,
price=0.0, timeout=0):
self._url = ec2_url
self._access_key = ec2_access_key
self._secret_key = ec2_secret_key
self._vpc = vpc
self._instance_profile = instance_profile
self.request_floating_ip = request_floating_ip
# provide defaults for like-named arguments in `.start_instance`
self.price = price
self.timeout = timeout
# read all parameters from url
proto, opaqueurl = urllib.splittype(ec2_url)
self._host, self._ec2path = urllib.splithost(opaqueurl)
self._ec2host, port = urllib.splitport(self._host)
if port:
port = int(port)
self._ec2port = port
if proto == "https":
self._secure = True
else:
self._secure = False
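        # Example decomposition (illustrative): for
        # ec2_url = "https://ec2.example.com:8773/services/Cloud" this yields
        # proto == "https", _ec2host == "ec2.example.com", _ec2port == 8773,
        # _ec2path == "/services/Cloud", and _secure == True.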
self._region_name = ec2_region
# will be initialized upon first connect
self._ec2_connection = None
self._vpc_connection = None
self._vpc_id = None
self._instances = {}
self._cached_instances = []
self._images = None
def _connect(self):
"""
Connect to the EC2 cloud provider.
:return: :py:class:`boto.ec2.connection.EC2Connection`
:raises: Generic exception on error
"""
# check for existing connection
if self._ec2_connection:
return self._ec2_connection
try:
log.debug("Connecting to EC2 endpoint %s", self._ec2host)
# connect to webservice
ec2_connection = boto.ec2.connect_to_region(
self._region_name,
aws_access_key_id=self._access_key,
aws_secret_access_key=self._secret_key,
is_secure=self._secure,
host=self._ec2host,
port=self._ec2port,
path=self._ec2path,
)
log.debug("EC2 connection has been successful.")
if not self._vpc:
vpc_connection = None
self._vpc_id = None
else:
vpc_connection, self._vpc_id = self._find_vpc_by_name(self._vpc)
except Exception as err:
log.error("Error connecting to EC2: %s", err)
raise
self._ec2_connection, self._vpc_connection = (
ec2_connection, vpc_connection)
return self._ec2_connection
def _find_vpc_by_name(self, vpc_name):
vpc_connection = boto.vpc.connect_to_region(
self._region_name,
aws_access_key_id=self._access_key,
aws_secret_access_key=self._secret_key,
is_secure=self._secure,
host=self._ec2host,
port=self._ec2port,
path=self._ec2path,
)
log.debug("VPC connection has been successful.")
for vpc in vpc_connection.get_all_vpcs():
matches = [vpc.id]
if 'Name' in vpc.tags:
matches.append(vpc.tags['Name'])
if vpc_name in matches:
vpc_id = vpc.id
if vpc_name != vpc_id:
# then `vpc_name` is the VPC name
log.debug("VPC `%s` has ID `%s`", vpc_name, vpc_id)
break
else:
raise VpcError('Cannot find VPC `{0}`.'.format(vpc_name))
return (vpc_connection, vpc_id)
def start_instance(self, key_name, public_key_path, private_key_path,
security_group, flavor, image_id, image_userdata,
username=None, node_name=None, network_ids=None,
price=None, timeout=None,
boot_disk_device=None,
boot_disk_size=None,
boot_disk_type=None,
boot_disk_iops=None,
placement_group=None,
**kwargs):
"""Starts a new instance on the cloud using the given properties.
The following tasks are done to start an instance:
* establish a connection to the cloud web service
* check ssh keypair and upload it if it does not yet exist. This is
a locked process, since this function might be called in multiple
threads and we only want the key to be stored once.
* check if the security group exists
* run the instance with the given properties
:param str key_name: name of the ssh key to connect
:param str public_key_path: path to ssh public key
:param str private_key_path: path to ssh private key
:param str security_group: firewall rule definition to apply on the
instance
:param str flavor: machine type to use for the instance
:param str image_id: image type (os) to use for the instance
:param str image_userdata: command to execute after startup
:param str username: username for the given ssh key, default None
:param float price: Spot instance price (if 0, do not use spot instances).
        :param int timeout: Timeout (in seconds) waiting for spot instances;
            only used if price > 0.
:param str boot_disk_device: Root volume device path if not /dev/sda1
:param str boot_disk_size: Target size, in GiB, for the root volume
:param str boot_disk_type: Type of root volume (standard, gp2, io1)
:param str boot_disk_iops: Provisioned IOPS for the root volume
:param str placement_group: Enable low-latency networking between
compute nodes.
:return: str - instance id of the started instance
"""
connection = self._connect()
log.debug("Checking keypair `%s`.", key_name)
# the `_check_keypair` method has to be called within a lock,
# since it will upload the key if it does not exist and if this
# happens for every node at the same time ec2 will throw an error
# message (see issue #79)
with BotoCloudProvider.__node_start_lock:
self._check_keypair(key_name, public_key_path, private_key_path)
log.debug("Checking security group `%s`.", security_group)
security_group_id = self._check_security_group(security_group)
# image_id = self._find_image_id(image_id)
if network_ids:
interfaces = []
for subnet in network_ids.split(','):
subnet_id = self._check_subnet(subnet)
interfaces.append(
boto.ec2.networkinterface.NetworkInterfaceSpecification(
subnet_id=subnet_id, groups=[security_group_id],
associate_public_ip_address=self.request_floating_ip))
interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(*interfaces)
security_groups = []
else:
interfaces = None
security_groups = [security_group]
# get defaults for `price` and `timeout` from class instance
if price is None:
price = self.price
if timeout is None:
timeout = self.timeout
if boot_disk_size:
dev_root = boto.ec2.blockdevicemapping.BlockDeviceType()
dev_root.size = int(boot_disk_size)
dev_root.delete_on_termination = True
if boot_disk_type:
dev_root.volume_type = boot_disk_type
if boot_disk_iops:
dev_root.iops = int(boot_disk_iops)
bdm = boto.ec2.blockdevicemapping.BlockDeviceMapping()
dev_name = boot_disk_device if boot_disk_device else "/dev/sda1"
bdm[dev_name] = dev_root
else:
bdm = None
try:
            # start a spot instance if a bid price was specified
if price:
log.info("Requesting spot instance with price `%s` ...", price)
request = connection.request_spot_instances(
                    price, image_id, key_name=key_name, security_groups=security_groups,
instance_type=flavor, user_data=image_userdata,
network_interfaces=interfaces,
placement_group=placement_group,
block_device_map=bdm,
instance_profile_name=self._instance_profile)[-1]
                # wait until the spot request is fulfilled (will wait
                # forever if no timeout is given)
start_time = time.time()
timeout = (float(timeout) if timeout else 0)
log.info("Waiting for spot instance (will time out in %d seconds) ...", timeout)
while request.status.code != 'fulfilled':
if timeout and time.time()-start_time > timeout:
request.cancel()
raise RuntimeError('spot instance timed out')
time.sleep(self.POLL_INTERVAL)
# update request status
                request = connection.get_all_spot_instance_requests(request_ids=request.id)[-1]
else:
reservation = connection.run_instances(
image_id, key_name=key_name, security_groups=security_groups,
instance_type=flavor, user_data=image_userdata,
network_interfaces=interfaces,
placement_group=placement_group,
block_device_map=bdm,
instance_profile_name=self._instance_profile)
except Exception as ex:
log.error("Error starting instance: %s", ex)
if "TooManyInstances" in ex:
raise ClusterError(ex)
else:
raise InstanceError(ex)
if price:
vm = connection.get_only_instances(instance_ids=[request.instance_id])[-1]
else:
vm = reservation.instances[-1]
vm.add_tag("Name", node_name)
# cache instance object locally for faster access later on
self._instances[vm.id] = vm
return vm.id
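
    # A minimal call sketch (all argument values are placeholders, not
    # defaults):
    #   vm_id = provider.start_instance(
    #       key_name='mykey', public_key_path='~/.ssh/id_rsa.pub',
    #       private_key_path='~/.ssh/id_rsa', security_group='default',
    #       flavor='m1.small', image_id='ami-12345678', image_userdata='')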
def stop_instance(self, instance_id):
"""Stops the instance gracefully.
:param str instance_id: instance identifier
"""
instance = self._load_instance(instance_id)
instance.terminate()
del self._instances[instance_id]
def get_ips(self, instance_id):
"""Retrieves the private and public ip addresses for a given instance.
:return: list (ips)
"""
        instance = self._load_instance(instance_id)
        IPs = [ip for ip in (instance.private_ip_address, instance.ip_address)
               if ip]
# We also need to check if there is any floating IP associated
if self.request_floating_ip and not self._vpc:
# We need to list the floating IPs for this instance
floating_ips = [ip for ip in self._ec2_connection.get_all_addresses() if ip.instance_id == instance.id]
if not floating_ips:
log.debug("Public ip address has to be assigned through "
"elasticluster.")
ip = self._allocate_address(instance)
# This is probably the preferred IP we want to use
IPs.insert(0, ip)
else:
IPs = [ip.public_ip for ip in floating_ips] + IPs
return list(set(IPs))
def is_instance_running(self, instance_id):
"""Checks if the instance is up and running.
:param str instance_id: instance identifier
:return: bool - True if running, False otherwise
"""
instance = self._load_instance(instance_id)
if instance.update() == "running":
# If the instance is up&running, ensure it has an IP
# address.
if not instance.ip_address and self.request_floating_ip:
log.debug("Public ip address has to be assigned through "
"elasticluster.")
self._allocate_address(instance)
instance.update()
return True
else:
return False
def _allocate_address(self, instance):
"""Allocates a free public ip address to the given instance
:param instance: instance to assign address to
:type instance: py:class:`boto.ec2.instance.Reservation`
:return: public ip address
"""
connection = self._connect()
free_addresses = [ ip for ip in connection.get_all_addresses() if not ip.instance_id]
        if not free_addresses:
            try:
                # remember the freshly allocated address so it is picked
                # up by the pop() below
                free_addresses.append(connection.allocate_address())
            except Exception:
                log.error("Unable to allocate a public IP address to "
                          "instance `%s`", instance.id)
                return None
try:
address = free_addresses.pop()
instance.use_ip(address)
return address.public_ip
except Exception as ex:
log.error("Unable to associate IP address %s to instance `%s`",
address, instance.id)
return None
def _load_instance(self, instance_id):
"""
Return instance with the given id.
For performance reasons, the instance ID is first searched for in the
collection of VM instances started by ElastiCluster
(`self._instances`), then in the list of all instances known to the
cloud provider at the time of the last update
(`self._cached_instances`), and finally the cloud provider is directly
queried.
:param str instance_id: instance identifier
:return: py:class:`boto.ec2.instance.Reservation` - instance
        :raises: `InstanceNotFoundError` if the instance can't
                 be found in the local cache or in the cloud.
"""
# if instance is known, return it
if instance_id in self._instances:
return self._instances[instance_id]
# else, check (cached) list from provider
if instance_id not in self._cached_instances:
self._cached_instances = self._build_cached_instances()
if instance_id in self._cached_instances:
inst = self._cached_instances[instance_id]
self._instances[instance_id] = inst
return inst
# If we reached this point, the instance was not found neither
# in the caches nor on the website.
raise InstanceNotFoundError(
"Instance `{instance_id}` not found"
.format(instance_id=instance_id))
def _build_cached_instances(self):
"""
Build lookup table of VM instances known to the cloud provider.
The returned dictionary links VM id with the actual VM object.
"""
connection = self._connect()
reservations = connection.get_all_reservations()
cached_instances = {}
for rs in reservations:
for vm in rs.instances:
cached_instances[vm.id] = vm
return cached_instances
def _check_keypair(self, name, public_key_path, private_key_path):
"""First checks if the keypair is valid, then checks if the keypair
is registered with on the cloud. If not the keypair is added to the
users ssh keys.
:param str name: name of the ssh key
:param str public_key_path: path to the ssh public key file
:param str private_key_path: path to the ssh private key file
:raises: `KeypairError` if key is not a valid RSA or DSA key,
the key could not be uploaded or the fingerprint does not
match to the one uploaded to the cloud.
"""
connection = self._connect()
keypairs = connection.get_all_key_pairs()
keypairs = dict((k.name, k) for k in keypairs)
# decide if dsa or rsa key is provided
pkey = None
is_dsa_key = False
try:
pkey = DSSKey.from_private_key_file(private_key_path)
is_dsa_key = True
except PasswordRequiredException:
warn("Unable to check key file `{0}` because it is encrypted with a "
"password. Please, ensure that you added it to the SSH agent "
"with `ssh-add {1}`"
.format(private_key_path, private_key_path))
except SSHException:
try:
pkey = RSAKey.from_private_key_file(private_key_path)
except PasswordRequiredException:
warn("Unable to check key file `{0}` because it is encrypted with a "
"password. Please, ensure that you added it to the SSH agent "
"with `ssh-add {1}`"
.format(private_key_path, private_key_path))
except SSHException:
                raise KeypairError('File `%s` is neither a valid DSA key '
                                   'nor a valid RSA key.' % private_key_path)
# create keys that don't exist yet
if name not in keypairs:
log.warning(
"Keypair `%s` not found on resource `%s`, Creating a new one",
name, self._url)
with open(os.path.expanduser(public_key_path)) as f:
key_material = f.read()
try:
# check for DSA on amazon
if "amazon" in self._ec2host and is_dsa_key:
log.error(
"Apparently, amazon does not support DSA keys. "
"Please specify a valid RSA key.")
raise KeypairError(
"Apparently, amazon does not support DSA keys."
"Please specify a valid RSA key.")
connection.import_key_pair(name, key_material)
except Exception as ex:
log.error(
"Could not import key `%s` with name `%s` to `%s`",
name, public_key_path, self._url)
raise KeypairError(
"could not create keypair `%s`: %s" % (name, ex))
else:
# check fingerprint
cloud_keypair = keypairs[name]
if pkey:
if "amazon" in self._ec2host:
# AWS takes the MD5 hash of the key's DER representation.
key = RSA.importKey(open(private_key_path).read())
der = key.publickey().exportKey('DER')
m = hashlib.md5()
m.update(der)
digest = m.hexdigest()
fingerprint = ':'.join(digest[i:(i + 2)]
for i in range(0, len(digest), 2))
else:
fingerprint = ':'.join(i.encode('hex')
for i in pkey.get_fingerprint())
if fingerprint != cloud_keypair.fingerprint:
if "amazon" in self._ec2host:
log.error(
"Apparently, Amazon does not compute the RSA key "
"fingerprint as we do! We cannot check if the "
"uploaded keypair is correct!")
else:
raise KeypairError(
"Keypair `%s` is present but has "
"different fingerprint. Aborting!" % name)
def _check_security_group(self, name):
"""Checks if the security group exists.
:param str name: name of the security group
:return: str - security group id of the security group
:raises: `SecurityGroupError` if group does not exist
"""
connection = self._connect()
filters = {}
if self._vpc:
filters = {'vpc-id': self._vpc_id}
security_groups = connection.get_all_security_groups(filters=filters)
matching_groups = [
group
for group
in security_groups
if name in [group.name, group.id]
]
if len(matching_groups) == 0:
raise SecurityGroupError(
"the specified security group %s does not exist" % name)
elif len(matching_groups) == 1:
return matching_groups[0].id
elif self._vpc and len(matching_groups) > 1:
raise SecurityGroupError(
"the specified security group name %s matches "
"more than one security group" % name)
def _check_subnet(self, name):
"""Checks if the subnet exists.
:param str name: name of the subnet
:return: str - subnet id of the subnet
:raises: `SubnetError` if group does not exist
"""
# Subnets only exist in VPCs, so we don't need to worry about
# the EC2 Classic case here.
subnets = self._vpc_connection.get_all_subnets(
filters={'vpcId': self._vpc_id})
matching_subnets = [
subnet
for subnet
in subnets
if name in [subnet.tags.get('Name'), subnet.id]
]
if len(matching_subnets) == 0:
raise SubnetError(
"the specified subnet %s does not exist" % name)
elif len(matching_subnets) == 1:
return matching_subnets[0].id
else:
raise SubnetError(
"the specified subnet name %s matches more than "
"one subnet" % name)
def _find_image_id(self, image_id):
"""Finds an image id to a given id or name.
:param str image_id: name or id of image
:return: str - identifier of image
"""
if not self._images:
connection = self._connect()
self._images = connection.get_all_images()
image_id_cloud = None
for i in self._images:
if i.id == image_id or i.name == image_id:
image_id_cloud = i.id
break
if image_id_cloud:
return image_id_cloud
else:
raise ImageError(
"Could not find given image id `%s`" % image_id)
def __getstate__(self):
d = self.__dict__.copy()
del d['_ec2_connection']
del d['_vpc_connection']
return d
def __setstate__(self, state):
self.__dict__ = state
self._ec2_connection = None
self._vpc_connection = None
|
gpl-3.0
|
bratsche/Neutron-Drive
|
google_appengine/lib/webob_0_9/webob/multidict.py
|
32
|
15977
|
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
Gives a multi-value dictionary object (MultiDict) plus several wrappers
"""
import cgi
import copy
import sys
from webob.util.dictmixin import DictMixin
try:
reversed
except NameError:
from webob.util.reversed import reversed
__all__ = ['MultiDict', 'UnicodeMultiDict', 'NestedMultiDict', 'NoVars']
class MultiDict(DictMixin):
"""
An ordered dictionary that can have multiple values for each key.
Adds the methods getall, getone, mixed, and add to the normal
dictionary interface.
"""
def __init__(self, *args, **kw):
if len(args) > 1:
raise TypeError(
"MultiDict can only be called with one positional argument")
if args:
if hasattr(args[0], 'iteritems'):
items = list(args[0].iteritems())
elif hasattr(args[0], 'items'):
items = args[0].items()
else:
items = list(args[0])
self._items = items
else:
self._items = []
self._items.extend(kw.iteritems())
#@classmethod
def view_list(cls, lst):
"""
Create a dict that is a view on the given list
"""
if not isinstance(lst, list):
raise TypeError(
"%s.view_list(obj) takes only actual list objects, not %r"
% (cls.__name__, lst))
obj = cls()
obj._items = lst
return obj
view_list = classmethod(view_list)
#@classmethod
def from_fieldstorage(cls, fs):
"""
Create a dict from a cgi.FieldStorage instance
"""
obj = cls()
if fs.list:
# fs.list can be None when there's nothing to parse
for field in fs.list:
if field.filename:
obj.add(field.name, field)
else:
obj.add(field.name, field.value)
return obj
from_fieldstorage = classmethod(from_fieldstorage)
def __getitem__(self, key):
for k, v in reversed(self._items):
if k == key:
return v
raise KeyError(key)
def __setitem__(self, key, value):
try:
del self[key]
except KeyError:
pass
self._items.append((key, value))
def add(self, key, value):
"""
Add the key and value, not overwriting any previous value.
"""
self._items.append((key, value))
def getall(self, key):
"""
Return a list of all values matching the key (may be an empty list)
"""
result = []
for k, v in self._items:
if key == k:
result.append(v)
return result
def getone(self, key):
"""
Get one value matching the key, raising a KeyError if multiple
values were found.
"""
v = self.getall(key)
if not v:
raise KeyError('Key not found: %r' % key)
if len(v) > 1:
raise KeyError('Multiple values match %r: %r' % (key, v))
return v[0]
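
    # Illustrative behaviour:
    #   d = MultiDict([('a', 1), ('a', 2)])
    #   d.getall('a')  # -> [1, 2]
    #   d.getone('a')  # raises KeyError: multiple values match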
def mixed(self):
"""
Returns a dictionary where the values are either single
values, or a list of values when a key/value appears more than
once in this dictionary. This is similar to the kind of
dictionary often used to represent the variables in a web
request.
"""
result = {}
multi = {}
for key, value in self.iteritems():
if key in result:
# We do this to not clobber any lists that are
# *actual* values in this dictionary:
if key in multi:
result[key].append(value)
else:
result[key] = [result[key], value]
multi[key] = None
else:
result[key] = value
return result
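
    # For example:
    #   MultiDict([('a', 1), ('a', 2), ('b', 3)]).mixed()
    #   # -> {'a': [1, 2], 'b': 3}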
def dict_of_lists(self):
"""
Returns a dictionary where each key is associated with a
list of values.
"""
result = {}
for key, value in self.iteritems():
if key in result:
result[key].append(value)
else:
result[key] = [value]
return result
def __delitem__(self, key):
items = self._items
found = False
for i in range(len(items)-1, -1, -1):
if items[i][0] == key:
del items[i]
found = True
if not found:
raise KeyError(key)
def __contains__(self, key):
for k, v in self._items:
if k == key:
return True
return False
has_key = __contains__
def clear(self):
self._items = []
def copy(self):
return self.__class__(self)
def setdefault(self, key, default=None):
for k, v in self._items:
if key == k:
return v
self._items.append((key, default))
return default
def pop(self, key, *args):
if len(args) > 1:
raise TypeError, "pop expected at most 2 arguments, got "\
+ repr(1 + len(args))
for i in range(len(self._items)):
if self._items[i][0] == key:
v = self._items[i][1]
del self._items[i]
return v
if args:
return args[0]
else:
raise KeyError(key)
def popitem(self):
return self._items.pop()
def update(self, other=None, **kwargs):
if other is None:
pass
elif hasattr(other, 'items'):
self._items.extend(other.items())
elif hasattr(other, 'keys'):
for k in other.keys():
self._items.append((k, other[k]))
else:
for k, v in other:
self._items.append((k, v))
if kwargs:
self.update(kwargs)
def __repr__(self):
items = ', '.join(['(%r, %r)' % v for v in self.iteritems()])
return '%s([%s])' % (self.__class__.__name__, items)
def __len__(self):
return len(self._items)
##
## All the iteration:
##
def keys(self):
return [k for k, v in self._items]
def iterkeys(self):
for k, v in self._items:
yield k
__iter__ = iterkeys
def items(self):
return self._items[:]
def iteritems(self):
return iter(self._items)
def values(self):
return [v for k, v in self._items]
def itervalues(self):
for k, v in self._items:
yield v
class UnicodeMultiDict(DictMixin):
"""
A MultiDict wrapper that decodes returned values to unicode on the
fly. Decoding is not applied to assigned values.
    The key/value contents are assumed to be ``str``/``str`` or
    ``str``/``FieldStorage`` pairs (as returned by the
    ``paste.request.parse_`` functions).
Can optionally also decode keys when the ``decode_keys`` argument is
True.
``FieldStorage`` instances are cloned, and the clone's ``filename``
variable is decoded. Its ``name`` variable is decoded when ``decode_keys``
is enabled.
"""
def __init__(self, multi=None, encoding=None, errors='strict',
decode_keys=False):
self.multi = multi
if encoding is None:
encoding = sys.getdefaultencoding()
self.encoding = encoding
self.errors = errors
self.decode_keys = decode_keys
def _decode_key(self, key):
if self.decode_keys:
try:
key = key.decode(self.encoding, self.errors)
except AttributeError:
pass
return key
def _decode_value(self, value):
"""
Decode the specified value to unicode. Assumes value is a ``str`` or
        ``FieldStorage`` object.
``FieldStorage`` objects are specially handled.
"""
if isinstance(value, cgi.FieldStorage):
# decode FieldStorage's field name and filename
value = copy.copy(value)
if self.decode_keys:
value.name = value.name.decode(self.encoding, self.errors)
if value.filename:
value.filename = value.filename.decode(self.encoding,
self.errors)
else:
try:
value = value.decode(self.encoding, self.errors)
except AttributeError:
pass
return value
def __getitem__(self, key):
return self._decode_value(self.multi.__getitem__(key))
def __setitem__(self, key, value):
self.multi.__setitem__(key, value)
def add(self, key, value):
"""
Add the key and value, not overwriting any previous value.
"""
self.multi.add(key, value)
def getall(self, key):
"""
Return a list of all values matching the key (may be an empty list)
"""
return [self._decode_value(v) for v in self.multi.getall(key)]
def getone(self, key):
"""
Get one value matching the key, raising a KeyError if multiple
values were found.
"""
return self._decode_value(self.multi.getone(key))
def mixed(self):
"""
Returns a dictionary where the values are either single
values, or a list of values when a key/value appears more than
once in this dictionary. This is similar to the kind of
dictionary often used to represent the variables in a web
request.
"""
unicode_mixed = {}
for key, value in self.multi.mixed().iteritems():
if isinstance(value, list):
                value = [self._decode_value(v) for v in value]
else:
value = self._decode_value(value)
unicode_mixed[self._decode_key(key)] = value
return unicode_mixed
def dict_of_lists(self):
"""
Returns a dictionary where each key is associated with a
list of values.
"""
unicode_dict = {}
for key, value in self.multi.dict_of_lists().iteritems():
            value = [self._decode_value(v) for v in value]
unicode_dict[self._decode_key(key)] = value
return unicode_dict
def __delitem__(self, key):
self.multi.__delitem__(key)
def __contains__(self, key):
return self.multi.__contains__(key)
has_key = __contains__
def clear(self):
self.multi.clear()
def copy(self):
return UnicodeMultiDict(self.multi.copy(), self.encoding, self.errors)
def setdefault(self, key, default=None):
return self._decode_value(self.multi.setdefault(key, default))
def pop(self, key, *args):
return self._decode_value(self.multi.pop(key, *args))
def popitem(self):
k, v = self.multi.popitem()
return (self._decode_key(k), self._decode_value(v))
def __repr__(self):
items = ', '.join(['(%r, %r)' % v for v in self.items()])
return '%s([%s])' % (self.__class__.__name__, items)
def __len__(self):
return self.multi.__len__()
##
## All the iteration:
##
def keys(self):
return [self._decode_key(k) for k in self.multi.iterkeys()]
def iterkeys(self):
for k in self.multi.iterkeys():
yield self._decode_key(k)
__iter__ = iterkeys
def items(self):
return [(self._decode_key(k), self._decode_value(v)) for \
k, v in self.multi.iteritems()]
def iteritems(self):
for k, v in self.multi.iteritems():
yield (self._decode_key(k), self._decode_value(v))
def values(self):
return [self._decode_value(v) for v in self.multi.itervalues()]
def itervalues(self):
for v in self.multi.itervalues():
yield self._decode_value(v)
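# A small usage sketch (not part of the original module): wrap a MultiDict so
# byte-string values come back decoded to unicode, while the underlying dict
# keeps its raw values. Assumes the MultiDict class defined above.
def _unicode_multidict_sketch():
    raw = MultiDict()
    raw.add('name', 'caf\xc3\xa9')              # UTF-8 encoded bytes
    ud = UnicodeMultiDict(raw, encoding='utf-8')
    assert ud['name'] == u'caf\xe9'             # decoded on access
    assert raw['name'] == 'caf\xc3\xa9'         # wrapped dict untouched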
_dummy = object()
class NestedMultiDict(MultiDict):
"""
Wraps several MultiDict objects, treating it as one large MultiDict
"""
def __init__(self, *dicts):
self.dicts = dicts
def __getitem__(self, key):
for d in self.dicts:
value = d.get(key, _dummy)
if value is not _dummy:
return value
raise KeyError(key)
def _readonly(self, *args, **kw):
raise KeyError("NestedMultiDict objects are read-only")
__setitem__ = _readonly
add = _readonly
__delitem__ = _readonly
clear = _readonly
setdefault = _readonly
pop = _readonly
popitem = _readonly
update = _readonly
def getall(self, key):
result = []
for d in self.dicts:
result.extend(d.getall(key))
return result
# Inherited:
# getone
# mixed
# dict_of_lists
# copy
def __contains__(self, key):
for d in self.dicts:
if key in d:
return True
return False
has_key = __contains__
def __len__(self):
v = 0
for d in self.dicts:
v += len(d)
return v
def __nonzero__(self):
for d in self.dicts:
if d:
return True
return False
def items(self):
return list(self.iteritems())
def iteritems(self):
for d in self.dicts:
for item in d.iteritems():
yield item
def values(self):
return list(self.itervalues())
def itervalues(self):
for d in self.dicts:
for value in d.itervalues():
yield value
def keys(self):
return list(self.iterkeys())
def __iter__(self):
for d in self.dicts:
for key in d:
yield key
iterkeys = __iter__
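# A short sketch (not in the original): NestedMultiDict consults its wrapped
# dicts in order, so the first dict wins single-value lookups while getall()
# gathers matches from every layer; all mutation is refused.
def _nested_multidict_sketch():
    get_vars = MultiDict(page='1')
    post_vars = MultiDict(page='2', token='abc')
    both = NestedMultiDict(get_vars, post_vars)
    assert both['page'] == '1'                  # first dict wins
    assert both.getall('page') == ['1', '2']    # all layers, in order
    assert 'token' in both
    try:
        both['page'] = '3'
    except KeyError:
        pass                                    # read-only by design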
class NoVars(object):
"""
Represents no variables; used when no variables
are applicable.
This is read-only
"""
def __init__(self, reason=None):
self.reason = reason or 'N/A'
def __getitem__(self, key):
raise KeyError("No key %r: %s" % (key, self.reason))
def __setitem__(self, *args, **kw):
raise KeyError("Cannot add variables: %s" % self.reason)
add = __setitem__
setdefault = __setitem__
update = __setitem__
def __delitem__(self, *args, **kw):
raise KeyError("No keys to delete: %s" % self.reason)
clear = __delitem__
pop = __delitem__
popitem = __delitem__
def get(self, key, default=None):
return default
def getall(self, key):
return []
def getone(self, key):
return self[key]
def mixed(self):
return {}
dict_of_lists = mixed
def __contains__(self, key):
return False
has_key = __contains__
def copy(self):
return self
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__,
self.reason)
def __len__(self):
return 0
def __cmp__(self, other):
return cmp({}, other)
def keys(self):
return []
def iterkeys(self):
return iter([])
__iter__ = iterkeys
items = keys
iteritems = iterkeys
values = keys
itervalues = iterkeys
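# Sketch (not in the original): NoVars acts as an immutable empty mapping
# whose error messages carry the reason it is empty.
def _novars_sketch():
    nv = NoVars('not a POST request')
    assert len(nv) == 0 and nv.get('user') is None
    try:
        nv['user']
    except KeyError, e:
        assert 'not a POST request' in str(e)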
__test__ = {
'general': """
>>> d = MultiDict(a=1, b=2)
>>> d['a']
1
>>> d.getall('c')
[]
>>> d.add('a', 2)
>>> d['a']
2
>>> d.getall('a')
[1, 2]
>>> d['b'] = 4
>>> d.getall('b')
[4]
>>> d.keys()
['a', 'a', 'b']
>>> d.items()
[('a', 1), ('a', 2), ('b', 4)]
>>> d.mixed()
{'a': [1, 2], 'b': 4}
>>> MultiDict([('a', 'b')], c=2)
MultiDict([('a', 'b'), ('c', 2)])
"""}
if __name__ == '__main__':
import doctest
doctest.testmod()
|
bsd-3-clause
|
mwx1993/TACTIC
|
src/asset_security.py
|
7
|
3762
|
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
import tacticenv
from pyasm.common import Environment, SPTDate
from pyasm.security import Batch, Security
Batch()
from tactic_client_lib import TacticServerStub
import os, sys, xmlrpclib, datetime
from mod_python import apache, Cookie
# This will ensure that any asset requires a valid ticket
def accesshandler(request):
cookies = Cookie.get_cookies(request)
# if login ticket cookie does not exist, then deny
if not cookies.has_key('login_ticket'):
# just refuse access
return apache.HTTP_FORBIDDEN
ticket = cookies['login_ticket'].value
if not ticket:
return apache.HTTP_FORBIDDEN
server = TacticServerStub.get(protocol='local')
expr = "@SOBJECT(sthpw/ticket['ticket','%s'])" % ticket
sobject = server.eval(expr, single=True)
now = SPTDate.now()
expiry = sobject.get("expiry")
if expiry and expiry < str(now):
return apache.HTTP_FORBIDDEN
request.add_common_vars()
path = str(request.subprocess_env['REQUEST_URI'])
    if path is None:
return apache.HTTP_FORBIDDEN
# FIXME: find some mechanism which is more acceptable ... like /icons
#if path.find("_icon_") != -1:
# return apache.OK
return apache.OK
def outputfilter_example(filter):
    # demo output filter (not used by the handlers above): upper-cases output
    s = filter.read()
while s:
filter.write(s.upper())
s = filter.read()
if s is None:
filter.close()
from PIL import Image, ImageChops, ImageFont, ImageDraw
from cStringIO import StringIO
from pyasm.security import Ticket
from pyasm.security.watermark import Watermark
from datetime import datetime
def outputfilter_watermark(filter):
s_in = None
s_out = None
try:
s_in = StringIO(filter.read())
im_in = Image.open(s_in)
if im_in.size[0] <= 240 and im_in.size[1] <= 120:
filter.write(s_in.getvalue())
return
# if this is a sub request, then don't process again
req = filter.req
if req.main:
filter.write(s_in.getvalue())
return
cookies = Cookie.get_cookies(req)
ticket = cookies['login_ticket'].value
query = req.parsed_uri[apache.URI_QUERY]
    if query == "watermark=false":
        filter.write(s_in.getvalue())
        return
ticket_sobj = Ticket.get_by_valid_key(ticket)
# if this is not a valid ticket, then just exit with no image
if not ticket_sobj:
return
# TODO: need fancier algorithm here
if ticket_sobj.get_value("login") == 'admin':
filter.write(s_in.getvalue())
return
        sizex = im_in.size[0]
        sizey = im_in.size[1]
        # Downsample to max_res wide, then upsample to max_width: this caps
        # the effective resolution of the served image while preserving the
        # original aspect ratio.
        max_res = 240
        max_width = 640
        im_in = im_in.resize( (max_res, int(sizey/(sizex/float(max_res)))) )
        im_in = im_in.resize( (max_width, int(sizey/(sizex/float(max_width)))) )
# add the watermark
watermark = Watermark()
now = datetime.today().strftime("%Y/%m/%d, %H:%M")
texts = ['Do Not Copy', ticket, now]
sizes = [20, 10, 10, 20, 20]
mark = watermark.generate(texts, sizes)
im_out = watermark.execute(im_in, mark, 'tile', 0.5)
s_out = StringIO()
im_out.save(s_out, format='jpeg')
filter.write(s_out.getvalue())
finally:
if s_in:
s_in.close()
if s_out:
s_out.close()
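# A hedged sketch of the Apache/mod_python wiring these handlers expect
# (PythonAccessHandler, PythonOutputFilter and AddOutputFilter are real
# mod_python directives; the directory path and filter name are illustrative
# assumptions, not taken from this repository):
#
#   <Directory "/var/www/assets">
#       PythonAccessHandler asset_security
#       PythonOutputFilter asset_security::outputfilter_watermark WATERMARK
#       AddOutputFilter WATERMARK .jpg
#   </Directory>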
|
epl-1.0
|
tweemeterjop/thug
|
thug/ActiveX/modules/AnswerWorks.py
|
1
|
1041
|
# Vantage Linguistics AnswerWorks ActiveX Controls
# CVE-2007-6387
import logging
log = logging.getLogger("Thug")
def GetHistory(self, arg):
if len(arg) > 215:
log.ThugLogging.log_exploit_event(self._window.url,
"AnswerWorks ActiveX",
"Overflow in GetHistory",
cve = 'CVE-2007-6387')
def GetSeedQuery(self, arg):
if len(arg) > 215:
log.ThugLogging.log_exploit_event(self._window.url,
"AnswerWorks ActiveX",
"Overflow in GetSeedQuery",
cve = 'CVE-2007-6387')
def SetSeedQuery(self, arg):
if len(arg) > 215:
log.ThugLogging.log_exploit_event(self._window.url,
"AnswerWorks ActiveX",
"SetSeedQuery",
cve = 'CVE-2007-6387')
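# Minimal illustration (hypothetical, not part of Thug): Thug binds these
# functions as methods of the emulated ActiveX object, so script such as
#   ax.GetHistory('A' * 4000)
# exceeds the 215-character threshold above and logs CVE-2007-6387.
def _overflow_threshold_demo():
    benign, hostile = 'awHelp', 'A' * 4000
    assert len(benign) <= 215 and len(hostile) > 215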
|
gpl-2.0
|
blekhmanlab/hominid
|
hominid/sort_results.py
|
1
|
6152
|
"""
Read a rvcf file with stability selection scores for taxa.
Sort the dataframe by rsq_median.
Print results.
usage:
    python sort_results.py \
        ../example/stability_selection_example_output.vcf \
        ../example/hominid_example_taxon_table_input.txt \
        arcsinsqrt \
        0.5 \
        0.5 \
        10
"""
import argparse
import sys
import pandas as pd
from hominid.hominid import read_taxon_file, align_snp_and_taxa
def sort_results(rvcf_input_file_path, taxon_table_file_path, transform,
r_sqr_median_cutoff, stability_cutoff, snp_count, no_tables,
extra_columns):
    print('processing {} SNPs from {}'.format(snp_count, rvcf_input_file_path))
# read the rvcf file and sort by rsq_median
df = pd.read_csv(rvcf_input_file_path, sep='\t', dtype={'CHROM': str})
#print('df.shape: {}'.format(df.shape))
sorted_rsq_best_medians_df = df.sort_values(by='rsq_median', ascending=False)
x_df = sorted_rsq_best_medians_df[sorted_rsq_best_medians_df.rsq_median > r_sqr_median_cutoff]
print('{} SNPs with r_sqr > {:5.3f}'.format(x_df.shape[0], r_sqr_median_cutoff))
taxon_table_df = read_taxon_file(taxon_table_file_path, transform=transform)
for row_i in range(sorted_rsq_best_medians_df.shape[0]):
if row_i >= snp_count:
break
else:
# get a 1-row dataframe
snp_df = sorted_rsq_best_medians_df.iloc[[row_i]]
aligned_snp_df, aligned_taxa_df = align_snp_and_taxa(
snp_df,
taxon_table_df
)
# get the taxon stability selection scores
# use the taxon table df index to get column names for snp_df
taxon_scores_df = snp_df.loc[:, taxon_table_df.index].transpose()
sorted_taxon_scores_df = taxon_scores_df.sort_values(by=taxon_scores_df.columns[0], ascending=False)
#sorted_taxon_scores_df = taxon_scores_df.sort(taxon_scores_df.columns[0], ascending=False)
p_df_list = []
print('{} {} {:5.3f}'.format(snp_df.iloc[0].GENE, snp_df.iloc[0].ID, snp_df.iloc[0].rsq_median))
summary_line = '{}\t{}\t'.format(snp_df.iloc[0].GENE, snp_df.iloc[0].ID)
for i, (selected_taxon, selected_taxon_row) in enumerate(sorted_taxon_scores_df.iterrows()):
# use selected_taxon_row.index[0] to index the first and only column
selected_taxon_score = selected_taxon_row.iloc[0]
if selected_taxon_score < stability_cutoff:
#print('done with selected taxa')
break
else:
# trim 'Root;' from the front of the taxon name
if selected_taxon.startswith('Root;'):
taxon_name = selected_taxon[5:]
else:
taxon_name = selected_taxon
print(' {:5.3f} {}'.format(selected_taxon_score, taxon_name))
summary_line += '{}, '.format(taxon_name)
gts = [
snp_df.iloc[0].REF + snp_df.iloc[0].REF, # 0
snp_df.iloc[0].REF + snp_df.iloc[0].ALT, # 1
snp_df.iloc[0].ALT + snp_df.iloc[0].ALT # 2
]
aligned_snp_value_list = aligned_snp_df.values.flatten().tolist()
data_dict = {
'chromosome': [snp_df.iloc[0].CHROM] * aligned_snp_df.shape[1],
'snp_id': [snp_df.iloc[0].ID] * aligned_snp_df.shape[1],
'gene': [snp_df.iloc[0].GENE] * aligned_snp_df.shape[1],
'taxon': [selected_taxon] * aligned_snp_df.shape[1],
'abundance': aligned_taxa_df[selected_taxon].values.tolist(),
'variant_allele_count': [str(int(v)) for v in aligned_snp_value_list],
'genotype': [gts[int(v)] for v in aligned_snp_value_list],
'sample_id' : aligned_snp_df.columns
}
columns_to_display = ['abundance', 'variant_allele_count', 'genotype', 'sample_id']
if extra_columns:
for extra_column in extra_columns.split(','):
data_dict[extra_column] = snp_df.iloc[0][extra_column]
columns_to_display.append(extra_column)
p_df = pd.DataFrame(data_dict)
p_df_list.append(p_df)
if no_tables:
pass
else:
p_df[columns_to_display].to_csv(
sys.stdout,
sep='\t'
)
# save a stacked bar plot
if len(p_df_list) > 0:
file_name = 'stacked_bar_plot_selected_taxa_{}_{}.pdf'.format(
snp_df.iloc[0].GENE,
snp_df.iloc[0].ID
)
p_df = pd.concat(p_df_list, axis=0)
# at this point the index for p_df looks like
# 0...76.0...76.0...76
# replace the index
p_df.index = range(p_df.shape[0])
#p_df.to_csv(file_path, sep='\t')
stacked_bar_title = '{}\n{}'.format(snp_df.iloc[0].GENE, snp_df.iloc[0].ID)
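# The original leaves file_name and stacked_bar_title unused; this is a
# hedged sketch (assuming matplotlib is available) of the save step they
# appear intended for: stack per-taxon abundances by sample.
def _save_stacked_bar_plot(p_df, stacked_bar_title, file_name):
    import matplotlib
    matplotlib.use('Agg')  # render without a display
    pivoted = p_df.pivot_table(index='sample_id', columns='taxon',
                               values='abundance')
    ax = pivoted.plot(kind='bar', stacked=True, title=stacked_bar_title)
    ax.get_figure().savefig(file_name)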
def main():
argparser = argparse.ArgumentParser()
argparser.add_argument('rvcf_input_file_path')
argparser.add_argument('taxon_table_file_path')
argparser.add_argument('transform')
argparser.add_argument(
'r_sqr_median_cutoff',
type=float
)
argparser.add_argument(
'stability_cutoff',
type=float
)
argparser.add_argument(
'snp_count',
type=int
)
argparser.add_argument(
'--no-tables',
action='store_true'
)
argparser.add_argument(
'--extra-columns',
type=str
)
args = argparser.parse_args()
print(args)
sort_results(**vars(args))
if __name__ == '__main__':
main()
|
mit
|
pyfa-org/eos
|
tests/integration/container/unordered/test_type_unique_set.py
|
1
|
8045
|
# ==============================================================================
# Copyright (C) 2011 Diego Duclos
# Copyright (C) 2011-2018 Anton Vorobyov
#
# This file is part of Eos.
#
# Eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Eos. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
from eos import Fit
from eos import Implant
from eos import Skill
from tests.integration.container.testcase import ContainerTestCase
class TestContainerTypeUniqueSet(ContainerTestCase):
def test_add_none(self):
fit = Fit()
# Action
with self.assertRaises(TypeError):
fit.skills.add(None)
# Verification
self.assertEqual(len(fit.skills), 0)
# Cleanup
self.assert_solsys_buffers_empty(fit.solar_system)
self.assert_log_entries(0)
def test_add_item(self):
fit = Fit()
item_type = self.mktype()
item = Skill(item_type.id)
# Action
fit.skills.add(item)
# Verification
self.assertEqual(len(fit.skills), 1)
self.assertIs(fit.skills[item_type.id], item)
self.assertIn(item, fit.skills)
self.assertIn(item_type.id, fit.skills)
# Cleanup
self.assert_item_buffers_empty(item)
self.assert_solsys_buffers_empty(fit.solar_system)
self.assert_log_entries(0)
def test_add_item_type_failure(self):
fit = Fit()
item_type = self.mktype()
item = Implant(item_type.id)
# Action
with self.assertRaises(TypeError):
fit.skills.add(item)
# Verification
self.assertEqual(len(fit.skills), 0)
self.assertNotIn(item, fit.skills)
self.assertNotIn(item_type.id, fit.skills)
fit.implants.add(item)
# Cleanup
self.assert_item_buffers_empty(item)
self.assert_solsys_buffers_empty(fit.solar_system)
self.assert_log_entries(0)
def test_add_item_value_failure_has_fit(self):
fit = Fit()
fit_other = Fit()
item_type = self.mktype()
item = Skill(item_type.id)
fit_other.skills.add(item)
# Action
with self.assertRaises(ValueError):
fit.skills.add(item)
# Verification
self.assertEqual(len(fit.skills), 0)
self.assertEqual(len(fit_other.skills), 1)
self.assertIs(fit_other.skills[item_type.id], item)
self.assertIn(item, fit_other.skills)
# Cleanup
self.assert_item_buffers_empty(item)
self.assert_solsys_buffers_empty(fit.solar_system)
self.assert_solsys_buffers_empty(fit_other.solar_system)
self.assert_log_entries(0)
def test_add_item_value_failure_existing_type_id(self):
fit = Fit()
item_type = self.mktype()
item1 = Skill(item_type.id)
item2 = Skill(item_type.id)
fit.skills.add(item1)
# Action
with self.assertRaises(ValueError):
fit.skills.add(item2)
# Verification
self.assertEqual(len(fit.skills), 1)
self.assertIs(fit.skills[item_type.id], item1)
self.assertIn(item1, fit.skills)
self.assertIn(item_type.id, fit.skills)
fit.skills.remove(item1)
fit.skills.add(item2)
# Cleanup
self.assert_item_buffers_empty(item1)
self.assert_item_buffers_empty(item2)
self.assert_solsys_buffers_empty(fit.solar_system)
self.assert_log_entries(0)
def test_remove_item(self):
fit = Fit()
item_type = self.mktype()
item = Skill(item_type.id)
fit.skills.add(item)
# Action
fit.skills.remove(item)
# Verification
self.assertEqual(len(fit.skills), 0)
self.assertNotIn(item, fit.skills)
self.assertNotIn(item_type.id, fit.skills)
# Cleanup
self.assert_item_buffers_empty(item)
self.assert_solsys_buffers_empty(fit.solar_system)
self.assert_log_entries(0)
def test_remove_item_failure(self):
fit = Fit()
item_type = self.mktype()
item = Skill(item_type.id)
# Action
with self.assertRaises(KeyError):
fit.skills.remove(item)
# Verification
self.assertEqual(len(fit.skills), 0)
self.assertNotIn(item, fit.skills)
self.assertNotIn(item_type.id, fit.skills)
fit.skills.add(item)
# Cleanup
self.assert_item_buffers_empty(item)
self.assert_solsys_buffers_empty(fit.solar_system)
self.assert_log_entries(0)
def test_delitem_item(self):
fit = Fit()
item_type = self.mktype()
item = Skill(item_type.id)
fit.skills.add(item)
# Action
del fit.skills[item_type.id]
# Verification
self.assertEqual(len(fit.skills), 0)
self.assertNotIn(item, fit.skills)
self.assertNotIn(item_type.id, fit.skills)
# Cleanup
self.assert_item_buffers_empty(item)
self.assert_solsys_buffers_empty(fit.solar_system)
self.assert_log_entries(0)
def test_delitem_item_failure(self):
fit = Fit()
item_type = self.mktype()
empty_type_id = self.allocate_type_id()
item = Skill(item_type.id)
fit.skills.add(item)
# Action
with self.assertRaises(KeyError):
del fit.skills[empty_type_id]
# Verification
self.assertEqual(len(fit.skills), 1)
self.assertIn(item, fit.skills)
self.assertIn(item_type.id, fit.skills)
# Cleanup
self.assert_item_buffers_empty(item)
self.assert_solsys_buffers_empty(fit.solar_system)
self.assert_log_entries(0)
def test_key_integrity(self):
fit = Fit()
item_type = self.mktype()
item1 = Skill(item_type.id)
item2 = Skill(item_type.id)
fit.skills.add(item1)
with self.assertRaises(KeyError):
fit.skills.remove(item2)
# Verification
self.assertIs(fit.skills[item_type.id], item1)
# Cleanup
self.assert_item_buffers_empty(item1)
self.assert_item_buffers_empty(item2)
self.assert_solsys_buffers_empty(fit.solar_system)
self.assert_log_entries(0)
def test_clear(self):
fit = Fit()
item1_type = self.mktype()
item1 = Skill(item1_type.id)
item2_type = self.mktype()
item2 = Skill(item2_type.id)
fit.skills.add(item1)
fit.skills.add(item2)
# Action
fit.skills.clear()
# Verification
self.assertEqual(len(fit.skills), 0)
self.assertNotIn(item1, fit.skills)
self.assertNotIn(item1_type.id, fit.skills)
self.assertNotIn(item2, fit.skills)
self.assertNotIn(item2_type.id, fit.skills)
# Cleanup
self.assert_item_buffers_empty(item1)
self.assert_item_buffers_empty(item2)
self.assert_solsys_buffers_empty(fit.solar_system)
self.assert_log_entries(0)
def test_bool(self):
fit = Fit()
item = Skill(self.mktype().id)
self.assertIs(bool(fit.skills), False)
fit.skills.add(item)
self.assertIs(bool(fit.skills), True)
fit.skills.remove(item)
self.assertIs(bool(fit.skills), False)
# Cleanup
self.assert_item_buffers_empty(item)
self.assert_solsys_buffers_empty(fit.solar_system)
self.assert_log_entries(0)
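# A compact restatement (assuming the eos API exercised above) of the
# invariant this suite pins down: fit.skills is a type-unique set keyed by
# type ID, holding at most one Skill per type ID, with lookup, membership
# and removal all by that key.
def _type_unique_set_sketch(fit, type_id):
    skill = Skill(type_id)
    fit.skills.add(skill)
    assert fit.skills[type_id] is skill
    assert skill in fit.skills and type_id in fit.skills
    fit.skills.remove(skill)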
|
lgpl-3.0
|
franky88/emperioanimesta
|
env/Lib/site-packages/setuptools/command/bdist_egg.py
|
130
|
17155
|
"""setuptools.command.bdist_egg
Build .egg distributions"""
from distutils.errors import DistutilsSetupError
from distutils.dir_util import remove_tree, mkpath
from distutils import log
from types import CodeType
import sys
import os
import textwrap
import marshal
import six
from pkg_resources import get_build_platform, Distribution, ensure_directory
from pkg_resources import EntryPoint
from setuptools.extension import Library
from setuptools import Command
try:
# Python 2.7 or >=3.2
from sysconfig import get_path, get_python_version
def _get_purelib():
return get_path("purelib")
except ImportError:
from distutils.sysconfig import get_python_lib, get_python_version
def _get_purelib():
return get_python_lib(False)
def strip_module(filename):
if '.' in filename:
filename = os.path.splitext(filename)[0]
if filename.endswith('module'):
filename = filename[:-6]
return filename
def write_stub(resource, pyfile):
_stub_template = textwrap.dedent("""
def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resources.resource_filename(__name__, %r)
__loader__ = None; del __bootstrap__, __loader__
imp.load_dynamic(__name__,__file__)
__bootstrap__()
""").lstrip()
with open(pyfile, 'w') as f:
f.write(_stub_template % resource)
class bdist_egg(Command):
description = "create an \"egg\" distribution"
user_options = [
('bdist-dir=', 'b',
"temporary directory for creating the distribution"),
('plat-name=', 'p', "platform name to embed in generated filenames "
"(default: %s)" % get_build_platform()),
('exclude-source-files', None,
"remove all .py files from the generated egg"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
]
boolean_options = [
'keep-temp', 'skip-build', 'exclude-source-files'
]
def initialize_options(self):
self.bdist_dir = None
self.plat_name = None
self.keep_temp = 0
self.dist_dir = None
self.skip_build = 0
self.egg_output = None
self.exclude_source_files = None
def finalize_options(self):
ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
self.egg_info = ei_cmd.egg_info
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'egg')
if self.plat_name is None:
self.plat_name = get_build_platform()
self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
if self.egg_output is None:
# Compute filename of the output egg
basename = Distribution(
None, None, ei_cmd.egg_name, ei_cmd.egg_version,
get_python_version(),
self.distribution.has_ext_modules() and self.plat_name
).egg_name()
self.egg_output = os.path.join(self.dist_dir, basename + '.egg')
def do_install_data(self):
# Hack for packages that install data to install's --install-lib
self.get_finalized_command('install').install_lib = self.bdist_dir
site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
old, self.distribution.data_files = self.distribution.data_files, []
for item in old:
if isinstance(item, tuple) and len(item) == 2:
if os.path.isabs(item[0]):
realpath = os.path.realpath(item[0])
normalized = os.path.normcase(realpath)
if normalized == site_packages or normalized.startswith(
site_packages + os.sep
):
item = realpath[len(site_packages) + 1:], item[1]
# XXX else: raise ???
self.distribution.data_files.append(item)
try:
log.info("installing package data to %s", self.bdist_dir)
self.call_command('install_data', force=0, root=None)
finally:
self.distribution.data_files = old
def get_outputs(self):
return [self.egg_output]
def call_command(self, cmdname, **kw):
"""Invoke reinitialized command `cmdname` with keyword args"""
for dirname in INSTALL_DIRECTORY_ATTRS:
kw.setdefault(dirname, self.bdist_dir)
kw.setdefault('skip_build', self.skip_build)
kw.setdefault('dry_run', self.dry_run)
cmd = self.reinitialize_command(cmdname, **kw)
self.run_command(cmdname)
return cmd
def run(self):
# Generate metadata first
self.run_command("egg_info")
# We run install_lib before install_data, because some data hacks
# pull their data path from the install_lib command.
log.info("installing library code to %s", self.bdist_dir)
instcmd = self.get_finalized_command('install')
old_root = instcmd.root
instcmd.root = None
if self.distribution.has_c_libraries() and not self.skip_build:
self.run_command('build_clib')
cmd = self.call_command('install_lib', warn_dir=0)
instcmd.root = old_root
all_outputs, ext_outputs = self.get_ext_outputs()
self.stubs = []
to_compile = []
for (p, ext_name) in enumerate(ext_outputs):
filename, ext = os.path.splitext(ext_name)
pyfile = os.path.join(self.bdist_dir, strip_module(filename) +
'.py')
self.stubs.append(pyfile)
log.info("creating stub loader for %s", ext_name)
if not self.dry_run:
write_stub(os.path.basename(ext_name), pyfile)
to_compile.append(pyfile)
ext_outputs[p] = ext_name.replace(os.sep, '/')
if to_compile:
cmd.byte_compile(to_compile)
if self.distribution.data_files:
self.do_install_data()
# Make the EGG-INFO directory
archive_root = self.bdist_dir
egg_info = os.path.join(archive_root, 'EGG-INFO')
self.mkpath(egg_info)
if self.distribution.scripts:
script_dir = os.path.join(egg_info, 'scripts')
log.info("installing scripts to %s", script_dir)
self.call_command('install_scripts', install_dir=script_dir,
no_ep=1)
self.copy_metadata_to(egg_info)
native_libs = os.path.join(egg_info, "native_libs.txt")
if all_outputs:
log.info("writing %s", native_libs)
if not self.dry_run:
ensure_directory(native_libs)
libs_file = open(native_libs, 'wt')
libs_file.write('\n'.join(all_outputs))
libs_file.write('\n')
libs_file.close()
elif os.path.isfile(native_libs):
log.info("removing %s", native_libs)
if not self.dry_run:
os.unlink(native_libs)
write_safety_flag(
os.path.join(archive_root, 'EGG-INFO'), self.zip_safe()
)
if os.path.exists(os.path.join(self.egg_info, 'depends.txt')):
log.warn(
"WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
"Use the install_requires/extras_require setup() args instead."
)
if self.exclude_source_files:
self.zap_pyfiles()
# Make the archive
make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
dry_run=self.dry_run, mode=self.gen_header())
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
# Add to 'Distribution.dist_files' so that the "upload" command works
getattr(self.distribution, 'dist_files', []).append(
('bdist_egg', get_python_version(), self.egg_output))
def zap_pyfiles(self):
log.info("Removing .py files from temporary directory")
for base, dirs, files in walk_egg(self.bdist_dir):
for name in files:
if name.endswith('.py'):
path = os.path.join(base, name)
log.debug("Deleting %s", path)
os.unlink(path)
def zip_safe(self):
safe = getattr(self.distribution, 'zip_safe', None)
if safe is not None:
return safe
log.warn("zip_safe flag not set; analyzing archive contents...")
return analyze_egg(self.bdist_dir, self.stubs)
def gen_header(self):
epm = EntryPoint.parse_map(self.distribution.entry_points or '')
ep = epm.get('setuptools.installation', {}).get('eggsecutable')
if ep is None:
return 'w' # not an eggsecutable, do it the usual way.
if not ep.attrs or ep.extras:
raise DistutilsSetupError(
"eggsecutable entry point (%r) cannot have 'extras' "
"or refer to a module" % (ep,)
)
pyver = sys.version[:3]
pkg = ep.module_name
full = '.'.join(ep.attrs)
base = ep.attrs[0]
basename = os.path.basename(self.egg_output)
header = (
"#!/bin/sh\n"
'if [ `basename $0` = "%(basename)s" ]\n'
'then exec python%(pyver)s -c "'
"import sys, os; sys.path.insert(0, os.path.abspath('$0')); "
"from %(pkg)s import %(base)s; sys.exit(%(full)s())"
'" "$@"\n'
'else\n'
' echo $0 is not the correct name for this egg file.\n'
' echo Please rename it back to %(basename)s and try again.\n'
' exec false\n'
'fi\n'
) % locals()
if not self.dry_run:
mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run)
f = open(self.egg_output, 'w')
f.write(header)
f.close()
return 'a'
def copy_metadata_to(self, target_dir):
"Copy metadata (egg info) to the target_dir"
# normalize the path (so that a forward-slash in egg_info will
# match using startswith below)
norm_egg_info = os.path.normpath(self.egg_info)
prefix = os.path.join(norm_egg_info, '')
for path in self.ei_cmd.filelist.files:
if path.startswith(prefix):
target = os.path.join(target_dir, path[len(prefix):])
ensure_directory(target)
self.copy_file(path, target)
def get_ext_outputs(self):
"""Get a list of relative paths to C extensions in the output distro"""
all_outputs = []
ext_outputs = []
paths = {self.bdist_dir: ''}
for base, dirs, files in os.walk(self.bdist_dir):
for filename in files:
if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
all_outputs.append(paths[base] + filename)
for filename in dirs:
paths[os.path.join(base, filename)] = (paths[base] +
filename + '/')
if self.distribution.has_ext_modules():
build_cmd = self.get_finalized_command('build_ext')
for ext in build_cmd.extensions:
if isinstance(ext, Library):
continue
fullname = build_cmd.get_ext_fullname(ext.name)
filename = build_cmd.get_ext_filename(fullname)
if not os.path.basename(filename).startswith('dl-'):
if os.path.exists(os.path.join(self.bdist_dir, filename)):
ext_outputs.append(filename)
return all_outputs, ext_outputs
NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
def walk_egg(egg_dir):
"""Walk an unpacked egg's contents, skipping the metadata directory"""
walker = os.walk(egg_dir)
base, dirs, files = next(walker)
if 'EGG-INFO' in dirs:
dirs.remove('EGG-INFO')
yield base, dirs, files
for bdf in walker:
yield bdf
def analyze_egg(egg_dir, stubs):
# check for existing flag in EGG-INFO
for flag, fn in safety_flags.items():
if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', fn)):
return flag
if not can_scan():
return False
safe = True
for base, dirs, files in walk_egg(egg_dir):
for name in files:
if name.endswith('.py') or name.endswith('.pyw'):
continue
elif name.endswith('.pyc') or name.endswith('.pyo'):
# always scan, even if we already know we're not safe
safe = scan_module(egg_dir, base, name, stubs) and safe
return safe
def write_safety_flag(egg_dir, safe):
# Write or remove zip safety flag file(s)
for flag, fn in safety_flags.items():
fn = os.path.join(egg_dir, fn)
if os.path.exists(fn):
if safe is None or bool(safe) != flag:
os.unlink(fn)
elif safe is not None and bool(safe) == flag:
f = open(fn, 'wt')
f.write('\n')
f.close()
safety_flags = {
True: 'zip-safe',
False: 'not-zip-safe',
}
def scan_module(egg_dir, base, name, stubs):
"""Check whether module possibly uses unsafe-for-zipfile stuff"""
filename = os.path.join(base, name)
if filename[:-1] in stubs:
return True # Extension module
pkg = base[len(egg_dir) + 1:].replace(os.sep, '.')
module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
if sys.version_info < (3, 3):
skip = 8 # skip magic & date
else:
skip = 12 # skip magic & date & file size
f = open(filename, 'rb')
f.read(skip)
code = marshal.load(f)
f.close()
safe = True
symbols = dict.fromkeys(iter_symbols(code))
for bad in ['__file__', '__path__']:
if bad in symbols:
log.warn("%s: module references %s", module, bad)
safe = False
if 'inspect' in symbols:
for bad in [
            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
            'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
'getinnerframes', 'getouterframes', 'stack', 'trace'
]:
if bad in symbols:
log.warn("%s: module MAY be using inspect.%s", module, bad)
safe = False
return safe
def iter_symbols(code):
"""Yield names and strings used by `code` and its nested code objects"""
for name in code.co_names:
yield name
for const in code.co_consts:
if isinstance(const, six.string_types):
yield const
elif isinstance(const, CodeType):
for name in iter_symbols(const):
yield name
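# A tiny demo (not part of setuptools) of iter_symbols: compile a snippet and
# collect the names and string constants the zip-safety scanner would see.
def _iter_symbols_demo():
    code = compile("import os\nx = '__file__'", "<demo>", "exec")
    return sorted(set(iter_symbols(code)))   # includes 'os' and '__file__'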
def can_scan():
if not sys.platform.startswith('java') and sys.platform != 'cli':
# CPython, PyPy, etc.
return True
log.warn("Unable to analyze compiled code on this platform.")
log.warn("Please ask the author to include a 'zip_safe'"
" setting (either True or False) in the package's setup.py")
# Attribute names of options for commands that might need to be convinced to
# install to the egg build directory
INSTALL_DIRECTORY_ATTRS = [
'install_lib', 'install_dir', 'install_data', 'install_base'
]
def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=True,
mode='w'):
"""Create a zip file from all the files under 'base_dir'. The output
zip file will be named 'base_dir' + ".zip". Uses either the "zipfile"
Python module (if available) or the InfoZIP "zip" utility (if installed
and found on the default search path). If neither tool is available,
raises DistutilsExecError. Returns the name of the output zip file.
"""
import zipfile
mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
def visit(z, dirname, names):
for name in names:
path = os.path.normpath(os.path.join(dirname, name))
if os.path.isfile(path):
p = path[len(base_dir) + 1:]
if not dry_run:
z.write(path, p)
log.debug("adding '%s'", p)
compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
if not dry_run:
z = zipfile.ZipFile(zip_filename, mode, compression=compression)
for dirname, dirs, files in os.walk(base_dir):
visit(z, dirname, files)
z.close()
else:
for dirname, dirs, files in os.walk(base_dir):
visit(None, dirname, files)
return zip_filename
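# Usage sketch (hypothetical paths): package a build tree exactly as the egg
# archive step above does, deflating by default.
#   make_zipfile('dist/example-0.1-py2.7.egg', 'build/bdist.linux/egg',
#                verbose=1, mode='w')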
|
gpl-3.0
|
mauriceling/dose
|
examples/07_logging_database_extraction.py
|
2
|
5093
|
'''
Example 07: Extracting data from simulation logging database
'''
# needed to run this example without prior
# installation of DOSE into Python site-packages
try:
import run_examples_without_installation
except ImportError: pass
# Example codes starts from here
import dose.database_calls as d
print("Connecting to logging database")
(con, cur) = d.connect_database('Simulations/case_study_01.db')
print("Get simulations")
# alternative: start_time = d.db_list_simulations(cur)[0][0]
start_time = d.db_list_simulations(cur, 'parameters')[0][0]
print("simulation start time:", start_time)
print("Get logged generations in one of the logged simulations")
# alternative: generations = d.db_list_generations(cur, start_time)
generations = d.db_list_generations(cur, start_time, 'organisms')
print("number of logged generations:", len(generations))
print()
print("Get logged populations (by population names) in one of the logged \
simulations")
population_names = d.db_list_population_name(cur, start_time)
print("logged populations:", population_names)
print()
print("Get logged organism's data fields in one of the logged simulations")
# alternative: datafields = d.db_list_datafields(cur, start_time)
datafields = d.db_list_datafields(cur, start_time, 'organisms')
print('logged datafields in organism:', datafields)
print("Get entire World.ecosystem, datafields='all', generation=['900']")
ecosys = d.db_get_ecosystem(cur, start_time, 'all', ['900'])
for x in list(ecosys['900'].keys()):
for y in list(ecosys['900'][x].keys()):
for z in list(ecosys['900'][x][y].keys()):
print('900', x, y, z, ecosys['900'][x][y][z])
print()
print()
print("Get entire World.ecosystem, datafields='all', generation=['100', '200', '300']")
ecosys = d.db_get_ecosystem(cur, start_time, 'all', ['100', '200', '300'])
for gen in list(ecosys.keys()):
for x in list(ecosys[gen].keys()):
for y in list(ecosys[gen][x].keys()):
for z in list(ecosys[gen][x][y].keys()):
print(gen, x, y, z, ecosys[gen][x][y][z])
print()
print()
print("Get entire World.ecosystem, datafields='organisms', generation=['900']")
ecosys = d.db_get_ecosystem(cur, start_time, 'organisms', ['900'])
for x in list(ecosys['900'].keys()):
for y in list(ecosys['900'][x].keys()):
for z in list(ecosys['900'][x][y].keys()):
print('900', x, y, z, ecosys['900'][x][y][z])
print()
print()
print("Get entire World.ecosystem, datafields='all', generation=['900', '300']")
ecosys = d.db_get_ecosystem(cur, start_time, 'all', ['900', '300'])
for gen in list(ecosys.keys()):
for x in list(ecosys[gen].keys()):
for y in list(ecosys[gen][x].keys()):
for z in list(ecosys[gen][x][y].keys()):
print(gen, x, y, z, ecosys[gen][x][y][z])
print()
print()
print("Get entire Organism.status dictionary, datafields='all', \
generation=['900']")
status = d.db_get_organisms_status(cur, start_time, population_names[0],
'all', ['900'])
for identity in list(status['900'].keys()):
for key in list(status['900'][identity].keys()):
print('900', identity, key, status['900'][identity][key])
print()
print()
print("Get entire Organism.status dictionary, datafields='all', \
generation=['300', '900']")
status = d.db_get_organisms_status(cur, start_time, population_names[0],
'all', ['300', '900'])
for gen in list(status.keys()):
for identity in list(status[gen].keys()):
for key in list(status[gen][identity].keys()):
print(gen, identity, key, status[gen][identity][key])
print()
print()
print("Get entire Organism.status dictionary, datafields='identity', \
generation=['900']")
status = d.db_get_organisms_status(cur, start_time, population_names[0],
'identity', ['900'])
for identity in list(status['900'].keys()):
print('900', identity, status['900'][identity])
print()
print()
print("Get chromosomal sequences for generation=['900']")
sequences = d.db_get_organisms_chromosome_sequences(cur, start_time,
population_names[0],
['900'])
for gen in list(sequences.keys()):
for identity in list(sequences[gen].keys()):
for chromosome_number in range(len(sequences[gen][identity])):
print(gen, identity, chromosome_number, \
sequences[gen][identity][chromosome_number])
print()
print()
print("Get chromosomal sequences for generation=['300', '900']")
sequences = d.db_get_organisms_chromosome_sequences(cur, start_time,
population_names[0],
['300', '900'])
for gen in list(sequences.keys()):
for identity in list(sequences[gen].keys()):
for chromosome_number in range(len(sequences[gen][identity])):
print(gen, identity, chromosome_number, \
sequences[gen][identity][chromosome_number])
print()
print()
|
gpl-3.0
|
thumbimigwe/golber
|
node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/styles/bw.py
|
364
|
1355
|
# -*- coding: utf-8 -*-
"""
pygments.styles.bw
~~~~~~~~~~~~~~~~~~
Simple black/white only style.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Operator, Generic
class BlackWhiteStyle(Style):
background_color = "#ffffff"
default_style = ""
styles = {
Comment: "italic",
Comment.Preproc: "noitalic",
Keyword: "bold",
Keyword.Pseudo: "nobold",
Keyword.Type: "nobold",
Operator.Word: "bold",
Name.Class: "bold",
Name.Namespace: "bold",
Name.Exception: "bold",
Name.Entity: "bold",
Name.Tag: "bold",
String: "italic",
String.Interpol: "bold",
String.Escape: "bold",
Generic.Heading: "bold",
Generic.Subheading: "bold",
Generic.Emph: "italic",
Generic.Strong: "bold",
Generic.Prompt: "bold",
Error: "border:#FF0000"
}
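# Usage sketch (not part of Pygments itself): render a snippet to HTML with
# the black/white style defined above.
def _demo_black_white_render():
    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import HtmlFormatter
    return highlight('print(1)', PythonLexer(),
                     HtmlFormatter(style=BlackWhiteStyle))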
|
mit
|
nrb/ansible-modules-extras
|
database/vertica/vertica_facts.py
|
148
|
9176
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: vertica_facts
version_added: '2.0'
short_description: Gathers Vertica database facts.
description:
- Gathers Vertica database facts.
options:
cluster:
description:
- Name of the cluster running the schema.
required: false
default: localhost
port:
description:
      - Database port to connect to.
required: false
default: 5433
db:
description:
- Name of the database running the schema.
required: false
default: null
login_user:
description:
- The username used to authenticate with.
required: false
default: dbadmin
login_password:
description:
- The password used to authenticate with.
required: false
default: null
notes:
- The default authentication assumes that you are either logging in as or sudo'ing
to the C(dbadmin) account on the host.
- This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
that C(unixODBC) and C(pyodbc) is installed on the host and properly configured.
- Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
requirements: [ 'unixODBC', 'pyodbc' ]
author: "Dariusz Owczarek (@dareko)"
"""
EXAMPLES = """
- name: gathering vertica facts
vertica_facts: db=db_name
"""
try:
import pyodbc
except ImportError:
pyodbc_found = False
else:
pyodbc_found = True
class NotSupportedError(Exception):
pass
# module specific functions
def get_schema_facts(cursor, schema=''):
facts = {}
cursor.execute("""
select schema_name, schema_owner, create_time
from schemata
where not is_system_schema and schema_name not in ('public')
and (? = '' or schema_name ilike ?)
""", schema, schema)
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
facts[row.schema_name.lower()] = {
'name': row.schema_name,
'owner': row.schema_owner,
'create_time': str(row.create_time),
'usage_roles': [],
'create_roles': []}
cursor.execute("""
select g.object_name as schema_name, r.name as role_name,
lower(g.privileges_description) privileges_description
from roles r join grants g
on g.grantee = r.name and g.object_type='SCHEMA'
and g.privileges_description like '%USAGE%'
and g.grantee not in ('public', 'dbadmin')
and (? = '' or g.object_name ilike ?)
""", schema, schema)
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
schema_key = row.schema_name.lower()
if 'create' in row.privileges_description:
facts[schema_key]['create_roles'].append(row.role_name)
else:
facts[schema_key]['usage_roles'].append(row.role_name)
return facts
def get_user_facts(cursor, user=''):
facts = {}
cursor.execute("""
select u.user_name, u.is_locked, u.lock_time,
p.password, p.acctexpired as is_expired,
u.profile_name, u.resource_pool,
u.all_roles, u.default_roles
from users u join password_auditor p on p.user_id = u.user_id
where not u.is_super_user
and (? = '' or u.user_name ilike ?)
""", user, user)
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
user_key = row.user_name.lower()
facts[user_key] = {
'name': row.user_name,
'locked': str(row.is_locked),
'password': row.password,
'expired': str(row.is_expired),
'profile': row.profile_name,
'resource_pool': row.resource_pool,
'roles': [],
'default_roles': []}
if row.is_locked:
facts[user_key]['locked_time'] = str(row.lock_time)
if row.all_roles:
facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',')
if row.default_roles:
facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',')
return facts
def get_role_facts(cursor, role=''):
facts = {}
cursor.execute("""
select r.name, r.assigned_roles
from roles r
where (? = '' or r.name ilike ?)
""", role, role)
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
role_key = row.name.lower()
facts[role_key] = {
'name': row.name,
'assigned_roles': []}
if row.assigned_roles:
facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',')
return facts
def get_configuration_facts(cursor, parameter=''):
facts = {}
cursor.execute("""
select c.parameter_name, c.current_value, c.default_value
from configuration_parameters c
where c.node_name = 'ALL'
and (? = '' or c.parameter_name ilike ?)
""", parameter, parameter)
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
facts[row.parameter_name.lower()] = {
'parameter_name': row.parameter_name,
'current_value': row.current_value,
'default_value': row.default_value}
return facts
def get_node_facts(cursor, schema=''):
facts = {}
cursor.execute("""
select node_name, node_address, export_address, node_state, node_type,
catalog_path
from nodes
""")
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
facts[row.node_address] = {
'node_name': row.node_name,
'export_address': row.export_address,
'node_state': row.node_state,
'node_type': row.node_type,
'catalog_path': row.catalog_path}
return facts
# module logic
def main():
module = AnsibleModule(
argument_spec=dict(
cluster=dict(default='localhost'),
port=dict(default='5433'),
db=dict(default=None),
login_user=dict(default='dbadmin'),
login_password=dict(default=None),
), supports_check_mode = True)
if not pyodbc_found:
module.fail_json(msg="The python pyodbc module is required.")
db = ''
if module.params['db']:
db = module.params['db']
changed = False
try:
dsn = (
"Driver=Vertica;"
"Server={0};"
"Port={1};"
"Database={2};"
"User={3};"
"Password={4};"
"ConnectionLoadBalance={5}"
).format(module.params['cluster'], module.params['port'], db,
module.params['login_user'], module.params['login_password'], 'true')
db_conn = pyodbc.connect(dsn, autocommit=True)
cursor = db_conn.cursor()
except Exception, e:
module.fail_json(msg="Unable to connect to database: {0}.".format(e))
try:
schema_facts = get_schema_facts(cursor)
user_facts = get_user_facts(cursor)
role_facts = get_role_facts(cursor)
configuration_facts = get_configuration_facts(cursor)
node_facts = get_node_facts(cursor)
module.exit_json(changed=False,
ansible_facts={'vertica_schemas': schema_facts,
'vertica_users': user_facts,
'vertica_roles': role_facts,
'vertica_configuration': configuration_facts,
'vertica_nodes': node_facts})
except NotSupportedError, e:
module.fail_json(msg=str(e))
except SystemExit:
# avoid catching this on python 2.4
raise
except Exception, e:
module.fail_json(msg=e)
# import ansible utilities
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
Kodicine/repository.890m.com
|
plugin.video.armagedomfilmes/mechanize/_mechanize.py
|
133
|
24916
|
"""Stateful programmatic WWW navigation, after Perl's WWW::Mechanize.
Copyright 2003-2006 John J. Lee <jjl@pobox.com>
Copyright 2003 Andy Lester (original Perl code)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
import copy, re, os, urllib, urllib2
from _html import DefaultFactory
import _response
import _request
import _rfc3986
import _sockettimeout
import _urllib2_fork
from _useragent import UserAgentBase
class BrowserStateError(Exception): pass
class LinkNotFoundError(Exception): pass
class FormNotFoundError(Exception): pass
def sanepathname2url(path):
urlpath = urllib.pathname2url(path)
if os.name == "nt" and urlpath.startswith("///"):
urlpath = urlpath[2:]
# XXX don't ask me about the mac...
return urlpath
class History:
"""
Though this will become public, the implied interface is not yet stable.
"""
def __init__(self):
self._history = [] # LIFO
def add(self, request, response):
self._history.append((request, response))
def back(self, n, _response):
response = _response # XXX move Browser._response into this class?
while n > 0 or response is None:
try:
request, response = self._history.pop()
except IndexError:
raise BrowserStateError("already at start of history")
n -= 1
return request, response
def clear(self):
del self._history[:]
def close(self):
for request, response in self._history:
if response is not None:
response.close()
del self._history[:]
class HTTPRefererProcessor(_urllib2_fork.BaseHandler):
def http_request(self, request):
# See RFC 2616 14.36. The only times we know the source of the
# request URI has a URI associated with it are redirect, and
# Browser.click() / Browser.submit() / Browser.follow_link().
# Otherwise, it's the user's job to add any Referer header before
# .open()ing.
if hasattr(request, "redirect_dict"):
request = self.parent._add_referer_header(
request, origin_request=False)
return request
https_request = http_request
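# A minimal usage sketch (assuming a reachable site with a login form; the
# URL and field name are illustrative) for the Browser class defined below.
def _browser_usage_sketch():
    br = Browser()
    br.open("http://example.com/login")
    br.select_form(nr=0)                 # give the first form "input focus"
    br.form["user"] = "alice"            # HTMLForm item assignment
    response = br.submit()
    return response.geturl()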
class Browser(UserAgentBase):
"""Browser-like class with support for history, forms and links.
BrowserStateError is raised whenever the browser is in the wrong state to
complete the requested operation - e.g., when .back() is called when the
browser history is empty, or when .follow_link() is called when the current
response does not contain HTML data.
Public attributes:
request: current request (mechanize.Request)
form: currently selected form (see .select_form())
"""
handler_classes = copy.copy(UserAgentBase.handler_classes)
handler_classes["_referer"] = HTTPRefererProcessor
default_features = copy.copy(UserAgentBase.default_features)
default_features.append("_referer")
def __init__(self,
factory=None,
history=None,
request_class=None,
):
"""
Only named arguments should be passed to this constructor.
factory: object implementing the mechanize.Factory interface.
history: object implementing the mechanize.History interface. Note
this interface is still experimental and may change in future.
request_class: Request class to use. Defaults to mechanize.Request
The Factory and History objects passed in are 'owned' by the Browser,
so they should not be shared across Browsers. In particular,
factory.set_response() should not be called except by the owning
Browser itself.
Note that the supplied factory's request_class is overridden by this
constructor, to ensure only one Request class is used.
"""
self._handle_referer = True
if history is None:
history = History()
self._history = history
if request_class is None:
request_class = _request.Request
if factory is None:
factory = DefaultFactory()
factory.set_request_class(request_class)
self._factory = factory
self.request_class = request_class
self.request = None
self._set_response(None, False)
# do this last to avoid __getattr__ problems
UserAgentBase.__init__(self)
def close(self):
UserAgentBase.close(self)
if self._response is not None:
self._response.close()
if self._history is not None:
self._history.close()
self._history = None
# make use after .close easy to spot
self.form = None
self.request = self._response = None
self.request = self.response = self.set_response = None
self.geturl = self.reload = self.back = None
self.clear_history = self.set_cookie = self.links = self.forms = None
self.viewing_html = self.encoding = self.title = None
self.select_form = self.click = self.submit = self.click_link = None
self.follow_link = self.find_link = None
def set_handle_referer(self, handle):
"""Set whether to add Referer header to each request."""
self._set_handler("_referer", handle)
self._handle_referer = bool(handle)
def _add_referer_header(self, request, origin_request=True):
if self.request is None:
return request
scheme = request.get_type()
original_scheme = self.request.get_type()
if scheme not in ["http", "https"]:
return request
if not origin_request and not self.request.has_header("Referer"):
return request
if (self._handle_referer and
original_scheme in ["http", "https"] and
not (original_scheme == "https" and scheme != "https")):
# strip URL fragment (RFC 2616 14.36)
parts = _rfc3986.urlsplit(self.request.get_full_url())
parts = parts[:-1]+(None,)
referer = _rfc3986.urlunsplit(parts)
request.add_unredirected_header("Referer", referer)
return request
def open_novisit(self, url, data=None,
timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
"""Open a URL without visiting it.
Browser state (including request, response, history, forms and links)
is left unchanged by calling this function.
The interface is the same as for .open().
This is useful for things like fetching images.
See also .retrieve().
"""
return self._mech_open(url, data, visit=False, timeout=timeout)
def open(self, url, data=None,
timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
return self._mech_open(url, data, timeout=timeout)
def _mech_open(self, url, data=None, update_history=True, visit=None,
timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
try:
url.get_full_url
except AttributeError:
# string URL -- convert to absolute URL if required
scheme, authority = _rfc3986.urlsplit(url)[:2]
if scheme is None:
# relative URL
if self._response is None:
raise BrowserStateError(
"can't fetch relative reference: "
"not viewing any document")
url = _rfc3986.urljoin(self._response.geturl(), url)
request = self._request(url, data, visit, timeout)
visit = request.visit
if visit is None:
visit = True
if visit:
self._visit_request(request, update_history)
success = True
try:
response = UserAgentBase.open(self, request, data)
except urllib2.HTTPError, error:
success = False
if error.fp is None: # not a response
raise
response = error
## except (IOError, socket.error, OSError), error:
## # Yes, urllib2 really does raise all these :-((
## # See test_urllib2.py for examples of socket.gaierror and OSError,
## # plus note that FTPHandler raises IOError.
## # XXX I don't seem to have an example of exactly socket.error being
## # raised, only socket.gaierror...
## # I don't want to start fixing these here, though, since this is a
## # subclass of OpenerDirector, and it would break old code. Even in
## # Python core, a fix would need some backwards-compat. hack to be
## # acceptable.
## raise
if visit:
self._set_response(response, False)
response = copy.copy(self._response)
elif response is not None:
response = _response.upgrade_response(response)
if not success:
raise response
return response
def __str__(self):
text = []
text.append("<%s " % self.__class__.__name__)
if self._response:
text.append("visiting %s" % self._response.geturl())
else:
text.append("(not visiting a URL)")
if self.form:
text.append("\n selected form:\n %s\n" % str(self.form))
text.append(">")
return "".join(text)
def response(self):
"""Return a copy of the current response.
The returned object has the same interface as the object returned by
.open() (or mechanize.urlopen()).
"""
return copy.copy(self._response)
def open_local_file(self, filename):
path = sanepathname2url(os.path.abspath(filename))
url = 'file://'+path
return self.open(url)
def set_response(self, response):
"""Replace current response with (a copy of) response.
response may be None.
This is intended mostly for HTML-preprocessing.
"""
self._set_response(response, True)
def _set_response(self, response, close_current):
# sanity check, necessary but far from sufficient
if not (response is None or
(hasattr(response, "info") and hasattr(response, "geturl") and
hasattr(response, "read")
)
):
raise ValueError("not a response object")
self.form = None
if response is not None:
response = _response.upgrade_response(response)
if close_current and self._response is not None:
self._response.close()
self._response = response
self._factory.set_response(response)
def visit_response(self, response, request=None):
"""Visit the response, as if it had been .open()ed.
Unlike .set_response(), this updates history rather than replacing the
current response.
"""
if request is None:
request = _request.Request(response.geturl())
self._visit_request(request, True)
self._set_response(response, False)
def _visit_request(self, request, update_history):
if self._response is not None:
self._response.close()
if self.request is not None and update_history:
self._history.add(self.request, self._response)
self._response = None
# we want self.request to be assigned even if UserAgentBase.open
# fails
self.request = request
def geturl(self):
"""Get URL of current document."""
if self._response is None:
raise BrowserStateError("not viewing any document")
return self._response.geturl()
def reload(self):
"""Reload current document, and return response object."""
if self.request is None:
raise BrowserStateError("no URL has yet been .open()ed")
if self._response is not None:
self._response.close()
return self._mech_open(self.request, update_history=False)
def back(self, n=1):
"""Go back n steps in history, and return response object.
n: go back this number of steps (default 1 step)
"""
if self._response is not None:
self._response.close()
self.request, response = self._history.back(n, self._response)
self.set_response(response)
if not response.read_complete:
return self.reload()
return copy.copy(response)
def clear_history(self):
self._history.clear()
def set_cookie(self, cookie_string):
"""Request to set a cookie.
Note that it is NOT necessary to call this method under ordinary
circumstances: cookie handling is normally entirely automatic. The
intended use case is rather to simulate the setting of a cookie by
client script in a web page (e.g. JavaScript). In that case, use of
this method is necessary because mechanize currently does not support
JavaScript, VBScript, etc.
The cookie is added in the same way as if it had arrived with the
current response, as a result of the current request. This means that,
for example, if it is not appropriate to set the cookie based on the
current request, no cookie will be set.

The cookie will be returned automatically with subsequent responses
made by the Browser instance whenever that's appropriate.
cookie_string should be a valid value of the Set-Cookie header.
For example:
browser.set_cookie(
"sid=abcdef; expires=Wednesday, 09-Nov-06 23:12:40 GMT")
        Currently, this method does not allow for adding RFC 2965 cookies.
This limitation will be lifted if anybody requests it.
"""
if self._response is None:
raise BrowserStateError("not viewing any document")
if self.request.get_type() not in ["http", "https"]:
raise BrowserStateError("can't set cookie for non-HTTP/HTTPS "
"transactions")
cookiejar = self._ua_handlers["_cookies"].cookiejar
response = self.response() # copy
headers = response.info()
headers["Set-cookie"] = cookie_string
cookiejar.extract_cookies(response, self.request)
def links(self, **kwds):
"""Return iterable over links (mechanize.Link objects)."""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
links = self._factory.links()
if kwds:
return self._filter_links(links, **kwds)
else:
return links
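    # Illustrative usage (not part of the original source): the keyword
    # arguments accept the same filters as .find_link(), e.g.
    #   urls = [link.absolute_url for link in br.links(url_regex="download")]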
def forms(self):
"""Return iterable over forms.
The returned form objects implement the mechanize.HTMLForm interface.
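        Example (an illustrative sketch):
          for form in br.forms():
              print form.name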
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
return self._factory.forms()
def global_form(self):
"""Return the global form object, or None if the factory implementation
did not supply one.
The "global" form object contains all controls that are not descendants
of any FORM element.
The returned form object implements the mechanize.HTMLForm interface.
This is a separate method since the global form is not regarded as part
of the sequence of forms in the document -- mostly for
backwards-compatibility.
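        Example (an illustrative sketch; assumes the page has form controls
        outside any FORM element):
          gf = br.global_form()
          if gf is not None:
              gf["q"] = "search terms"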
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
return self._factory.global_form
def viewing_html(self):
"""Return whether the current response contains HTML data."""
if self._response is None:
raise BrowserStateError("not viewing any document")
return self._factory.is_html
def encoding(self):
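        """Return the encoding of the current document, as determined by the
        response factory (typically from HTTP headers and/or HTML meta
        declarations).
        """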
if self._response is None:
raise BrowserStateError("not viewing any document")
return self._factory.encoding
def title(self):
r"""Return title, or None if there is no title element in the document.
        Treatment of any tag children of the title element attempts to follow
        Firefox and IE (currently, tags are preserved).
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
return self._factory.title
def select_form(self, name=None, predicate=None, nr=None):
"""Select an HTML form for input.
This is a bit like giving a form the "input focus" in a browser.
If a form is selected, the Browser object supports the HTMLForm
interface, so you can call methods like .set_value(), .set(), and
.click().
Another way to select a form is to assign to the .form attribute. The
form assigned should be one of the objects returned by the .forms()
method.
At least one of the name, predicate and nr arguments must be supplied.
If no matching form is found, mechanize.FormNotFoundError is raised.
If name is specified, then the form must have the indicated name.
If predicate is specified, then the form must match that function. The
predicate function is passed the HTMLForm as its single argument, and
should return a boolean value indicating whether the form matched.
nr, if supplied, is the sequence number of the form (where 0 is the
        first). Note that form 0 is the first form matching all the other
        arguments (if supplied); it is not necessarily the first form in the
        document. The "global form" (consisting of all form controls not contained
in any FORM element) is considered not to be part of this sequence and
to have no name, so will not be matched unless both name and nr are
None.
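        Example (an illustrative sketch):
          br.select_form(name="login")
          br.form["user"] = "joe"   # .form is the selected mechanize.HTMLForm
          br.select_form(nr=0)      # alternatively: first form in the page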
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
if (name is None) and (predicate is None) and (nr is None):
raise ValueError(
"at least one argument must be supplied to specify form")
global_form = self._factory.global_form
if nr is None and name is None and \
predicate is not None and predicate(global_form):
self.form = global_form
return
orig_nr = nr
for form in self.forms():
if name is not None and name != form.name:
continue
if predicate is not None and not predicate(form):
continue
if nr:
nr -= 1
continue
self.form = form
break # success
else:
# failure
description = []
if name is not None: description.append("name '%s'" % name)
if predicate is not None:
description.append("predicate %s" % predicate)
if orig_nr is not None: description.append("nr %d" % orig_nr)
description = ", ".join(description)
raise FormNotFoundError("no form matching "+description)
def click(self, *args, **kwds):
"""See mechanize.HTMLForm.click for documentation."""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
request = self.form.click(*args, **kwds)
return self._add_referer_header(request)
def submit(self, *args, **kwds):
"""Submit current form.
Arguments are as for mechanize.HTMLForm.click().
Return value is same as for Browser.open().
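        Example (an illustrative sketch):
          br.select_form(nr=0)
          br.form["q"] = "mechanize"
          response = br.submit()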
"""
return self.open(self.click(*args, **kwds))
def click_link(self, link=None, **kwds):
"""Find a link and return a Request object for it.
Arguments are as for .find_link(), except that a link may be supplied
as the first argument.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
if not link:
link = self.find_link(**kwds)
else:
if kwds:
raise ValueError(
"either pass a Link, or keyword arguments, not both")
request = self.request_class(link.absolute_url)
return self._add_referer_header(request)
def follow_link(self, link=None, **kwds):
"""Find a link and .open() it.
Arguments are as for .click_link().
Return value is same as for Browser.open().
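        Example (an illustrative sketch):
          response = br.follow_link(text_regex=r"cheese\s*shop", nr=1)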
"""
return self.open(self.click_link(link, **kwds))
def find_link(self, **kwds):
"""Find a link in current page.
Links are returned as mechanize.Link objects.
# Return third link that .search()-matches the regexp "python"
# (by ".search()-matches", I mean that the regular expression method
# .search() is used, rather than .match()).
find_link(text_regex=re.compile("python"), nr=2)
# Return first http link in the current page that points to somewhere
# on python.org whose link text (after tags have been removed) is
# exactly "monty python".
find_link(text="monty python",
url_regex=re.compile("http.*python.org"))
# Return first link with exactly three HTML attributes.
find_link(predicate=lambda link: len(link.attrs) == 3)
Links include anchors (<a>), image maps (<area>), and frames (<frame>,
<iframe>).
All arguments must be passed by keyword, not position. Zero or more
arguments may be supplied. In order to find a link, all arguments
supplied must match.
If a matching link is not found, mechanize.LinkNotFoundError is raised.
text: link text between link tags: e.g. <a href="blah">this bit</a> (as
        returned by pullparser.get_compressed_text(), i.e. without tags but
with opening tags "textified" as per the pullparser docs) must compare
equal to this argument, if supplied
        text_regex: link text between tags (as defined above) must match the
regular expression object or regular expression string passed as this
argument, if supplied
name, name_regex: as for text and text_regex, but matched against the
name HTML attribute of the link tag
url, url_regex: as for text and text_regex, but matched against the
URL of the link tag (note this matches against Link.url, which is a
relative or absolute URL according to how it was written in the HTML)
tag: element name of opening tag, e.g. "a"
predicate: a function taking a Link object as its single argument,
        returning a boolean result, indicating whether the link matches
nr: matches the nth link that matches all other criteria (default 0)
"""
try:
return self._filter_links(self._factory.links(), **kwds).next()
except StopIteration:
raise LinkNotFoundError()
def __getattr__(self, name):
# pass through _form.HTMLForm methods and attributes
form = self.__dict__.get("form")
if form is None:
raise AttributeError(
"%s instance has no attribute %s (perhaps you forgot to "
".select_form()?)" % (self.__class__, name))
return getattr(form, name)
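    # Note (illustrative, not in the original source): after
    # br.select_form(...), HTMLForm attributes not defined on Browser, e.g.
    # br.set_value("joe", name="user"), resolve here to the selected form.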
def _filter_links(self, links,
text=None, text_regex=None,
name=None, name_regex=None,
url=None, url_regex=None,
tag=None,
predicate=None,
nr=0
):
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
orig_nr = nr
for link in links:
if url is not None and url != link.url:
continue
if url_regex is not None and not re.search(url_regex, link.url):
continue
if (text is not None and
(link.text is None or text != link.text)):
continue
if (text_regex is not None and
(link.text is None or not re.search(text_regex, link.text))):
continue
if name is not None and name != dict(link.attrs).get("name"):
continue
if name_regex is not None:
link_name = dict(link.attrs).get("name")
if link_name is None or not re.search(name_regex, link_name):
continue
if tag is not None and tag != link.tag:
continue
if predicate is not None and not predicate(link):
continue
if nr:
nr -= 1
continue
yield link
nr = orig_nr
|
gpl-2.0
|