repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (class, 991 values) | size (string, 4-7 chars) | content (string, 666 chars-1M) | license (class, 15 values)
---|---|---|---|---|---
EzeAlbornoz5/TP-Ingenieria-web | doyourfreight/forums/migrations/0004_auto_20170514_2328.py | 1 | 2152 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-15 02:28
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('forums', '0003_auto_20170514_2127'),
]
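# Renames relation fields to an fk_* prefix and submit_date fields to
# camelCase (submitDate) across the forums models.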
operations = [
migrations.RenameField(
model_name='flagreply',
old_name='reply',
new_name='fk_reply',
),
migrations.RenameField(
model_name='flagreply',
old_name='userAuth',
new_name='fk_userAuth',
),
migrations.RenameField(
model_name='flagreply',
old_name='submit_date',
new_name='submitDate',
),
migrations.RenameField(
model_name='reply',
old_name='author',
new_name='fk_author',
),
migrations.RenameField(
model_name='reply',
old_name='thread',
new_name='fk_thread',
),
migrations.RenameField(
model_name='reply',
old_name='submit_date',
new_name='submitDate',
),
migrations.RenameField(
model_name='replyscore',
old_name='reply',
new_name='fk_reply',
),
migrations.RenameField(
model_name='replyscore',
old_name='user',
new_name='fk_user',
),
migrations.RenameField(
model_name='thread',
old_name='author',
new_name='fk_author',
),
migrations.RenameField(
model_name='thread',
old_name='forum',
new_name='fk_forum',
),
migrations.RenameField(
model_name='thread',
old_name='submit_date',
new_name='submitDate',
),
migrations.RenameField(
model_name='threadscore',
old_name='thread',
new_name='fk_thread',
),
migrations.RenameField(
model_name='threadscore',
old_name='user',
new_name='fk_user',
),
]
| gpl-3.0 |
maxdeliso/elevatorSim | Lib/idlelib/ObjectBrowser.py | 67 | 3749 |
# XXX TO DO:
# - popup menu
# - support partial or total redisplay
# - more doc strings
# - tooltips
# object browser
# XXX TO DO:
# - for classes/modules, add "open source" to object browser
from idlelib.TreeWidget import TreeItem, TreeNode, ScrolledCanvas
from reprlib import Repr
myrepr = Repr()
myrepr.maxstring = 100
myrepr.maxother = 100
class ObjectTreeItem(TreeItem):
def __init__(self, labeltext, object, setfunction=None):
self.labeltext = labeltext
self.object = object
self.setfunction = setfunction
def GetLabelText(self):
return self.labeltext
def GetText(self):
return myrepr.repr(self.object)
def GetIconName(self):
if not self.IsExpandable():
return "python"
def IsEditable(self):
return self.setfunction is not None
def SetText(self, text):
try:
value = eval(text)
self.setfunction(value)
except Exception:
pass
else:
self.object = value
def IsExpandable(self):
return bool(dir(self.object))
def GetSubList(self):
keys = dir(self.object)
sublist = []
for key in keys:
try:
value = getattr(self.object, key)
except AttributeError:
continue
item = make_objecttreeitem(
str(key) + " =",
value,
lambda value, key=key, object=self.object:
setattr(object, key, value))
sublist.append(item)
return sublist
class ClassTreeItem(ObjectTreeItem):
def IsExpandable(self):
return True
def GetSubList(self):
sublist = ObjectTreeItem.GetSubList(self)
if len(self.object.__bases__) == 1:
item = make_objecttreeitem("__bases__[0] =",
self.object.__bases__[0])
else:
item = make_objecttreeitem("__bases__ =", self.object.__bases__)
sublist.insert(0, item)
return sublist
class AtomicObjectTreeItem(ObjectTreeItem):
def IsExpandable(self):
return False
class SequenceTreeItem(ObjectTreeItem):
def IsExpandable(self):
return len(self.object) > 0
def keys(self):
return range(len(self.object))
def GetSubList(self):
sublist = []
for key in self.keys():
try:
value = self.object[key]
except KeyError:
continue
def setfunction(value, key=key, object=self.object):
object[key] = value
item = make_objecttreeitem("%r:" % (key,), value, setfunction)
sublist.append(item)
return sublist
class DictTreeItem(SequenceTreeItem):
def keys(self):
keys = list(self.object.keys())
try:
keys.sort()
except Exception:
pass
return keys
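# Map concrete built-in types to specialized tree items; make_objecttreeitem
# falls back to the generic ObjectTreeItem for anything else.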
dispatch = {
int: AtomicObjectTreeItem,
float: AtomicObjectTreeItem,
str: AtomicObjectTreeItem,
tuple: SequenceTreeItem,
list: SequenceTreeItem,
dict: DictTreeItem,
type: ClassTreeItem,
}
def make_objecttreeitem(labeltext, object, setfunction=None):
t = type(object)
if t in dispatch:
c = dispatch[t]
else:
c = ObjectTreeItem
return c(labeltext, object, setfunction)
# Test script
def _test():
import sys
from tkinter import Tk
root = Tk()
root.configure(bd=0, bg="yellow")
root.focus_set()
sc = ScrolledCanvas(root, bg="white", highlightthickness=0, takefocus=1)
sc.frame.pack(expand=1, fill="both")
item = make_objecttreeitem("sys", sys)
node = TreeNode(sc.canvas, None, item)
node.update()
root.mainloop()
if __name__ == '__main__':
_test()
| bsd-2-clause |
her0e1c1/pystock | crawler/crawler/spiders/yahoo_japan.py | 1 | 2762 |
import re
import urllib
import datetime
import scrapy
import scrapy_splash
from dateutil import relativedelta
import stock
URL = ("http://info.finance.yahoo.co.jp/history/?code={code}.T&"
"sy={sy}&sm={sm}&sd={sd}&ey={ey}&em={em}&ed={ed}&tm=d")
REG_SPLIT_STOCK_DATE = re.compile(r"分割\W+(?P<from_number>\d+)株.*?(?P<to_number>\d+)株")
REG_DATE = re.compile(r"(?P<year>\d{4})年(?P<month>\d{1,2})月(?P<day>\d{1,2})日")
def parse_date(text):
match = REG_DATE.match(text)
if match:
converted = {k: int(v) for k, v in match.groupdict().items()}
return datetime.date(**converted)
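# Illustrative example: parse_date("2017年5月15日") -> datetime.date(2017, 5, 15);
# text that does not match REG_DATE falls through and returns None.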
class YahooJapanSpider(scrapy.Spider):
"""
Command line:
$ scrapy crawl yahoo_japan -a code=CODE -a start=YYYY/MM/DD -a end=YYYY/MM/DD
"""
name = "yahoo_japan"
allowed_domains = ['info.finance.yahoo.co.jp']
def __init__(self, **kwargs):
end = kwargs.pop("end", None)
end = stock.util.str2date(end, datetime.date.today())
start = kwargs.pop("start", None)
start = stock.util.str2date(start, end - relativedelta.relativedelta(months=1))  # months=1: one month back (month=1 would pin the month to January)
code = kwargs.pop("code", None)
self.params = {
"end": end,
"start": start,
"codes": [code] if code else []
}
super().__init__(**kwargs)
def start_requests(self):
for code in self.params["codes"]:
end = self.params["end"]
sta = self.params["start"]
url = URL.format(
code=code,
ey=end.year,
em=end.month,
ed=end.day,
sy=sta.year,
sm=sta.month,
sd=sta.day,
)
yield scrapy_splash.SplashRequest(url=url, callback=self.parse)
# yield scrapy.Request(url=url, callback=self.parse)
def parse(self, response):
header = ["date", "open", "high", "low", "close", "volume", "adjust"]
x1 = "//tr[th[contains(text(), '日付')] and th[contains(text(), '安値')]]/following-sibling::tr"
x2 = "./td/text()"
x3 = "//a[text() = '次へ']/@href"
for tr in response.xpath(x1):
data = [t.get() for t in tr.xpath(x2)]
result = dict(zip(header, data))
result.pop("adjust")
result = {k: v.replace(",", "") for k, v in result.items()}
result["date"] = parse_date(result["date"])
query = urllib.parse.urlparse(response.url).query
code = urllib.parse.parse_qs(query).get("code", [""])[0][:-2]
result["quandl_code"] = "TSE/%s" % code
yield result
href = response.xpath(x3)
if href:
yield response.follow(href.get(), self.parse)
| gpl-3.0 |
scotthartbti/android_external_chromium_org | build/config/win/get_msvc_config.py | 53 | 9188 |
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file returns the MSVC config used by the Windows build.
# It's a bit hardcoded right now. I suspect we want to build this functionality
# into GN itself in the future.
import sys
# This script expects one parameter: the path to the root output directory.
# TODO(brettw): do escaping.
def FormatStringForGN(x):
return '"' + x + '"'
def PrintListOfStrings(x):
print '['
for i in x:
print FormatStringForGN(i) + ', '
print ']'
# GN wants absolute system paths to begin with slashes.
sdk_root = '/C:\\Program Files (x86)\\Windows Kits\\8.0\\'
vs_root = '/C:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\'
def GetIncludes():
return [
sdk_root + 'Include\\shared',
sdk_root + 'Include\\um',
sdk_root + 'Include\\winrt',
vs_root + 'VC\\atlmfc\\include'
]
def _FormatAsEnvironmentBlock(envvar_dict):
"""Format as an 'environment block' directly suitable for CreateProcess.
Briefly this is a list of key=value\0, terminated by an additional \0. See
CreateProcess documentation for more details."""
block = ''
nul = '\0'
for key, value in envvar_dict.iteritems():
block += key + '=' + value + nul
block += nul
return block
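# Illustrative example: {'A': '1', 'B': '2'} formats to 'A=1\0B=2\0\0'
# (pair order follows dict iteration order).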
def WriteEnvFile(file_path, values):
    f = open(file_path, "w")
    f.write(_FormatAsEnvironmentBlock(values))
    f.close()
includes = GetIncludes()
# Write the environment files.
WriteEnvFile(sys.argv[1] + '\\environment.x86',
{ 'TMP': 'C:\\Users\\brettw\\AppData\\Local\\Temp',
'SYSTEMROOT': 'C:\\Windows',
'TEMP': 'C:\\Users\\brettw\\AppData\\Local\\Temp',
'LIB': 'c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\LIB;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\ATLMFC\\LIB;C:\\Program Files (x86)\\Microsoft SDKs\\Windows\\v7.0A\\lib;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\LIB;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\ATLMFC\\LIB;C:\\Program Files (x86)\\Microsoft SDKs\\Windows\\v7.0A\\lib;',
'LIBPATH': 'C:\\Windows\\Microsoft.NET\\Framework\\v4.0.30319;C:\\Windows\\Microsoft.NET\\Framework\\v3.5;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\LIB;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\ATLMFC\\LIB;C:\\Windows\\Microsoft.NET\\Framework\\v4.0.30319;C:\\Windows\\Microsoft.NET\\Framework\\v3.5;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\LIB;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\ATLMFC\\LIB;',
'PATH': 'C:\\apps\\depot_tools\\python_bin;c:\\Program Files (x86)\\Microsoft F#\\v4.0\\;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VSTSDB\\Deploy;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\Common7\\IDE\\;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\BIN;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\Common7\\Tools;C:\\Windows\\Microsoft.NET\\Framework\\v4.0.30319;C:\\Windows\\Microsoft.NET\\Framework\\v3.5;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\VCPackages;C:\\Program Files (x86)\\HTML Help Workshop;C:\\Program Files (x86)\\HTML Help Workshop;C:\\Program Files (x86)\\Microsoft SDKs\\Windows\\v7.0A\\bin\\NETFX 4.0 Tools;C:\\Program Files (x86)\\Microsoft SDKs\\Windows\\v7.0A\\bin;C:\\apps\\depot_tools\\python_bin;C:\\apps\\depot_tools\\;C:\\apps\\depot_tools\\;C:\\apps\\depot_tools\\;c:\\Program Files (x86)\\Microsoft F#\\v4.0\\;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VSTSDB\\Deploy;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\Common7\\IDE\\;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\BIN;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\Common7\\Tools;C:\\Windows\\Microsoft.NET\\Framework\\v4.0.30319;C:\\Windows\\Microsoft.NET\\Framework\\v3.5;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\VCPackages;C:\\Program Files (x86)\\HTML Help Workshop;C:\\Program Files (x86)\\Microsoft SDKs\\Windows\\v7.0A\\bin\\NETFX 4.0 Tools;C:\\Program Files (x86)\\Microsoft SDKs\\Windows\\v7.0A\\bin;C:\\Windows\\system32;C:\\Windows;C:\\Windows\\System32\\Wbem;C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\;C:\\windows\\corpam;C:\\python_26_amd64\\files;C:\\Windows\\ccmsetup;c:\\Program Files (x86)\\Microsoft SQL Server\\100\\Tools\\Binn\\;c:\\Program Files\\Microsoft SQL Server\\100\\Tools\\Binn\\;c:\\Program Files\\Microsoft SQL Server\\100\\DTS\\Binn\\;c:\\cygwin\\bin;C:\\apps\\;C:\\apps\\depot_tools;C:\\Program Files (x86)\\Windows Kits\\8.0\\Windows Performance Toolkit\\;C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\;C:\\Program Files (x86)\\Google\\Cert Installer;C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\;C:\\Program Files (x86)\\Google\\google_appengine\\',
'PATHEXT': '=.COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC',
'INCLUDE': 'c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\INCLUDE;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\ATLMFC\\INCLUDE;C:\\Program Files (x86)\\Microsoft SDKs\\Windows\\v7.0A\\include;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\INCLUDE;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\ATLMFC\\INCLUDE;C:\\Program Files (x86)\\Microsoft SDKs\\Windows\\v7.0A\\include;'})
WriteEnvFile(sys.argv[1] + '\\environment.x64',
{ 'TMP': 'C:\\Users\\brettw\\AppData\\Local\\Temp',
'SYSTEMROOT': 'C:\\Windows',
'TEMP': 'C:\\Users\\brettw\\AppData\\Local\\Temp',
'LIB': 'c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\LIB\\amd64;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\ATLMFC\\LIB\\amd64;C:\\Program Files (x86)\\Microsoft SDKs\\Windows\\v7.0A\\lib\\x64;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\LIB;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\ATLMFC\\LIB;C:\\Program Files (x86)\\Microsoft SDKs\\Windows\\v7.0A\\lib;',
'LIBPATH': 'C:\\Windows\\Microsoft.NET\\Framework64\\v4.0.30319;C:\\Windows\\Microsoft.NET\\Framework64\\v3.5;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\LIB\\amd64;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\ATLMFC\\LIB\\amd64;C:\\Windows\\Microsoft.NET\\Framework\\v4.0.30319;C:\\Windows\\Microsoft.NET\\Framework\\v3.5;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\LIB;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\ATLMFC\\LIB;',
'PATH': 'C:\\apps\\depot_tools\\python_bin;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\BIN\\amd64;C:\\Windows\\Microsoft.NET\\Framework64\\v4.0.30319;C:\\Windows\\Microsoft.NET\\Framework64\\v3.5;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\VCPackages;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\Common7\\IDE;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\Common7\\Tools;C:\\Program Files (x86)\\HTML Help Workshop;C:\\Program Files (x86)\\HTML Help Workshop;C:\\Program Files (x86)\\Microsoft SDKs\\Windows\\v7.0A\\bin\\NETFX 4.0 Tools\\x64;C:\\Program Files (x86)\\Microsoft SDKs\\Windows\\v7.0A\\bin\\x64;C:\\Program Files (x86)\\Microsoft SDKs\\Windows\\v7.0A\\bin;C:\\apps\\depot_tools\\python_bin;C:\\apps\\depot_tools\\;C:\\apps\\depot_tools\\;C:\\apps\\depot_tools\\;c:\\Program Files (x86)\\Microsoft F#\\v4.0\\;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VSTSDB\\Deploy;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\Common7\\IDE\\;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\BIN;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\Common7\\Tools;C:\\Windows\\Microsoft.NET\\Framework\\v4.0.30319;C:\\Windows\\Microsoft.NET\\Framework\\v3.5;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\VCPackages;C:\\Program Files (x86)\\HTML Help Workshop;C:\\Program Files (x86)\\Microsoft SDKs\\Windows\\v7.0A\\bin\\NETFX 4.0 Tools;C:\\Program Files (x86)\\Microsoft SDKs\\Windows\\v7.0A\\bin;C:\\Windows\\system32;C:\\Windows;C:\\Windows\\System32\\Wbem;C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\;C:\\windows\\corpam;C:\\python_26_amd64\\files;C:\\Windows\\ccmsetup;c:\\Program Files (x86)\\Microsoft SQL Server\\100\\Tools\\Binn\\;c:\\Program Files\\Microsoft SQL Server\\100\\Tools\\Binn\\;c:\\Program Files\\Microsoft SQL Server\\100\\DTS\\Binn\\;c:\\cygwin\\bin;C:\\apps\\;C:\\apps\\depot_tools;C:\\Program Files (x86)\\Windows Kits\\8.0\\Windows Performance Toolkit\\;C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\;C:\\Program Files (x86)\\Google\\Cert Installer;C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\;C:\\Program Files (x86)\\Google\\google_appengine\\',
'PATHEXT': '.COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC',
'INCLUDE': 'c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\INCLUDE;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\ATLMFC\\INCLUDE;C:\\Program Files (x86)\\Microsoft SDKs\\Windows\\v7.0A\\include;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\INCLUDE;c:\\Program Files (x86)\\Microsoft Visual Studio 10.0\\VC\\ATLMFC\\INCLUDE;C:\\Program Files (x86)\\Microsoft SDKs\\Windows\\v7.0A\\include;'})
# Return the includes and such.
print '['
PrintListOfStrings(includes)
print ']'
| bsd-3-clause |
jeremiahmarks/sl4a | python/src/Lib/bsddb/dbobj.py | 39 | 11719 |
#-------------------------------------------------------------------------
# This file contains real Python object wrappers for DB and DBEnv
# C "objects" that can be usefully subclassed. The previous SWIG
# based interface allowed this thanks to SWIG's shadow classes.
# -- Gregory P. Smith
#-------------------------------------------------------------------------
#
# (C) Copyright 2001 Autonomous Zone Industries
#
# License: This is free software. You may use this software for any
# purpose including modification/redistribution, so long as
# this header remains intact and that you do not claim any
# rights of ownership or authorship of this software. This
# software has been tested, but no warranty is expressed or
# implied.
#
#
# TODO it would be *really nice* to have an automatic shadow class populator
# so that new methods don't need to be added here manually after being
# added to _bsddb.c.
#
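# A minimal sketch of such a populator (illustrative only, not used here):
# a __getattr__ that forwards unknown attributes to the wrapped C object
# would replace most of the hand-written methods below.
#
# class _AutoShadow(object):
#     def __getattr__(self, name):
#         # Only invoked when normal lookup fails; delegate to the C object.
#         return getattr(self._cobj, name)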
import sys
absolute_import = (sys.version_info[0] >= 3)
if absolute_import :
# Because this syntax is not valid before Python 2.5
exec("from . import db")
else :
import db
if sys.version_info[0:2] <= (2, 5) :
try:
from UserDict import DictMixin
except ImportError:
# DictMixin is new in Python 2.3
class DictMixin: pass
MutableMapping = DictMixin
else :
import collections
MutableMapping = collections.MutableMapping
class DBEnv:
def __init__(self, *args, **kwargs):
self._cobj = apply(db.DBEnv, args, kwargs)
def close(self, *args, **kwargs):
return apply(self._cobj.close, args, kwargs)
def open(self, *args, **kwargs):
return apply(self._cobj.open, args, kwargs)
def remove(self, *args, **kwargs):
return apply(self._cobj.remove, args, kwargs)
def set_shm_key(self, *args, **kwargs):
return apply(self._cobj.set_shm_key, args, kwargs)
def set_cachesize(self, *args, **kwargs):
return apply(self._cobj.set_cachesize, args, kwargs)
def set_data_dir(self, *args, **kwargs):
return apply(self._cobj.set_data_dir, args, kwargs)
def set_flags(self, *args, **kwargs):
return apply(self._cobj.set_flags, args, kwargs)
def set_lg_bsize(self, *args, **kwargs):
return apply(self._cobj.set_lg_bsize, args, kwargs)
def set_lg_dir(self, *args, **kwargs):
return apply(self._cobj.set_lg_dir, args, kwargs)
def set_lg_max(self, *args, **kwargs):
return apply(self._cobj.set_lg_max, args, kwargs)
def set_lk_detect(self, *args, **kwargs):
return apply(self._cobj.set_lk_detect, args, kwargs)
if db.version() < (4,5):
def set_lk_max(self, *args, **kwargs):
return apply(self._cobj.set_lk_max, args, kwargs)
def set_lk_max_locks(self, *args, **kwargs):
return apply(self._cobj.set_lk_max_locks, args, kwargs)
def set_lk_max_lockers(self, *args, **kwargs):
return apply(self._cobj.set_lk_max_lockers, args, kwargs)
def set_lk_max_objects(self, *args, **kwargs):
return apply(self._cobj.set_lk_max_objects, args, kwargs)
def set_mp_mmapsize(self, *args, **kwargs):
return apply(self._cobj.set_mp_mmapsize, args, kwargs)
def set_timeout(self, *args, **kwargs):
return apply(self._cobj.set_timeout, args, kwargs)
def set_tmp_dir(self, *args, **kwargs):
return apply(self._cobj.set_tmp_dir, args, kwargs)
def txn_begin(self, *args, **kwargs):
return apply(self._cobj.txn_begin, args, kwargs)
def txn_checkpoint(self, *args, **kwargs):
return apply(self._cobj.txn_checkpoint, args, kwargs)
def txn_stat(self, *args, **kwargs):
return apply(self._cobj.txn_stat, args, kwargs)
def set_tx_max(self, *args, **kwargs):
return apply(self._cobj.set_tx_max, args, kwargs)
def set_tx_timestamp(self, *args, **kwargs):
return apply(self._cobj.set_tx_timestamp, args, kwargs)
def lock_detect(self, *args, **kwargs):
return apply(self._cobj.lock_detect, args, kwargs)
def lock_get(self, *args, **kwargs):
return apply(self._cobj.lock_get, args, kwargs)
def lock_id(self, *args, **kwargs):
return apply(self._cobj.lock_id, args, kwargs)
def lock_put(self, *args, **kwargs):
return apply(self._cobj.lock_put, args, kwargs)
def lock_stat(self, *args, **kwargs):
return apply(self._cobj.lock_stat, args, kwargs)
def log_archive(self, *args, **kwargs):
return apply(self._cobj.log_archive, args, kwargs)
def set_get_returns_none(self, *args, **kwargs):
return apply(self._cobj.set_get_returns_none, args, kwargs)
def log_stat(self, *args, **kwargs):
return apply(self._cobj.log_stat, args, kwargs)
if db.version() >= (4,1):
def dbremove(self, *args, **kwargs):
return apply(self._cobj.dbremove, args, kwargs)
def dbrename(self, *args, **kwargs):
return apply(self._cobj.dbrename, args, kwargs)
def set_encrypt(self, *args, **kwargs):
return apply(self._cobj.set_encrypt, args, kwargs)
if db.version() >= (4,4):
def lsn_reset(self, *args, **kwargs):
return apply(self._cobj.lsn_reset, args, kwargs)
class DB(MutableMapping):
def __init__(self, dbenv, *args, **kwargs):
# give it the proper DBEnv C object that it's expecting
self._cobj = apply(db.DB, (dbenv._cobj,) + args, kwargs)
# TODO are there other dict methods that need to be overridden?
def __len__(self):
return len(self._cobj)
def __getitem__(self, arg):
return self._cobj[arg]
def __setitem__(self, key, value):
self._cobj[key] = value
def __delitem__(self, arg):
del self._cobj[arg]
if sys.version_info[0:2] >= (2, 6) :
def __iter__(self) :
return self._cobj.__iter__()
def append(self, *args, **kwargs):
return apply(self._cobj.append, args, kwargs)
def associate(self, *args, **kwargs):
return apply(self._cobj.associate, args, kwargs)
def close(self, *args, **kwargs):
return apply(self._cobj.close, args, kwargs)
def consume(self, *args, **kwargs):
return apply(self._cobj.consume, args, kwargs)
def consume_wait(self, *args, **kwargs):
return apply(self._cobj.consume_wait, args, kwargs)
def cursor(self, *args, **kwargs):
return apply(self._cobj.cursor, args, kwargs)
def delete(self, *args, **kwargs):
return apply(self._cobj.delete, args, kwargs)
def fd(self, *args, **kwargs):
return apply(self._cobj.fd, args, kwargs)
def get(self, *args, **kwargs):
return apply(self._cobj.get, args, kwargs)
def pget(self, *args, **kwargs):
return apply(self._cobj.pget, args, kwargs)
def get_both(self, *args, **kwargs):
return apply(self._cobj.get_both, args, kwargs)
def get_byteswapped(self, *args, **kwargs):
return apply(self._cobj.get_byteswapped, args, kwargs)
def get_size(self, *args, **kwargs):
return apply(self._cobj.get_size, args, kwargs)
def get_type(self, *args, **kwargs):
return apply(self._cobj.get_type, args, kwargs)
def join(self, *args, **kwargs):
return apply(self._cobj.join, args, kwargs)
def key_range(self, *args, **kwargs):
return apply(self._cobj.key_range, args, kwargs)
def has_key(self, *args, **kwargs):
return apply(self._cobj.has_key, args, kwargs)
def items(self, *args, **kwargs):
return apply(self._cobj.items, args, kwargs)
def keys(self, *args, **kwargs):
return apply(self._cobj.keys, args, kwargs)
def open(self, *args, **kwargs):
return apply(self._cobj.open, args, kwargs)
def put(self, *args, **kwargs):
return apply(self._cobj.put, args, kwargs)
def remove(self, *args, **kwargs):
return apply(self._cobj.remove, args, kwargs)
def rename(self, *args, **kwargs):
return apply(self._cobj.rename, args, kwargs)
def set_bt_minkey(self, *args, **kwargs):
return apply(self._cobj.set_bt_minkey, args, kwargs)
def set_bt_compare(self, *args, **kwargs):
return apply(self._cobj.set_bt_compare, args, kwargs)
def set_cachesize(self, *args, **kwargs):
return apply(self._cobj.set_cachesize, args, kwargs)
def set_flags(self, *args, **kwargs):
return apply(self._cobj.set_flags, args, kwargs)
def set_h_ffactor(self, *args, **kwargs):
return apply(self._cobj.set_h_ffactor, args, kwargs)
def set_h_nelem(self, *args, **kwargs):
return apply(self._cobj.set_h_nelem, args, kwargs)
def set_lorder(self, *args, **kwargs):
return apply(self._cobj.set_lorder, args, kwargs)
def set_pagesize(self, *args, **kwargs):
return apply(self._cobj.set_pagesize, args, kwargs)
def set_re_delim(self, *args, **kwargs):
return apply(self._cobj.set_re_delim, args, kwargs)
def set_re_len(self, *args, **kwargs):
return apply(self._cobj.set_re_len, args, kwargs)
def set_re_pad(self, *args, **kwargs):
return apply(self._cobj.set_re_pad, args, kwargs)
def set_re_source(self, *args, **kwargs):
return apply(self._cobj.set_re_source, args, kwargs)
def set_q_extentsize(self, *args, **kwargs):
return apply(self._cobj.set_q_extentsize, args, kwargs)
def stat(self, *args, **kwargs):
return apply(self._cobj.stat, args, kwargs)
def sync(self, *args, **kwargs):
return apply(self._cobj.sync, args, kwargs)
def type(self, *args, **kwargs):
return apply(self._cobj.type, args, kwargs)
def upgrade(self, *args, **kwargs):
return apply(self._cobj.upgrade, args, kwargs)
def values(self, *args, **kwargs):
return apply(self._cobj.values, args, kwargs)
def verify(self, *args, **kwargs):
return apply(self._cobj.verify, args, kwargs)
def set_get_returns_none(self, *args, **kwargs):
return apply(self._cobj.set_get_returns_none, args, kwargs)
if db.version() >= (4,1):
def set_encrypt(self, *args, **kwargs):
return apply(self._cobj.set_encrypt, args, kwargs)
class DBSequence:
def __init__(self, *args, **kwargs):
self._cobj = apply(db.DBSequence, args, kwargs)
def close(self, *args, **kwargs):
return apply(self._cobj.close, args, kwargs)
def get(self, *args, **kwargs):
return apply(self._cobj.get, args, kwargs)
def get_dbp(self, *args, **kwargs):
return apply(self._cobj.get_dbp, args, kwargs)
def get_key(self, *args, **kwargs):
return apply(self._cobj.get_key, args, kwargs)
def init_value(self, *args, **kwargs):
return apply(self._cobj.init_value, args, kwargs)
def open(self, *args, **kwargs):
return apply(self._cobj.open, args, kwargs)
def remove(self, *args, **kwargs):
return apply(self._cobj.remove, args, kwargs)
def stat(self, *args, **kwargs):
return apply(self._cobj.stat, args, kwargs)
def set_cachesize(self, *args, **kwargs):
return apply(self._cobj.set_cachesize, args, kwargs)
def set_flags(self, *args, **kwargs):
return apply(self._cobj.set_flags, args, kwargs)
def set_range(self, *args, **kwargs):
return apply(self._cobj.set_range, args, kwargs)
def get_cachesize(self, *args, **kwargs):
return apply(self._cobj.get_cachesize, args, kwargs)
def get_flags(self, *args, **kwargs):
return apply(self._cobj.get_flags, args, kwargs)
def get_range(self, *args, **kwargs):
return apply(self._cobj.get_range, args, kwargs)
| apache-2.0 |
santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/SQLAlchemy-0.4.3-py2.5.egg/sqlalchemy/sql/functions.py | 1 | 2720 |
from sqlalchemy import types as sqltypes
from sqlalchemy.sql.expression import _Function, _literal_as_binds, ClauseList, _FigureVisitName
from sqlalchemy.sql import operators
class _GenericMeta(_FigureVisitName):
def __init__(cls, clsname, bases, dict):
cls.__visit_name__ = 'function'
type.__init__(cls, clsname, bases, dict)
def __call__(self, *args, **kwargs):
args = [_literal_as_binds(c) for c in args]
return type.__call__(self, *args, **kwargs)
class GenericFunction(_Function):
__metaclass__ = _GenericMeta
def __init__(self, type_=None, group=True, args=(), **kwargs):
self.packagenames = []
self.oid_column = None
self.name = self.__class__.__name__
self._bind = kwargs.get('bind', None)
if group:
self.clause_expr = ClauseList(operator=operators.comma_op, group_contents=True, *args).self_group()
else:
self.clause_expr = ClauseList(operator=operators.comma_op, group_contents=True, *args)
self.type = sqltypes.to_instance(type_ or getattr(self, '__return_type__', None))
class AnsiFunction(GenericFunction):
def __init__(self, **kwargs):
GenericFunction.__init__(self, **kwargs)
class coalesce(GenericFunction):
def __init__(self, *args, **kwargs):
kwargs.setdefault('type_', _type_from_args(args))
GenericFunction.__init__(self, args=args, **kwargs)
class now(GenericFunction):
__return_type__ = sqltypes.DateTime
class concat(GenericFunction):
__return_type__ = sqltypes.String
def __init__(self, *args, **kwargs):
GenericFunction.__init__(self, args=args, **kwargs)
class char_length(GenericFunction):
__return_type__ = sqltypes.Integer
def __init__(self, arg, **kwargs):
GenericFunction.__init__(self, args=[arg], **kwargs)
class current_date(AnsiFunction):
__return_type__ = sqltypes.Date
class current_time(AnsiFunction):
__return_type__ = sqltypes.Time
class current_timestamp(AnsiFunction):
__return_type__ = sqltypes.DateTime
class current_user(AnsiFunction):
__return_type__ = sqltypes.String
class localtime(AnsiFunction):
__return_type__ = sqltypes.DateTime
class localtimestamp(AnsiFunction):
__return_type__ = sqltypes.DateTime
class session_user(AnsiFunction):
__return_type__ = sqltypes.String
class sysdate(AnsiFunction):
__return_type__ = sqltypes.DateTime
class user(AnsiFunction):
__return_type__ = sqltypes.String
def _type_from_args(args):
for a in args:
if not isinstance(a.type, sqltypes.NullType):
return a.type
else:
return sqltypes.NullType
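# Usage sketch (illustrative; assumes the standard func-to-GenericFunction
# lookup wiring of this SQLAlchemy release):
#   from sqlalchemy import func
#   func.coalesce(col_a, col_b)  # resolves to the coalesce class above,
#                                # picking its type from the first typed arg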
| bsd-3-clause |
sarahdunbar/Conway-Life | conway.py | 1 | 28959 |
"""
conway.py
Author: <your name here>
Credit: <list sources used, if any>
Assignment:
Write and submit a program that plays Conway's Game of Life, per
https://github.com/HHS-IntroProgramming/Conway-Life
from ggame import App, RectangleAsset, ImageAsset, Sprite, LineStyle, Color, Frame
class Cubey(Sprite):
def __init__(self, position):
myapp = App()
myapp.run()
"""
def Init():
name = input ("Please select a name: ")
age = input (name + ", how old are you? ")
age = int (age)
g = 0
while g == 0:
if age < 6:
age = input ("Even prodigies can't be that young. Please select again: ")
age = int (age)
elif age > 13 and age < 19:
age = input ("You're a child, not a teenager. Please select again: ")
age = int (age)
elif age >= 19:
age = input ("You're a child, not an adult. Please select again: ")
age = int (age)
else:
print ("That will do. Now, time to become yourself.")
g = 1
print (" ")
print ("You will have 30 attribute points to spend on six categories: ")
namestat = ["Bravery", "Tenacity", "Wisdom", "Cleverness", "Luck", "Manipulation"]
for i in namestat:
print (i)
print (" ")
num = 30
list1 = [0, 0, 0, 0, 0, 0]
list2 = ["Bravery is the ability to act in a situation. ", "Tenacity is the ability to withstand pain. ", "Wisdom is the ability to acquire and recall knowledge, as well as the knowledge of when to use it. ", "Cleverness is the ability to solve problems quickly. ", "Luck is a natural gift not quite described. ", "Manipulation is the ability to change minds in ways less honorable. "]
for x in range (0, 6):
desc = list2[x]
print (desc)
namre = namestat[x]
e = 0
while e == 0:
g = 0
while g == 0:
ent = input (namre + ", out of ten. ")
ent = int(ent)
if ent < 0 or ent > 10:
print ("Don't be a novelty. Between zero and ten, please. ")
else:
g = 1
print (namre + " total: " + str(ent))
num = num - ent
if num < 0:
print ("Oh dear, you don't have enough attribute points for that! Please select again! ")
print (" ")
num = num + ent
else:
e = 1
print ("Points left: " + str(num))
list1[x] = ent
print (" ")
hero = [0, 1]
intel = [3, 5]
spir = [2, 4]
att = ["Heroicness", "Intelligence", "Spirit"]
nums2 = [hero, intel, spir]
names = [0, 0, 0]
values = [0, 0, 0]
print (" ")
for x in range (0, 3):
attr = att[x]
varlist = nums2[x]
major = varlist[0]
minor = varlist[1]
majname = namestat[major]
minname = namestat[minor]
lvl = list1[major] + list1[minor]
print (attr + ": " + majname + " plus " + minname + ".")
print (attr + ": " + str(lvl))
values[x] = lvl
namer = input("Type anything to continue. ")
names[x] = namer
print (" ")
valuez = values[:]
val2 = valuez.sort()
return values
def Transition():
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
def Desc(room, ede, ba, ri7):
if room == 1:
print ("Courtyard Proper")
print ("You are standing outside a spindly northern tower with a stained glass door. Around you, "
+ "an endless sea of grass ripples in the wind. It is eerily green, almost neon in its brightness. " +
"Aside from overgrown paths east and west through the tresses, there is nothing for miles. " +
"Beside the door is a sign -I OPEN WITH A KEY-")
print ("Directions: n, e, w")
if room == 2:
print ("Sasha's Courtyard")
print ("You are standing in a flattened patch of grass known as Sasha's Courtyard. " +
"Around you, the green grass extends almost to your head. A path winds east through the jade ocean towards " +
"what looks like a tower.")
print ("Directions: e")
if room == 3:
if ba == 0:
print ("Small Ditch")
print ("The path east leading to the small ditch you are standing in is overgrown with moss. There is a hole " +
"in the northern cave wall. Even in here, the grass is eerily green. ")
print ("Directions: w, n")
if ba == 1:
print ("Crystal Valley")
print ("The path east leading to the valley you are standing in is clean and well swept. " +
"As you go deeper, the walls get progressively shinier, until it's positively glowing blue. There is a gilded hole in the " +
"northern wall. ")
print ("Directions: w, n")
if room == 4:
print ("Crystal Cave")
print ("The passage takes you into a small cave adorned with rainbow crystals. Somehow, you think you "
+ "have seen this place before... To the east, an oaken door. Beside it, a sign. -I OPEN WITH A KEY-")
print ("Directions: s, e")
if room == 5:
print ("Mineshaft")
print ("As you walk along the rusted tracks, you get the increasing feeling that you're walking in a circle. "
+ "But then, a light ahead!")
print ("Directions: w, e")
if room == 6:
print ("Atrium")
print ("You are standing in the dark castle room known as the atrium. The only light comes from the stained " +
"glass window of the southern door. Stairways wind up and down into blackness. ")
print ("Directions: u, d, s")
if room == 7:
print ("Billiard Room")
if ri7[0] == 0:
print ("What was once the billiard room is now a misty, impenetrable grey. The only piece of furniture is a withered old pool table in the corner. Stairs wind down the stone walls into blackness. ")
print ("But as you stand here, you here distinct whispering, almost as if something has been here before. You turn " +
"to look, but there is nothing behind you. How odd. ")
if ri7[0] == 1:
print ("What once was a billiard room is now grey and filled with light. The only piece of furniture is a table, upon which rests a single stone." +
" When you turn away, the stone almost appears to be moving. How odd. ")
print ("Directions: d")
if room == 8:
print ("Basement")
print ("In here, it is so dark that you can't " +
"see your feet in front of you. Even though there are no windows, this room is distinctly drafty. ")
print ("Directions: u, ?")
if room == 9:
print ("Southern Corridor")
print ("The floor is "
+ "distinctly earthy, and you fear that the loose dirt ceiling will collapse on your head at any minute.")
print ("Directions: n, s")
if room == 10:
print ("Erdgeschoss Grounds")
print ("Stumbling out of the passage, the first thing that you see is the brilliant light. " +
"A crystalline lake stretches before you, the mountains reflected in pristine detail in the water. The "
"grassy tower seems a million miles away...")
def MoveProc(move):
j = 0
while j == 0:
if move == "n":
dire = 0
j = 1
elif move == "e":
dire = 1
j = 1
elif move == "s":
dire = 2
j = 1
elif move == "w":
dire = 3
j = 1
elif move == "u":
dire = 4
j = 1
elif move == "d":
dire = 5
j = 1
else:
print ("Incorrect format")
move = input (": ")
return dire
def Movement(ri7, dor, values, room, dire, turncounter):
r1 = [6, 3, 0, 2, 0, 0]
r2 = [0, 1, 0, 0, 0, 0]
r3 = [4, 0, 0, 1, 0, 0]
r4 = [0, 5, 3, 0, 0, 0]
r5 = [0, 9, 0, 4, 0, 0]
r6 = [0, 0, 1, 0, 7, 8]
r7 = [0, 0, 0, 0, 0, 6]
r8 = [0, 0, 9, 0, 6, 0]
r9 = [8, 0, 10, 0, 0, 0]
r10 = [0, 0, 0, 0, 0, 0]
rlist = [r1, r2, r3, r4, r5, r6, r7, r8, r9, r10]
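# Each rN list holds the room reached by moving [n, e, s, w, u, d] from
# room N; 0 means there is no exit in that direction.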
rnum = room - 1
roomlist = rlist[rnum]
pos = roomlist[dire]
if pos == 0:
print ("You cannot go in that direction! ")
turncounter = turncounter - 1
return room, pos, turncounter
else:
if room == 1:
if pos == 6:
doc = dor[0]
if doc == 0:
print ("The door is locked! ")
turncounter = turncounter - 1
return room, pos, turncounter
else:
p = 1
else:
p = 1
if room == 4:
if pos == 5:
doc = dor[1]
if doc == 0:
print ("The door is locked! ")
turncounter = turncounter - 1
return room, pos, turncounter
else:
p = 1
else:
p = 1
if room == 9:
if pos == 10:
bleach = ri7[0]
if bleach == 0:
print ("Suddenly, everything goes ")
print ("Quiet. ")
print (" ")
print ("A lone voice through the shadows - 'Please, please can you give us something?'")
print ("'Something to do in our lonely isolation?'")
print ("'We do love ever so much to play billiards...' ")
print ("'Well, not as much as we wish we did, but we are with few options now that we have been trapped.'")
print ("'But perhaps you could give us something to play with?'")
print ("The voice deepens - 'Otherwise, we cannot let you pass.'")
print ("You feel tiny hands pushing you back, back into the corridor. Whatever was beyond, is for now out of reach. ")
turncounter = turncounter - 1
print (" ")
return room, pos, turncounter
else:
print (" ")
print ("The air gives way before you, and you step into the light. ")
print (" ")
room = pos
return room, pos, turncounter
room = pos
return room, pos, turncounter
def inventory(lizt, room, rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10):
z = 0
d = [0, 0, 0]
ret = [rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10]
rnum = room
rlist = ret[rnum]
length = len(rlist)
wut = [""]*length
for i in range (0, length):
num = rlist[i]
if num == 0:
g = 3
if num == 1:
z = 1
smi = lizt[i]
d[i] = smi
if z == 1:
print (" ")
for i in range(0, length):
smi = d[i]
if smi == 0:
g = 3
else:
print ("There is a " + smi + " here. ")
print (" ")
return ret, rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10
else:
print (" ")
return ret, rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10
def selfinv(lizt, rim):
rib = len(rim)
for i in range (0, rib):
hal = rim[i]
if hal == 0:
g = 4
if hal == 1:
namee = lizt[i]
print ("You are holding a " + namee + ". ")
print (" ")
return rim
def dropfunc(turncounter, word, jj, lizt, movescript, room, rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10):
ret = [rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10]
rlizzle = ret[room]
ent2 = movescript[1]
if ent2 == "stone" or ent2 == "envelope" or ent2 == "key":
g = 3
elif ent2 == "small" or ent2 == "gilded" or ent2 == "wooden":
ent3 = movescript[2]
if ent3 == "stone" or ent3 == "envelope" or ent3 == "key":
g = 3
else:
print ("Object to " + word + " not understood. ")
turncounter = turncounter - 1
print (" ")
return turncounter, rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10
else:
print ("Object to " + word + " not understood. ")
print (" ")
turncounter = turncounter - 1
return turncounter, rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10
if g == 3:
if ent2 == "small" or ent2 == "stone":
obj = 0
if jj == 1:
check = rim[0]
if jj == 2:
check = rlizzle[0]
if ent2 == "gilded" or ent2 == "envelope":
obj = 1
if jj == 1:
check = rim[1]
if jj == 2:
check = rlizzle[1]
if ent2 == "wooden" or ent2 == "key":
obj = 2
if jj == 1:
check = rim[2]
if jj == 2:
check = rlizzle[2]
if check == 0:
if jj == 1:
print ("You are not holding that object! ")
if jj == 2:
print ("That object is not in this room! ")
print (" ")
turncounter = turncounter - 1
return turncounter, rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10
if check == 1:
namer = lizt[obj]
if jj == 1:
rlizzle[obj] = 1
rim[obj] = 0
print ("You are no longer holding a " + namer + ".")
if room == 7 and obj == 0:
print ("As you place the stone on the table, the room suddenly feels lighter. Sunlight streams through the windows, illuminating the walls, the floors. To your left, you hear what sounds like a child's voice.")
print ("'Thank you.'")
if jj == 2:
rim[obj] = 1
rlizzle[obj] = 0
print ("You have picked up the " + namer + ".")
if room == 7 and obj == 0:
print ("The voices fade, and the room suddenly gets colder. The air feels denser somehow, as if you have disappointed it. ")
print ("Your hands suddenly feel wet, as if with ghostly tears. You try to wipe them on your cloak, but the feeling remains. ")
print (" ")
return turncounter, rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10
def openi (dor, bur, turncounter, values, lizt, movescript, room, rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10, ba):
ede = 0
ent2 = movescript[1]
t = True
if t == True:
if ent2 == "small" or ent2 == "stone":
g = 3
obj = 0
elif ent2 == "gilded" or ent2 == "envelope":
g = 3
obj = 1
elif ent2 == "wooden" or ent2 == "key":
g = 3
obj = 2
elif ent2 == "oak" or ent2 == "door":
g = 3
obj = 3
else:
print ("Object not understood. ")
turncounter = turncounter - 1
return dor, bur, ba, ede, turncounter, values, lizt, movescript, room, rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10
rlizzle = lizt[obj]
if obj == 0 or obj == 1 or obj == 2:
check = rim[obj]
if check == 0:
print ("You are not holding a " + rlizzle + "!")
turncounter = turncounter - 1
return dor, bur, ba, ede, turncounter, values, lizt, movescript, room, rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10
if obj == 0:
if bur == 0:
print ("You palm the small stone gingerly in your hands. It feels like a normal rock.")
return dor, bur, ba, ede, turncounter, values, lizt, movescript, room, rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10
if bur == 1:
print ("Following the instructions on the letter, you touch the top of the stone with your index finger and slide it open. A bright light! ")
print ("Words appear in the air in front of you... - 'Such a small, shallow ditch it once was...' ")
bur = 2
ede = 1
return dor, bur, ba, ede, turncounter, values, lizt, movescript, room, rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10
if bur == 2:
print ("Even though you touch the stone, it does not open anymore. ")
print ("Now, the only thing it's good for is as a billiard ball. ")
return dor, bur, ba, ede, turncounter, values, lizt, movescript, room, rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10
if obj == 1:
print ("You try your best, but the envelope refuses to open... You must rely on your special skills!")
her = values[0]
intl = values[1]
spir = values[2]
if intl > 9:
print ("You think for a moment, then slide your finger under the flap of the envelope and rip. Who would have thought opening an envelope would be so easy?")
t = 8
elif her > 9:
print ("This is a job for a hero! You throw the envelope on the ground and smash it until it opens. You feel proud.")
t = 8
elif spir > 9:
print ("The moon... the rivers... the trees... Magically, the letter slides out of the envelope. ")
t = 8
print (" ")
print ("As soon as you pull out the letter, it begins to disintigrate. Quickly, you scan it before it disappears in your hands. ")
print ("Touch your index finger to the top and open the thing you've been holding from the beginning. Do not disappoint me. ")
bur = 1
rim[1] = 0
return dor, bur, ba, ede, turncounter, values, lizt, movescript, room, rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10
if obj == 2:
print ("The wooden key refuses to open. You know you're allowed to open doors, right?")
turncounter = turncounter - 1
print (" ")
return dor, bur, ba, ede, turncounter, values, lizt, movescript, room, rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10
if obj == 3:
if room == 4 or room == 1:
key = rim[2]
if key == 1:
if room == 1:
nom = 0
if room == 4:
nom = 1
dor[nom] = 1
print ("You have unlocked the door!")
return dor, bur, ba, ede, turncounter, values, lizt, movescript, room, rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10
else:
print ("The door is locked! ")
turncounter = turncounter - 1
return dor, bur, ba, ede, turncounter, values, lizt, movescript, room, rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10
else:
print ("What door?")
print (" ")
turncounter = turncounter - 1
return dor, bur, ba, ede, turncounter, values, lizt, movescript, room, rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10
amplaying = True
ba = 0
bur = 0
ede = 0
room = 1
turncounter = 0
ret = []
dor = [0, 0]
rim = [1, 0, 0]
ri1 = [0, 0, 0]
ri2 = [0, 1, 0]
ri3 = [0, 0, 0]
ri4 = [0, 0, 0]
ri5 = [0, 0, 0]
ri6 = [0, 0, 0]
ri7 = [0, 0, 0]
ri8 = [0, 0, 0]
ri9 = [0, 0, 0]
ri10 = [0, 0, 0]
values = Init()
t = Transition()
print ("You have been here before. ")
print ("You have 40 turns. ")
print ("Do not disappoint me. ")
print (" ")
print ("Controls: n - north, s - south, e - east, w - west, u - up, d - down, i - inventory, l - look, drop - drop object, grab - pick up object, open - open object")
print (" ")
lizt = ["small stone", "gilded envelope", "wooden key", "oak door"]
t = Desc(room, ede, ba, ri7)
while amplaying == True:
turncounterz = 40 - turncounter
print ("Turns Remaining: " + str(turncounterz))
print (" ")
move = input (": ")
print (" ")
print ("RESULT")
print (" ")
move = move.lower()
movescript = move.split(" ")
movescript.append(" ")
ent1 = movescript[0]
if ent1 == "n" or ent1 == "s" or ent1 == "e" or ent1 == "w" or ent1 == "u" or ent1 == "d":
dire = MoveProc(ent1)
room, pos, turncounter = Movement(ri7, dor, values, room, dire, turncounter)
if pos == 0:
j = 3
else:
t = Desc(room, ede, ba, ri7)
ret, rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10 = inventory(lizt, room, rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10)
turncounter = turncounter + 1
elif ent1 == "i":
rim = selfinv(lizt, rim)
elif ent1 == "l" or ent1 == "look":
t = Desc(room, ede, ba, ri7)
ret, rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10 = inventory(lizt, room, rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10)
elif ent1 == "drop" or ent1 == "grab":
if ent1 == "drop":
word = "drop"
jj = 1
if ent1 == "grab":
word = "grab"
jj = 2
ent2 = movescript[1]
if ent2 == " ":
print ("Please be more specific. What would you like to " + word + "?")
turncounter = turncounter - 1
print (" ")
else:
turncounter, rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10 = dropfunc(turncounter, word, jj, lizt, movescript, room, rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10)
turncounter = turncounter + 1
elif ent1 == "open":
ent2 = movescript[1]
if ent2 == " ":
print ("Please be more specific. What would you like to open?")
turncounter = turncounter - 1
print (" ")
else:
dor, bur, ba, ede, turncounter, values, lizt, movescript, room, rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10 = openi(dor, bur, turncounter, values, lizt, movescript, room, rim, ri1, ri2, ri3, ri4, ri5, ri6, ri7, ri8, ri9, ri10, ba)
turncounter = turncounter + 1
print (" ")
else:
print ("Invalid command. ")
print (" ")
if ede == 1:
ri3[2] = 1
ba = 1
ede = 0
if turncounter == 40:
print ("The easy part is looking. ")
k = input ("It's hard enough to find. ")
print (" ")
amplaying = False
if room == 10:
r = 6
turncountere = 40 - turncounter
print (" ")
while True:
run = input (": ")
if run == "n" or run == "s" or run == "e" or run == "w":
dd = "h"
break
elif run == "i" or run == "l":
dd = "i"
break
elif run == "drop" or run == "grab" or run == "open":
dd = "s"
break
else:
print ("Invalid command. ")
print (" ")
if dd == "h":
stat = turncountere + values[0]
num = 0
if dd == "i":
stat = turncountere + values[1]
num = 1
if dd == "s":
stat = turncountere + values[2]
num = 2
print (" ")
print ("Suddenly, the ground falls away beneath your feet and you're falling... falling... ")
t = 0
while t == 0:
if num == 0:
print ("Fear fogs your vision, and you can't see. You reach deep inside yourself to find some kind of courage... ")
if stat > 30:
k = input ("You conjure up the images of your triumphs, and feel brave once again. ")
p = Transition()
print ("A voice: 'It's time to wake up now.' ")
t = 1
else:
k = input ("... but it's all too much. You give in to the terror. ")
break
if num == 1:
print ("As you fall, you feel panic beginning to overwhelm your intellect. You struggle to focus...")
if stat > 30:
k = input ("You breathe deeply, and your head begins to clear. It's so obvious now...")
p = Transition()
print ("A voice: 'It's time to wake up now.' ")
t = 1
else:
k = input ("... but you just can't focus. Already, you feel yourself giving in. ")
break
if num == 2:
print ("But... but this isn't possible... You feel your faith begin to fade...")
if stat > 30:
k = input ("But something is causing this, isn't it? It must have a plan in the end. You close your eyes and hope for the best. ")
p = Transition()
("A voice: 'It's time to wake up now.' ")
t = 1
else:
k = input ("Yes, the spirits have given up on you for sure. You give in to the hopelessness. ")
break
if t == 0:
p = Transition()
print ("You have disappointed me. ")
while r == 0:
print (" ")
print ("The Void:")
print ("There is nothing here.")
print ("Directions: The only one left. ")
print (" ")
k = input (": ")
if k == "u":
break
amplaying = False
if t == 1:
print (" ")
print ("It seems as though you are in fact worthy. Before you wake, know that you have not disappointed me. ")
print ("You feel the padded chair beneath your feet, the keys under your fingers...")
print ("And now, it comes. ")
k = input ("Hit any key to wake up. ")
amplaying = False
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
print (" ")
| mit |
fillycheezstake/MissionPlanner | Lib/site-packages/scipy/special/basic.py | 55 | 26575 |
#
# Author: Travis Oliphant, 2002
#
from numpy import pi, asarray, floor, isscalar, iscomplex, real, imag, sqrt, \
where, mgrid, cos, sin, exp, place, seterr, issubdtype, extract, \
complexfloating, less, vectorize, inexact, nan, zeros, sometrue
from _cephes import ellipk, mathieu_a, mathieu_b, iv, jv, gamma, psi, zeta, \
hankel1, hankel2, yv, kv, gammaln, errprint, ndtri
import types
import specfun
import orthogonal
def sinc(x):
"""Returns sin(pi*x)/(pi*x) at all points of array x.
"""
w = pi * asarray(x)
# w might contain 0, and so temporarily turn off warnings
# while calculating sin(w)/w.
old_settings = seterr(all='ignore')
s = sin(w) / w
seterr(**old_settings)
return where(x==0, 1.0, s)
def diric(x,n):
"""Returns the periodic sinc function also called the dirichlet function:
diric(x) = sin(x * n / 2) / (n * sin(x / 2))
where n is a positive integer.
"""
x,n = asarray(x), asarray(n)
n = asarray(n + (x-x))
x = asarray(x + (n-n))
if issubdtype(x.dtype, inexact):
ytype = x.dtype
else:
ytype = float
y = zeros(x.shape,ytype)
mask1 = (n <= 0) | (n <> floor(n))
place(y,mask1,nan)
z = asarray(x / 2.0 / pi)
mask2 = (1-mask1) & (z == floor(z))
zsub = extract(mask2,z)
nsub = extract(mask2,n)
place(y,mask2,pow(-1,zsub*(nsub-1)))
mask = (1-mask1) & (1-mask2)
xsub = extract(mask,x)
nsub = extract(mask,n)
place(y,mask,sin(nsub*xsub/2.0)/(nsub*sin(xsub/2.0)))
return y
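# Illustrative check: diric(0, n) == 1 for integer n > 0, via the mask2
# branch (x = 0 gives z = 0 = floor(z), and pow(-1, 0) is 1).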
def jnjnp_zeros(nt):
"""Compute nt (<=1200) zeros of the bessel functions Jn and Jn'
and arange them in order of their magnitudes.
Returns
-------
zo[l-1] : ndarray
Value of the lth zero of of Jn(x) and Jn'(x). Of length `nt`.
n[l-1] : ndarray
Order of the Jn(x) or Jn'(x) associated with lth zero. Of length `nt`.
m[l-1] : ndarray
Serial number of the zeros of Jn(x) or Jn'(x) associated
with lth zero. Of length `nt`.
t[l-1] : ndarray
0 if lth zero in zo is zero of Jn(x), 1 if it is a zero of Jn'(x). Of
length `nt`.
See Also
--------
jn_zeros, jnp_zeros : to get separated arrays of zeros.
"""
if not isscalar(nt) or (floor(nt)!=nt) or (nt>1200):
raise ValueError("Number must be integer <= 1200.")
nt = int(nt)
n,m,t,zo = specfun.jdzo(nt)
return zo[1:nt+1],n[:nt],m[:nt],t[:nt]
def jnyn_zeros(n,nt):
"""Compute nt zeros of the Bessel functions Jn(x), Jn'(x), Yn(x), and
Yn'(x), respectively. Returns 4 arrays of length nt.
See jn_zeros, jnp_zeros, yn_zeros, ynp_zeros to get separate arrays.
"""
if not (isscalar(nt) and isscalar(n)):
raise ValueError("Arguments must be scalars.")
if (floor(n)!=n) or (floor(nt)!=nt):
raise ValueError("Arguments must be integers.")
if (nt <=0):
raise ValueError("nt > 0")
return specfun.jyzo(abs(n),nt)
def jn_zeros(n,nt):
"""Compute nt zeros of the Bessel function Jn(x).
"""
return jnyn_zeros(n,nt)[0]
def jnp_zeros(n,nt):
"""Compute nt zeros of the Bessel function Jn'(x).
"""
return jnyn_zeros(n,nt)[1]
def yn_zeros(n,nt):
"""Compute nt zeros of the Bessel function Yn(x).
"""
return jnyn_zeros(n,nt)[2]
def ynp_zeros(n,nt):
"""Compute nt zeros of the Bessel function Yn'(x).
"""
return jnyn_zeros(n,nt)[3]
def y0_zeros(nt,complex=0):
"""Returns nt (complex or real) zeros of Y0(z), z0, and the value
of Y0'(z0) = -Y1(z0) at each zero.
"""
if not isscalar(nt) or (floor(nt)!=nt) or (nt <=0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 0
kc = (complex != 1)
return specfun.cyzo(nt,kf,kc)
def y1_zeros(nt,complex=0):
"""Returns nt (complex or real) zeros of Y1(z), z1, and the value
of Y1'(z1) = Y0(z1) at each zero.
"""
if not isscalar(nt) or (floor(nt)!=nt) or (nt <=0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 1
kc = (complex != 1)
return specfun.cyzo(nt,kf,kc)
def y1p_zeros(nt,complex=0):
"""Returns nt (complex or real) zeros of Y1'(z), z1', and the value
of Y1(z1') at each zero.
"""
if not isscalar(nt) or (floor(nt)!=nt) or (nt <=0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 2
kc = (complex != 1)
return specfun.cyzo(nt,kf,kc)
def bessel_diff_formula(v, z, n, L, phase):
# from AMS55.
# L(v,z) = J(v,z), Y(v,z), H1(v,z), H2(v,z), phase = -1
# L(v,z) = I(v,z) or exp(v*pi*i)K(v,z), phase = 1
# For K, you can pull out the exp((v-k)*pi*i) into the caller
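# Explicitly, for phase = -1 this computes
#   d^n/dz^n L_v(z) = 2**(-n) * sum_{k=0..n} (-1)**k * C(n, k) * L_{v-n+2k}(z);
# the loop below accumulates p = phase**k * C(n, k) incrementally.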
p = 1.0
s = L(v-n, z)
for i in xrange(1, n+1):
p = phase * (p * (n-i+1)) / i # = choose(k, i)
s += p*L(v-n + i*2, z)
return s / (2.**n)
def jvp(v,z,n=1):
"""Return the nth derivative of Jv(z) with respect to z.
"""
if not isinstance(n,types.IntType) or (n<0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return jv(v,z)
else:
return bessel_diff_formula(v, z, n, jv, -1)
# return (jvp(v-1,z,n-1) - jvp(v+1,z,n-1))/2.0
def yvp(v,z,n=1):
"""Return the nth derivative of Yv(z) with respect to z.
"""
if not isinstance(n,types.IntType) or (n<0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return yv(v,z)
else:
return bessel_diff_formula(v, z, n, yv, -1)
# return (yvp(v-1,z,n-1) - yvp(v+1,z,n-1))/2.0
def kvp(v,z,n=1):
"""Return the nth derivative of Kv(z) with respect to z.
"""
if not isinstance(n,types.IntType) or (n<0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return kv(v,z)
else:
return (-1)**n * bessel_diff_formula(v, z, n, kv, 1)
def ivp(v,z,n=1):
"""Return the nth derivative of Iv(z) with respect to z.
"""
if not isinstance(n,types.IntType) or (n<0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return iv(v,z)
else:
return bessel_diff_formula(v, z, n, iv, 1)
def h1vp(v,z,n=1):
"""Return the nth derivative of H1v(z) with respect to z.
"""
if not isinstance(n,types.IntType) or (n<0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return hankel1(v,z)
else:
return bessel_diff_formula(v, z, n, hankel1, -1)
# return (h1vp(v-1,z,n-1) - h1vp(v+1,z,n-1))/2.0
def h2vp(v,z,n=1):
"""Return the nth derivative of H2v(z) with respect to z.
"""
if not isinstance(n,types.IntType) or (n<0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return hankel2(v,z)
else:
return bessel_diff_formula(v, z, n, hankel2, -1)
# return (h2vp(v-1,z,n-1) - h2vp(v+1,z,n-1))/2.0
def sph_jn(n,z):
"""Compute the spherical Bessel function jn(z) and its derivative for
all orders up to and including n.
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n!= floor(n)) or (n<0):
raise ValueError("n must be a non-negative integer.")
if (n < 1): n1 = 1
else: n1 = n
if iscomplex(z):
nm,jn,jnp,yn,ynp = specfun.csphjy(n1,z)
else:
nm,jn,jnp = specfun.sphj(n1,z)
return jn[:(n+1)], jnp[:(n+1)]
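# Quick check (illustrative): the order-0 spherical Bessel function has the
# closed form j0(x) = sin(x)/x, so
#   >>> jn, jnp = sph_jn(0, 2.0)
#   >>> abs(jn[0] - sin(2.0)/2.0) < 1e-12
#   True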
def sph_yn(n,z):
"""Compute the spherical Bessel function yn(z) and its derivative for
all orders up to and including n.
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n!= floor(n)) or (n<0):
raise ValueError("n must be a non-negative integer.")
if (n < 1): n1 = 1
else: n1 = n
if iscomplex(z) or less(z,0):
nm,jn,jnp,yn,ynp = specfun.csphjy(n1,z)
else:
nm,yn,ynp = specfun.sphy(n1,z)
return yn[:(n+1)], ynp[:(n+1)]
def sph_jnyn(n,z):
"""Compute the spherical Bessel functions, jn(z) and yn(z) and their
derivatives for all orders up to and including n.
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n!= floor(n)) or (n<0):
raise ValueError("n must be a non-negative integer.")
if (n < 1): n1 = 1
else: n1 = n
if iscomplex(z) or less(z,0):
nm,jn,jnp,yn,ynp = specfun.csphjy(n1,z)
else:
nm,yn,ynp = specfun.sphy(n1,z)
nm,jn,jnp = specfun.sphj(n1,z)
return jn[:(n+1)],jnp[:(n+1)],yn[:(n+1)],ynp[:(n+1)]
def sph_in(n,z):
"""Compute the spherical Bessel function in(z) and its derivative for
all orders up to and including n.
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n!= floor(n)) or (n<0):
raise ValueError("n must be a non-negative integer.")
if (n < 1): n1 = 1
else: n1 = n
if iscomplex(z):
nm,In,Inp,kn,knp = specfun.csphik(n1,z)
else:
nm,In,Inp = specfun.sphi(n1,z)
return In[:(n+1)], Inp[:(n+1)]
def sph_kn(n,z):
"""Compute the spherical Bessel function kn(z) and its derivative for
all orders up to and including n.
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n!= floor(n)) or (n<0):
raise ValueError("n must be a non-negative integer.")
if (n < 1): n1 = 1
else: n1 = n
if iscomplex(z) or less(z,0):
nm,In,Inp,kn,knp = specfun.csphik(n1,z)
else:
nm,kn,knp = specfun.sphk(n1,z)
return kn[:(n+1)], knp[:(n+1)]
def sph_inkn(n,z):
"""Compute the spherical Bessel functions, in(z) and kn(z) and their
derivatives for all orders up to and including n.
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n!= floor(n)) or (n<0):
raise ValueError("n must be a non-negative integer.")
if iscomplex(z) or less(z,0):
nm,In,Inp,kn,knp = specfun.csphik(n,z)
else:
nm,In,Inp = specfun.sphi(n,z)
nm,kn,knp = specfun.sphk(n,z)
return In,Inp,kn,knp
def riccati_jn(n,x):
"""Compute the Ricatti-Bessel function of the first kind and its
derivative for all orders up to and including n.
"""
if not (isscalar(n) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (n!= floor(n)) or (n<0):
raise ValueError("n must be a non-negative integer.")
if (n == 0): n1 = 1
else: n1 = n
nm,jn,jnp = specfun.rctj(n1,x)
return jn[:(n+1)],jnp[:(n+1)]
def riccati_yn(n,x):
"""Compute the Ricatti-Bessel function of the second kind and its
derivative for all orders up to and including n.
"""
if not (isscalar(n) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (n!= floor(n)) or (n<0):
raise ValueError("n must be a non-negative integer.")
if (n == 0): n1 = 1
else: n1 = n
nm,jn,jnp = specfun.rcty(n1,x)
return jn[:(n+1)],jnp[:(n+1)]
def _sph_harmonic(m,n,theta,phi):
"""Compute spherical harmonics.
    This is a ufunc and may take scalar or array arguments like any
    other ufunc. The inputs will be broadcast against each other.
Parameters
----------
m : int
|m| <= n; the order of the harmonic.
n : int
where `n` >= 0; the degree of the harmonic. This is often called
``l`` (lower case L) in descriptions of spherical harmonics.
theta : float
[0, 2*pi]; the azimuthal (longitudinal) coordinate.
phi : float
[0, pi]; the polar (colatitudinal) coordinate.
Returns
-------
y_mn : complex float
The harmonic $Y^m_n$ sampled at `theta` and `phi`
Notes
-----
There are different conventions for the meaning of input arguments
`theta` and `phi`. We take `theta` to be the azimuthal angle and
`phi` to be the polar angle. It is common to see the opposite
convention - that is `theta` as the polar angle and `phi` as the
azimuthal angle.
"""
x = cos(phi)
m,n = int(m), int(n)
Pmn,Pmn_deriv = lpmn(m,n,x)
# Legendre call generates all orders up to m and degrees up to n
val = Pmn[-1, -1]
val *= sqrt((2*n+1)/4.0/pi)
val *= exp(0.5*(gammaln(n-m+1)-gammaln(n+m+1)))
val *= exp(1j*m*theta)
return val
sph_harm = vectorize(_sph_harmonic,'D')
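# Illustrative usage: Y_0^0 is the constant 1/(2*sqrt(pi)), independent of
# the angles (value shown approximately):
#   >>> abs(sph_harm(0, 0, 0.0, 0.0))
#   0.28209479...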
def erfinv(y):
return ndtri((y+1)/2.0)/sqrt(2)
def erfcinv(y):
return ndtri((2-y)/2.0)/sqrt(2)
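# Round-trip check (illustrative): erfinv inverts erf on (-1, 1), e.g.
#   >>> abs(erf(erfinv(0.5)) - 0.5) < 1e-12
#   True
# Both inverses are expressed through ndtri, the inverse standard normal
# CDF, via the identity erf(x) = 2*ndtr(x*sqrt(2)) - 1.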
def erf_zeros(nt):
"""Compute nt complex zeros of the error function erf(z).
"""
if (floor(nt)!=nt) or (nt<=0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.cerzo(nt)
def fresnelc_zeros(nt):
"""Compute nt complex zeros of the cosine fresnel integral C(z).
"""
if (floor(nt)!=nt) or (nt<=0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(1,nt)
def fresnels_zeros(nt):
"""Compute nt complex zeros of the sine fresnel integral S(z).
"""
if (floor(nt)!=nt) or (nt<=0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(2,nt)
def fresnel_zeros(nt):
"""Compute nt complex zeros of the sine and cosine fresnel integrals
S(z) and C(z).
"""
if (floor(nt)!=nt) or (nt<=0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(2,nt), specfun.fcszo(1,nt)
def hyp0f1(v,z):
"""Confluent hypergeometric limit function 0F1.
    Limit as q->infinity of 1F1(q; v; z/q)
"""
z = asarray(z)
if issubdtype(z.dtype, complexfloating):
arg = 2*sqrt(abs(z))
num = where(z>=0, iv(v-1,arg), jv(v-1,arg))
den = abs(z)**((v-1.0)/2)
else:
num = iv(v-1,2*sqrt(z))
den = z**((v-1.0)/2.0)
num *= gamma(v)
return where(z==0,1.0,num/ asarray(den))
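# Consistency check (illustrative): 0F1 relates to the Bessel function by
# jv(v, x) = (x/2)**v / gamma(v+1) * hyp0f1(v+1, -x**2/4), so for example
#   >>> x, v = 3.0, 0.5
#   >>> abs(jv(v, x) - (x/2)**v/gamma(v+1)*hyp0f1(v+1, -x**2/4)) < 1e-10
#   True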
def assoc_laguerre(x,n,k=0.0):
return orthogonal.eval_genlaguerre(n, k, x)
digamma = psi
def polygamma(n, x):
"""Polygamma function which is the nth derivative of the digamma (psi)
function."""
n, x = asarray(n), asarray(x)
cond = (n==0)
fac2 = (-1.0)**(n+1) * gamma(n+1.0) * zeta(n+1,x)
if sometrue(cond,axis=0):
return where(cond, psi(x), fac2)
return fac2
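# Spot check (illustrative): for n >= 1 the closed form above gives, e.g.,
# polygamma(1, 1) = zeta(2) = pi**2/6, while n == 0 falls back to psi:
#   >>> abs(polygamma(1, 1) - pi**2/6) < 1e-12
#   True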
def mathieu_even_coef(m,q):
"""Compute expansion coefficients for even mathieu functions and
modified mathieu functions.
"""
if not (isscalar(m) and isscalar(q)):
raise ValueError("m and q must be scalars.")
if (q < 0):
raise ValueError("q >=0")
if (m != floor(m)) or (m<0):
raise ValueError("m must be an integer >=0.")
if (q <= 1):
qm = 7.5+56.1*sqrt(q)-134.7*q+90.7*sqrt(q)*q
else:
qm=17.0+3.1*sqrt(q)-.126*q+.0037*sqrt(q)*q
km = int(qm+0.5*m)
if km > 251:
print "Warning, too many predicted coefficients."
kd = 1
m = int(floor(m))
if m % 2:
kd = 2
a = mathieu_a(m,q)
fc = specfun.fcoef(kd,m,q,a)
return fc[:km]
def mathieu_odd_coef(m,q):
"""Compute expansion coefficients for even mathieu functions and
modified mathieu functions.
"""
if not (isscalar(m) and isscalar(q)):
raise ValueError("m and q must be scalars.")
if (q < 0):
raise ValueError("q >=0")
if (m != floor(m)) or (m<=0):
raise ValueError("m must be an integer > 0")
if (q <= 1):
qm = 7.5+56.1*sqrt(q)-134.7*q+90.7*sqrt(q)*q
else:
qm=17.0+3.1*sqrt(q)-.126*q+.0037*sqrt(q)*q
km = int(qm+0.5*m)
if km > 251:
print "Warning, too many predicted coefficients."
kd = 4
m = int(floor(m))
if m % 2:
kd = 3
b = mathieu_b(m,q)
fc = specfun.fcoef(kd,m,q,b)
return fc[:km]
def lpmn(m,n,z):
"""Associated Legendre functions of the first kind, Pmn(z) and its
derivative, Pmn'(z) of order m and degree n. Returns two
arrays of size (m+1,n+1) containing Pmn(z) and Pmn'(z) for
all orders from 0..m and degrees from 0..n.
z can be complex.
Parameters
----------
m : int
|m| <= n; the order of the Legendre function
n : int
where `n` >= 0; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : float or complex
input value
Returns
-------
Pmn_z : (m+1, n+1) array
Values for all orders 0..m and degrees 0..n
Pmn_d_z : (m+1, n+1) array
Derivatives for all orders 0..m and degrees 0..n
"""
if not isscalar(m) or (abs(m)>n):
raise ValueError("m must be <= n.")
if not isscalar(n) or (n<0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
if (m < 0):
mp = -m
mf,nf = mgrid[0:mp+1,0:n+1]
sv = errprint(0)
fixarr = where(mf>nf,0.0,(-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
sv = errprint(sv)
else:
mp = m
if iscomplex(z):
p,pd = specfun.clpmn(mp,n,real(z),imag(z))
else:
p,pd = specfun.lpmn(mp,n,z)
if (m < 0):
p = p * fixarr
pd = pd * fixarr
return p,pd
def lqmn(m,n,z):
"""Associated Legendre functions of the second kind, Qmn(z) and its
derivative, Qmn'(z) of order m and degree n. Returns two
arrays of size (m+1,n+1) containing Qmn(z) and Qmn'(z) for
all orders from 0..m and degrees from 0..n.
z can be complex.
"""
if not isscalar(m) or (m<0):
raise ValueError("m must be a non-negative integer.")
if not isscalar(n) or (n<0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
m = int(m)
n = int(n)
    # Ensure neither m nor n is 0
mm = max(1,m)
nn = max(1,n)
if iscomplex(z):
q,qd = specfun.clqmn(mm,nn,z)
else:
q,qd = specfun.lqmn(mm,nn,z)
return q[:(m+1),:(n+1)],qd[:(m+1),:(n+1)]
def bernoulli(n):
"""Return an array of the Bernoulli numbers B0..Bn
"""
if not isscalar(n) or (n<0):
raise ValueError("n must be a non-negative integer.")
n = int(n)
if (n < 2): n1 = 2
else: n1 = n
return specfun.bernob(int(n1))[:(n+1)]
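# Illustrative values (this routine uses the B1 = -1/2 sign convention;
# numbers shown approximately):
#   >>> bernoulli(4)
#   array([ 1.      , -0.5     ,  0.166667,  0.      , -0.033333])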
def euler(n):
"""Return an array of the Euler numbers E0..En (inclusive)
"""
if not isscalar(n) or (n<0):
raise ValueError("n must be a non-negative integer.")
n = int(n)
if (n < 2): n1 = 2
else: n1 = n
return specfun.eulerb(n1)[:(n+1)]
def lpn(n,z):
"""Compute sequence of Legendre functions of the first kind (polynomials),
Pn(z) and derivatives for all degrees from 0 to n (inclusive).
See also special.legendre for polynomial class.
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n!= floor(n)) or (n<0):
raise ValueError("n must be a non-negative integer.")
if (n < 1): n1 = 1
else: n1 = n
if iscomplex(z):
pn,pd = specfun.clpn(n1,z)
else:
pn,pd = specfun.lpn(n1,z)
return pn[:(n+1)],pd[:(n+1)]
## lpni
def lqn(n,z):
"""Compute sequence of Legendre functions of the second kind,
Qn(z) and derivatives for all degrees from 0 to n (inclusive).
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n!= floor(n)) or (n<0):
raise ValueError("n must be a non-negative integer.")
if (n < 1): n1 = 1
else: n1 = n
if iscomplex(z):
qn,qd = specfun.clqn(n1,z)
else:
qn,qd = specfun.lqnb(n1,z)
return qn[:(n+1)],qd[:(n+1)]
def ai_zeros(nt):
"""Compute the zeros of Airy Functions Ai(x) and Ai'(x), a and a'
respectively, and the associated values of Ai(a') and Ai'(a).
Returns
-------
a[l-1] -- the lth zero of Ai(x)
ap[l-1] -- the lth zero of Ai'(x)
ai[l-1] -- Ai(ap[l-1])
aip[l-1] -- Ai'(a[l-1])
"""
kf = 1
if not isscalar(nt) or (floor(nt)!=nt) or (nt<=0):
raise ValueError("nt must be a positive integer scalar.")
return specfun.airyzo(nt,kf)
def bi_zeros(nt):
"""Compute the zeros of Airy Functions Bi(x) and Bi'(x), b and b'
    respectively, and the associated values of Bi(b') and Bi'(b).
Returns
-------
b[l-1] -- the lth zero of Bi(x)
bp[l-1] -- the lth zero of Bi'(x)
bi[l-1] -- Bi(bp[l-1])
bip[l-1] -- Bi'(b[l-1])
"""
kf = 2
if not isscalar(nt) or (floor(nt)!=nt) or (nt<=0):
raise ValueError("nt must be a positive integer scalar.")
return specfun.airyzo(nt,kf)
def lmbda(v,x):
"""Compute sequence of lambda functions with arbitrary order v
and their derivatives. Lv0(x)..Lv(x) are computed with v0=v-int(v).
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (v<0):
raise ValueError("argument must be > 0.")
n = int(v)
v0 = v - n
if (n < 1): n1 = 1
else: n1 = n
v1 = n1 + v0
if (v!=floor(v)):
vm, vl, dl = specfun.lamv(v1,x)
else:
vm, vl, dl = specfun.lamn(v1,x)
return vl[:(n+1)], dl[:(n+1)]
def pbdv_seq(v,x):
"""Compute sequence of parabolic cylinder functions Dv(x) and
their derivatives for Dv0(x)..Dv(x) with v0=v-int(v).
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
n = int(v)
v0 = v-n
if (n < 1): n1=1
else: n1 = n
v1 = n1 + v0
dv,dp,pdf,pdd = specfun.pbdv(v1,x)
return dv[:n1+1],dp[:n1+1]
def pbvv_seq(v,x):
"""Compute sequence of parabolic cylinder functions Dv(x) and
their derivatives for Dv0(x)..Dv(x) with v0=v-int(v).
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
n = int(v)
v0 = v-n
if (n <= 1): n1=1
else: n1 = n
v1 = n1 + v0
dv,dp,pdf,pdd = specfun.pbvv(v1,x)
return dv[:n1+1],dp[:n1+1]
def pbdn_seq(n,z):
"""Compute sequence of parabolic cylinder functions Dn(z) and
their derivatives for D0(z)..Dn(z).
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (floor(n)!=n):
raise ValueError("n must be an integer.")
if (abs(n) <= 1):
n1 = 1
else:
n1 = n
cpb,cpd = specfun.cpbdn(n1,z)
return cpb[:n1+1],cpd[:n1+1]
def ber_zeros(nt):
"""Compute nt zeros of the kelvin function ber x
"""
if not isscalar(nt) or (floor(nt)!=nt) or (nt<=0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt,1)
def bei_zeros(nt):
"""Compute nt zeros of the kelvin function bei x
"""
if not isscalar(nt) or (floor(nt)!=nt) or (nt<=0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt,2)
def ker_zeros(nt):
"""Compute nt zeros of the kelvin function ker x
"""
if not isscalar(nt) or (floor(nt)!=nt) or (nt<=0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt,3)
def kei_zeros(nt):
"""Compute nt zeros of the kelvin function kei x
"""
if not isscalar(nt) or (floor(nt)!=nt) or (nt<=0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt,4)
def berp_zeros(nt):
"""Compute nt zeros of the kelvin function ber' x
"""
if not isscalar(nt) or (floor(nt)!=nt) or (nt<=0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt,5)
def beip_zeros(nt):
"""Compute nt zeros of the kelvin function bei' x
"""
if not isscalar(nt) or (floor(nt)!=nt) or (nt<=0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt,6)
def kerp_zeros(nt):
"""Compute nt zeros of the kelvin function ker' x
"""
if not isscalar(nt) or (floor(nt)!=nt) or (nt<=0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt,7)
def keip_zeros(nt):
"""Compute nt zeros of the kelvin function kei' x
"""
if not isscalar(nt) or (floor(nt)!=nt) or (nt<=0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt,8)
def kelvin_zeros(nt):
"""Compute nt zeros of all the kelvin functions returned in a
length 8 tuple of arrays of length nt.
    The tuple contains the arrays of zeros of
(ber, bei, ker, kei, ber', bei', ker', kei')
"""
if not isscalar(nt) or (floor(nt)!=nt) or (nt<=0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt,1), \
specfun.klvnzo(nt,2), \
specfun.klvnzo(nt,3), \
specfun.klvnzo(nt,4), \
specfun.klvnzo(nt,5), \
specfun.klvnzo(nt,6), \
specfun.klvnzo(nt,7), \
specfun.klvnzo(nt,8)
def pro_cv_seq(m,n,c):
"""Compute a sequence of characteristic values for the prolate
spheroidal wave functions for mode m and n'=m..n and spheroidal
parameter c.
"""
if not (isscalar(m) and isscalar(n) and isscalar(c)):
raise ValueError("Arguments must be scalars.")
if (n!=floor(n)) or (m!=floor(m)):
raise ValueError("Modes must be integers.")
if (n-m > 199):
raise ValueError("Difference between n and m is too large.")
maxL = n-m+1
return specfun.segv(m,n,c,1)[1][:maxL]
def obl_cv_seq(m,n,c):
"""Compute a sequence of characteristic values for the oblate
spheroidal wave functions for mode m and n'=m..n and spheroidal
parameter c.
"""
if not (isscalar(m) and isscalar(n) and isscalar(c)):
raise ValueError("Arguments must be scalars.")
if (n!=floor(n)) or (m!=floor(m)):
raise ValueError("Modes must be integers.")
if (n-m > 199):
raise ValueError("Difference between n and m is too large.")
maxL = n-m+1
return specfun.segv(m,n,c,-1)[1][:maxL]
def agm(a,b):
"""Arithmetic, Geometric Mean
Start with a_0=a and b_0=b and iteratively compute
a_{n+1} = (a_n+b_n)/2
b_{n+1} = sqrt(a_n*b_n)
until a_n=b_n. The result is agm(a,b)
agm(a,b)=agm(b,a)
agm(a,a) = a
min(a,b) < agm(a,b) < max(a,b)
"""
res1 = a+b+0.0
res2 = a-b
k = res2 / res1
return res1*pi/4/ellipk(k**2)
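# Example (illustrative; the classic textbook value, shown approximately):
#   >>> agm(24, 6)
#   13.4581714817...
# and by symmetry agm(6, 24) returns the same value.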
|
gpl-3.0
|
tod31/pyload
|
module/plugins/hooks/ExtractArchive.py
|
2
|
20542
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import sys
# monkey patch bug in python 2.6 and lower
# http://bugs.python.org/issue6122 , http://bugs.python.org/issue1236 , http://bugs.python.org/issue1731717
if sys.version_info < (2, 7) and os.name != "nt":
import errno
import subprocess
def _eintr_retry_call(func, *args):
while True:
try:
return func(*args)
except OSError, e:
if e.errno == errno.EINTR:
continue
raise
    #: Unused timeout option for older python version
def wait(self, timeout=0):
"""
Wait for child process to terminate. Returns returncode
attribute.
"""
if self.returncode is None:
try:
pid, sts = _eintr_retry_call(os.waitpid, self.pid, 0)
except OSError, e:
if e.errno != errno.ECHILD:
raise
                #: This happens if SIGCLD is set to be ignored or waiting
                #: for child processes has otherwise been disabled for our
                #: process. This child is dead, we can't get the status.
sts = 0
self._handle_exitstatus(sts)
return self.returncode
subprocess.Popen.wait = wait
try:
import send2trash
except ImportError:
pass
from module.plugins.internal.Addon import Addon
from module.plugins.internal.Extractor import ArchiveError, CRCError, PasswordError
from module.plugins.internal.misc import encode, exists, Expose, fsjoin, threaded, uniqify
class ArchiveQueue(object):
def __init__(self, plugin, storage):
self.plugin = plugin
self.storage = storage
def get(self):
return self.plugin.db.retrieve(self.storage, default=[])
def set(self, value):
return self.plugin.db.store(self.storage, value)
def delete(self):
return self.plugin.db.delete(self.storage)
def add(self, item):
queue = self.get()
if item not in queue:
return self.set(queue + [item])
else:
return True
def remove(self, item):
queue = self.get()
try:
queue.remove(item)
except ValueError:
pass
        if not queue:
            return self.delete()
return self.set(queue)
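# Usage sketch (hypothetical, for illustration only; the storage key is an
# arbitrary string under which the plugin's database persists the list):
#   queue = ArchiveQueue(plugin, "Queue")
#   queue.add(pid)      # no-op if the package id is already queued
#   queue.remove(pid)   # silently ignores ids that were never queued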
class ExtractArchive(Addon):
__name__ = "ExtractArchive"
__type__ = "hook"
__version__ = "1.57"
__status__ = "broken"
__config__ = [("activated" , "bool" , "Activated" , True ),
("fullpath" , "bool" , "Extract with full paths" , True ),
("overwrite" , "bool" , "Overwrite files" , False ),
("keepbroken" , "bool" , "Try to extract broken archives" , False ),
("repair" , "bool" , "Repair broken archives (RAR required)" , False ),
("usepasswordfile", "bool" , "Use password file" , True ),
("passwordfile" , "file" , "Password file" , "passwords.txt" ),
("delete" , "bool" , "Delete archive after extraction" , True ),
("deltotrash" , "bool" , "Move to trash instead delete" , True ),
("subfolder" , "bool" , "Create subfolder for each package" , False ),
("destination" , "folder", "Extract files to folder" , "" ),
("extensions" , "str" , "Extract archives ending with extension", "7z,bz2,bzip2,gz,gzip,lha,lzh,lzma,rar,tar,taz,tbz,tbz2,tgz,xar,xz,z,zip"),
("excludefiles" , "str" , "Don't extract the following files" , "*.nfo,*.DS_Store,index.dat,thumb.db" ),
("recursive" , "bool" , "Extract archives in archives" , True ),
("waitall" , "bool" , "Run after all downloads was processed" , False ),
("priority" , "int" , "Process priority" , 0 )]
__description__ = """Extract different kind of archives"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "vuolter@gmail.com"),
("Immenz" , "immenz@gmx.net" )]
NAME_REPLACEMENTS = [(r'\.part\d+\.rar$', ".part.rar")]
def init(self):
self.event_map = {'allDownloadsProcessed': "all_downloads_processed",
'packageDeleted' : "package_deleted" }
self.queue = ArchiveQueue(self, "Queue")
self.failed = ArchiveQueue(self, "Failed")
self.extracting = False
self.last_package = False
self.extractors = []
self.passwords = []
self.repair = False
def activate(self):
for p in ("UnRar", "SevenZip", "UnZip", "UnTar"):
try:
module = self.pyload.pluginManager.loadModule("internal", p)
klass = getattr(module, p)
if klass.find():
self.extractors.append(klass)
if klass.REPAIR:
self.repair = self.config.get('repair')
except OSError, e:
if e.errno == 2:
self.log_warning(_("No %s installed") % p)
else:
self.log_warning(_("Could not activate: %s") % p, e)
except Exception, e:
self.log_warning(_("Could not activate: %s") % p, e)
if self.extractors:
self.log_debug(*["Found %s %s" % (Extractor.__name__, Extractor.VERSION) for Extractor in self.extractors])
self.extract_queued() #: Resume unfinished extractions
else:
self.log_info(_("No Extract plugins activated"))
@threaded
def extract_queued(self, thread):
if self.extracting: #@NOTE: doing the check here for safety (called by coreReady)
return
self.extracting = True
packages = self.queue.get()
while packages:
if self.last_package: #: Called from allDownloadsProcessed
self.last_package = False
if self.extract(packages, thread): #@NOTE: check only if all gone fine, no failed reporting for now
self.manager.dispatchEvent("all_archives_extracted")
self.manager.dispatchEvent("all_archives_processed")
            else:
                self.extract(packages, thread) #@NOTE: no failed reporting for now
packages = self.queue.get() #: Check for packages added during extraction
self.extracting = False
#: Deprecated method, use `extract_package` instead
@Expose
def extractPackage(self, *args, **kwargs):
"""
See `extract_package`
"""
return self.extract_package(*args, **kwargs)
@Expose
def extract_package(self, *ids):
"""
Extract packages with given id
"""
for id in ids:
self.queue.add(id)
if not self.config.get('waitall') and not self.extracting:
self.extract_queued()
def package_deleted(self, pid):
self.queue.remove(pid)
def package_finished(self, pypack):
self.queue.add(pypack.id)
if not self.config.get('waitall') and not self.extracting:
self.extract_queued()
def all_downloads_processed(self):
self.last_package = True
if self.config.get('waitall') and not self.extracting:
self.extract_queued()
@Expose
def extract(self, ids, thread=None): #@TODO: Use pypack, not pid to improve method usability
if not ids:
return False
processed = []
extracted = []
failed = []
toList = lambda string: string.replace(' ', '').replace(',', '|').replace(';', '|').split('|')
destination = self.config.get('destination')
subfolder = self.config.get('subfolder')
fullpath = self.config.get('fullpath')
overwrite = self.config.get('overwrite')
priority = self.config.get('priority')
recursive = self.config.get('recursive')
keepbroken = self.config.get('keepbroken')
extensions = [x.lstrip('.').lower() for x in toList(self.config.get('extensions'))]
excludefiles = toList(self.config.get('excludefiles'))
if extensions:
self.log_debug("Use for extensions: %s" % "|.".join(extensions))
#: Reload from txt file
self.reload_passwords()
dl_folder = self.pyload.config.get("general", "download_folder")
#: Iterate packages -> extractors -> targets
for pid in ids:
pypack = self.pyload.files.getPackage(pid)
if not pypack:
self.queue.remove(pid)
continue
self.log_info(_("Check package: %s") % pypack.name)
#: Determine output folder
out = fsjoin(dl_folder, pypack.folder, destination, "") #: Force trailing slash
if subfolder:
out = fsjoin(out, pypack.folder)
if not exists(out):
os.makedirs(out)
matched = False
success = True
files_ids = dict((fdata['name'], ((fsjoin(dl_folder, pypack.folder, fdata['name'])), fid, out)) for fid, fdata \
in sorted(pypack.getChildren().values(), key=lambda k: k['name'])).items() #: Remove duplicates
#: Check as long there are unseen files
while files_ids:
new_files_ids = []
if extensions:
files_ids = [(fname, fid, fout) for fname, fid, fout in files_ids \
if filter(lambda ext: fname.lower().endswith(ext), extensions)]
for Extractor in self.extractors:
targets = Extractor.get_targets(files_ids)
if targets:
self.log_debug("Targets for %s: %s" % (Extractor.__name__, targets))
matched = True
for fname, fid, fout in targets:
name = os.path.basename(fname)
if not exists(fname):
self.log_debug(name, "File not found")
continue
self.log_info(name, _("Extract to: %s") % fout)
try:
pyfile = self.pyload.files.getFile(fid)
archive = Extractor(self,
fname,
fout,
fullpath,
overwrite,
excludefiles,
priority,
keepbroken,
fid)
thread.addActive(pyfile)
archive.init()
try:
new_files = self._extract(pyfile, archive, pypack.password)
finally:
pyfile.setProgress(100)
thread.finishFile(pyfile)
except Exception, e:
self.log_error(name, e)
success = False
continue
#: Remove processed file and related multiparts from list
files_ids = [(fname, fid, fout) for fname, fid, fout in files_ids \
if fname not in archive.items()]
self.log_debug("Extracted files: %s" % new_files)
for file in new_files:
self.set_permissions(file)
for filename in new_files:
file = encode(fsjoin(os.path.dirname(archive.filename), filename))
if not exists(file):
self.log_debug("New file %s does not exists" % filename)
continue
if recursive and os.path.isfile(file):
new_files_ids.append((filename, fid, os.path.dirname(filename))) #: Append as new target
self.manager.dispatchEvent("archive_extracted", pyfile, archive)
files_ids = new_files_ids #: Also check extracted files
if matched:
if success:
extracted.append(pid)
self.manager.dispatchEvent("package_extracted", pypack)
else:
failed.append(pid)
self.manager.dispatchEvent("package_extract_failed", pypack)
self.failed.add(pid)
else:
self.log_info(_("No files found to extract"))
            if (not matched or not success) and subfolder:
try:
os.rmdir(out)
except OSError:
pass
self.queue.remove(pid)
        return not failed
def _extract(self, pyfile, archive, password):
name = os.path.basename(archive.filename)
pyfile.setStatus("processing")
encrypted = False
try:
self.log_debug("Password: %s" % (password or "None provided"))
passwords = uniqify([password] + self.get_passwords(False)) if self.config.get('usepasswordfile') else [password]
for pw in passwords:
try:
pyfile.setCustomStatus(_("archive testing"))
pyfile.setProgress(0)
archive.verify(pw)
pyfile.setProgress(100)
except PasswordError:
if not encrypted:
self.log_info(name, _("Password protected"))
encrypted = True
except CRCError, e:
self.log_debug(name, e)
self.log_info(name, _("CRC Error"))
if not self.repair:
raise CRCError("Archive damaged")
else:
self.log_warning(name, _("Repairing..."))
pyfile.setCustomStatus(_("archive repairing"))
pyfile.setProgress(0)
repaired = archive.repair()
pyfile.setProgress(100)
if not repaired and not self.config.get('keepbroken'):
raise CRCError("Archive damaged")
else:
self.add_password(pw)
break
except ArchiveError, e:
raise ArchiveError(e)
else:
self.add_password(pw)
break
pyfile.setCustomStatus(_("archive extracting"))
pyfile.setProgress(0)
if not encrypted or not self.config.get('usepasswordfile'):
self.log_debug("Extracting using password: %s" % (password or "None"))
archive.extract(password)
else:
for pw in filter(None, uniqify([password] + self.get_passwords(False))):
try:
self.log_debug("Extracting using password: %s" % pw)
archive.extract(pw)
self.add_password(pw)
break
except PasswordError:
self.log_debug("Password was wrong")
else:
raise PasswordError
pyfile.setProgress(100)
pyfile.setStatus("processing")
delfiles = archive.items()
self.log_debug("Would delete: " + ", ".join(delfiles))
if self.config.get('delete'):
self.log_info(_("Deleting %s files") % len(delfiles))
deltotrash = self.config.get('deltotrash')
for f in delfiles:
file = encode(f)
if not exists(file):
continue
if not deltotrash:
os.remove(file)
else:
try:
send2trash.send2trash(file)
except NameError:
self.log_warning(_("Unable to move %s to trash") % os.path.basename(f),
_("Send2Trash lib not found"))
except Exception, e:
self.log_warning(_("Unable to move %s to trash") % os.path.basename(f),
e.message)
else:
self.log_info(_("Moved %s to trash") % os.path.basename(f))
self.log_info(name, _("Extracting finished"))
extracted_files = archive.files or archive.list()
return extracted_files
except PasswordError:
self.log_error(name, _("Wrong password" if password else "No password found"))
except CRCError, e:
self.log_error(name, _("CRC mismatch"), e)
except ArchiveError, e:
self.log_error(name, _("Archive error"), e)
except Exception, e:
self.log_error(name, _("Unknown error"), e)
self.manager.dispatchEvent("archive_extract_failed", pyfile, archive)
raise Exception(_("Extract failed"))
#: Deprecated method, use `get_passwords` instead
@Expose
def getPasswords(self, *args, **kwargs):
"""
See `get_passwords`
"""
return self.get_passwords(*args, **kwargs)
@Expose
def get_passwords(self, reload=True):
"""
List of saved passwords
"""
if reload:
self.reload_passwords()
return self.passwords
def reload_passwords(self):
try:
passwords = []
file = encode(self.config.get('passwordfile'))
with open(file) as f:
for pw in f.read().splitlines():
passwords.append(pw)
except IOError, e:
self.log_error(e)
else:
self.passwords = passwords
#: Deprecated method, use `add_password` instead
@Expose
def addPassword(self, *args, **kwargs):
"""
See `add_password`
"""
return self.add_password(*args, **kwargs)
@Expose
def add_password(self, password):
"""
Adds a password to saved list
"""
try:
self.passwords = uniqify([password] + self.passwords)
file = encode(self.config.get('passwordfile'))
with open(file, "wb") as f:
for pw in self.passwords:
f.write(pw + '\n')
except IOError, e:
self.log_error(e)
|
gpl-3.0
|
digwanderlust/pants
|
src/python/pants/base/addressable.py
|
7
|
1849
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.base.address import BuildFileAddress
from pants.util.meta import AbstractClass
class AddressableCallProxy(object):
"""A registration proxy for objects to be captured and addressed from BUILD files."""
def __init__(self, addressable_type, build_file, registration_callback):
self._addressable_type = addressable_type
self._build_file = build_file
self._registration_callback = registration_callback
def __call__(self, *args, **kwargs):
addressable = self._addressable_type(*args, **kwargs)
addressable_name = addressable.addressable_name
if addressable_name:
address = BuildFileAddress(self._build_file, addressable_name)
self._registration_callback(address, addressable)
return addressable
def __repr__(self):
return ('AddressableCallProxy(addressable_type={target_type}, build_file={build_file})'
.format(target_type=self._addressable_type,
build_file=self._build_file))
class Addressable(AbstractClass):
"""An ABC for classes which would like instances to be named and exported from BUILD files."""
class AddressableInitError(Exception): pass
@property
def addressable_name(self):
"""This property is inspected by AddressableCallProxy to automatically name Addressables.
Generally, a subclass will inspect its captured arguments and return, for example, the
captured `name` parameter. A value of `None` (the default) causes AddressableCallProxy
to skip capturing and naming this instance.
"""
return None
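# Hypothetical sketch (illustration only, not part of pants): a minimal
# subclass that captures a `name` argument so AddressableCallProxy can
# register it under that name in the BUILD file's address space.
#
#   class NamedAddressable(Addressable):
#     def __init__(self, name=None):
#       self._name = name
#
#     @property
#     def addressable_name(self):
#       return self._name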
|
apache-2.0
|
aidan-/ansible-modules-extras
|
network/f5/bigip_node.py
|
32
|
16217
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2013, Matt Hite <mhite@hotmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: bigip_node
short_description: "Manages F5 BIG-IP LTM nodes"
description:
- "Manages F5 BIG-IP LTM nodes via iControl SOAP API"
version_added: "1.4"
author:
- Matt Hite (@mhite)
- Tim Rupp (@caphrim007)
notes:
- "Requires BIG-IP software version >= 11"
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- "Best run as a local_action in your playbook"
requirements:
- bigsuds
options:
state:
description:
- Pool member state
required: true
default: present
choices: ['present', 'absent']
aliases: []
session_state:
description:
- Set new session availability status for node
version_added: "1.9"
required: false
default: null
choices: ['enabled', 'disabled']
aliases: []
monitor_state:
description:
- Set monitor availability status for node
version_added: "1.9"
required: false
default: null
choices: ['enabled', 'disabled']
aliases: []
partition:
description:
- Partition
required: false
default: 'Common'
choices: []
aliases: []
name:
description:
- "Node name"
required: false
default: null
choices: []
monitor_type:
description:
- Monitor rule type when monitors > 1
version_added: "2.2"
required: False
default: null
choices: ['and_list', 'm_of_n']
aliases: []
quorum:
description:
- Monitor quorum value when monitor_type is m_of_n
version_added: "2.2"
required: False
default: null
choices: []
aliases: []
monitors:
description:
- Monitor template name list. Always use the full path to the monitor.
version_added: "2.2"
required: False
default: null
choices: []
aliases: []
host:
description:
- "Node IP. Required when state=present and node does not exist. Error when state=absent."
required: true
default: null
choices: []
aliases: ['address', 'ip']
description:
description:
- "Node description."
required: false
default: null
choices: []
extends_documentation_fragment: f5
'''
EXAMPLES = '''
- name: Add node
bigip_node:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
partition: "Common"
host: "10.20.30.40"
name: "10.20.30.40"
# Note that the BIG-IP automatically names the node using the
# IP address specified in previous play's host parameter.
# Future plays referencing this node no longer use the host
# parameter but instead use the name parameter.
# Alternatively, you could have specified a name with the
# name parameter when state=present.
- name: Add node with a single 'ping' monitor
bigip_node:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
partition: "Common"
host: "10.20.30.40"
name: "mytestserver"
monitors:
- /Common/icmp
delegate_to: localhost
- name: Modify node description
bigip_node:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
partition: "Common"
name: "10.20.30.40"
description: "Our best server yet"
delegate_to: localhost
- name: Delete node
bigip_node:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "absent"
partition: "Common"
name: "10.20.30.40"
# The BIG-IP GUI doesn't map directly to the API calls for "Node ->
# General Properties -> State". The following states map to API monitor
# and session states.
#
# Enabled (all traffic allowed):
# monitor_state=enabled, session_state=enabled
# Disabled (only persistent or active connections allowed):
# monitor_state=enabled, session_state=disabled
# Forced offline (only active connections allowed):
# monitor_state=disabled, session_state=disabled
#
# See https://devcentral.f5.com/questions/icontrol-equivalent-call-for-b-node-down
- name: Force node offline
bigip_node:
server: "lb.mydomain.com"
user: "admin"
password: "mysecret"
state: "present"
session_state: "disabled"
monitor_state: "disabled"
partition: "Common"
name: "10.20.30.40"
'''
def node_exists(api, address):
# hack to determine if node exists
result = False
try:
api.LocalLB.NodeAddressV2.get_object_status(nodes=[address])
result = True
except bigsuds.OperationFailed as e:
if "was not found" in str(e):
result = False
else:
# genuine exception
raise
return result
def create_node_address(api, address, name):
try:
api.LocalLB.NodeAddressV2.create(
nodes=[name],
addresses=[address],
limits=[0]
)
result = True
desc = ""
except bigsuds.OperationFailed as e:
if "already exists" in str(e):
result = False
desc = "referenced name or IP already in use"
else:
# genuine exception
raise
return (result, desc)
def get_node_address(api, name):
return api.LocalLB.NodeAddressV2.get_address(nodes=[name])[0]
def delete_node_address(api, address):
try:
api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address])
result = True
desc = ""
except bigsuds.OperationFailed as e:
if "is referenced by a member of pool" in str(e):
result = False
desc = "node referenced by pool"
else:
# genuine exception
raise
return (result, desc)
def set_node_description(api, name, description):
api.LocalLB.NodeAddressV2.set_description(nodes=[name],
descriptions=[description])
def get_node_description(api, name):
return api.LocalLB.NodeAddressV2.get_description(nodes=[name])[0]
def set_node_session_enabled_state(api, name, session_state):
session_state = "STATE_%s" % session_state.strip().upper()
api.LocalLB.NodeAddressV2.set_session_enabled_state(nodes=[name],
states=[session_state])
def get_node_session_status(api, name):
result = api.LocalLB.NodeAddressV2.get_session_status(nodes=[name])[0]
result = result.split("SESSION_STATUS_")[-1].lower()
return result
def set_node_monitor_state(api, name, monitor_state):
monitor_state = "STATE_%s" % monitor_state.strip().upper()
api.LocalLB.NodeAddressV2.set_monitor_state(nodes=[name],
states=[monitor_state])
def get_node_monitor_status(api, name):
result = api.LocalLB.NodeAddressV2.get_monitor_status(nodes=[name])[0]
result = result.split("MONITOR_STATUS_")[-1].lower()
return result
def get_monitors(api, name):
result = api.LocalLB.NodeAddressV2.get_monitor_rule(nodes=[name])[0]
monitor_type = result['type'].split("MONITOR_RULE_TYPE_")[-1].lower()
quorum = result['quorum']
monitor_templates = result['monitor_templates']
return (monitor_type, quorum, monitor_templates)
def set_monitors(api, name, monitor_type, quorum, monitor_templates):
monitor_type = "MONITOR_RULE_TYPE_%s" % monitor_type.strip().upper()
monitor_rule = {'type': monitor_type, 'quorum': quorum, 'monitor_templates': monitor_templates}
api.LocalLB.NodeAddressV2.set_monitor_rule(nodes=[name],
monitor_rules=[monitor_rule])
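# Illustrative call (all values hypothetical): configuring an m_of_n rule via
#   set_monitors(api, "/Common/mynode", "m_of_n", 1,
#                ["/Common/icmp", "/Common/tcp"])
# sends a monitor_rule of the form
#   {'type': 'MONITOR_RULE_TYPE_M_OF_N', 'quorum': 1,
#    'monitor_templates': ['/Common/icmp', '/Common/tcp']}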
def main():
monitor_type_choices = ['and_list', 'm_of_n']
argument_spec = f5_argument_spec()
meta_args = dict(
session_state=dict(type='str', choices=['enabled', 'disabled']),
monitor_state=dict(type='str', choices=['enabled', 'disabled']),
name=dict(type='str', required=True),
host=dict(type='str', aliases=['address', 'ip']),
description=dict(type='str'),
monitor_type=dict(type='str', choices=monitor_type_choices),
quorum=dict(type='int'),
monitors=dict(type='list')
)
argument_spec.update(meta_args)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
if module.params['validate_certs']:
import ssl
if not hasattr(ssl, 'SSLContext'):
module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
server = module.params['server']
server_port = module.params['server_port']
user = module.params['user']
password = module.params['password']
state = module.params['state']
partition = module.params['partition']
validate_certs = module.params['validate_certs']
session_state = module.params['session_state']
monitor_state = module.params['monitor_state']
host = module.params['host']
name = module.params['name']
address = fq_name(partition, name)
description = module.params['description']
monitor_type = module.params['monitor_type']
if monitor_type:
monitor_type = monitor_type.lower()
quorum = module.params['quorum']
monitors = module.params['monitors']
if monitors:
monitors = []
for monitor in module.params['monitors']:
monitors.append(fq_name(partition, monitor))
# sanity check user supplied values
if state == 'absent' and host is not None:
module.fail_json(msg="host parameter invalid when state=absent")
if monitors:
if len(monitors) == 1:
# set default required values for single monitor
quorum = 0
monitor_type = 'single'
elif len(monitors) > 1:
if not monitor_type:
module.fail_json(msg="monitor_type required for monitors > 1")
if monitor_type == 'm_of_n' and not quorum:
module.fail_json(msg="quorum value required for monitor_type m_of_n")
if monitor_type != 'm_of_n':
quorum = 0
elif monitor_type:
# no monitors specified but monitor_type exists
module.fail_json(msg="monitor_type require monitors parameter")
elif quorum is not None:
# no monitors specified but quorum exists
module.fail_json(msg="quorum requires monitors parameter")
try:
api = bigip_api(server, user, password, validate_certs, port=server_port)
result = {'changed': False} # default
if state == 'absent':
if node_exists(api, address):
if not module.check_mode:
deleted, desc = delete_node_address(api, address)
if not deleted:
module.fail_json(msg="unable to delete: %s" % desc)
else:
result = {'changed': True}
else:
# check-mode return value
result = {'changed': True}
elif state == 'present':
if not node_exists(api, address):
if host is None:
module.fail_json(msg="host parameter required when "
"state=present and node does not exist")
if not module.check_mode:
created, desc = create_node_address(api, address=host, name=address)
if not created:
module.fail_json(msg="unable to create: %s" % desc)
else:
result = {'changed': True}
if session_state is not None:
set_node_session_enabled_state(api, address,
session_state)
result = {'changed': True}
if monitor_state is not None:
set_node_monitor_state(api, address, monitor_state)
result = {'changed': True}
if description is not None:
set_node_description(api, address, description)
result = {'changed': True}
if monitors:
set_monitors(api, address, monitor_type, quorum, monitors)
else:
# check-mode return value
result = {'changed': True}
else:
# node exists -- potentially modify attributes
if host is not None:
if get_node_address(api, address) != host:
module.fail_json(msg="Changing the node address is "
"not supported by the API; "
"delete and recreate the node.")
if session_state is not None:
session_status = get_node_session_status(api, address)
if session_state == 'enabled' and \
session_status == 'forced_disabled':
if not module.check_mode:
set_node_session_enabled_state(api, address,
session_state)
result = {'changed': True}
elif session_state == 'disabled' and \
                        session_status != 'forced_disabled':
if not module.check_mode:
set_node_session_enabled_state(api, address,
session_state)
result = {'changed': True}
if monitor_state is not None:
monitor_status = get_node_monitor_status(api, address)
if monitor_state == 'enabled' and \
monitor_status == 'forced_down':
if not module.check_mode:
set_node_monitor_state(api, address,
monitor_state)
result = {'changed': True}
elif monitor_state == 'disabled' and \
monitor_status != 'forced_down':
if not module.check_mode:
set_node_monitor_state(api, address,
monitor_state)
result = {'changed': True}
if description is not None:
if get_node_description(api, address) != description:
if not module.check_mode:
set_node_description(api, address, description)
result = {'changed': True}
if monitors:
t_monitor_type, t_quorum, t_monitor_templates = get_monitors(api, address)
if (t_monitor_type != monitor_type) or (t_quorum != quorum) or (set(t_monitor_templates) != set(monitors)):
if not module.check_mode:
set_monitors(api, address, monitor_type, quorum, monitors)
result = {'changed': True}
except Exception as e:
module.fail_json(msg="received exception: %s" % e)
module.exit_json(**result)
from ansible.module_utils.basic import *
from ansible.module_utils.f5 import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
marcelovilaca/DIRAC
|
FrameworkSystem/Client/ProxyUpload.py
|
3
|
5693
|
########################################################################
# $HeadURL$
# File : dirac-proxy-init.py
# Author : Adrian Casajus
########################################################################
__RCSID__ = "$Id$"
import sys
import getpass
import DIRAC
from DIRAC.Core.Base import Script
class CLIParams:
proxyLifeTime = 2592000
diracGroup = False
certLoc = False
keyLoc = False
proxyLoc = False
onTheFly = False
stdinPasswd = False
rfcIfPossible = False
userPasswd = ""
def __str__( self ):
data = []
for k in ( 'proxyLifeTime', 'diracGroup', 'certLoc', 'keyLoc', 'proxyLoc',
'onTheFly', 'stdinPasswd', 'userPasswd' ):
if k == 'userPasswd':
data.append( "userPasswd = *****" )
else:
data.append( "%s=%s" % ( k, getattr( self, k ) ) )
msg = "<UploadCLIParams %s>" % " ".join( data )
return msg
def setProxyLifeTime( self, arg ):
try:
fields = [ f.strip() for f in arg.split( ":" ) ]
self.proxyLifeTime = int( fields[0] ) * 3600 + int( fields[1] ) * 60
except ValueError:
print "Can't parse %s time! Is it a HH:MM?" % arg
return DIRAC.S_ERROR( "Can't parse time argument" )
return DIRAC.S_OK()
def setProxyRemainingSecs( self, arg ):
self.proxyLifeTime = int( arg )
return DIRAC.S_OK()
def getProxyLifeTime( self ):
hours = self.proxyLifeTime / 3600
mins = self.proxyLifeTime / 60 - hours * 60
return "%s:%s" % ( hours, mins )
def getProxyRemainingSecs( self ):
return self.proxyLifeTime
def setDIRACGroup( self, arg ):
self.diracGroup = arg
return DIRAC.S_OK()
def getDIRACGroup( self ):
return self.diracGroup
def setCertLocation( self, arg ):
self.certLoc = arg
return DIRAC.S_OK()
def setKeyLocation( self, arg ):
self.keyLoc = arg
return DIRAC.S_OK()
def setProxyLocation( self, arg ):
self.proxyLoc = arg
return DIRAC.S_OK()
def setOnTheFly( self, arg ):
self.onTheFly = True
return DIRAC.S_OK()
def setStdinPasswd( self, arg ):
self.stdinPasswd = True
return DIRAC.S_OK()
def showVersion( self, arg ):
print "Version:"
print " ", __RCSID__
sys.exit( 0 )
return DIRAC.S_OK()
def registerCLISwitches( self ):
Script.registerSwitch( "v:", "valid=", "Valid HH:MM for the proxy. By default is one month", self.setProxyLifeTime )
Script.registerSwitch( "g:", "group=", "DIRAC Group to embed in the proxy", self.setDIRACGroup )
Script.registerSwitch( "C:", "Cert=", "File to use as user certificate", self.setCertLocation )
Script.registerSwitch( "K:", "Key=", "File to use as user key", self.setKeyLocation )
Script.registerSwitch( "P:", "Proxy=", "File to use as proxy", self.setProxyLocation )
Script.registerSwitch( "f", "onthefly", "Generate a proxy on the fly", self.setOnTheFly )
Script.registerSwitch( "p", "pwstdin", "Get passwd from stdin", self.setStdinPasswd )
Script.registerSwitch( "i", "version", "Print version", self.showVersion )
Script.addDefaultOptionValue( "LogLevel", "always" )
from DIRAC import S_ERROR
from DIRAC.Core.Security.X509Chain import X509Chain
from DIRAC.Core.Security import Locations
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
def uploadProxy( params ):
DIRAC.gLogger.info( "Loading user proxy" )
proxyLoc = params.proxyLoc
if not proxyLoc:
proxyLoc = Locations.getDefaultProxyLocation()
if not proxyLoc:
return S_ERROR( "Can't find any proxy" )
if params.onTheFly:
DIRAC.gLogger.info( "Uploading proxy on-the-fly" )
certLoc = params.certLoc
keyLoc = params.keyLoc
if not certLoc or not keyLoc:
cakLoc = Locations.getCertificateAndKeyLocation()
if not cakLoc:
return S_ERROR( "Can't find user certificate and key" )
if not certLoc:
certLoc = cakLoc[0]
if not keyLoc:
keyLoc = cakLoc[1]
DIRAC.gLogger.info( "Cert file %s" % certLoc )
DIRAC.gLogger.info( "Key file %s" % keyLoc )
testChain = X509Chain()
retVal = testChain.loadKeyFromFile( keyLoc, password = params.userPasswd )
if not retVal[ 'OK' ]:
passwdPrompt = "Enter Certificate password:"
if params.stdinPasswd:
userPasswd = sys.stdin.readline().strip( "\n" )
else:
userPasswd = getpass.getpass( passwdPrompt )
params.userPasswd = userPasswd
DIRAC.gLogger.info( "Loading cert and key" )
chain = X509Chain()
#Load user cert and key
retVal = chain.loadChainFromFile( certLoc )
if not retVal[ 'OK' ]:
return S_ERROR( "Can't load %s" % certLoc )
retVal = chain.loadKeyFromFile( keyLoc, password = params.userPasswd )
if not retVal[ 'OK' ]:
return S_ERROR( "Can't load %s" % keyLoc )
DIRAC.gLogger.info( "User credentials loaded" )
diracGroup = params.diracGroup
if not diracGroup:
result = chain.getCredentials()
if not result['OK']:
return result
if 'group' not in result['Value']:
return S_ERROR( 'Can not get Group from existing credentials' )
diracGroup = result['Value']['group']
restrictLifeTime = params.proxyLifeTime
else:
proxyChain = X509Chain()
retVal = proxyChain.loadProxyFromFile( proxyLoc )
if not retVal[ 'OK' ]:
return S_ERROR( "Can't load proxy file %s: %s" % ( params.proxyLoc, retVal[ 'Message' ] ) )
chain = proxyChain
diracGroup = False
restrictLifeTime = 0
DIRAC.gLogger.info( " Uploading..." )
return gProxyManager.uploadProxy( chain, diracGroup, restrictLifeTime = restrictLifeTime, rfcIfPossible = params.rfcIfPossible )
|
gpl-3.0
|
cybojenix/SlimBot
|
plugins/roulette.py
|
1
|
2463
|
# russian roulette
import random
from util import hook
import os
import json
import time
@hook.command(autohelp=False)
def load(inp, message=None, action=None, chan=None):
"load [<number of barrels>] [<number of bullets>] - " \
" load the gun up"
dir = "plugins/data/rr/"
if not os.path.exists(dir):
os.makedirs(dir)
file = dir + chan
    # Parse optional "<barrels> <bullets>" arguments; fall back to a
    # six-barrel, one-bullet revolver on missing or malformed input.
    no_barrels = 6
    no_bullet = 1
    fields = inp.split(" ") if inp else []
    if len(fields) >= 2:
        try:
            no_barrels = int(fields[0])
            no_bullet = int(fields[1])
        except ValueError:
            no_barrels = 6
            no_bullet = 1
bullet_place = []
action("loads the bullets, spins the barrel...")
for x in range(no_bullet):
bul_pl = random.randint(1, no_barrels)
while bul_pl in bullet_place:
bul_pl = random.randint(1, no_barrels)
bullet_place.append(bul_pl)
data = json.dumps({'no_bullet': no_bullet, 'current_position': 0, 'bullet_place': bullet_place, 'dead': []})
with open(file, 'w+') as final_file:
final_file.write(data)
message("the bullets have been loaded. pull the trigger...")
@hook.command(autohelp=False)
def pull(inp, message=None, nick=None, notice=None, action=None, chan=None):
"pull the trigger"
file = "plugins/data/rr/" + chan
if not os.path.exists(file):
notice("please start a game with command load")
else:
with open(file, 'r') as final_file:
data = json.load(final_file)
no_bullet = data["no_bullet"]
current_position = data["current_position"]
bullet_place = data["bullet_place"]
dead = data["dead"]
if nick in dead:
message("you can not shoot if you are dead %s" % nick)
else:
if no_bullet == 0:
notice("please start a game with command load")
else:
message("click....")
time.sleep(2)
current_position += 1
if current_position in bullet_place:
message("BANG!! %s is DEAD" % nick)
no_bullet -= 1
dead.append(nick)
if chan[0] == "#":
action("drags the body off...")
out = "KICK %s %s : you died...." % (chan, nick)
else:
message("%s gets to live another day.." % nick)
if no_bullet == 0:
message("there are no bullets left")
data = json.dumps({'no_bullet': no_bullet, 'current_position': current_position, 'bullet_place': bullet_place, 'dead': dead})
with open(file, 'w+') as final_file:
final_file.write(data)
|
gpl-3.0
|
JakeBrand/CMPUT410-E6
|
v1/lib/python2.7/site-packages/django/contrib/gis/tests/geoapp/test_sitemaps.py
|
60
|
4956
|
from __future__ import unicode_literals
from io import BytesIO
from unittest import skipUnless
from xml.dom import minidom
import os
import zipfile
from django.conf import settings
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.gis.tests.utils import HAS_SPATIAL_DB
from django.contrib.sites.models import Site
from django.test import TestCase, modify_settings
from django.test.utils import IgnoreDeprecationWarningsMixin
from django.utils._os import upath
if HAS_GEOS:
from .models import City, Country
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'})
@skipUnless(HAS_GEOS and HAS_SPATIAL_DB, "Geos and spatial db are required.")
class GeoSitemapTest(IgnoreDeprecationWarningsMixin, TestCase):
urls = 'django.contrib.gis.tests.geoapp.urls'
def setUp(self):
super(GeoSitemapTest, self).setUp()
Site(id=settings.SITE_ID, domain="example.com", name="example.com").save()
def assertChildNodes(self, elem, expected):
"Taken from syndication/tests.py."
actual = set(n.nodeName for n in elem.childNodes)
expected = set(expected)
self.assertEqual(actual, expected)
def test_geositemap_index(self):
"Tests geographic sitemap index."
# Getting the geo index.
from django.contrib import sitemaps
template_dirs = settings.TEMPLATE_DIRS + (
os.path.join(os.path.dirname(upath(sitemaps.__file__)), 'templates'),)
with self.settings(TEMPLATE_DIRS=template_dirs):
doc = minidom.parseString(self.client.get('/sitemap.xml').content)
index = doc.firstChild
self.assertEqual(index.getAttribute('xmlns'), 'http://www.sitemaps.org/schemas/sitemap/0.9')
self.assertEqual(3, len(index.getElementsByTagName('sitemap')))
def test_geositemap_kml(self):
"Tests KML/KMZ geographic sitemaps."
for kml_type in ('kml', 'kmz'):
doc = minidom.parseString(self.client.get('/sitemaps/%s.xml' % kml_type).content)
# Ensuring the right sitemaps namespaces are present.
urlset = doc.firstChild
self.assertEqual(urlset.getAttribute('xmlns'), 'http://www.sitemaps.org/schemas/sitemap/0.9')
self.assertEqual(urlset.getAttribute('xmlns:geo'), 'http://www.google.com/geo/schemas/sitemap/1.0')
urls = urlset.getElementsByTagName('url')
self.assertEqual(2, len(urls)) # Should only be 2 sitemaps.
for url in urls:
self.assertChildNodes(url, ['loc', 'geo:geo'])
# Making sure the 'geo:format' element was properly set.
geo_elem = url.getElementsByTagName('geo:geo')[0]
geo_format = geo_elem.getElementsByTagName('geo:format')[0]
self.assertEqual(kml_type, geo_format.childNodes[0].data)
# Getting the relative URL since we don't have a real site.
kml_url = url.getElementsByTagName('loc')[0].childNodes[0].data.split('http://example.com')[1]
if kml_type == 'kml':
kml_doc = minidom.parseString(self.client.get(kml_url).content)
elif kml_type == 'kmz':
# Have to decompress KMZ before parsing.
buf = BytesIO(self.client.get(kml_url).content)
zf = zipfile.ZipFile(buf)
self.assertEqual(1, len(zf.filelist))
self.assertEqual('doc.kml', zf.filelist[0].filename)
kml_doc = minidom.parseString(zf.read('doc.kml'))
# Ensuring the correct number of placemarks are in the KML doc.
if 'city' in kml_url:
model = City
elif 'country' in kml_url:
model = Country
self.assertEqual(model.objects.count(), len(kml_doc.getElementsByTagName('Placemark')))
def test_geositemap_georss(self):
"Tests GeoRSS geographic sitemaps."
from .feeds import feed_dict
doc = minidom.parseString(self.client.get('/sitemaps/georss.xml').content)
# Ensuring the right sitemaps namespaces are present.
urlset = doc.firstChild
self.assertEqual(urlset.getAttribute('xmlns'), 'http://www.sitemaps.org/schemas/sitemap/0.9')
self.assertEqual(urlset.getAttribute('xmlns:geo'), 'http://www.google.com/geo/schemas/sitemap/1.0')
# Making sure the correct number of feed URLs were included.
urls = urlset.getElementsByTagName('url')
self.assertEqual(len(feed_dict), len(urls))
for url in urls:
self.assertChildNodes(url, ['loc', 'geo:geo'])
# Making sure the 'geo:format' element was properly set to 'georss'.
geo_elem = url.getElementsByTagName('geo:geo')[0]
geo_format = geo_elem.getElementsByTagName('geo:format')[0]
self.assertEqual('georss', geo_format.childNodes[0].data)
|
apache-2.0
|
dennybaa/st2
|
st2common/tests/unit/test_keyvalue_lookup.py
|
7
|
2788
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2tests.base import CleanDbTestCase
from st2common.models.db.keyvalue import KeyValuePairDB
from st2common.persistence.keyvalue import KeyValuePair
from st2common.services.keyvalues import KeyValueLookup
class TestKeyValueLookup(CleanDbTestCase):
def test_non_hierarchical_lookup(self):
k1 = KeyValuePair.add_or_update(KeyValuePairDB(name='k1', value='v1'))
k2 = KeyValuePair.add_or_update(KeyValuePairDB(name='k2', value='v2'))
k3 = KeyValuePair.add_or_update(KeyValuePairDB(name='k3', value='v3'))
lookup = KeyValueLookup()
self.assertEquals(str(lookup.k1), k1.value)
self.assertEquals(str(lookup.k2), k2.value)
self.assertEquals(str(lookup.k3), k3.value)
def test_hierarchical_lookup_dotted(self):
k1 = KeyValuePair.add_or_update(KeyValuePairDB(name='a.b', value='v1'))
k2 = KeyValuePair.add_or_update(KeyValuePairDB(name='a.b.c', value='v2'))
k3 = KeyValuePair.add_or_update(KeyValuePairDB(name='b.c', value='v3'))
lookup = KeyValueLookup()
self.assertEquals(str(lookup.a.b), k1.value)
self.assertEquals(str(lookup.a.b.c), k2.value)
self.assertEquals(str(lookup.b.c), k3.value)
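        # 'a' has no value of its own; intermediate path components resolve to ''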
self.assertEquals(str(lookup.a), '')
def test_hierarchical_lookup_dict(self):
k1 = KeyValuePair.add_or_update(KeyValuePairDB(name='a.b', value='v1'))
k2 = KeyValuePair.add_or_update(KeyValuePairDB(name='a.b.c', value='v2'))
k3 = KeyValuePair.add_or_update(KeyValuePairDB(name='b.c', value='v3'))
lookup = KeyValueLookup()
self.assertEquals(str(lookup['a']['b']), k1.value)
self.assertEquals(str(lookup['a']['b']['c']), k2.value)
self.assertEquals(str(lookup['b']['c']), k3.value)
self.assertEquals(str(lookup['a']), '')
def test_missing_key_lookup(self):
lookup = KeyValueLookup()
self.assertEquals(str(lookup.missing_key), '')
        self.assertTrue(lookup.missing_key, 'Should not be None.')
|
apache-2.0
|
Marketing1by1/petl
|
petl/transform/maps.py
|
2
|
12598
|
from __future__ import absolute_import, print_function, division
import operator
from collections import OrderedDict
from petl.compat import next, string_types, text_type
from petl.errors import ArgumentError
from petl.util.base import Table, expr, rowgroupby, Record
from petl.transform.sorts import sort
def fieldmap(table, mappings=None, failonerror=False, errorvalue=None):
"""
Transform a table, mapping fields arbitrarily between input and output.
E.g.::
>>> import petl as etl
>>> from collections import OrderedDict
>>> table1 = [['id', 'sex', 'age', 'height', 'weight'],
... [1, 'male', 16, 1.45, 62.0],
... [2, 'female', 19, 1.34, 55.4],
... [3, 'female', 17, 1.78, 74.4],
... [4, 'male', 21, 1.33, 45.2],
... [5, '-', 25, 1.65, 51.9]]
>>> mappings = OrderedDict()
>>> # rename a field
... mappings['subject_id'] = 'id'
>>> # translate a field
... mappings['gender'] = 'sex', {'male': 'M', 'female': 'F'}
>>> # apply a calculation to a field
... mappings['age_months'] = 'age', lambda v: v * 12
>>> # apply a calculation to a combination of fields
... mappings['bmi'] = lambda rec: rec['weight'] / rec['height']**2
>>> # transform and inspect the output
... table2 = etl.fieldmap(table1, mappings)
>>> table2
+------------+--------+------------+--------------------+
| subject_id | gender | age_months | bmi |
+============+========+============+====================+
| 1 | 'M' | 192 | 29.48870392390012 |
+------------+--------+------------+--------------------+
| 2 | 'F' | 228 | 30.8531967030519 |
+------------+--------+------------+--------------------+
| 3 | 'F' | 204 | 23.481883600555488 |
+------------+--------+------------+--------------------+
| 4 | 'M' | 252 | 25.55260331279326 |
+------------+--------+------------+--------------------+
| 5 | '-' | 300 | 19.0633608815427 |
+------------+--------+------------+--------------------+
Note also that the mapping value can be an expression string, which will be
converted to a lambda function via :func:`petl.util.base.expr`.
"""
return FieldMapView(table, mappings=mappings, failonerror=failonerror,
errorvalue=errorvalue)
Table.fieldmap = fieldmap
class FieldMapView(Table):
def __init__(self, source, mappings=None, failonerror=False,
errorvalue=None):
self.source = source
if mappings is None:
self.mappings = OrderedDict()
else:
self.mappings = mappings
self.failonerror = failonerror
self.errorvalue = errorvalue
def __setitem__(self, key, value):
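        # Convenience hook: mappings can also be added after the view is constructed.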
self.mappings[key] = value
def __iter__(self):
return iterfieldmap(self.source, self.mappings, self.failonerror,
self.errorvalue)
def iterfieldmap(source, mappings, failonerror, errorvalue):
it = iter(source)
hdr = next(it)
flds = list(map(text_type, hdr))
outhdr = mappings.keys()
yield tuple(outhdr)
mapfuns = dict()
for outfld, m in mappings.items():
if m in hdr:
mapfuns[outfld] = operator.itemgetter(m)
elif isinstance(m, int) and m < len(hdr):
mapfuns[outfld] = operator.itemgetter(m)
elif isinstance(m, string_types):
mapfuns[outfld] = expr(m)
elif callable(m):
mapfuns[outfld] = m
elif isinstance(m, (tuple, list)) and len(m) == 2:
srcfld = m[0]
fm = m[1]
if callable(fm):
mapfuns[outfld] = composefun(fm, srcfld)
elif isinstance(fm, dict):
mapfuns[outfld] = composedict(fm, srcfld)
else:
raise ArgumentError('expected callable or dict')
else:
raise ArgumentError('invalid mapping %r: %r' % (outfld, m))
# wrap rows as records
it = (Record(row, flds) for row in it)
for row in it:
outrow = list()
for outfld in outhdr:
try:
val = mapfuns[outfld](row)
except Exception as e:
if failonerror:
raise e
else:
val = errorvalue
outrow.append(val)
yield tuple(outrow)
def composefun(f, srcfld):
def g(rec):
return f(rec[srcfld])
return g
def composedict(d, srcfld):
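    # Values not found in the mapping dict pass through unchanged.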
def g(rec):
k = rec[srcfld]
if k in d:
return d[k]
else:
return k
return g
def rowmap(table, rowmapper, header, failonerror=False):
"""
Transform rows via an arbitrary function. E.g.::
>>> import petl as etl
>>> table1 = [['id', 'sex', 'age', 'height', 'weight'],
... [1, 'male', 16, 1.45, 62.0],
... [2, 'female', 19, 1.34, 55.4],
... [3, 'female', 17, 1.78, 74.4],
... [4, 'male', 21, 1.33, 45.2],
... [5, '-', 25, 1.65, 51.9]]
>>> def rowmapper(row):
... transmf = {'male': 'M', 'female': 'F'}
... return [row[0],
... transmf[row['sex']] if row['sex'] in transmf else None,
... row.age * 12,
... row.height / row.weight ** 2]
...
>>> table2 = etl.rowmap(table1, rowmapper,
... header=['subject_id', 'gender', 'age_months',
... 'bmi'])
>>> table2
+------------+--------+------------+-----------------------+
| subject_id | gender | age_months | bmi |
+============+========+============+=======================+
| 1 | 'M' | 192 | 0.0003772112382934443 |
+------------+--------+------------+-----------------------+
| 2 | 'F' | 228 | 0.0004366015456998006 |
+------------+--------+------------+-----------------------+
| 3 | 'F' | 204 | 0.0003215689675106949 |
+------------+--------+------------+-----------------------+
| 4 | 'M' | 252 | 0.0006509906805544679 |
+------------+--------+------------+-----------------------+
| 5 | None | 300 | 0.0006125608384287258 |
+------------+--------+------------+-----------------------+
The `rowmapper` function should accept a single row and return a single
row (list or tuple).
"""
return RowMapView(table, rowmapper, header, failonerror=failonerror)
Table.rowmap = rowmap
class RowMapView(Table):
def __init__(self, source, rowmapper, header, failonerror=False):
self.source = source
self.rowmapper = rowmapper
self.header = header
self.failonerror = failonerror
def __iter__(self):
return iterrowmap(self.source, self.rowmapper, self.header,
self.failonerror)
def iterrowmap(source, rowmapper, header, failonerror):
it = iter(source)
hdr = next(it)
flds = list(map(text_type, hdr))
yield tuple(header)
it = (Record(row, flds) for row in it)
for row in it:
try:
outrow = rowmapper(row)
yield tuple(outrow)
except Exception as e:
if failonerror:
raise e
def rowmapmany(table, rowgenerator, header, failonerror=False):
"""
Map each input row to any number of output rows via an arbitrary
function. E.g.::
>>> import petl as etl
>>> table1 = [['id', 'sex', 'age', 'height', 'weight'],
... [1, 'male', 16, 1.45, 62.0],
... [2, 'female', 19, 1.34, 55.4],
... [3, '-', 17, 1.78, 74.4],
... [4, 'male', 21, 1.33]]
>>> def rowgenerator(row):
... transmf = {'male': 'M', 'female': 'F'}
... yield [row[0], 'gender',
... transmf[row['sex']] if row['sex'] in transmf else None]
... yield [row[0], 'age_months', row.age * 12]
... yield [row[0], 'bmi', row.height / row.weight ** 2]
...
>>> table2 = etl.rowmapmany(table1, rowgenerator,
... header=['subject_id', 'variable', 'value'])
>>> table2.lookall()
+------------+--------------+-----------------------+
| subject_id | variable | value |
+============+==============+=======================+
| 1 | 'gender' | 'M' |
+------------+--------------+-----------------------+
| 1 | 'age_months' | 192 |
+------------+--------------+-----------------------+
| 1 | 'bmi' | 0.0003772112382934443 |
+------------+--------------+-----------------------+
| 2 | 'gender' | 'F' |
+------------+--------------+-----------------------+
| 2 | 'age_months' | 228 |
+------------+--------------+-----------------------+
| 2 | 'bmi' | 0.0004366015456998006 |
+------------+--------------+-----------------------+
| 3 | 'gender' | None |
+------------+--------------+-----------------------+
| 3 | 'age_months' | 204 |
+------------+--------------+-----------------------+
| 3 | 'bmi' | 0.0003215689675106949 |
+------------+--------------+-----------------------+
| 4 | 'gender' | 'M' |
+------------+--------------+-----------------------+
| 4 | 'age_months' | 252 |
+------------+--------------+-----------------------+
The `rowgenerator` function should accept a single row and yield zero or
more rows (lists or tuples).
See also the :func:`petl.transform.reshape.melt` function.
"""
return RowMapManyView(table, rowgenerator, header, failonerror=failonerror)
Table.rowmapmany = rowmapmany
class RowMapManyView(Table):
def __init__(self, source, rowgenerator, header, failonerror=False):
self.source = source
self.rowgenerator = rowgenerator
self.header = header
self.failonerror = failonerror
def __iter__(self):
return iterrowmapmany(self.source, self.rowgenerator, self.header,
self.failonerror)
def iterrowmapmany(source, rowgenerator, header, failonerror):
it = iter(source)
hdr = next(it)
flds = list(map(text_type, hdr))
yield tuple(header)
it = (Record(row, flds) for row in it)
for row in it:
try:
for outrow in rowgenerator(row):
yield tuple(outrow)
except Exception as e:
if failonerror:
raise e
else:
pass
def rowgroupmap(table, key, mapper, header=None, presorted=False,
buffersize=None, tempdir=None, cache=True):
"""
Group rows under the given key then apply `mapper` to yield zero or more
output rows for each input group of rows.
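    A minimal illustrative sketch (not from the original petl docs; the
    `mapper` signature shown here follows ``iterrowgroupmap`` below, which
    calls it with the group key and an iterable of rows)::
        >>> import petl as etl
        >>> table1 = [['foo', 'bar'],
        ...           ['a', 1],
        ...           ['a', 2],
        ...           ['b', 3]]
        >>> def sumbar(key, rows):
        ...     yield [key, sum(row[1] for row in rows)]
        ...
        >>> table2 = etl.rowgroupmap(table1, 'foo', sumbar,
        ...                          header=['foo', 'barsum'])
        >>> # table2 now holds the rows ('a', 3) and ('b', 3)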
"""
return RowGroupMapView(table, key, mapper, header=header,
presorted=presorted,
buffersize=buffersize, tempdir=tempdir, cache=cache)
Table.rowgroupmap = rowgroupmap
class RowGroupMapView(Table):
def __init__(self, source, key, mapper, header=None,
presorted=False, buffersize=None, tempdir=None, cache=True):
if presorted:
self.source = source
else:
self.source = sort(source, key, buffersize=buffersize,
tempdir=tempdir, cache=cache)
self.key = key
self.header = header
self.mapper = mapper
def __iter__(self):
return iterrowgroupmap(self.source, self.key, self.mapper, self.header)
def iterrowgroupmap(source, key, mapper, header):
yield tuple(header)
for key, rows in rowgroupby(source, key):
for row in mapper(key, rows):
yield row
|
mit
|
hachreak/invenio-previewer
|
invenio_previewer/utils.py
|
2
|
1963
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Invenio Previewer Utilities."""
import cchardet
from flask import current_app
def detect_encoding(fp, default=None):
"""Detect the cahracter encoding of a file.
:param fp: Open Python file pointer.
:param default: Fallback encoding to use.
:returns: The detected encoding.
.. note:: The file pointer is returned at its original read position.
"""
init_pos = fp.tell()
try:
sample = fp.read(
current_app.config.get('PREVIEWER_CHARDET_BYTES', 1024))
# Result contains 'confidence' and 'encoding'
result = cchardet.detect(sample)
threshold = current_app.config.get('PREVIEWER_CHARDET_CONFIDENCE', 0.9)
if result.get('confidence', 0) > threshold:
return result.get('encoding', default)
else:
return default
except Exception:
current_app.logger.warning('Encoding detection failed.', exc_info=True)
return default
finally:
fp.seek(init_pos)
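# A minimal usage sketch (illustrative only; assumes an active Flask
# application context, since detect_encoding reads current_app.config):
#
#     with open('document.txt', 'rb') as fp:
#         encoding = detect_encoding(fp, default='utf-8')
#         text = fp.read().decode(encoding)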
|
gpl-2.0
|
maheshp/novatest
|
nova/virt/hyperv/pathutils.py
|
7
|
4794
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
from eventlet.green import subprocess
from nova.openstack.common import log as logging
from oslo.config import cfg
LOG = logging.getLogger(__name__)
hyperv_opts = [
cfg.StrOpt('instances_path_share',
default="",
help='The name of a Windows share name mapped to the '
'"instances_path" dir and used by the resize feature '
'to copy files to the target host. If left blank, an '
'administrative share will be used, looking for the same '
'"instances_path" used locally'),
]
CONF = cfg.CONF
CONF.register_opts(hyperv_opts, 'hyperv')
CONF.import_opt('instances_path', 'nova.compute.manager')
class PathUtils(object):
def open(self, path, mode):
"""Wrapper on __builin__.open used to simplify unit testing."""
import __builtin__
return __builtin__.open(path, mode)
def exists(self, path):
return os.path.exists(path)
def makedirs(self, path):
os.makedirs(path)
def remove(self, path):
os.remove(path)
def rename(self, src, dest):
os.rename(src, dest)
def copyfile(self, src, dest):
self.copy(src, dest)
def copy(self, src, dest):
# With large files this is 2x-3x faster than shutil.copy(src, dest),
# especially when copying to a UNC target.
# shutil.copyfileobj(...) with a proper buffer is better than
# shutil.copy(...) but still 20% slower than a shell copy.
# It can be replaced with Win32 API calls to avoid the process
# spawning overhead.
if subprocess.call(['cmd.exe', '/C', 'copy', '/Y', src, dest]):
            raise IOError(_('The file copy from %(src)s to %(dest)s failed')
                          % {'src': src, 'dest': dest})
def rmtree(self, path):
shutil.rmtree(path)
def get_instances_dir(self, remote_server=None):
local_instance_path = os.path.normpath(CONF.instances_path)
if remote_server:
if CONF.hyperv.instances_path_share:
path = CONF.hyperv.instances_path_share
else:
# Use an administrative share
path = local_instance_path.replace(':', '$')
return '\\\\%(remote_server)s\\%(path)s' % locals()
else:
return local_instance_path
def _check_create_dir(self, path):
if not self.exists(path):
LOG.debug(_('Creating directory: %s') % path)
self.makedirs(path)
def _check_remove_dir(self, path):
if self.exists(path):
LOG.debug(_('Removing directory: %s') % path)
self.rmtree(path)
def _get_instances_sub_dir(self, dir_name, remote_server=None,
create_dir=True, remove_dir=False):
instances_path = self.get_instances_dir(remote_server)
path = os.path.join(instances_path, dir_name)
if remove_dir:
self._check_remove_dir(path)
if create_dir:
self._check_create_dir(path)
return path
def get_instance_migr_revert_dir(self, instance_name, create_dir=False,
remove_dir=False):
dir_name = '%s_revert' % instance_name
return self._get_instances_sub_dir(dir_name, None, create_dir,
remove_dir)
def get_instance_dir(self, instance_name, remote_server=None,
create_dir=True, remove_dir=False):
return self._get_instances_sub_dir(instance_name, remote_server,
create_dir, remove_dir)
def get_vhd_path(self, instance_name):
instance_path = self.get_instance_dir(instance_name)
return os.path.join(instance_path, 'root.vhd')
def get_base_vhd_dir(self):
return self._get_instances_sub_dir('_base')
def get_export_dir(self, instance_name):
dir_name = os.path.join('export', instance_name)
return self._get_instances_sub_dir(dir_name, create_dir=True,
remove_dir=True)
|
apache-2.0
|
vovanbo/django-oscar
|
tests/unit/wishlist_tests.py
|
69
|
1388
|
from django.test import TestCase
from oscar.apps.wishlists.models import WishList
from oscar.core.compat import get_user_model
User = get_user_model()
class TestAWishlist(TestCase):
def test_can_generate_a_random_key(self):
key = WishList.random_key(6)
self.assertTrue(len(key) == 6)
class TestAPublicWishList(TestCase):
def setUp(self):
self.wishlist = WishList(visibility=WishList.PUBLIC)
def test_is_visible_to_anyone(self):
user = User()
self.assertTrue(self.wishlist.is_allowed_to_see(user))
class TestASharedWishList(TestCase):
def setUp(self):
self.wishlist = WishList(visibility=WishList.SHARED)
def test_is_visible_to_anyone(self):
user = User()
self.assertTrue(self.wishlist.is_allowed_to_see(user))
class TestAPrivateWishList(TestCase):
def setUp(self):
self.owner = User(id=1)
self.another_user = User(id=2)
self.wishlist = WishList(owner=self.owner)
def test_is_visible_only_to_its_owner(self):
self.assertTrue(self.wishlist.is_allowed_to_see(self.owner))
self.assertFalse(self.wishlist.is_allowed_to_see(self.another_user))
def test_can_only_be_edited_by_its_owner(self):
self.assertTrue(self.wishlist.is_allowed_to_edit(self.owner))
self.assertFalse(self.wishlist.is_allowed_to_edit(self.another_user))
|
bsd-3-clause
|
Milad1993/linux
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
|
12527
|
1935
|
# Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
    if key not in dict:
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
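        # Note: this is a smoothed running value, not an exact arithmetic mean.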
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
|
gpl-2.0
|
rafiqsaleh/VERCE
|
verce-hpc-pe/src/networkx/readwrite/json_graph/tests/test_serialize.py
|
35
|
1329
|
import json
from nose.tools import assert_equal, assert_raises, assert_not_equal,assert_true
import networkx as nx
from networkx.readwrite.json_graph import *
class TestAdjacency:
def test_graph(self):
G = nx.path_graph(4)
H = loads(dumps(G))
nx.is_isomorphic(G,H)
def test_graph_attributes(self):
G = nx.path_graph(4)
G.add_node(1,color='red')
G.add_edge(1,2,width=7)
G.graph['foo']='bar'
G.graph[1]='one'
H = loads(dumps(G))
assert_equal(H.graph['foo'],'bar')
assert_equal(H.graph[1],'one')
assert_equal(H.node[1]['color'],'red')
assert_equal(H[1][2]['width'],7)
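        # StringIO moved to the io module in Python 3; fall back accordingly.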
try:
from StringIO import StringIO
        except ImportError:
from io import StringIO
io = StringIO()
dump(G,io)
io.seek(0)
H=load(io)
assert_equal(H.graph['foo'],'bar')
assert_equal(H.graph[1],'one')
assert_equal(H.node[1]['color'],'red')
assert_equal(H[1][2]['width'],7)
def test_digraph(self):
G = nx.DiGraph()
H = loads(dumps(G))
assert_true(H.is_directed())
def test_multidigraph(self):
G = nx.MultiDiGraph()
H = loads(dumps(G))
assert_true(H.is_directed())
assert_true(H.is_multigraph())
|
mit
|
fieldOfView/Cura
|
plugins/USBPrinting/avr_isp/stk500v2.py
|
7
|
8325
|
"""
STK500v2 protocol implementation for programming AVR chips.
The STK500v2 protocol is used by the ArduinoMega2560 and a few other Arduino platforms to load firmware.
This is a python 3 conversion of the code created by David Braam for the Cura project.
"""
import struct
import sys
import time
from serial import Serial # type: ignore
from serial import SerialException
from serial import SerialTimeoutException
from UM.Logger import Logger
from . import ispBase, intelHex
class Stk500v2(ispBase.IspBase):
def __init__(self):
self.serial = None
self.seq = 1
self.last_addr = -1
self.progress_callback = None
def connect(self, port = "COM22", speed = 115200):
if self.serial is not None:
self.close()
try:
self.serial = Serial(str(port), speed, timeout=1, writeTimeout=10000)
except SerialException:
raise ispBase.IspError("Failed to open serial port")
except:
raise ispBase.IspError("Unexpected error while connecting to serial port:" + port + ":" + str(sys.exc_info()[0]))
self.seq = 1
#Reset the controller
for n in range(0, 2):
self.serial.setDTR(True)
time.sleep(0.1)
self.serial.setDTR(False)
time.sleep(0.1)
time.sleep(0.2)
self.serial.flushInput()
self.serial.flushOutput()
try:
if self.sendMessage([0x10, 0xc8, 0x64, 0x19, 0x20, 0x00, 0x53, 0x03, 0xac, 0x53, 0x00, 0x00]) != [0x10, 0x00]:
raise ispBase.IspError("Failed to enter programming mode")
self.sendMessage([0x06, 0x80, 0x00, 0x00, 0x00])
if self.sendMessage([0xEE])[1] == 0x00:
self._has_checksum = True
else:
self._has_checksum = False
except ispBase.IspError:
self.close()
raise
self.serial.timeout = 5
def close(self):
if self.serial is not None:
self.serial.close()
self.serial = None
    # leaveISP does not close the serial port; it only resets the device and
    # returns the serial port object after disconnecting it from the
    # programming interface. This allows you to reuse the serial port without
    # opening it again.
def leaveISP(self):
if self.serial is not None:
if self.sendMessage([0x11]) != [0x11, 0x00]:
raise ispBase.IspError("Failed to leave programming mode")
ret = self.serial
self.serial = None
return ret
return None
def isConnected(self):
return self.serial is not None
def hasChecksumFunction(self):
return self._has_checksum
def sendISP(self, data):
recv = self.sendMessage([0x1D, 4, 4, 0, data[0], data[1], data[2], data[3]])
return recv[2:6]
def writeFlash(self, flash_data):
        # Set load addr to 0; in case we have more than 64k flash we need to enable the address extension
page_size = self.chip["pageSize"] * 2
flash_size = page_size * self.chip["pageCount"]
Logger.log("d", "Writing flash")
if flash_size > 0xFFFF:
self.sendMessage([0x06, 0x80, 0x00, 0x00, 0x00])
else:
self.sendMessage([0x06, 0x00, 0x00, 0x00, 0x00])
        load_count = (len(flash_data) + page_size - 1) // page_size
for i in range(0, int(load_count)):
self.sendMessage([0x13, page_size >> 8, page_size & 0xFF, 0xc1, 0x0a, 0x40, 0x4c, 0x20, 0x00, 0x00] + flash_data[(i * page_size):(i * page_size + page_size)])
if self.progress_callback is not None:
if self._has_checksum:
self.progress_callback(i + 1, load_count)
else:
self.progress_callback(i + 1, load_count * 2)
def verifyFlash(self, flash_data):
if self._has_checksum:
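            # The 0x06 load-address command takes a word (16-bit) address,
            # which is presumably why the byte length is shifted right one
            # extra bit here.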
self.sendMessage([0x06, 0x00, (len(flash_data) >> 17) & 0xFF, (len(flash_data) >> 9) & 0xFF, (len(flash_data) >> 1) & 0xFF])
res = self.sendMessage([0xEE])
checksum_recv = res[2] | (res[3] << 8)
checksum = 0
for d in flash_data:
checksum += d
checksum &= 0xFFFF
if hex(checksum) != hex(checksum_recv):
raise ispBase.IspError("Verify checksum mismatch: 0x%x != 0x%x" % (checksum & 0xFFFF, checksum_recv))
else:
            # Set load addr to 0; in case we have more than 64k flash we need to enable the address extension
flash_size = self.chip["pageSize"] * 2 * self.chip["pageCount"]
if flash_size > 0xFFFF:
self.sendMessage([0x06, 0x80, 0x00, 0x00, 0x00])
else:
self.sendMessage([0x06, 0x00, 0x00, 0x00, 0x00])
            load_count = (len(flash_data) + 0xFF) // 0x100
for i in range(0, int(load_count)):
recv = self.sendMessage([0x14, 0x01, 0x00, 0x20])[2:0x102]
if self.progress_callback is not None:
self.progress_callback(load_count + i + 1, load_count * 2)
for j in range(0, 0x100):
if i * 0x100 + j < len(flash_data) and flash_data[i * 0x100 + j] != recv[j]:
raise ispBase.IspError("Verify error at: 0x%x" % (i * 0x100 + j))
def sendMessage(self, data):
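        # STK500v2 frame layout: MESSAGE_START (0x1B), sequence number,
        # 16-bit big-endian body size, TOKEN (0x0E), body bytes, then an
        # XOR checksum over all preceding bytes.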
message = struct.pack(">BBHB", 0x1B, self.seq, len(data), 0x0E)
for c in data:
message += struct.pack(">B", c)
checksum = 0
for c in message:
checksum ^= c
message += struct.pack(">B", checksum)
try:
self.serial.write(message)
self.serial.flush()
except SerialTimeoutException:
raise ispBase.IspError("Serial send timeout")
self.seq = (self.seq + 1) & 0xFF
return self.recvMessage()
def recvMessage(self):
state = "Start"
checksum = 0
while True:
s = self.serial.read()
if len(s) < 1:
raise ispBase.IspError("Timeout")
b = struct.unpack(">B", s)[0]
checksum ^= b
if state == "Start":
if b == 0x1B:
state = "GetSeq"
checksum = 0x1B
elif state == "GetSeq":
state = "MsgSize1"
elif state == "MsgSize1":
msg_size = b << 8
state = "MsgSize2"
elif state == "MsgSize2":
msg_size |= b
state = "Token"
elif state == "Token":
if b != 0x0E:
state = "Start"
else:
state = "Data"
data = []
elif state == "Data":
data.append(b)
if len(data) == msg_size:
state = "Checksum"
elif state == "Checksum":
if checksum != 0:
state = "Start"
else:
return data
def portList():
ret = []
import _winreg # type: ignore
key=_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,"HARDWARE\\DEVICEMAP\\SERIALCOMM") #@UndefinedVariable
i=0
while True:
try:
values = _winreg.EnumValue(key, i) #@UndefinedVariable
except:
return ret
if "USBSER" in values[0]:
ret.append(values[1])
i+=1
return ret
def runProgrammer(port, filename):
""" Run an STK500v2 program on serial port 'port' and write 'filename' into flash. """
programmer = Stk500v2()
programmer.connect(port = port)
programmer.programChip(intelHex.readHex(filename))
programmer.close()
def main():
""" Entry point to call the stk500v2 programmer from the commandline. """
import threading
if sys.argv[1] == "AUTO":
Logger.log("d", "portList(): ", repr(portList()))
for port in portList():
threading.Thread(target=runProgrammer, args=(port,sys.argv[2])).start()
time.sleep(5)
else:
programmer = Stk500v2()
programmer.connect(port = sys.argv[1])
programmer.programChip(intelHex.readHex(sys.argv[2]))
sys.exit(1)
if __name__ == "__main__":
main()
|
agpl-3.0
|
hoatle/odoo
|
addons/l10n_co/wizard/__init__.py
|
313
|
1165
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) David Arnold (devCO).
# Author David Arnold (devCO), dar@devco.co
# Co-Authors Juan Pablo Aries (devCO), jpa@devco.co
# Hector Ivan Valencia Muñoz (TIX SAS)
# Nhomar Hernandez (Vauxoo)
# Humberto Ochoa (Vauxoo)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
|
agpl-3.0
|
crawford/kubernetes
|
cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py
|
62
|
29611
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import shutil
from shlex import split
from subprocess import check_call, check_output
from subprocess import CalledProcessError
from socket import gethostname
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import set_state, remove_state, is_state
from charms.reactive import when, when_any, when_not
from charms.kubernetes.common import get_version
from charms.kubernetes.flagmanager import FlagManager
from charms.reactive.helpers import data_changed, any_file_changed
from charms.templating.jinja2 import render
from charmhelpers.core import hookenv, unitdata
from charmhelpers.core.host import service_stop, service_restart
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
nrpe.Check.shortname_re = '[\.A-Za-z0-9-_]+$'
kubeconfig_path = '/root/cdk/kubeconfig'
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
db = unitdata.kv()
@hook('upgrade-charm')
def upgrade_charm():
# Trigger removal of PPA docker installation if it was previously set.
set_state('config.changed.install_from_upstream')
hookenv.atexit(remove_state, 'config.changed.install_from_upstream')
cleanup_pre_snap_services()
check_resources_for_upgrade_needed()
# Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
# since they can differ between k8s versions
remove_state('kubernetes-worker.gpu.enabled')
kubelet_opts = FlagManager('kubelet')
kubelet_opts.destroy('feature-gates')
kubelet_opts.destroy('experimental-nvidia-gpus')
remove_state('kubernetes-worker.cni-plugins.installed')
remove_state('kubernetes-worker.config.created')
remove_state('kubernetes-worker.ingress.available')
set_state('kubernetes-worker.restart-needed')
def check_resources_for_upgrade_needed():
hookenv.status_set('maintenance', 'Checking resources')
resources = ['kubectl', 'kubelet', 'kube-proxy']
paths = [hookenv.resource_get(resource) for resource in resources]
if any_file_changed(paths):
set_upgrade_needed()
def set_upgrade_needed():
set_state('kubernetes-worker.snaps.upgrade-needed')
config = hookenv.config()
previous_channel = config.previous('channel')
require_manual = config.get('require-manual-upgrade')
if previous_channel is None or not require_manual:
set_state('kubernetes-worker.snaps.upgrade-specified')
def cleanup_pre_snap_services():
# remove old states
remove_state('kubernetes-worker.components.installed')
# disable old services
services = ['kubelet', 'kube-proxy']
for service in services:
hookenv.log('Stopping {0} service.'.format(service))
service_stop(service)
# cleanup old files
files = [
"/lib/systemd/system/kubelet.service",
"/lib/systemd/system/kube-proxy.service",
"/etc/default/kube-default",
"/etc/default/kubelet",
"/etc/default/kube-proxy",
"/srv/kubernetes",
"/usr/local/bin/kubectl",
"/usr/local/bin/kubelet",
"/usr/local/bin/kube-proxy",
"/etc/kubernetes"
]
for file in files:
if os.path.isdir(file):
hookenv.log("Removing directory: " + file)
shutil.rmtree(file)
elif os.path.isfile(file):
hookenv.log("Removing file: " + file)
os.remove(file)
# cleanup old flagmanagers
FlagManager('kubelet').destroy_all()
FlagManager('kube-proxy').destroy_all()
@when('config.changed.channel')
def channel_changed():
set_upgrade_needed()
@when('kubernetes-worker.snaps.upgrade-needed')
@when_not('kubernetes-worker.snaps.upgrade-specified')
def upgrade_needed_status():
msg = 'Needs manual upgrade, run the upgrade action'
hookenv.status_set('blocked', msg)
@when('kubernetes-worker.snaps.upgrade-specified')
def install_snaps():
check_resources_for_upgrade_needed()
channel = hookenv.config('channel')
hookenv.status_set('maintenance', 'Installing kubectl snap')
snap.install('kubectl', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kubelet snap')
snap.install('kubelet', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kube-proxy snap')
snap.install('kube-proxy', channel=channel, classic=True)
set_state('kubernetes-worker.snaps.installed')
remove_state('kubernetes-worker.snaps.upgrade-needed')
remove_state('kubernetes-worker.snaps.upgrade-specified')
@hook('stop')
def shutdown():
''' When this unit is destroyed:
- delete the current node
- stop the kubelet service
- stop the kube-proxy service
- remove the 'kubernetes-worker.cni-plugins.installed' state
'''
if os.path.isfile(kubeconfig_path):
kubectl('delete', 'node', gethostname())
service_stop('kubelet')
service_stop('kube-proxy')
remove_state('kubernetes-worker.cni-plugins.installed')
@when('docker.available')
@when_not('kubernetes-worker.cni-plugins.installed')
def install_cni_plugins():
''' Unpack the cni-plugins resource '''
charm_dir = os.getenv('CHARM_DIR')
# Get the resource via resource_get
try:
archive = hookenv.resource_get('cni')
except Exception:
message = 'Error fetching the cni resource.'
hookenv.log(message)
hookenv.status_set('blocked', message)
return
if not archive:
hookenv.log('Missing cni resource.')
hookenv.status_set('blocked', 'Missing cni resource.')
return
    # Handle null resource publication; we check if filesize < 1mb
filesize = os.stat(archive).st_size
if filesize < 1000000:
hookenv.status_set('blocked', 'Incomplete cni resource.')
return
hookenv.status_set('maintenance', 'Unpacking cni resource.')
unpack_path = '{}/files/cni'.format(charm_dir)
os.makedirs(unpack_path, exist_ok=True)
cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
hookenv.log(cmd)
check_call(cmd)
apps = [
{'name': 'loopback', 'path': '/opt/cni/bin'}
]
for app in apps:
unpacked = '{}/{}'.format(unpack_path, app['name'])
app_path = os.path.join(app['path'], app['name'])
install = ['install', '-v', '-D', unpacked, app_path]
hookenv.log(install)
check_call(install)
# Used by the "registry" action. The action is run on a single worker, but
# the registry pod can end up on any worker, so we need this directory on
# all the workers.
os.makedirs('/srv/registry', exist_ok=True)
set_state('kubernetes-worker.cni-plugins.installed')
@when('kubernetes-worker.snaps.installed')
def set_app_version():
''' Declare the application version to juju '''
cmd = ['kubelet', '--version']
version = check_output(cmd)
hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('kubernetes-worker.snaps.installed')
@when_not('kube-control.dns.available')
def notify_user_transient_status():
    ''' Notify the user that we are in a transient state and the application
    is still converging, potentially waiting on a remote resource or sitting
    in a detached wait loop. '''
    # During deployment the worker has to start kubelet without cluster dns
    # configured, since this may be the first unit online in a service pool
    # waiting to self-host the dns pod. It will later configure itself to
    # query the dns service declared in the kube-system namespace.
hookenv.status_set('waiting', 'Waiting for cluster DNS.')
@when('kubernetes-worker.snaps.installed',
'kube-control.dns.available')
@when_not('kubernetes-worker.snaps.upgrade-needed')
def charm_status(kube_control):
'''Update the status message with the current status of kubelet.'''
update_kubelet_status()
def update_kubelet_status():
''' There are different states that the kubelet can be in, where we are
waiting for dns, waiting for cluster turnup, or ready to serve
applications.'''
if (_systemctl_is_active('snap.kubelet.daemon')):
hookenv.status_set('active', 'Kubernetes worker running.')
# if kubelet is not running, we're waiting on something else to converge
elif (not _systemctl_is_active('snap.kubelet.daemon')):
hookenv.status_set('waiting', 'Waiting for kubelet to start.')
@when('certificates.available')
def send_data(tls):
'''Send the data that is required to create a server certificate for
this server.'''
# Use the public ip of this unit as the Common Name for the certificate.
common_name = hookenv.unit_public_ip()
# Create SANs that the tls layer will add to the server cert.
sans = [
hookenv.unit_public_ip(),
hookenv.unit_private_ip(),
gethostname()
]
# Create a path safe name by removing path characters from the unit name.
certificate_name = hookenv.local_unit().replace('/', '_')
# Request a server cert with this information.
tls.request_server_cert(common_name, sans, certificate_name)
@when('kube-api-endpoint.available', 'kube-control.dns.available',
'cni.available')
def watch_for_changes(kube_api, kube_control, cni):
''' Watch for configuration changes and signal if we need to restart the
worker services '''
servers = get_kube_api_servers(kube_api)
dns = kube_control.get_dns()
cluster_cidr = cni.get_config()['cidr']
if (data_changed('kube-api-servers', servers) or
data_changed('kube-dns', dns) or
data_changed('cluster-cidr', cluster_cidr)):
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available',
'tls_client.ca.saved', 'tls_client.client.certificate.saved',
'tls_client.client.key.saved', 'tls_client.server.certificate.saved',
'tls_client.server.key.saved', 'kube-control.dns.available',
'cni.available', 'kubernetes-worker.restart-needed')
def start_worker(kube_api, kube_control, cni):
''' Start kubelet using the provided API and DNS info.'''
servers = get_kube_api_servers(kube_api)
# Note that the DNS server doesn't necessarily exist at this point. We know
# what its IP will eventually be, though, so we can go ahead and configure
# kubelet with that info. This ensures that early pods are configured with
# the correct DNS even though the server isn't ready yet.
dns = kube_control.get_dns()
cluster_cidr = cni.get_config()['cidr']
if cluster_cidr is None:
hookenv.log('Waiting for cluster cidr.')
return
# set --allow-privileged flag for kubelet
set_privileged()
create_config(random.choice(servers))
configure_worker_services(servers, dns, cluster_cidr)
set_state('kubernetes-worker.config.created')
restart_unit_services()
update_kubelet_status()
remove_state('kubernetes-worker.restart-needed')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
''' Set worker configuration on the CNI relation. This lets the CNI
subordinate know that we're the worker so it can respond accordingly. '''
cni.set_config(is_master=False, kubeconfig_path=kubeconfig_path)
@when('config.changed.ingress')
def toggle_ingress_state():
''' Ingress is a toggled state. Remove ingress.available if set when
toggled '''
remove_state('kubernetes-worker.ingress.available')
@when('docker.sdn.configured')
def sdn_changed():
'''The Software Defined Network changed on the container so restart the
kubernetes services.'''
restart_unit_services()
update_kubelet_status()
remove_state('docker.sdn.configured')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.ingress.available')
def render_and_launch_ingress():
''' If configuration has ingress RC enabled, launch the ingress load
balancer and default http backend. Otherwise attempt deletion. '''
config = hookenv.config()
# If ingress is enabled, launch the ingress controller
if config.get('ingress'):
launch_default_ingress_controller()
else:
hookenv.log('Deleting the http backend and ingress.')
kubectl_manifest('delete',
'/root/cdk/addons/default-http-backend.yaml')
kubectl_manifest('delete',
'/root/cdk/addons/ingress-replication-controller.yaml') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
@when('kubernetes-worker.ingress.available')
def scale_ingress_controller():
''' Scale the number of ingress controller replicas to match the number of
nodes. '''
try:
output = kubectl('get', 'nodes', '-o', 'name')
count = len(output.splitlines())
kubectl('scale', '--replicas=%d' % count, 'rc/nginx-ingress-controller') # noqa
except CalledProcessError:
hookenv.log('Failed to scale ingress controllers. Will attempt again next update.') # noqa
@when('config.changed.labels', 'kubernetes-worker.config.created')
def apply_node_labels():
''' Parse the labels configuration option and apply the labels to the node.
'''
# scrub and try to format an array from the configuration option
config = hookenv.config()
user_labels = _parse_labels(config.get('labels'))
# For diffing sake, iterate the previous label set
if config.previous('labels'):
previous_labels = _parse_labels(config.previous('labels'))
hookenv.log('previous labels: {}'.format(previous_labels))
else:
# this handles first time run if there is no previous labels config
previous_labels = _parse_labels("")
# Calculate label removal
for label in previous_labels:
if label not in user_labels:
hookenv.log('Deleting node label {}'.format(label))
try:
_apply_node_label(label, delete=True)
except CalledProcessError:
hookenv.log('Error removing node label {}'.format(label))
# if the label is in user labels we do nothing here, it will get set
# during the atomic update below.
# Atomically set a label
for label in user_labels:
_apply_node_label(label)
def arch():
'''Return the package architecture as a string. Raise an exception if the
architecture is not supported by kubernetes.'''
# Get the package architecture for this system.
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode('utf-8')
return architecture
def create_config(server):
'''Create a kubernetes configuration for the worker unit.'''
# Get the options from the tls-client layer.
layer_options = layer.options('tls-client')
# Get all the paths to the tls information required for kubeconfig.
ca = layer_options.get('ca_certificate_path')
key = layer_options.get('client_key_path')
cert = layer_options.get('client_certificate_path')
# Create kubernetes configuration in the default location for ubuntu.
create_kubeconfig('/home/ubuntu/.kube/config', server, ca, key, cert,
user='ubuntu')
# Make the config dir readable by the ubuntu users so juju scp works.
cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube']
check_call(cmd)
# Create kubernetes configuration in the default location for root.
create_kubeconfig('/root/.kube/config', server, ca, key, cert,
user='root')
# Create kubernetes configuration for kubelet, and kube-proxy services.
create_kubeconfig(kubeconfig_path, server, ca, key, cert,
user='kubelet')
def configure_worker_services(api_servers, dns, cluster_cidr):
''' Add remaining flags for the worker services and configure snaps to use
them '''
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
server_cert_path = layer_options.get('server_certificate_path')
server_key_path = layer_options.get('server_key_path')
kubelet_opts = FlagManager('kubelet')
kubelet_opts.add('require-kubeconfig', 'true')
kubelet_opts.add('kubeconfig', kubeconfig_path)
kubelet_opts.add('network-plugin', 'cni')
kubelet_opts.add('logtostderr', 'true')
kubelet_opts.add('v', '0')
kubelet_opts.add('address', '0.0.0.0')
kubelet_opts.add('port', '10250')
kubelet_opts.add('cluster-dns', dns['sdn-ip'])
kubelet_opts.add('cluster-domain', dns['domain'])
kubelet_opts.add('anonymous-auth', 'false')
kubelet_opts.add('client-ca-file', ca_cert_path)
kubelet_opts.add('tls-cert-file', server_cert_path)
kubelet_opts.add('tls-private-key-file', server_key_path)
kube_proxy_opts = FlagManager('kube-proxy')
kube_proxy_opts.add('cluster-cidr', cluster_cidr)
kube_proxy_opts.add('kubeconfig', kubeconfig_path)
kube_proxy_opts.add('logtostderr', 'true')
kube_proxy_opts.add('v', '0')
kube_proxy_opts.add('master', random.choice(api_servers), strict=True)
cmd = ['snap', 'set', 'kubelet'] + kubelet_opts.to_s().split(' ')
check_call(cmd)
cmd = ['snap', 'set', 'kube-proxy'] + kube_proxy_opts.to_s().split(' ')
check_call(cmd)
def create_kubeconfig(kubeconfig, server, ca, key, certificate, user='ubuntu',
context='juju-context', cluster='juju-cluster'):
'''Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster.'''
# Create the config file with the address of the master server.
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
'--server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
# Create the credentials using the client flags.
cmd = 'kubectl config --kubeconfig={0} set-credentials {1} ' \
'--client-key={2} --client-certificate={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, user, key, certificate)))
# Create a default context with the cluster.
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context)))
def launch_default_ingress_controller():
''' Launch the Kubernetes ingress controller & default backend (404) '''
context = {}
context['arch'] = arch()
addon_path = '/root/cdk/addons/{}'
# Render the default http backend (404) replicationcontroller manifest
manifest = addon_path.format('default-http-backend.yaml')
render('default-http-backend.yaml', manifest, context)
hookenv.log('Creating the default http backend.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create default-http-backend. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
# Render the ingress replication controller manifest
manifest = addon_path.format('ingress-replication-controller.yaml')
render('ingress-replication-controller.yaml', manifest, context)
hookenv.log('Creating the ingress replication controller.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create ingress controller. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
set_state('kubernetes-worker.ingress.available')
hookenv.open_port(80)
hookenv.open_port(443)
def restart_unit_services():
'''Restart worker services.'''
hookenv.log('Restarting kubelet and kube-proxy.')
services = ['kube-proxy', 'kubelet']
for service in services:
service_restart('snap.%s.daemon' % service)
def get_kube_api_servers(kube_api):
'''Return the kubernetes api server address and port for this
relationship.'''
hosts = []
# Iterate over every service from the relation object.
for service in kube_api.services():
for unit in service['hosts']:
hosts.append('https://{0}:{1}'.format(unit['hostname'],
unit['port']))
return hosts
def kubectl(*args):
''' Run a kubectl cli command with a config file. Returns stdout and throws
an error if the command fails. '''
command = ['kubectl', '--kubeconfig=' + kubeconfig_path] + list(args)
hookenv.log('Executing {}'.format(command))
return check_output(command)
def kubectl_success(*args):
    ''' Runs kubectl with the given args. Returns True if successful, False if
not. '''
try:
kubectl(*args)
return True
except CalledProcessError:
return False
def kubectl_manifest(operation, manifest):
''' Wrap the kubectl creation command when using filepath resources
:param operation - one of get, create, delete, replace
:param manifest - filepath to the manifest
'''
# Deletions are a special case
if operation == 'delete':
# Ensure we immediately remove requested resources with --now
return kubectl_success(operation, '-f', manifest, '--now')
else:
# Guard against an error re-creating the same manifest multiple times
if operation == 'create':
            # If we already have the definition, it's probably safe to assume
            # it was created successfully.
if kubectl_success('get', '-f', manifest):
hookenv.log('Skipping definition for {}'.format(manifest))
return True
# Execute the requested command that did not match any of the special
# cases above
return kubectl_success(operation, '-f', manifest)
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
set_state('nrpe-external-master.initial-config')
update_nrpe_config(nagios)
@when('kubernetes-worker.config.created')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
hostname = nrpe.get_nagios_hostname()
current_unit = nrpe.get_nagios_unit_name()
nrpe_setup = nrpe.NRPE(hostname=hostname)
nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
remove_state('nrpe-external-master.initial-config')
# List of systemd services for which the checks will be removed
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
# The current nrpe-external-master interface doesn't handle a lot of logic,
# use the charm-helpers code for now.
hostname = nrpe.get_nagios_hostname()
nrpe_setup = nrpe.NRPE(hostname=hostname)
for service in services:
nrpe_setup.remove_check(shortname=service)
def set_privileged():
"""Update the allow-privileged flag for kubelet.
"""
privileged = hookenv.config('allow-privileged')
if privileged == 'auto':
gpu_enabled = is_state('kubernetes-worker.gpu.enabled')
privileged = 'true' if gpu_enabled else 'false'
flag = 'allow-privileged'
hookenv.log('Setting {}={}'.format(flag, privileged))
kubelet_opts = FlagManager('kubelet')
kubelet_opts.add(flag, privileged)
if privileged == 'true':
set_state('kubernetes-worker.privileged')
else:
remove_state('kubernetes-worker.privileged')
@when('config.changed.allow-privileged')
@when('kubernetes-worker.config.created')
def on_config_allow_privileged_change():
"""React to changed 'allow-privileged' config value.
"""
set_state('kubernetes-worker.restart-needed')
remove_state('config.changed.allow-privileged')
@when('cuda.installed')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.gpu.enabled')
def enable_gpu():
"""Enable GPU usage on this node.
"""
config = hookenv.config()
if config['allow-privileged'] == "false":
hookenv.status_set(
'active',
'GPUs available. Set allow-privileged="auto" to enable.'
)
return
hookenv.log('Enabling gpu mode')
try:
# Not sure why this is necessary, but if you don't run this, k8s will
# think that the node has 0 gpus (as shown by the output of
        # `kubectl get nodes -o yaml`).
check_call(['nvidia-smi'])
except CalledProcessError as cpe:
hookenv.log('Unable to communicate with the NVIDIA driver.')
hookenv.log(cpe)
return
kubelet_opts = FlagManager('kubelet')
if get_version('kubelet') < (1, 6):
hookenv.log('Adding --experimental-nvidia-gpus=1 to kubelet')
kubelet_opts.add('experimental-nvidia-gpus', '1')
else:
hookenv.log('Adding --feature-gates=Accelerators=true to kubelet')
kubelet_opts.add('feature-gates', 'Accelerators=true')
# Apply node labels
_apply_node_label('gpu=true', overwrite=True)
_apply_node_label('cuda=true', overwrite=True)
set_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when_not('kubernetes-worker.privileged')
@when_not('kubernetes-worker.restart-needed')
def disable_gpu():
"""Disable GPU usage on this node.
This handler fires when we're running in gpu mode, and then the operator
sets allow-privileged="false". Since we can no longer run privileged
containers, we need to disable gpu mode.
"""
hookenv.log('Disabling gpu mode')
kubelet_opts = FlagManager('kubelet')
if get_version('kubelet') < (1, 6):
kubelet_opts.destroy('experimental-nvidia-gpus')
else:
kubelet_opts.remove('feature-gates', 'Accelerators=true')
# Remove node labels
_apply_node_label('gpu', delete=True)
_apply_node_label('cuda', delete=True)
remove_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_enabled(kube_control):
"""Notify kubernetes-master that we're gpu-enabled.
"""
kube_control.set_gpu(True)
@when_not('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_not_enabled(kube_control):
"""Notify kubernetes-master that we're not gpu-enabled.
"""
kube_control.set_gpu(False)
@when_not('kube-control.connected')
def missing_kube_control():
"""Inform the operator they need to add the kube-control relation.
    If deploying via bundle this won't happen, but if an operator is upgrading
    a charm in a deployment that pre-dates the kube-control relation, it'll be
missing.
"""
hookenv.status_set(
'blocked',
'Relate {}:kube-control kubernetes-master:kube-control'.format(
hookenv.service_name()))
def _systemctl_is_active(application):
''' Poll systemctl to determine if the application is running '''
cmd = ['systemctl', 'is-active', application]
try:
raw = check_output(cmd)
return b'active' in raw
except Exception:
return False
def _apply_node_label(label, delete=False, overwrite=False):
''' Invoke kubectl to apply node label changes '''
hostname = gethostname()
# TODO: Make this part of the kubectl calls instead of a special string
cmd_base = 'kubectl --kubeconfig={0} label node {1} {2}'
if delete is True:
label_key = label.split('=')[0]
cmd = cmd_base.format(kubeconfig_path, hostname, label_key)
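        # Appending '-' to the label key tells kubectl to remove that label.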
cmd = cmd + '-'
else:
cmd = cmd_base.format(kubeconfig_path, hostname, label)
if overwrite:
cmd = '{} --overwrite'.format(cmd)
check_call(split(cmd))
def _parse_labels(labels):
''' Parse labels from a key=value string separated by space.'''
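    # e.g. _parse_labels('env=prod tier=web bogus') -> ['env=prod', 'tier=web']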
label_array = labels.split(' ')
sanitized_labels = []
for item in label_array:
if '=' in item:
sanitized_labels.append(item)
else:
hookenv.log('Skipping malformed option: {}'.format(item))
return sanitized_labels
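# Hedged example of _parse_labels() behaviour (the input is illustrative):
#
#   _parse_labels('gpu=true cuda=true bogus')
#   # -> ['gpu=true', 'cuda=true']   ('bogus' lacks '=', is logged and skipped)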
|
apache-2.0
|
kxepal/couchdb-python
|
couchdb/tools/load.py
|
6
|
3225
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Christopher Lenz
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""Utility for loading a snapshot of a CouchDB database from a multipart MIME
file.
"""
from base64 import b64encode
from optparse import OptionParser
import sys
from couchdb import __version__ as VERSION
from couchdb import json
from couchdb.client import Database
from couchdb.multipart import read_multipart
def load_db(fileobj, dburl, username=None, password=None, ignore_errors=False):
db = Database(dburl)
if username is not None and password is not None:
db.resource.credentials = (username, password)
for headers, is_multipart, payload in read_multipart(fileobj):
docid = headers['content-id']
if is_multipart: # doc has attachments
for headers, _, payload in payload:
if 'content-id' not in headers:
doc = json.decode(payload)
doc['_attachments'] = {}
else:
doc['_attachments'][headers['content-id']] = {
'data': b64encode(payload),
'content_type': headers['content-type'],
'length': len(payload)
}
else: # no attachments, just the JSON
doc = json.decode(payload)
del doc['_rev']
        print >> sys.stderr, 'Loading document %r' % docid
try:
db[docid] = doc
except Exception as e:
if not ignore_errors:
raise
            print >> sys.stderr, 'Error: %s' % e
def main():
parser = OptionParser(usage='%prog [options] dburl', version=VERSION)
parser.add_option('--input', action='store', dest='input', metavar='FILE',
help='the name of the file to read from')
parser.add_option('--ignore-errors', action='store_true',
dest='ignore_errors',
help='whether to ignore errors in document creation '
'and continue with the remaining documents')
parser.add_option('--json-module', action='store', dest='json_module',
help='the JSON module to use ("simplejson", "cjson", '
'or "json" are supported)')
parser.add_option('-u', '--username', action='store', dest='username',
help='the username to use for authentication')
parser.add_option('-p', '--password', action='store', dest='password',
help='the password to use for authentication')
parser.set_defaults(input='-')
options, args = parser.parse_args()
if len(args) != 1:
return parser.error('incorrect number of arguments')
if options.input != '-':
fileobj = open(options.input, 'rb')
else:
fileobj = sys.stdin
if options.json_module:
json.use(options.json_module)
load_db(fileobj, args[0], username=options.username,
password=options.password, ignore_errors=options.ignore_errors)
if __name__ == '__main__':
main()
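# Hedged usage sketch (the URL and filename are illustrative): a dump produced
# by the companion dump tool can be restored from the command line, e.g.
#
#   python -m couchdb.tools.load --input dump.mime http://localhost:5984/mydb
#
# or programmatically via load_db():
#
#   with open('dump.mime', 'rb') as fileobj:
#       load_db(fileobj, 'http://localhost:5984/mydb', ignore_errors=True)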
|
bsd-3-clause
|
westinedu/wrgroups
|
django/middleware/http.py
|
154
|
1696
|
from django.core.exceptions import MiddlewareNotUsed
from django.utils.http import http_date, parse_http_date_safe
class ConditionalGetMiddleware(object):
"""
    Handles conditional GET operations. If the response has an ETag or
    Last-Modified header and the request has If-None-Match or
    If-Modified-Since, the response is replaced by an HttpNotModified.
    Also sets the Date and Content-Length response headers.
"""
def process_response(self, request, response):
response['Date'] = http_date()
if not response.has_header('Content-Length'):
response['Content-Length'] = str(len(response.content))
if response.has_header('ETag'):
if_none_match = request.META.get('HTTP_IF_NONE_MATCH')
if if_none_match == response['ETag']:
# Setting the status is enough here. The response handling path
# automatically removes content for this status code (in
# http.conditional_content_removal()).
response.status_code = 304
if response.has_header('Last-Modified'):
if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE')
if if_modified_since is not None:
if_modified_since = parse_http_date_safe(if_modified_since)
if if_modified_since is not None:
last_modified = parse_http_date_safe(response['Last-Modified'])
if last_modified is not None and last_modified <= if_modified_since:
# Setting the status code is enough here (same reasons as
# above).
response.status_code = 304
return response
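# Hedged sketch of the Last-Modified comparison above (assumes Django is
# importable; the timestamp is illustrative):
#
#   from django.utils.http import http_date, parse_http_date_safe
#   last_modified = 1300000000                               # resource mtime (epoch)
#   if_modified_since = parse_http_date_safe(http_date(last_modified))
#   assert last_modified <= if_modified_since                # -> status_code = 304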
|
bsd-3-clause
|
da1z/intellij-community
|
plugins/hg4idea/testData/bin/mercurial/lock.py
|
92
|
4921
|
# lock.py - simple advisory locking scheme for mercurial
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import util, error
import errno, os, socket, time
import warnings
class lock(object):
'''An advisory lock held by one process to control access to a set
of files. Non-cooperating processes or incorrectly written scripts
can ignore Mercurial's locking scheme and stomp all over the
repository, so don't do that.
Typically used via localrepository.lock() to lock the repository
store (.hg/store/) or localrepository.wlock() to lock everything
else under .hg/.'''
# lock is symlink on platforms that support it, file on others.
# symlink is used because create of directory entry and contents
# are atomic even over nfs.
# old-style lock: symlink to pid
# new-style lock: symlink to hostname:pid
_host = None
def __init__(self, file, timeout=-1, releasefn=None, desc=None):
self.f = file
self.held = 0
self.timeout = timeout
self.releasefn = releasefn
self.desc = desc
self.postrelease = []
self.pid = os.getpid()
self.lock()
def __del__(self):
if self.held:
warnings.warn("use lock.release instead of del lock",
category=DeprecationWarning,
stacklevel=2)
# ensure the lock will be removed
# even if recursive locking did occur
self.held = 1
self.release()
def lock(self):
timeout = self.timeout
while True:
try:
self.trylock()
return 1
except error.LockHeld, inst:
if timeout != 0:
time.sleep(1)
if timeout > 0:
timeout -= 1
continue
raise error.LockHeld(errno.ETIMEDOUT, inst.filename, self.desc,
inst.locker)
def trylock(self):
if self.held:
self.held += 1
return
if lock._host is None:
lock._host = socket.gethostname()
lockname = '%s:%s' % (lock._host, self.pid)
while not self.held:
try:
util.makelock(lockname, self.f)
self.held = 1
except (OSError, IOError), why:
if why.errno == errno.EEXIST:
locker = self.testlock()
if locker is not None:
raise error.LockHeld(errno.EAGAIN, self.f, self.desc,
locker)
else:
raise error.LockUnavailable(why.errno, why.strerror,
why.filename, self.desc)
def testlock(self):
"""return id of locker if lock is valid, else None.
If old-style lock, we cannot tell what machine locker is on.
        With new-style lock, if locker is on this machine, we can
see if locker is alive. If locker is on this machine but
not alive, we can safely break lock.
The lock file is only deleted when None is returned.
"""
try:
locker = util.readlock(self.f)
except OSError, why:
if why.errno == errno.ENOENT:
return None
raise
try:
host, pid = locker.split(":", 1)
except ValueError:
return locker
if host != lock._host:
return locker
try:
pid = int(pid)
except ValueError:
return locker
if util.testpid(pid):
return locker
# if locker dead, break lock. must do this with another lock
# held, or can race and break valid lock.
try:
l = lock(self.f + '.break', timeout=0)
util.unlink(self.f)
l.release()
except error.LockError:
return locker
def release(self):
"""release the lock and execute callback function if any
If the lock has been acquired multiple times, the actual release is
delayed to the last release call."""
if self.held > 1:
self.held -= 1
elif self.held == 1:
self.held = 0
if os.getpid() != self.pid:
# we forked, and are not the parent
return
if self.releasefn:
self.releasefn()
try:
util.unlink(self.f)
except OSError:
pass
for callback in self.postrelease:
callback()
def release(*locks):
for lock in locks:
if lock is not None:
lock.release()
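# Hedged sketch of the lock-file value exchanged by trylock()/testlock()
# (hostname and pid are illustrative):
#
#   import os, socket
#   lockname = '%s:%s' % (socket.gethostname(), os.getpid())   # 'myhost:1234'
#   host, pid = lockname.split(':', 1)  # testlock() parses it back the same way
#
# An old-style lock stores only a pid; testlock() therefore treats any value
# it cannot split as a locker on an unknown machine and leaves it alone.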
|
apache-2.0
|
ronfung/incubator-airflow
|
tests/contrib/hooks/test_gcp_dataflow_hook.py
|
34
|
1812
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from airflow.contrib.hooks.gcp_dataflow_hook import DataFlowHook
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
TASK_ID = 'test-python-dataflow'
PY_FILE = 'apache_beam.examples.wordcount'
PY_OPTIONS = ['-m']
OPTIONS = {
'project': 'test',
'staging_location': 'gs://test/staging'
}
BASE_STRING = 'airflow.contrib.hooks.gcp_api_base_hook.{}'
DATAFLOW_STRING = 'airflow.contrib.hooks.gcp_dataflow_hook.{}'
def mock_init(self, gcp_conn_id, delegate_to=None):
pass
class DataFlowHookTest(unittest.TestCase):
def setUp(self):
with mock.patch(BASE_STRING.format('GoogleCloudBaseHook.__init__'),
new=mock_init):
self.dataflow_hook = DataFlowHook(gcp_conn_id='test')
@mock.patch(DATAFLOW_STRING.format('DataFlowHook._start_dataflow'))
def test_start_python_dataflow(self, internal_dataflow_mock):
self.dataflow_hook.start_python_dataflow(
task_id=TASK_ID, variables=OPTIONS,
dataflow=PY_FILE, py_options=PY_OPTIONS)
internal_dataflow_mock.assert_called_once_with(
TASK_ID, OPTIONS, PY_FILE, mock.ANY, ['python'] + PY_OPTIONS)
|
apache-2.0
|
vwvww/servo
|
tests/wpt/web-platform-tests/tools/pywebsocket/src/test/testdata/handlers/sub/plain_wsh.py
|
499
|
1789
|
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def web_socket_do_extra_handshake(request):
pass
def web_socket_transfer_data(request):
request.connection.write('sub/plain_wsh.py is called for %s, %s' %
(request.ws_resource, request.ws_protocol))
# vi:sts=4 sw=4 et
|
mpl-2.0
|
rh-s/heat
|
contrib/rackspace/rackspace/tests/test_cloudnetworks.py
|
3
|
5895
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
import six
from heat.common import exception
from heat.common import template_format
from heat.engine import resource
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
from ..resources import cloudnetworks # noqa
try:
from pyrax.exceptions import NotFound # noqa
except ImportError:
from ..resources.cloudnetworks import NotFound # noqa
class FakeNetwork(object):
def __init__(self, client, label="test_network", cidr="172.16.0.0/24"):
self.client = client
self.label = label
self.cidr = cidr
self.id = str(uuid.uuid4())
def _is_deleted(self):
return (self.client and
self.id not in [nw.id for nw in self.client.networks])
def get(self):
if self._is_deleted():
raise NotFound("I am deleted")
def delete(self):
self.client._delete(self)
class FakeClient(object):
def __init__(self):
self.networks = []
def create(self, label=None, cidr=None):
nw = FakeNetwork(self, label=label, cidr=cidr)
self.networks.append(nw)
return nw
def get(self, nwid):
for nw in self.networks:
if nw.id == nwid:
return nw
raise NotFound("No network %s" % nwid)
def _delete(self, nw):
try:
self.networks.remove(nw)
except ValueError:
pass
@mock.patch.object(cloudnetworks.CloudNetwork, "cloud_networks")
class CloudNetworkTest(common.HeatTestCase):
_template = template_format.parse("""
heat_template_version: 2013-05-23
description: Test stack for Rackspace Cloud Networks
resources:
cnw:
type: Rackspace::Cloud::Network
properties:
label: test_network
cidr: 172.16.0.0/24
""")
def setUp(self):
super(CloudNetworkTest, self).setUp()
resource._register_class("Rackspace::Cloud::Network",
cloudnetworks.CloudNetwork)
def _parse_stack(self):
self.stack = utils.parse_stack(self._template,
stack_name=self.__class__.__name__)
def _setup_stack(self, mock_client, *args):
self.fake_cnw = FakeClient(*args)
mock_client.return_value = self.fake_cnw
self._parse_stack()
self.stack.create()
self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
self.stack.state)
res = self.stack['cnw']
self.assertEqual((res.CREATE, res.COMPLETE), res.state)
def test_attributes(self, mock_client):
self._setup_stack(mock_client)
res = self.stack['cnw']
template_resource = self._template['resources']['cnw']
expect_label = template_resource['properties']['label']
expect_cidr = template_resource['properties']['cidr']
self.assertEqual(expect_label, res.FnGetAtt('label'))
self.assertEqual(expect_cidr, res.FnGetAtt('cidr'))
def test_create_bad_cidr(self, mock_client):
prop = self._template['resources']['cnw']['properties']
prop['cidr'] = "bad cidr"
self._parse_stack()
exc = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn("Invalid net cidr", six.text_type(exc))
# reset property
prop['cidr'] = "172.16.0.0/24"
def test_check(self, mock_client):
self._setup_stack(mock_client)
res = self.stack['cnw']
scheduler.TaskRunner(res.check)()
self.assertEqual((res.CHECK, res.COMPLETE), res.state)
self.fake_cnw.networks = []
exc = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(res.check))
self.assertEqual((res.CHECK, res.FAILED), res.state)
self.assertIn('No network', str(exc))
def test_delete(self, mock_client):
self._setup_stack(mock_client)
res = self.stack['cnw']
res_id = res.FnGetRefId()
scheduler.TaskRunner(res.delete)()
self.assertEqual((res.DELETE, res.COMPLETE), res.state)
exc = self.assertRaises(NotFound, self.fake_cnw.get, res_id)
self.assertIn(res_id, six.text_type(exc))
def test_delete_in_use(self, mock_client):
self._setup_stack(mock_client)
res = self.stack['cnw']
fake_network = res.network()
fake_network.delete = mock.Mock()
fake_network.delete.side_effect = [cloudnetworks.NetworkInUse(), True]
fake_network.get = mock.Mock(side_effect=cloudnetworks.NotFound())
scheduler.TaskRunner(res.delete)()
self.assertEqual((res.DELETE, res.COMPLETE), res.state)
def test_delete_not_complete(self, mock_client):
self._setup_stack(mock_client)
res = self.stack['cnw']
fake_network = res.network()
fake_network.get = mock.Mock()
task = res.handle_delete()
self.assertFalse(res.check_delete_complete(task))
def test_delete_not_found(self, mock_client):
self._setup_stack(mock_client)
self.fake_cnw.networks = []
res = self.stack['cnw']
scheduler.TaskRunner(res.delete)()
self.assertEqual((res.DELETE, res.COMPLETE), res.state)
|
apache-2.0
|
calfonso/ansible
|
lib/ansible/plugins/action/bigip.py
|
16
|
6756
|
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.common.utils import load_provider
from ansible.plugins.action.normal import ActionModule as _ActionModule
try:
from library.module_utils.network.f5.common import f5_provider_spec
except ImportError:
from ansible.module_utils.network.f5.common import f5_provider_spec
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
        del tmp  # tmp no longer has any effect
        socket_path = None
if self._play_context.connection == 'network_cli':
provider = self._task.args.get('provider', {})
if any(provider.values()):
display.warning('provider is unnecessary when using network_cli and will be ignored')
elif self._play_context.connection == 'local':
provider = load_provider(f5_provider_spec, self._task.args)
transport = provider['transport'] or 'rest'
display.vvvv('connection transport is %s' % transport, self._play_context.remote_addr)
if transport == 'cli':
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'bigip'
pc.remote_addr = provider.get('server', self._play_context.remote_addr)
pc.port = int(provider['server_port'] or self._play_context.port or 22)
pc.remote_user = provider.get('user', self._play_context.connection_user)
pc.password = provider.get('password', self._play_context.password)
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
pc.timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
task_vars['ansible_socket'] = socket_path
else:
self._task.args['provider'] = ActionModule.rest_implementation(provider, self._play_context)
else:
return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection}
if (self._play_context.connection == 'local' and transport == 'cli') or self._play_context.connection == 'network_cli':
# make sure we are in the right cli context which should be
# enable mode and not config module
if socket_path is None:
socket_path = self._connection.socket_path
conn = Connection(socket_path)
out = conn.get_prompt()
while '(config' in to_text(out, errors='surrogate_then_replace').strip():
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
conn.send_command('exit')
out = conn.get_prompt()
result = super(ActionModule, self).run(task_vars=task_vars)
return result
@staticmethod
def rest_implementation(provider, play_context):
"""Provides a generic argument spec using Play context vars
This method will return a set of default values to use for connecting
to a remote BIG-IP in the event that you do not use either
* The environment fallback variables F5_USER, F5_PASSWORD, etc
* The "provider" spec
With this "spec" (for lack of a better name) Ansible will attempt
to fill in the provider arguments itself using the play context variables.
These variables are contained in the list of MAGIC_VARIABLE_MAPPING
found in the constants file
* https://github.com/ansible/ansible/blob/devel/lib/ansible/constants.py
        Therefore, if you use neither the provider nor the environment args,
        this method will populate the "provider" dict with the necessary F5
        connection params, taken from the following host vars,
* remote_addr=('ansible_ssh_host', 'ansible_host'),
* remote_user=('ansible_ssh_user', 'ansible_user'),
* password=('ansible_ssh_pass', 'ansible_password'),
* port=('ansible_ssh_port', 'ansible_port'),
* timeout=('ansible_ssh_timeout', 'ansible_timeout'),
* private_key_file=('ansible_ssh_private_key_file', 'ansible_private_key_file'),
For example, this may leave your inventory looking like this
bigip2 ansible_host=1.2.3.4 ansible_port=10443 ansible_user=admin ansible_password=admin
:param provider:
:param play_context:
:return:
"""
provider['transport'] = 'rest'
if provider.get('server') is None:
provider['server'] = play_context.remote_addr
if provider.get('server_port') is None:
default_port = provider['server_port'] if provider['server_port'] else 443
provider['server_port'] = int(play_context.port or default_port)
if provider.get('timeout') is None:
provider['timeout'] = C.PERSISTENT_COMMAND_TIMEOUT
if provider.get('user') is None:
provider['user'] = play_context.connection_user
if provider.get('password') is None:
provider['password'] = play_context.password
return provider
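# Hedged usage sketch (values illustrative): given the provider dict that
# load_provider() yields (every key present, unset ones None) and a play
# context carrying the inventory vars, the defaults fill in as documented:
#
#   provider = ActionModule.rest_implementation(provider, self._play_context)
#   provider['transport']    # -> 'rest'
#   provider['server']       # -> play_context.remote_addr, e.g. '1.2.3.4'
#   provider['server_port']  # -> int(play_context.port or 443)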
|
gpl-3.0
|
reaganhenke/8Bit-Campus
|
fightModule.py
|
1
|
14568
|
import pygame, random
WHITE = (255,255,255)
BLACK = (35,35,35)
BLUE = (50,50,255)
GREEN = (0,255,0)
GRAY = (200,200,200)
FPS = 60
PLAYERX = 100
PLAYERY = 250
ENEMYX = 250
ENEMYY = 100
class GuitarHero():
def __init__(self,screen):
HALF_WIDTH = screen.get_width()/2
lineDist = (HALF_WIDTH / 5)
yBuff = 30
self.letters = ["U", "I", "O", "P"]
self.fills = [WHITE,WHITE,WHITE,WHITE]
self.xpos = []
self.highY = 30
self.lowY = 300
self.speed = 2
for i in xrange(1,5):
self.xpos.append(HALF_WIDTH + lineDist*i)
self.notes = [(self.xpos[random.randrange(0,4)],self.highY),\
(self.xpos[random.randrange(0,4)],self.highY - 30),\
(self.xpos[random.randrange(0,4)],self.highY - 60),\
(self.xpos[random.randrange(0,4)],self.highY - 90),\
(self.xpos[random.randrange(0,4)],self.highY - 120),\
(self.xpos[random.randrange(0,4)],self.highY - 150)]
def update(self):
complete = False
for i in xrange(len(self.notes)):
(x,y) = self.notes[i]
if y <= self.lowY:
self.notes[i] = (x,y+self.speed)
complete = True
return complete
def draw(self,screen):
for i in xrange(4):
pygame.draw.line(screen, BLACK, (self.xpos[i], self.highY), (self.xpos[i],self.lowY))
box = pygame.Rect(0,0,30,30)
box.midtop = (self.xpos[i],self.lowY)
pygame.draw.rect(screen, self.fills[i], box)
fontObj = pygame.font.SysFont('couriernew', 25)
textSurfaceObj = fontObj.render(self.letters[i],False,BLACK)
textRectObj = textSurfaceObj.get_rect()
textRectObj.midtop = (self.xpos[i],self.lowY)
screen.blit(textSurfaceObj,textRectObj)
for (x,y) in self.notes:
if (y <= self.lowY) and (y >= self.highY):
pygame.draw.circle(screen, BLACK, (x,y), 10)
def moveToTarget((x,y),(tx,ty)):
if (x <= tx) or (y >= ty):
return (0,0)
else:
return (-3,3)
class Notes():
def __init__(self,(x,y),(targetX,targetY),number):
self.notes = [((x-5,y),True),((x,y-10),True),((x-20,y-20),True),((x,y-30),True),((x-20,y-40),True) ]
(self.originx, self.originy) = (x,y)
for i in xrange(len(self.notes)):
((xpos,ypos),status) = self.notes[i]
self.notes[i] = ((xpos,ypos),((xpos<=x) and (ypos>=y)))
self.target = (targetX,targetY)
self.damage = number
self.image = scale(pygame.image.load("note.gif"),3)
def update(self, player):
pulse = (pygame.time.get_ticks()//100) % 6
if pulse > 3:
pulse = 6 - pulse
complete = True
for i in xrange(len(self.notes)):
status = True
((x,y),display) = self.notes[i]
(dx,dy) = moveToTarget((x,y),self.target)
if (dx,dy) != (0,0):
if not display:
if ((x+dx <= self.originx) and (y+dy>=self.originy)):
display = True
self.notes[i] = ((x, y + dy), display)
if display:
if (dx,dy) == (0,0):
player.health -= self.damage
pygame.mixer.music.load("./music/hit.wav")
pygame.mixer.music.play()
status = False
self.notes[i] = ((x + dx, y + dy + pulse), status)
if (dx,dy) != (0,0):
complete = False
return complete
def drawNotes(self,screen):
for (note,status) in self.notes:
if status:
(x,y) = note
screen.blit(self.image,(x-5,y-25))
def scale(image,factor):
(width,height) = image.get_size()
return pygame.transform.scale(image, (int(float(width) * float(factor)),int(float(height) * float(factor))))
def drawDescription(screen,menuItems,(i,j)):
(SCREENWIDTH,SCREENHEIGHT) = screen.get_size()
disBufferX = 30
disHeight = 100
disBufferY = 10
disWidth = SCREENWIDTH - (2*disBufferX)
disY = SCREENHEIGHT - (disBufferY + disHeight)
displayBackground = pygame.Rect(disBufferX,disY,disWidth,disHeight)
menubar = pygame.image.load("menuBar.gif")
screen.blit(menubar,displayBackground)
# pygame.draw.rect(screen, WHITE, displayBackground)
textshift = disHeight/4
textLeft = disBufferX + 100
textRight = disBufferX +disWidth - 200
y = disY+textshift
for row in xrange(0,len(menuItems)):
x = textLeft
for col in xrange(0,len(menuItems[row])):
fontObj = pygame.font.SysFont('couriernew', 25)
textSurfaceObj = fontObj.render(menuItems[row][col],False,BLACK)
textRectObj = textSurfaceObj.get_rect()
textRectObj.midleft = (x, y)
screen.blit(textSurfaceObj,textRectObj)
if (row,col) == (i,j):
xbase = x - 15
pygame.draw.polygon(screen,BLACK,[(xbase,y - 5),\
(xbase + 5,y),\
(xbase,y + 5)])
x = textRight
y = disY+(3*textshift)
def loadItems(items):
menuList = [[],[]]
itemList = items.keys()
for i in xrange(4):
row = i//2
if i < len(itemList):
menuList[row].append(itemList[i])
return menuList
def fight(screen, player, enemy):
(SCREENWIDTH, SCREENHEIGHT) = screen.get_size()
HALF_WIDTH = int(SCREENWIDTH / 2)
HALF_HEIGHT = int(SCREENHEIGHT / 2)
mode = "ActiveMenu"
menuActions = [["Fight","Run"],["Item"]]
itemItems = loadItems(player.items)
(i,j) = (0,0)
clock = pygame.time.Clock()
done = False
enemyNotes = False
enemyNotesPos = (ENEMYX,ENEMYY)
fighting = False
pygame.mixer.music.stop()
while not done:
itemItems = loadItems(player.items)
if mode == "Item":
menuItems = itemItems
else:
menuItems = menuActions
if mode == "FightInit" :
fighting = True
guitarHero = GuitarHero(screen)
mode = "Fight"
if mode == "OppTurnInit":
(dx,dy) = enemy.noteOrigin
notes = Notes((ENEMYX+dx,ENEMYY+dy),(PLAYERX+dx,PLAYERY+dy),enemy.getAttackDamage())
enemyNotes = True
mode = "OppTurn"
if mode == "ActiveMenu" or mode == "Item" :
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
if j != 0:
pygame.mixer.music.load("./music/Select.wav")
pygame.mixer.music.play()
j = 0
elif event.key == pygame.K_RIGHT:
if len(menuItems[i]) > 1:
if j != 1:
pygame.mixer.music.load("./music/Select.wav")
pygame.mixer.music.play()
j = 1
elif event.key == pygame.K_UP:
if i != 0:
pygame.mixer.music.load("./music/Select.wav")
pygame.mixer.music.play()
i = 0
elif event.key == pygame.K_DOWN:
if len(menuItems[1]) > j:
if i != 1:
pygame.mixer.music.load("./music/Select.wav")
pygame.mixer.music.play()
i = 1
elif event.key == pygame.K_b:
if mode == "Item":
(i,j) = (0,0)
mode = "ActiveMenu"
elif event.key == pygame.K_RETURN:
if mode == "ActiveMenu":
if menuItems[i][j] == "Fight":
mode = "FightInit"
else:
mode = menuItems[i][j]
if mode == "Item":
if len(player.items)==0 :
mode = "ActiveMenu"
print "No items left"
(i,j) = (0,0)
elif mode == "Item":
heal = player.items.pop(menuItems[i][j])
if (player.health + heal) > player.maxHealth:
player.health = player.maxHealth
print "your health was maxed out"
else:
player.health += heal
print "you healed " + str(heal) + "points"
pygame.mixer.music.load("./music/heal.wav")
pygame.mixer.music.play()
pygame.time.wait(250)
mode = "OppTurnInit"
if mode == "Fight":
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_u:
guitarHero.fills[0] = GRAY
for (x,y) in guitarHero.notes:
if x == guitarHero.xpos[0] and (y < (guitarHero.lowY+5)) and (y > (guitarHero.lowY-5)):
guitarHero.notes.remove((x,y))
enemy.health -= 5
pygame.mixer.music.load("./music/note0.wav")
pygame.mixer.music.play()
elif event.key == pygame.K_i:
guitarHero.fills[1] = GRAY
for (x,y) in guitarHero.notes:
if x == guitarHero.xpos[1] and (y < (guitarHero.lowY+5)) and (y > (guitarHero.lowY-5)):
guitarHero.notes.remove((x,y))
enemy.health -= 5
pygame.mixer.music.load("./music/note1.wav")
pygame.mixer.music.play()
elif event.key == pygame.K_o:
guitarHero.fills[2] = GRAY
for (x,y) in guitarHero.notes:
if x == guitarHero.xpos[2] and (y < (guitarHero.lowY+5)) and (y > (guitarHero.lowY-5)):
guitarHero.notes.remove((x,y))
enemy.health -= 5
pygame.mixer.music.load("./music/note2.wav")
pygame.mixer.music.play()
elif event.key == pygame.K_p:
guitarHero.fills[3] = GRAY
for (x,y) in guitarHero.notes:
if x == guitarHero.xpos[3] and (y < (guitarHero.lowY+5)) and (y > (guitarHero.lowY-5)):
guitarHero.notes.remove((x,y))
enemy.health -= 5
pygame.mixer.music.load("./music/note3.wav")
pygame.mixer.music.play()
elif event.type == pygame.KEYUP:
if event.key == pygame.K_u:
guitarHero.fills[0] = WHITE
elif event.key == pygame.K_i:
guitarHero.fills[1] = WHITE
elif event.key == pygame.K_o:
guitarHero.fills[2] = WHITE
elif event.key == pygame.K_p:
guitarHero.fills[3] = WHITE
pulse = (pygame.time.get_ticks()//100) % 10
if pulse > 5:
pulse = 10 - pulse
screen.fill(BLACK)
screen.blit(scale(pygame.image.load("fightBackground.gif"),3.02),(0,0))
drawDescription(screen,menuItems,(i,j))
screen.blit(player.fightImage,(PLAYERX,PLAYERY-pulse))
healthRect = pygame.Rect(203,270,125,30)
fullhearts = scale(pygame.image.load("hearts.gif"),3)
(heartwidth,heartheight) = fullhearts.get_size()
cropped = pygame.Surface((heartwidth,heartheight))
cropped.fill(WHITE)
newWidth = (float(player.health)/player.maxHealth) * heartwidth
cropped.blit(fullhearts,(0,0),(0,0,newWidth,heartheight))
screen.blit(cropped,healthRect)
screen.blit(enemy.image,(ENEMYX,ENEMYY+pulse))
healthRect = pygame.Rect(56,113,125,30)
cropped = pygame.Surface((heartwidth,heartheight))
cropped.fill(WHITE)
newWidth = (float(enemy.health)/enemy.maxHealth) * heartwidth
cropped.blit(fullhearts,(0,0),(0,0,newWidth,heartheight))
screen.blit(cropped,healthRect)
if enemyNotes == True:
notes.drawNotes(screen)
enemyNotes = not notes.update(player)
if not enemyNotes:
mode = "ActiveMenu"
if fighting == True:
guitarHero.draw(screen)
fighting = guitarHero.update()
if not fighting:
mode = "OppTurnInit"
if mode == "Run":
return 1
if player.health <= 0:
pygame.mixer.music.load("./music/die.wav")
pygame.mixer.music.play()
pygame.time.wait(1000)
return 2
elif enemy.health <= 0:
pygame.mixer.music.load("./music/die.wav")
pygame.mixer.music.play()
pygame.time.wait(1000)
return 3
pygame.display.flip()
clock.tick(FPS)
|
mit
|
eltonkevani/tempest_el_env
|
tempest/api/compute/volumes/test_attach_volume.py
|
3
|
4471
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.api.compute import base
from tempest.common.utils.linux.remote_client import RemoteClient
import tempest.config
from tempest.test import attr
class AttachVolumeTestJSON(base.BaseV2ComputeTest):
_interface = 'json'
run_ssh = tempest.config.TempestConfig().compute.run_ssh
def __init__(self, *args, **kwargs):
super(AttachVolumeTestJSON, self).__init__(*args, **kwargs)
self.server = None
self.volume = None
self.attached = False
@classmethod
def setUpClass(cls):
super(AttachVolumeTestJSON, cls).setUpClass()
cls.device = cls.config.compute.volume_device_name
if not cls.config.service_available.cinder:
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
def _detach(self, server_id, volume_id):
if self.attached:
self.servers_client.detach_volume(server_id, volume_id)
self.volumes_client.wait_for_volume_status(volume_id, 'available')
def _delete_volume(self):
if self.volume:
self.volumes_client.delete_volume(self.volume['id'])
self.volume = None
def _create_and_attach(self):
# Start a server and wait for it to become ready
admin_pass = self.image_ssh_password
resp, server = self.create_test_server(wait_until='ACTIVE',
adminPass=admin_pass)
self.server = server
# Record addresses so that we can ssh later
resp, server['addresses'] = \
self.servers_client.list_addresses(server['id'])
# Create a volume and wait for it to become ready
resp, volume = self.volumes_client.create_volume(1,
display_name='test')
self.volume = volume
self.addCleanup(self._delete_volume)
self.volumes_client.wait_for_volume_status(volume['id'], 'available')
# Attach the volume to the server
self.servers_client.attach_volume(server['id'], volume['id'],
device='/dev/%s' % self.device)
self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
self.attached = True
self.addCleanup(self._detach, server['id'], volume['id'])
@testtools.skipIf(not run_ssh, 'SSH required for this test')
@attr(type='gate')
def test_attach_detach_volume(self):
# Stop and Start a server with an attached volume, ensuring that
# the volume remains attached.
self._create_and_attach()
server = self.server
volume = self.volume
self.servers_client.stop(server['id'])
self.servers_client.wait_for_server_status(server['id'], 'SHUTOFF')
self.servers_client.start(server['id'])
self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
linux_client = RemoteClient(server,
self.image_ssh_user, server['adminPass'])
partitions = linux_client.get_partitions()
self.assertIn(self.device, partitions)
self._detach(server['id'], volume['id'])
self.attached = False
self.servers_client.stop(server['id'])
self.servers_client.wait_for_server_status(server['id'], 'SHUTOFF')
self.servers_client.start(server['id'])
self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')
linux_client = RemoteClient(server,
self.image_ssh_user, server['adminPass'])
partitions = linux_client.get_partitions()
self.assertNotIn(self.device, partitions)
class AttachVolumeTestXML(AttachVolumeTestJSON):
_interface = 'xml'
|
apache-2.0
|
theanalyst/cinder
|
cinder/db/migration.py
|
11
|
1271
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Database setup and migration commands."""
from cinder import utils
IMPL = utils.LazyPluggable('db_backend',
sqlalchemy='cinder.db.sqlalchemy.migration')
def db_sync(version=None):
"""Migrate the database to `version` or the most recent version."""
return IMPL.db_sync(version=version)
def db_version():
"""Display the current database version."""
return IMPL.db_version()
def db_initial_version():
"""The starting version for the database."""
return IMPL.db_initial_version()
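# Hedged usage sketch: callers go through these module-level proxies rather
# than importing the sqlalchemy backend directly, e.g.
#
#   from cinder.db import migration
#   migration.db_sync()               # upgrade to the newest schema version
#   current = migration.db_version()  # report the version now in effect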
|
apache-2.0
|
sensysnetworks/uClinux
|
user/python/Lib/plat-freebsd4/SOCKET.py
|
8
|
2557
|
# Generated by h2py from /usr/include/sys/socket.h
SOCK_STREAM = 1
SOCK_DGRAM = 2
SOCK_RAW = 3
SOCK_RDM = 4
SOCK_SEQPACKET = 5
SO_DEBUG = 0x0001
SO_ACCEPTCONN = 0x0002
SO_REUSEADDR = 0x0004
SO_KEEPALIVE = 0x0008
SO_DONTROUTE = 0x0010
SO_BROADCAST = 0x0020
SO_USELOOPBACK = 0x0040
SO_LINGER = 0x0080
SO_OOBINLINE = 0x0100
SO_REUSEPORT = 0x0200
SO_TIMESTAMP = 0x0400
SO_ACCEPTFILTER = 0x1000
SO_SNDBUF = 0x1001
SO_RCVBUF = 0x1002
SO_SNDLOWAT = 0x1003
SO_RCVLOWAT = 0x1004
SO_SNDTIMEO = 0x1005
SO_RCVTIMEO = 0x1006
SO_ERROR = 0x1007
SO_TYPE = 0x1008
SOL_SOCKET = 0xffff
AF_UNSPEC = 0
AF_LOCAL = 1
AF_UNIX = AF_LOCAL
AF_INET = 2
AF_IMPLINK = 3
AF_PUP = 4
AF_CHAOS = 5
AF_NS = 6
AF_ISO = 7
AF_OSI = AF_ISO
AF_ECMA = 8
AF_DATAKIT = 9
AF_CCITT = 10
AF_SNA = 11
AF_DECnet = 12
AF_DLI = 13
AF_LAT = 14
AF_HYLINK = 15
AF_APPLETALK = 16
AF_ROUTE = 17
AF_LINK = 18
pseudo_AF_XTP = 19
AF_COIP = 20
AF_CNT = 21
pseudo_AF_RTIP = 22
AF_IPX = 23
AF_SIP = 24
pseudo_AF_PIP = 25
AF_ISDN = 26
AF_E164 = AF_ISDN
pseudo_AF_KEY = 27
AF_INET6 = 28
AF_NATM = 29
AF_ATM = 30
pseudo_AF_HDRCMPLT = 31
AF_NETGRAPH = 32
AF_MAX = 33
SOCK_MAXADDRLEN = 255
_SS_MAXSIZE = 128
PF_UNSPEC = AF_UNSPEC
PF_LOCAL = AF_LOCAL
PF_UNIX = PF_LOCAL
PF_INET = AF_INET
PF_IMPLINK = AF_IMPLINK
PF_PUP = AF_PUP
PF_CHAOS = AF_CHAOS
PF_NS = AF_NS
PF_ISO = AF_ISO
PF_OSI = AF_ISO
PF_ECMA = AF_ECMA
PF_DATAKIT = AF_DATAKIT
PF_CCITT = AF_CCITT
PF_SNA = AF_SNA
PF_DECnet = AF_DECnet
PF_DLI = AF_DLI
PF_LAT = AF_LAT
PF_HYLINK = AF_HYLINK
PF_APPLETALK = AF_APPLETALK
PF_ROUTE = AF_ROUTE
PF_LINK = AF_LINK
PF_XTP = pseudo_AF_XTP
PF_COIP = AF_COIP
PF_CNT = AF_CNT
PF_SIP = AF_SIP
PF_IPX = AF_IPX
PF_RTIP = pseudo_AF_RTIP
PF_PIP = pseudo_AF_PIP
PF_ISDN = AF_ISDN
PF_KEY = pseudo_AF_KEY
PF_INET6 = AF_INET6
PF_NATM = AF_NATM
PF_ATM = AF_ATM
PF_NETGRAPH = AF_NETGRAPH
PF_MAX = AF_MAX
NET_MAXID = AF_MAX
NET_RT_DUMP = 1
NET_RT_FLAGS = 2
NET_RT_IFLIST = 3
NET_RT_MAXID = 4
SOMAXCONN = 128
MSG_OOB = 0x1
MSG_PEEK = 0x2
MSG_DONTROUTE = 0x4
MSG_EOR = 0x8
MSG_TRUNC = 0x10
MSG_CTRUNC = 0x20
MSG_WAITALL = 0x40
MSG_DONTWAIT = 0x80
MSG_EOF = 0x100
MSG_COMPAT = 0x8000
CMGROUP_MAX = 16
SCM_RIGHTS = 0x01
SCM_TIMESTAMP = 0x02
SCM_CREDS = 0x03
SHUT_RD = 0
SHUT_WR = 1
SHUT_RDWR = 2
# Included from sys/cdefs.h
def __P(protos): return protos
def __STRING(x): return #x
def __XSTRING(x): return __STRING(x)
def __P(protos): return ()
def __STRING(x): return "x"
def __RCSID(s): return __IDSTRING(rcsid,s)
def __RCSID_SOURCE(s): return __IDSTRING(rcsid_source,s)
def __COPYRIGHT(s): return __IDSTRING(copyright,s)
|
gpl-2.0
|
dendisuhubdy/tensorflow
|
tensorflow/python/ops/losses/losses.py
|
61
|
1102
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loss operations for use in neural networks.
Note: All the losses are added to the `GraphKeys.LOSSES` collection by default.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.python.ops.losses.losses_impl import *
from tensorflow.python.ops.losses.util import *
# pylint: enable=wildcard-import
|
apache-2.0
|
cristian69/KernotekV3
|
venv/lib/python2.7/site-packages/flask/testsuite/ext.py
|
563
|
5156
|
# -*- coding: utf-8 -*-
"""
flask.testsuite.ext
~~~~~~~~~~~~~~~~~~~
Tests the extension import thing.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
import unittest
try:
from imp import reload as reload_module
except ImportError:
reload_module = reload
from flask.testsuite import FlaskTestCase
from flask._compat import PY2
class ExtImportHookTestCase(FlaskTestCase):
def setup(self):
# we clear this out for various reasons. The most important one is
# that a real flaskext could be in there which would disable our
# fake package. Secondly we want to make sure that the flaskext
# import hook does not break on reloading.
for entry, value in list(sys.modules.items()):
if (entry.startswith('flask.ext.') or
entry.startswith('flask_') or
entry.startswith('flaskext.') or
entry == 'flaskext') and value is not None:
sys.modules.pop(entry, None)
from flask import ext
reload_module(ext)
# reloading must not add more hooks
import_hooks = 0
for item in sys.meta_path:
cls = type(item)
if cls.__module__ == 'flask.exthook' and \
cls.__name__ == 'ExtensionImporter':
import_hooks += 1
self.assert_equal(import_hooks, 1)
def teardown(self):
from flask import ext
for key in ext.__dict__:
self.assert_not_in('.', key)
def test_flaskext_new_simple_import_normal(self):
from flask.ext.newext_simple import ext_id
self.assert_equal(ext_id, 'newext_simple')
def test_flaskext_new_simple_import_module(self):
from flask.ext import newext_simple
self.assert_equal(newext_simple.ext_id, 'newext_simple')
self.assert_equal(newext_simple.__name__, 'flask_newext_simple')
def test_flaskext_new_package_import_normal(self):
from flask.ext.newext_package import ext_id
self.assert_equal(ext_id, 'newext_package')
def test_flaskext_new_package_import_module(self):
from flask.ext import newext_package
self.assert_equal(newext_package.ext_id, 'newext_package')
self.assert_equal(newext_package.__name__, 'flask_newext_package')
def test_flaskext_new_package_import_submodule_function(self):
from flask.ext.newext_package.submodule import test_function
self.assert_equal(test_function(), 42)
def test_flaskext_new_package_import_submodule(self):
from flask.ext.newext_package import submodule
self.assert_equal(submodule.__name__, 'flask_newext_package.submodule')
self.assert_equal(submodule.test_function(), 42)
def test_flaskext_old_simple_import_normal(self):
from flask.ext.oldext_simple import ext_id
self.assert_equal(ext_id, 'oldext_simple')
def test_flaskext_old_simple_import_module(self):
from flask.ext import oldext_simple
self.assert_equal(oldext_simple.ext_id, 'oldext_simple')
self.assert_equal(oldext_simple.__name__, 'flaskext.oldext_simple')
def test_flaskext_old_package_import_normal(self):
from flask.ext.oldext_package import ext_id
self.assert_equal(ext_id, 'oldext_package')
def test_flaskext_old_package_import_module(self):
from flask.ext import oldext_package
self.assert_equal(oldext_package.ext_id, 'oldext_package')
self.assert_equal(oldext_package.__name__, 'flaskext.oldext_package')
def test_flaskext_old_package_import_submodule(self):
from flask.ext.oldext_package import submodule
self.assert_equal(submodule.__name__, 'flaskext.oldext_package.submodule')
self.assert_equal(submodule.test_function(), 42)
def test_flaskext_old_package_import_submodule_function(self):
from flask.ext.oldext_package.submodule import test_function
self.assert_equal(test_function(), 42)
def test_flaskext_broken_package_no_module_caching(self):
for x in range(2):
with self.assert_raises(ImportError):
import flask.ext.broken
def test_no_error_swallowing(self):
try:
import flask.ext.broken
except ImportError:
exc_type, exc_value, tb = sys.exc_info()
self.assert_true(exc_type is ImportError)
if PY2:
message = 'No module named missing_module'
else:
message = 'No module named \'missing_module\''
self.assert_equal(str(exc_value), message)
self.assert_true(tb.tb_frame.f_globals is globals())
# reraise() adds a second frame so we need to skip that one too.
# On PY3 we even have another one :(
next = tb.tb_next.tb_next
if not PY2:
next = next.tb_next
self.assert_in('flask_broken/__init__.py', next.tb_frame.f_code.co_filename)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ExtImportHookTestCase))
return suite
|
gpl-3.0
|
wujuguang/mako
|
test/test_cache.py
|
5
|
18997
|
import time
from mako import lookup
from mako.cache import CacheImpl
from mako.cache import register_plugin
from mako.compat import py27
from mako.ext import beaker_cache
from mako.lookup import TemplateLookup
from mako.template import Template
from test import eq_
from test import module_base
from test import SkipTest
from test import TemplateTest
from test.util import result_lines
if beaker_cache.has_beaker:
import beaker
class SimpleBackend(object):
def __init__(self):
self.cache = {}
def get(self, key, **kw):
return self.cache[key]
def invalidate(self, key, **kw):
self.cache.pop(key, None)
def put(self, key, value, **kw):
self.cache[key] = value
def get_or_create(self, key, creation_function, **kw):
if key in self.cache:
return self.cache[key]
else:
self.cache[key] = value = creation_function()
return value
class MockCacheImpl(CacheImpl):
realcacheimpl = None
def __init__(self, cache):
self.cache = cache
def set_backend(self, cache, backend):
if backend == "simple":
self.realcacheimpl = SimpleBackend()
else:
self.realcacheimpl = cache._load_impl(backend)
def _setup_kwargs(self, kw):
self.kwargs = kw.copy()
self.kwargs.pop("regions", None)
self.kwargs.pop("manager", None)
if self.kwargs.get("region") != "myregion":
self.kwargs.pop("region", None)
def get_or_create(self, key, creation_function, **kw):
self.key = key
self._setup_kwargs(kw)
return self.realcacheimpl.get_or_create(key, creation_function, **kw)
def put(self, key, value, **kw):
self.key = key
self._setup_kwargs(kw)
self.realcacheimpl.put(key, value, **kw)
def get(self, key, **kw):
self.key = key
self._setup_kwargs(kw)
return self.realcacheimpl.get(key, **kw)
def invalidate(self, key, **kw):
self.key = key
self._setup_kwargs(kw)
self.realcacheimpl.invalidate(key, **kw)
register_plugin("mock", __name__, "MockCacheImpl")
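# Hedged note: register_plugin() makes MockCacheImpl selectable by name, so
# Template(..., cache_impl="mock") and TemplateLookup(cache_impl="mock"),
# which _install_mock_cache() and test_lookup() below rely on, resolve here.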
class CacheTest(TemplateTest):
real_backend = "simple"
def _install_mock_cache(self, template, implname=None):
template.cache_impl = "mock"
impl = template.cache.impl
impl.set_backend(template.cache, implname or self.real_backend)
return impl
def test_def(self):
t = Template(
"""
<%!
callcount = [0]
%>
<%def name="foo()" cached="True">
this is foo
<%
callcount[0] += 1
%>
</%def>
${foo()}
${foo()}
${foo()}
callcount: ${callcount}
"""
)
m = self._install_mock_cache(t)
assert result_lines(t.render()) == [
"this is foo",
"this is foo",
"this is foo",
"callcount: [1]",
]
assert m.kwargs == {}
def test_cache_enable(self):
t = Template(
"""
<%!
callcount = [0]
%>
<%def name="foo()" cached="True">
<% callcount[0] += 1 %>
</%def>
${foo()}
${foo()}
callcount: ${callcount}
""",
cache_enabled=False,
)
self._install_mock_cache(t)
eq_(t.render().strip(), "callcount: [2]")
def test_nested_def(self):
t = Template(
"""
<%!
callcount = [0]
%>
<%def name="foo()">
<%def name="bar()" cached="True">
this is foo
<%
callcount[0] += 1
%>
</%def>
${bar()}
</%def>
${foo()}
${foo()}
${foo()}
callcount: ${callcount}
"""
)
m = self._install_mock_cache(t)
assert result_lines(t.render()) == [
"this is foo",
"this is foo",
"this is foo",
"callcount: [1]",
]
assert m.kwargs == {}
def test_page(self):
t = Template(
"""
<%!
callcount = [0]
%>
<%page cached="True"/>
this is foo
<%
callcount[0] += 1
%>
callcount: ${callcount}
"""
)
m = self._install_mock_cache(t)
t.render()
t.render()
assert result_lines(t.render()) == ["this is foo", "callcount: [1]"]
assert m.kwargs == {}
def test_dynamic_key_with_context(self):
t = Template(
"""
<%block name="foo" cached="True" cache_key="${mykey}">
some block
</%block>
"""
)
m = self._install_mock_cache(t)
t.render(mykey="thekey")
t.render(mykey="thekey")
eq_(result_lines(t.render(mykey="thekey")), ["some block"])
eq_(m.key, "thekey")
t = Template(
"""
<%def name="foo()" cached="True" cache_key="${mykey}">
some def
</%def>
${foo()}
"""
)
m = self._install_mock_cache(t)
t.render(mykey="thekey")
t.render(mykey="thekey")
eq_(result_lines(t.render(mykey="thekey")), ["some def"])
eq_(m.key, "thekey")
def test_dynamic_key_with_funcargs(self):
t = Template(
"""
<%def name="foo(num=5)" cached="True" cache_key="foo_${str(num)}">
hi
</%def>
${foo()}
"""
)
m = self._install_mock_cache(t)
t.render()
t.render()
assert result_lines(t.render()) == ["hi"]
assert m.key == "foo_5"
t = Template(
"""
<%def name="foo(*args, **kwargs)" cached="True"
cache_key="foo_${kwargs['bar']}">
hi
</%def>
${foo(1, 2, bar='lala')}
"""
)
m = self._install_mock_cache(t)
t.render()
assert result_lines(t.render()) == ["hi"]
assert m.key == "foo_lala"
t = Template(
"""
<%page args="bar='hi'" cache_key="foo_${bar}" cached="True"/>
hi
"""
)
m = self._install_mock_cache(t)
t.render()
assert result_lines(t.render()) == ["hi"]
assert m.key == "foo_hi"
def test_dynamic_key_with_imports(self):
lookup = TemplateLookup()
lookup.put_string(
"foo.html",
"""
<%!
callcount = [0]
%>
<%namespace file="ns.html" import="*"/>
<%page cached="True" cache_key="${foo}"/>
this is foo
<%
callcount[0] += 1
%>
callcount: ${callcount}
""",
)
lookup.put_string("ns.html", """""")
t = lookup.get_template("foo.html")
m = self._install_mock_cache(t)
t.render(foo="somekey")
t.render(foo="somekey")
assert result_lines(t.render(foo="somekey")) == [
"this is foo",
"callcount: [1]",
]
assert m.kwargs == {}
def test_fileargs_implicit(self):
l = lookup.TemplateLookup(module_directory=module_base)
l.put_string(
"test",
"""
<%!
callcount = [0]
%>
<%def name="foo()" cached="True" cache_type='dbm'>
this is foo
<%
callcount[0] += 1
%>
</%def>
${foo()}
${foo()}
${foo()}
callcount: ${callcount}
""",
)
m = self._install_mock_cache(l.get_template("test"))
assert result_lines(l.get_template("test").render()) == [
"this is foo",
"this is foo",
"this is foo",
"callcount: [1]",
]
eq_(m.kwargs, {"type": "dbm"})
def test_fileargs_deftag(self):
t = Template(
"""
<%%!
callcount = [0]
%%>
<%%def name="foo()" cached="True" cache_type='file' cache_dir='%s'>
this is foo
<%%
callcount[0] += 1
%%>
</%%def>
${foo()}
${foo()}
${foo()}
callcount: ${callcount}
"""
% module_base
)
m = self._install_mock_cache(t)
assert result_lines(t.render()) == [
"this is foo",
"this is foo",
"this is foo",
"callcount: [1]",
]
assert m.kwargs == {"type": "file", "dir": module_base}
def test_fileargs_pagetag(self):
t = Template(
"""
<%%page cache_dir='%s' cache_type='dbm'/>
<%%!
callcount = [0]
%%>
<%%def name="foo()" cached="True">
this is foo
<%%
callcount[0] += 1
%%>
</%%def>
${foo()}
${foo()}
${foo()}
callcount: ${callcount}
"""
% module_base
)
m = self._install_mock_cache(t)
assert result_lines(t.render()) == [
"this is foo",
"this is foo",
"this is foo",
"callcount: [1]",
]
eq_(m.kwargs, {"dir": module_base, "type": "dbm"})
def test_args_complete(self):
t = Template(
"""
<%%def name="foo()" cached="True" cache_timeout="30" cache_dir="%s"
cache_type="file" cache_key='somekey'>
this is foo
</%%def>
${foo()}
"""
% module_base
)
m = self._install_mock_cache(t)
t.render()
eq_(m.kwargs, {"dir": module_base, "type": "file", "timeout": 30})
t2 = Template(
"""
<%%page cached="True" cache_timeout="30" cache_dir="%s"
cache_type="file" cache_key='somekey'/>
hi
"""
% module_base
)
m = self._install_mock_cache(t2)
t2.render()
eq_(m.kwargs, {"dir": module_base, "type": "file", "timeout": 30})
def test_fileargs_lookup(self):
l = lookup.TemplateLookup(cache_dir=module_base, cache_type="file")
l.put_string(
"test",
"""
<%!
callcount = [0]
%>
<%def name="foo()" cached="True">
this is foo
<%
callcount[0] += 1
%>
</%def>
${foo()}
${foo()}
${foo()}
callcount: ${callcount}
""",
)
t = l.get_template("test")
m = self._install_mock_cache(t)
assert result_lines(l.get_template("test").render()) == [
"this is foo",
"this is foo",
"this is foo",
"callcount: [1]",
]
eq_(m.kwargs, {"dir": module_base, "type": "file"})
def test_buffered(self):
t = Template(
"""
<%!
def a(text):
return "this is a " + text.strip()
%>
${foo()}
${foo()}
<%def name="foo()" cached="True" buffered="True">
this is a test
</%def>
""",
buffer_filters=["a"],
)
self._install_mock_cache(t)
eq_(
result_lines(t.render()),
["this is a this is a test", "this is a this is a test"],
)
def test_load_from_expired(self):
"""test that the cache callable can be called safely after the
originating template has completed rendering.
"""
t = Template(
"""
${foo()}
<%def name="foo()" cached="True" cache_timeout="1">
foo
</%def>
"""
)
self._install_mock_cache(t)
x1 = t.render()
time.sleep(1.2)
x2 = t.render()
assert x1.strip() == x2.strip() == "foo"
def test_namespace_access(self):
t = Template(
"""
<%def name="foo(x)" cached="True">
foo: ${x}
</%def>
<%
foo(1)
foo(2)
local.cache.invalidate_def('foo')
foo(3)
foo(4)
%>
"""
)
self._install_mock_cache(t)
eq_(result_lines(t.render()), ["foo: 1", "foo: 1", "foo: 3", "foo: 3"])
def test_lookup(self):
l = TemplateLookup(cache_impl="mock")
l.put_string(
"x",
"""
<%page cached="True" />
${y}
""",
)
t = l.get_template("x")
self._install_mock_cache(t)
assert result_lines(t.render(y=5)) == ["5"]
assert result_lines(t.render(y=7)) == ["5"]
assert isinstance(t.cache.impl, MockCacheImpl)
def test_invalidate(self):
t = Template(
"""
<%%def name="foo()" cached="True">
foo: ${x}
</%%def>
<%%def name="bar()" cached="True" cache_type='dbm' cache_dir='%s'>
bar: ${x}
</%%def>
${foo()} ${bar()}
"""
% module_base
)
self._install_mock_cache(t)
assert result_lines(t.render(x=1)) == ["foo: 1", "bar: 1"]
assert result_lines(t.render(x=2)) == ["foo: 1", "bar: 1"]
t.cache.invalidate_def("foo")
assert result_lines(t.render(x=3)) == ["foo: 3", "bar: 1"]
t.cache.invalidate_def("bar")
assert result_lines(t.render(x=4)) == ["foo: 3", "bar: 4"]
t = Template(
"""
<%%page cached="True" cache_type="dbm" cache_dir="%s"/>
page: ${x}
"""
% module_base
)
self._install_mock_cache(t)
assert result_lines(t.render(x=1)) == ["page: 1"]
assert result_lines(t.render(x=2)) == ["page: 1"]
t.cache.invalidate_body()
assert result_lines(t.render(x=3)) == ["page: 3"]
assert result_lines(t.render(x=4)) == ["page: 3"]
def test_custom_args_def(self):
t = Template(
"""
<%def name="foo()" cached="True" cache_region="myregion"
cache_timeout="50" cache_foo="foob">
</%def>
${foo()}
"""
)
m = self._install_mock_cache(t, "simple")
t.render()
eq_(m.kwargs, {"region": "myregion", "timeout": 50, "foo": "foob"})
def test_custom_args_block(self):
t = Template(
"""
<%block name="foo" cached="True" cache_region="myregion"
cache_timeout="50" cache_foo="foob">
</%block>
"""
)
m = self._install_mock_cache(t, "simple")
t.render()
eq_(m.kwargs, {"region": "myregion", "timeout": 50, "foo": "foob"})
def test_custom_args_page(self):
t = Template(
"""
<%page cached="True" cache_region="myregion"
cache_timeout="50" cache_foo="foob"/>
"""
)
m = self._install_mock_cache(t, "simple")
t.render()
eq_(m.kwargs, {"region": "myregion", "timeout": 50, "foo": "foob"})
def test_pass_context(self):
t = Template(
"""
<%page cached="True"/>
"""
)
m = self._install_mock_cache(t)
t.render()
assert "context" not in m.kwargs
m.pass_context = True
t.render(x="bar")
assert "context" in m.kwargs
assert m.kwargs["context"].get("x") == "bar"
class RealBackendTest(object):
def test_cache_uses_current_context(self):
t = Template(
"""
${foo()}
<%def name="foo()" cached="True" cache_timeout="1">
foo: ${x}
</%def>
"""
)
self._install_mock_cache(t)
x1 = t.render(x=1)
time.sleep(1.2)
x2 = t.render(x=2)
eq_(x1.strip(), "foo: 1")
eq_(x2.strip(), "foo: 2")
def test_region(self):
t = Template(
"""
<%block name="foo" cached="True" cache_region="short">
short term ${x}
</%block>
<%block name="bar" cached="True" cache_region="long">
long term ${x}
</%block>
<%block name="lala">
none ${x}
</%block>
"""
)
self._install_mock_cache(t)
r1 = result_lines(t.render(x=5))
time.sleep(1.2)
r2 = result_lines(t.render(x=6))
r3 = result_lines(t.render(x=7))
eq_(r1, ["short term 5", "long term 5", "none 5"])
eq_(r2, ["short term 6", "long term 5", "none 6"])
eq_(r3, ["short term 6", "long term 5", "none 7"])
class BeakerCacheTest(RealBackendTest, CacheTest):
real_backend = "beaker"
def setUp(self):
if not beaker_cache.has_beaker:
raise SkipTest("Beaker is required for these tests.")
if not py27:
raise SkipTest("newer beakers not working w/ py26")
def _install_mock_cache(self, template, implname=None):
template.cache_args["manager"] = self._regions()
impl = super(BeakerCacheTest, self)._install_mock_cache(
template, implname
)
return impl
def _regions(self):
return beaker.cache.CacheManager(
cache_regions={
"short": {"expire": 1, "type": "memory"},
"long": {"expire": 60, "type": "memory"},
}
)
class DogpileCacheTest(RealBackendTest, CacheTest):
real_backend = "dogpile.cache"
def setUp(self):
try:
import dogpile.cache # noqa
except ImportError:
raise SkipTest("dogpile.cache is required to run these tests")
def _install_mock_cache(self, template, implname=None):
template.cache_args["regions"] = self._regions()
template.cache_args.setdefault("region", "short")
impl = super(DogpileCacheTest, self)._install_mock_cache(
template, implname
)
return impl
def _regions(self):
from dogpile.cache import make_region
my_regions = {
"short": make_region().configure(
"dogpile.cache.memory", expiration_time=1
),
"long": make_region().configure(
"dogpile.cache.memory", expiration_time=60
),
"myregion": make_region().configure(
"dogpile.cache.memory", expiration_time=60
),
}
return my_regions
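# A minimal usage sketch (not part of the test suite) of the invalidation
# pattern exercised above; assumes a cache backend is configured for the
# template at runtime.
def _example_invalidate_usage():
    from mako.template import Template
    t = Template("""
    <%def name="greet()" cached="True">hello ${x}</%def>
    ${greet()}
    """)
    print(t.render(x=1))  # renders and caches "hello 1"
    print(t.render(x=2))  # served from cache: still "hello 1"
    t.cache.invalidate_def("greet")
    print(t.render(x=3))  # re-rendered: "hello 3"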
|
mit
|
HybridF5/tempest
|
tempest/api/orchestration/base.py
|
3
|
6372
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import yaml
from tempest.common.utils import data_utils
from tempest import config
from tempest.lib import exceptions as lib_exc
import tempest.test
CONF = config.CONF
class BaseOrchestrationTest(tempest.test.BaseTestCase):
"""Base test case class for all Orchestration API tests."""
credentials = ['primary']
@classmethod
def skip_checks(cls):
super(BaseOrchestrationTest, cls).skip_checks()
if not CONF.service_available.heat:
raise cls.skipException("Heat support is required")
@classmethod
def setup_credentials(cls):
super(BaseOrchestrationTest, cls).setup_credentials()
stack_owner_role = CONF.orchestration.stack_owner_role
cls.os = cls.get_client_manager(roles=[stack_owner_role])
@classmethod
def setup_clients(cls):
super(BaseOrchestrationTest, cls).setup_clients()
cls.orchestration_client = cls.os.orchestration_client
cls.client = cls.orchestration_client
cls.servers_client = cls.os.servers_client
cls.keypairs_client = cls.os.keypairs_client
cls.networks_client = cls.os.networks_client
cls.images_v2_client = cls.os.image_client_v2
if CONF.volume_feature_enabled.api_v2:
cls.volumes_client = cls.os.volumes_v2_client
else:
cls.volumes_client = cls.os.volumes_client
@classmethod
def resource_setup(cls):
super(BaseOrchestrationTest, cls).resource_setup()
cls.build_timeout = CONF.orchestration.build_timeout
cls.build_interval = CONF.orchestration.build_interval
cls.stacks = []
cls.keypairs = []
cls.images = []
@classmethod
def create_stack(cls, stack_name, template_data, parameters=None,
environment=None, files=None):
if parameters is None:
parameters = {}
body = cls.client.create_stack(
stack_name,
template=template_data,
parameters=parameters,
environment=environment,
files=files)
stack_id = body.response['location'].split('/')[-1]
stack_identifier = '%s/%s' % (stack_name, stack_id)
cls.stacks.append(stack_identifier)
return stack_identifier
@classmethod
def _clear_stacks(cls):
for stack_identifier in cls.stacks:
try:
cls.client.delete_stack(stack_identifier)
except lib_exc.NotFound:
pass
for stack_identifier in cls.stacks:
try:
cls.client.wait_for_stack_status(
stack_identifier, 'DELETE_COMPLETE')
except lib_exc.NotFound:
pass
@classmethod
def _create_keypair(cls, name_start='keypair-heat-'):
kp_name = data_utils.rand_name(name_start)
body = cls.keypairs_client.create_keypair(name=kp_name)['keypair']
cls.keypairs.append(kp_name)
return body
@classmethod
def _clear_keypairs(cls):
for kp_name in cls.keypairs:
try:
cls.keypairs_client.delete_keypair(kp_name)
except Exception:
pass
@classmethod
def _create_image(cls, name_start='image-heat-', container_format='bare',
disk_format='iso'):
image_name = data_utils.rand_name(name_start)
body = cls.images_v2_client.create_image(image_name,
container_format,
disk_format)
image_id = body['id']
cls.images.append(image_id)
return body
@classmethod
def _clear_images(cls):
for image_id in cls.images:
try:
cls.images_v2_client.delete_image(image_id)
except lib_exc.NotFound:
pass
@classmethod
def read_template(cls, name, ext='yaml'):
loc = ["stacks", "templates", "%s.%s" % (name, ext)]
fullpath = os.path.join(os.path.dirname(__file__), *loc)
with open(fullpath, "r") as f:
content = f.read()
return content
@classmethod
def load_template(cls, name, ext='yaml'):
loc = ["stacks", "templates", "%s.%s" % (name, ext)]
fullpath = os.path.join(os.path.dirname(__file__), *loc)
with open(fullpath, "r") as f:
return yaml.safe_load(f)
@classmethod
def resource_cleanup(cls):
cls._clear_stacks()
cls._clear_keypairs()
cls._clear_images()
super(BaseOrchestrationTest, cls).resource_cleanup()
@staticmethod
def stack_output(stack, output_key):
"""Return a stack output value for a given key."""
return next((o['output_value'] for o in stack['outputs']
if o['output_key'] == output_key), None)
def assert_fields_in_dict(self, obj, *fields):
for field in fields:
self.assertIn(field, obj)
def list_resources(self, stack_identifier):
"""Get a dict mapping of resource names to types."""
resources = self.client.list_resources(stack_identifier)['resources']
self.assertIsInstance(resources, list)
for res in resources:
self.assert_fields_in_dict(res, 'logical_resource_id',
'resource_type', 'resource_status',
'updated_time')
return dict((r['resource_name'], r['resource_type'])
for r in resources)
def get_stack_output(self, stack_identifier, output_key):
body = self.client.show_stack(stack_identifier)['stack']
return self.stack_output(body, output_key)
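# Illustrative sketch (hypothetical test, not part of tempest) showing how a
# concrete test class typically combines the helpers above; 'empty' is a
# placeholder template name.
#
#   class StackExampleTest(BaseOrchestrationTest):
#       def test_create_and_inspect(self):
#           tpl = self.read_template('empty')
#           stack_id = self.create_stack(data_utils.rand_name('heat'), tpl)
#           self.client.wait_for_stack_status(stack_id, 'CREATE_COMPLETE')
#           resources = self.list_resources(stack_id)
#           self.assertIsInstance(resources, dict)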
|
apache-2.0
|
egenerat/flight-manager
|
app/common/colorer.py
|
2
|
3749
|
# -*- coding: utf-8 -*-
import logging
import platform
# now we monkey-patch logging.StreamHandler.emit to add color support
def add_coloring_to_emit_windows(fn):
# add methods we need to the class
def _out_handle(self):
import ctypes
return ctypes.windll.kernel32.GetStdHandle(self.STD_OUTPUT_HANDLE)
out_handle = property(_out_handle)
def _set_color(self, code):
import ctypes
# Constants from the Windows API
self.STD_OUTPUT_HANDLE = -11
hdl = ctypes.windll.kernel32.GetStdHandle(self.STD_OUTPUT_HANDLE)
ctypes.windll.kernel32.SetConsoleTextAttribute(hdl, code)
setattr(logging.StreamHandler, '_set_color', _set_color)
def new(*args):
FOREGROUND_BLUE = 0x0001 # text color contains blue.
FOREGROUND_GREEN = 0x0002 # text color contains green.
FOREGROUND_RED = 0x0004 # text color contains red.
FOREGROUND_INTENSITY = 0x0008 # text color is intensified.
FOREGROUND_WHITE = FOREGROUND_BLUE | FOREGROUND_GREEN | FOREGROUND_RED
# winbase.h
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# wincon.h
FOREGROUND_BLACK = 0x0000
FOREGROUND_BLUE = 0x0001
FOREGROUND_GREEN = 0x0002
FOREGROUND_CYAN = 0x0003
FOREGROUND_RED = 0x0004
FOREGROUND_MAGENTA = 0x0005
FOREGROUND_YELLOW = 0x0006
FOREGROUND_GREY = 0x0007
FOREGROUND_INTENSITY = 0x0008 # foreground color is intensified.
BACKGROUND_BLACK = 0x0000
BACKGROUND_BLUE = 0x0010
BACKGROUND_GREEN = 0x0020
BACKGROUND_CYAN = 0x0030
BACKGROUND_RED = 0x0040
BACKGROUND_MAGENTA = 0x0050
BACKGROUND_YELLOW = 0x0060
BACKGROUND_GREY = 0x0070
BACKGROUND_INTENSITY = 0x0080 # background color is intensified.
levelno = args[1].levelno
if levelno >= 50:
color = BACKGROUND_YELLOW | FOREGROUND_RED | FOREGROUND_INTENSITY | BACKGROUND_INTENSITY
elif levelno >= 40:
color = FOREGROUND_RED | FOREGROUND_INTENSITY
elif levelno >= 30:
color = FOREGROUND_YELLOW | FOREGROUND_INTENSITY
elif levelno >= 20:
color = FOREGROUND_GREEN
elif levelno >= 10:
color = FOREGROUND_MAGENTA
else:
color = FOREGROUND_WHITE
args[0]._set_color(color)
ret = fn(*args)
args[0]._set_color(FOREGROUND_WHITE)
# print "after"
return ret
return new
def add_coloring_to_emit_ansi(fn):
# add methods we need to the class
def new(*args):
levelno = args[1].levelno
if levelno >= 50:
color = '\x1b[31m' # red
elif levelno >= 40:
color = '\x1b[31m' # red
elif levelno >= 30:
color = '\x1b[33m' # yellow
elif levelno >= 20:
color = '\x1b[32m' # green
elif levelno >= 10:
color = '\x1b[35m' # pink
else:
color = '\x1b[0m' # normal
args[1].msg = color + args[1].msg + '\x1b[0m' # normal
# print "after"
return fn(*args)
return new
if platform.system() == 'Windows':
# Windows does not support ANSI escapes and we are using API calls to set the console color
logging.StreamHandler.emit = add_coloring_to_emit_windows(logging.StreamHandler.emit)
else:
# all non-Windows platforms support ANSI escapes, so we use them
logging.StreamHandler.emit = add_coloring_to_emit_ansi(logging.StreamHandler.emit)
# log = logging.getLogger()
# log.addFilter(log_filter())
# //hdlr = logging.StreamHandler()
# //hdlr.setFormatter(formatter())
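# Usage sketch: importing this module is enough, since it patches
# logging.StreamHandler.emit globally; the logger below is illustrative.
def _demo_colored_logging():
    log = logging.getLogger("demo")
    log.addHandler(logging.StreamHandler())
    log.setLevel(logging.DEBUG)
    log.debug("pink")      # levelno 10 -> magenta/pink
    log.info("green")      # levelno 20 -> green
    log.warning("yellow")  # levelno 30 -> yellow
    log.error("red")       # levelno 40 -> red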
|
mit
|
enriquesanchezb/practica_utad_2016
|
venv/lib/python2.7/site-packages/pip/_vendor/requests/compat.py
|
1039
|
1469
|
# -*- coding: utf-8 -*-
"""
pythoncompat
"""
from .packages import chardet
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
try:
import simplejson as json
except (ImportError, SyntaxError):
# simplejson does not support Python 3.2, it throws a SyntaxError
# because of u'...' Unicode literals.
import json
# ---------
# Specifics
# ---------
if is_py2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
from urllib2 import parse_http_list
import cookielib
from Cookie import Morsel
from StringIO import StringIO
from .packages.urllib3.packages.ordered_dict import OrderedDict
builtin_str = str
bytes = str
str = unicode
basestring = basestring
numeric_types = (int, long, float)
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
from urllib.request import parse_http_list, getproxies, proxy_bypass
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
from collections import OrderedDict
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float)
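# Usage sketch: downstream modules import these shims instead of branching on
# the interpreter themselves, e.g.
#
#   from .compat import urlparse, str
#   host = urlparse('http://example.com/path').netloc
#
# so the same call works unchanged on Python 2 and Python 3.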
|
apache-2.0
|
henridwyer/scikit-learn
|
examples/cluster/plot_dict_face_patches.py
|
337
|
2747
|
"""
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
image patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of scikit-learn to process a very large
dataset by chunks. We load one image at a time and randomly extract 50
patches from it. Once we have accumulated 500 of these patches (using
10 images), we run the `partial_fit` method of the online KMeans
object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
`partial_fit`. This is because the number of patches they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 6 * 50 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
|
bsd-3-clause
|
xcoder123/KodiLatviesiem
|
plugin.video.dzivaistv/js2py/translators/__init__.py
|
44
|
1757
|
# The MIT License
#
# Copyright 2014, 2015 Piotr Dabkowski
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
__all__ = ['PyJsParser', 'Node', 'WrappingNode', 'node_to_dict', 'parse', 'translate_js', 'translate', 'syntax_tree_translate',
'DEFAULT_HEADER']
__author__ = 'Piotr Dabkowski'
__version__ = '2.2.0'
from .pyjsparser import PyJsParser, Node, WrappingNode, node_to_dict
from .translator import translate_js, translate, syntax_tree_translate, DEFAULT_HEADER
def parse(javascript_code):
"""Returns syntax tree of javascript_code.
Syntax tree has the same structure as syntax tree produced by esprima.js
Same as PyJsParser().parse For your convenience :) """
p = PyJsParser()
return p.parse(javascript_code)
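# Usage sketch (illustrative, assuming the package imports shown above):
#
#   tree = parse('var a = 1;')            # esprima-style syntax tree
#   py_src = translate_js('var a = 1;')   # equivalent Python source string
#
# parse() is a thin convenience wrapper over PyJsParser().parse.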
|
gpl-3.0
|
jakevdp/megaman
|
megaman/geometry/tests/test_affinity.py
|
4
|
3661
|
# LICENSE: Simplified BSD https://github.com/mmp2/megaman/blob/master/LICENSE
from __future__ import division ## removes integer division
import os
import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_raises
from scipy.spatial.distance import cdist, pdist, squareform
from scipy.sparse import csr_matrix
from scipy import io
from megaman.geometry import (compute_adjacency_matrix,
compute_affinity_matrix, Affinity,
affinity_methods)
random_state = np.random.RandomState(36)
n_sample = 10
d = 2
X = random_state.randn(n_sample, d)
D = squareform(pdist(X))
D[D > 1/d] = 0
TEST_DATA = os.path.join(os.path.dirname(__file__),
'testmegaman_laplacian_rad0_2_lam1_5_n200.mat')
def test_affinity_methods():
assert_equal(set(affinity_methods()), {'auto', 'gaussian'})
def test_affinity_input_validation():
X = np.random.rand(20, 3)
D = compute_adjacency_matrix(X, radius=1)
assert_raises(ValueError, compute_affinity_matrix, X)
def test_affinity_sparse_vs_dense():
"""
Test that A_sparse is the same as A_dense for a small A matrix
"""
rad = 2.
n_samples = 6
X = np.arange(n_samples)
X = X[:, np.newaxis]
X = np.concatenate((X, np.zeros((n_samples, 1), dtype=float)), axis=1)
X = np.asarray(X, order="C")
test_dist_matrix = compute_adjacency_matrix(X, method='auto', radius=rad)
A_dense = compute_affinity_matrix(test_dist_matrix.toarray(), method='auto',
radius=rad, symmetrize=False)
A_sparse = compute_affinity_matrix(csr_matrix(test_dist_matrix),
method='auto', radius=rad, symmetrize=False)
A_spdense = A_sparse.toarray()
A_spdense[A_spdense == 0] = 1.
assert_allclose(A_dense, A_spdense)
def test_affinity_vs_matlab():
"""Test that the affinity calculation matches the matlab result"""
matlab = io.loadmat(TEST_DATA)
D = np.sqrt(matlab['S']) # matlab outputs squared distances
A_matlab = matlab['A']
radius = matlab['rad'][0]
# check dense affinity computation
A_dense = compute_affinity_matrix(D, radius=radius)
assert_allclose(A_dense, A_matlab)
# check sparse affinity computation
A_sparse = compute_affinity_matrix(csr_matrix(D), radius=radius)
assert_allclose(A_sparse.toarray(), A_matlab)
def test_affinity():
rand = np.random.RandomState(42)
X = rand.rand(20, 3)
D = cdist(X, X)
def check_affinity(adjacency_radius, affinity_radius, symmetrize):
adj = compute_adjacency_matrix(X, radius=adjacency_radius)
aff = compute_affinity_matrix(adj, radius=affinity_radius,
symmetrize=symmetrize)
A = np.exp(-(D / affinity_radius) ** 2)
A[D > adjacency_radius] = 0
assert_allclose(aff.toarray(), A)
for adjacency_radius in [0.5, 1.0, 5.0]:
for affinity_radius in [0.1, 0.5, 1.0]:
for symmetrize in [True, False]:
yield (check_affinity, adjacency_radius,
affinity_radius, symmetrize)
def test_custom_affinity():
class CustomAffinity(Affinity):
name = "custom"
def affinity_matrix(self, adjacency_matrix):
return np.exp(-abs(adjacency_matrix.toarray()))
rand = np.random.RandomState(42)
X = rand.rand(10, 2)
D = compute_adjacency_matrix(X, radius=10)
A = compute_affinity_matrix(D, method='custom', radius=1)
assert_allclose(A, np.exp(-abs(D.toarray())))
Affinity._remove_from_registry("custom")
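# A worked instance of the gaussian affinity checked in test_affinity: for a
# pairwise distance d and affinity radius r the weight is exp(-(d/r)**2),
# zeroed wherever d exceeds the adjacency radius. Purely illustrative.
def _gaussian_affinity_example():
    D_small = np.array([[0.0, 0.5], [0.5, 0.0]])
    r = 0.5
    A = np.exp(-(D_small / r) ** 2)  # off-diagonal weight is exp(-1)
    assert_allclose(A[0, 1], np.exp(-1))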
|
bsd-2-clause
|
alon/servo
|
components/script/dom/bindings/codegen/ply/ply/lex.py
|
344
|
40739
|
# -----------------------------------------------------------------------------
# ply: lex.py
#
# Copyright (C) 2001-2009,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
__version__ = "3.3"
__tabversion__ = "3.2" # Version of table file used
import re, sys, types, copy, os
# This tuple contains known string types
try:
# Python 2.6
StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
# Python 3.0
StringTypes = (str, bytes)
# Extract the code attribute of a function. Different implementations
# are for Python 2/3 compatibility.
if sys.version_info[0] < 3:
def func_code(f):
return f.func_code
else:
def func_code(f):
return f.__code__
# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
def __init__(self,message,s):
self.args = (message,)
self.text = s
# Token class. This class is used to represent the tokens produced.
class LexToken(object):
def __str__(self):
return "LexToken(%s,%r,%d,%d)" % (self.type,self.value,self.lineno,self.lexpos)
def __repr__(self):
return str(self)
# This object is a stand-in for a logging object created by the
# logging module.
class PlyLogger(object):
def __init__(self,f):
self.f = f
def critical(self,msg,*args,**kwargs):
self.f.write((msg % args) + "\n")
def warning(self,msg,*args,**kwargs):
self.f.write("WARNING: "+ (msg % args) + "\n")
def error(self,msg,*args,**kwargs):
self.f.write("ERROR: " + (msg % args) + "\n")
info = critical
debug = critical
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self,name):
return self
def __call__(self,*args,**kwargs):
return self
# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
# clone() - Clone the lexer
#
# lineno - Current line number
# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
class Lexer:
def __init__(self):
self.lexre = None # Master regular expression. This is a list of
# tuples (re,findex) where re is a compiled
# regular expression and findex is a list
# mapping regex group numbers to rules
self.lexretext = None # Current regular expression strings
self.lexstatere = {} # Dictionary mapping lexer states to master regexs
self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names
self.lexstate = "INITIAL" # Current lexer state
self.lexstatestack = [] # Stack of lexer states
self.lexstateinfo = None # State information
self.lexstateignore = {} # Dictionary of ignored characters for each state
self.lexstateerrorf = {} # Dictionary of error functions for each state
self.lexreflags = 0 # Optional re compile flags
self.lexdata = None # Actual input data (as a string)
self.lexpos = 0 # Current position in input text
self.lexlen = 0 # Length of the input text
self.lexerrorf = None # Error rule (if any)
self.lextokens = None # List of valid tokens
self.lexignore = "" # Ignored characters
self.lexliterals = "" # Literal characters that can be passed through
self.lexmodule = None # Module
self.lineno = 1 # Current line number
self.lexoptimize = 0 # Optimized mode
def clone(self,object=None):
c = copy.copy(self)
# If the object parameter has been supplied, it means we are attaching the
# lexer to a new object. In this case, we have to rebind all methods in
# the lexstatere and lexstateerrorf tables.
if object:
newtab = { }
for key, ritem in self.lexstatere.items():
newre = []
for cre, findex in ritem:
newfindex = []
for f in findex:
if not f or not f[0]:
newfindex.append(f)
continue
newfindex.append((getattr(object,f[0].__name__),f[1]))
newre.append((cre,newfindex))
newtab[key] = newre
c.lexstatere = newtab
c.lexstateerrorf = { }
for key, ef in self.lexstateerrorf.items():
c.lexstateerrorf[key] = getattr(object,ef.__name__)
c.lexmodule = object
return c
# ------------------------------------------------------------
# writetab() - Write lexer information to a table file
# ------------------------------------------------------------
def writetab(self,tabfile,outputdir=""):
if isinstance(tabfile,types.ModuleType):
return
basetabfilename = tabfile.split(".")[-1]
filename = os.path.join(outputdir,basetabfilename)+".py"
tf = open(filename,"w")
tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
tf.write("_tabversion = %s\n" % repr(__version__))
tf.write("_lextokens = %s\n" % repr(self.lextokens))
tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
tabre = { }
# Collect all functions in the initial state
initial = self.lexstatere["INITIAL"]
initialfuncs = []
for part in initial:
for f in part[1]:
if f and f[0]:
initialfuncs.append(f)
for key, lre in self.lexstatere.items():
titem = []
for i in range(len(lre)):
titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1],self.lexstaterenames[key][i])))
tabre[key] = titem
tf.write("_lexstatere = %s\n" % repr(tabre))
tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))
taberr = { }
for key, ef in self.lexstateerrorf.items():
if ef:
taberr[key] = ef.__name__
else:
taberr[key] = None
tf.write("_lexstateerrorf = %s\n" % repr(taberr))
tf.close()
# ------------------------------------------------------------
# readtab() - Read lexer information from a tab file
# ------------------------------------------------------------
def readtab(self,tabfile,fdict):
if isinstance(tabfile,types.ModuleType):
lextab = tabfile
else:
if sys.version_info[0] < 3:
exec("import %s as lextab" % tabfile)
else:
env = { }
exec("import %s as lextab" % tabfile, env,env)
lextab = env['lextab']
if getattr(lextab,"_tabversion","0.0") != __version__:
raise ImportError("Inconsistent PLY version")
self.lextokens = lextab._lextokens
self.lexreflags = lextab._lexreflags
self.lexliterals = lextab._lexliterals
self.lexstateinfo = lextab._lexstateinfo
self.lexstateignore = lextab._lexstateignore
self.lexstatere = { }
self.lexstateretext = { }
for key,lre in lextab._lexstatere.items():
titem = []
txtitem = []
for i in range(len(lre)):
titem.append((re.compile(lre[i][0],lextab._lexreflags | re.VERBOSE),_names_to_funcs(lre[i][1],fdict)))
txtitem.append(lre[i][0])
self.lexstatere[key] = titem
self.lexstateretext[key] = txtitem
self.lexstateerrorf = { }
for key,ef in lextab._lexstateerrorf.items():
self.lexstateerrorf[key] = fdict[ef]
self.begin('INITIAL')
# ------------------------------------------------------------
# input() - Push a new string into the lexer
# ------------------------------------------------------------
def input(self,s):
# Pull off the first character to see if s looks like a string
c = s[:1]
if not isinstance(c,StringTypes):
raise ValueError("Expected a string")
self.lexdata = s
self.lexpos = 0
self.lexlen = len(s)
# ------------------------------------------------------------
# begin() - Changes the lexing state
# ------------------------------------------------------------
def begin(self,state):
if not state in self.lexstatere:
raise ValueError("Undefined state")
self.lexre = self.lexstatere[state]
self.lexretext = self.lexstateretext[state]
self.lexignore = self.lexstateignore.get(state,"")
self.lexerrorf = self.lexstateerrorf.get(state,None)
self.lexstate = state
# ------------------------------------------------------------
# push_state() - Changes the lexing state and saves old on stack
# ------------------------------------------------------------
def push_state(self,state):
self.lexstatestack.append(self.lexstate)
self.begin(state)
# ------------------------------------------------------------
# pop_state() - Restores the previous state
# ------------------------------------------------------------
def pop_state(self):
self.begin(self.lexstatestack.pop())
# ------------------------------------------------------------
# current_state() - Returns the current lexing state
# ------------------------------------------------------------
def current_state(self):
return self.lexstate
# ------------------------------------------------------------
# skip() - Skip ahead n characters
# ------------------------------------------------------------
def skip(self,n):
self.lexpos += n
# ------------------------------------------------------------
# token() - Return the next token from the Lexer
#
# Note: This function has been carefully implemented to be as fast
# as possible. Don't make changes unless you really know what
# you are doing
# ------------------------------------------------------------
def token(self):
# Make local copies of frequently referenced attributes
lexpos = self.lexpos
lexlen = self.lexlen
lexignore = self.lexignore
lexdata = self.lexdata
while lexpos < lexlen:
# This code provides some short-circuit code for whitespace, tabs, and other ignored characters
if lexdata[lexpos] in lexignore:
lexpos += 1
continue
# Look for a regular expression match
for lexre,lexindexfunc in self.lexre:
m = lexre.match(lexdata,lexpos)
if not m: continue
# Create a token for return
tok = LexToken()
tok.value = m.group()
tok.lineno = self.lineno
tok.lexpos = lexpos
i = m.lastindex
func,tok.type = lexindexfunc[i]
if not func:
# If no token type was set, it's an ignored token
if tok.type:
self.lexpos = m.end()
return tok
else:
lexpos = m.end()
break
lexpos = m.end()
# If token is processed by a function, call it
tok.lexer = self # Set additional attributes useful in token rules
self.lexmatch = m
self.lexpos = lexpos
newtok = func(tok)
# Every function must return a token; if it returns nothing, we just move on to the next token
if not newtok:
lexpos = self.lexpos # This is here in case user has updated lexpos.
lexignore = self.lexignore # This is here in case there was a state change
break
# Verify type of the token. If not in the token map, raise an error
if not self.lexoptimize:
if not newtok.type in self.lextokens:
raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
func_code(func).co_filename, func_code(func).co_firstlineno,
func.__name__, newtok.type),lexdata[lexpos:])
return newtok
else:
# No match, see if in literals
if lexdata[lexpos] in self.lexliterals:
tok = LexToken()
tok.value = lexdata[lexpos]
tok.lineno = self.lineno
tok.type = tok.value
tok.lexpos = lexpos
self.lexpos = lexpos + 1
return tok
# No match. Call t_error() if defined.
if self.lexerrorf:
tok = LexToken()
tok.value = self.lexdata[lexpos:]
tok.lineno = self.lineno
tok.type = "error"
tok.lexer = self
tok.lexpos = lexpos
self.lexpos = lexpos
newtok = self.lexerrorf(tok)
if lexpos == self.lexpos:
# Error method didn't change text position at all. This is an error.
raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
lexpos = self.lexpos
if not newtok: continue
return newtok
self.lexpos = lexpos
raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
self.lexpos = lexpos + 1
if self.lexdata is None:
raise RuntimeError("No input string given with input()")
return None
# Iterator interface
def __iter__(self):
return self
def next(self):
t = self.token()
if t is None:
raise StopIteration
return t
__next__ = next
# -----------------------------------------------------------------------------
# === Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the lex() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
try:
raise RuntimeError
except RuntimeError:
e,b,t = sys.exc_info()
f = t.tb_frame
while levels > 0:
f = f.f_back
levels -= 1
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist,namelist):
result = []
for f,name in zip(funclist,namelist):
if f and f[0]:
result.append((name, f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist,fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]],n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist,reflags,ldict,toknames):
if not relist: return []
regex = "|".join(relist)
try:
lexre = re.compile(regex,re.VERBOSE | reflags)
# Build the index to function map for the matching engine
lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
lexindexnames = lexindexfunc[:]
for f,i in lexre.groupindex.items():
handle = ldict.get(f,None)
if type(handle) in (types.FunctionType, types.MethodType):
lexindexfunc[i] = (handle,toknames[f])
lexindexnames[i] = f
elif handle is not None:
lexindexnames[i] = f
if f.find("ignore_") > 0:
lexindexfunc[i] = (None,None)
else:
lexindexfunc[i] = (None, toknames[f])
return [(lexre,lexindexfunc)],[regex],[lexindexnames]
except Exception:
m = int(len(relist)/2)
if m == 0: m = 1
llist, lre, lnames = _form_master_re(relist[:m],reflags,ldict,toknames)
rlist, rre, rnames = _form_master_re(relist[m:],reflags,ldict,toknames)
return llist+rlist, lre+rre, lnames+rnames
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s,names):
nonstate = 1
parts = s.split("_")
for i in range(1,len(parts)):
if not parts[i] in names and parts[i] != 'ANY': break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names)
tokenname = "_".join(parts[i:])
return (states,tokenname)
# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
def __init__(self,ldict,log=None,reflags=0):
self.ldict = ldict
self.error_func = None
self.tokens = []
self.reflags = reflags
self.stateinfo = { 'INITIAL' : 'inclusive'}
self.files = {}
self.error = 0
if log is None:
self.log = PlyLogger(sys.stderr)
else:
self.log = log
# Get all of the basic information
def get_all(self):
self.get_tokens()
self.get_literals()
self.get_states()
self.get_rules()
# Validate all of the information
def validate_all(self):
self.validate_tokens()
self.validate_literals()
self.validate_rules()
return self.error
# Get the tokens map
def get_tokens(self):
tokens = self.ldict.get("tokens",None)
if not tokens:
self.log.error("No token list is defined")
self.error = 1
return
if not isinstance(tokens,(list, tuple)):
self.log.error("tokens must be a list or tuple")
self.error = 1
return
if not tokens:
self.log.error("tokens is empty")
self.error = 1
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
terminals = {}
for n in self.tokens:
if not _is_identifier.match(n):
self.log.error("Bad token name '%s'",n)
self.error = 1
if n in terminals:
self.log.warning("Token '%s' multiply defined", n)
terminals[n] = 1
# Get the literals specifier
def get_literals(self):
self.literals = self.ldict.get("literals","")
# Validate literals
def validate_literals(self):
try:
for c in self.literals:
if not isinstance(c,StringTypes) or len(c) > 1:
self.log.error("Invalid literal %s. Must be a single character", repr(c))
self.error = 1
continue
except TypeError:
self.log.error("Invalid literals specification. literals must be a sequence of characters")
self.error = 1
def get_states(self):
self.states = self.ldict.get("states",None)
# Build statemap
if self.states:
if not isinstance(self.states,(tuple,list)):
self.log.error("states must be defined as a tuple or list")
self.error = 1
else:
for s in self.states:
if not isinstance(s,tuple) or len(s) != 2:
self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')",repr(s))
self.error = 1
continue
name, statetype = s
if not isinstance(name,StringTypes):
self.log.error("State name %s must be a string", repr(name))
self.error = 1
continue
if not (statetype == 'inclusive' or statetype == 'exclusive'):
self.log.error("State type for state %s must be 'inclusive' or 'exclusive'",name)
self.error = 1
continue
if name in self.stateinfo:
self.log.error("State '%s' already defined",name)
self.error = 1
continue
self.stateinfo[name] = statetype
# Get all of the symbols with a t_ prefix and sort them into various
# categories (functions, strings, error functions, and ignore characters)
def get_rules(self):
tsymbols = [f for f in self.ldict if f[:2] == 't_' ]
# Now build up a list of functions and a list of strings
self.toknames = { } # Mapping of symbols to token names
self.funcsym = { } # Symbols defined as functions
self.strsym = { } # Symbols defined as strings
self.ignore = { } # Ignore strings by state
self.errorf = { } # Error functions by state
for s in self.stateinfo:
self.funcsym[s] = []
self.strsym[s] = []
if len(tsymbols) == 0:
self.log.error("No rules of the form t_rulename are defined")
self.error = 1
return
for f in tsymbols:
t = self.ldict[f]
states, tokname = _statetoken(f,self.stateinfo)
self.toknames[f] = tokname
if hasattr(t,"__call__"):
if tokname == 'error':
for s in states:
self.errorf[s] = t
elif tokname == 'ignore':
line = func_code(t).co_firstlineno
file = func_code(t).co_filename
self.log.error("%s:%d: Rule '%s' must be defined as a string",file,line,t.__name__)
self.error = 1
else:
for s in states:
self.funcsym[s].append((f,t))
elif isinstance(t, StringTypes):
if tokname == 'ignore':
for s in states:
self.ignore[s] = t
if "\\" in t:
self.log.warning("%s contains a literal backslash '\\'",f)
elif tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", f)
self.error = 1
else:
for s in states:
self.strsym[s].append((f,t))
else:
self.log.error("%s not defined as a function or string", f)
self.error = 1
# Sort the functions by line number
for f in self.funcsym.values():
if sys.version_info[0] < 3:
f.sort(lambda x,y: cmp(func_code(x[1]).co_firstlineno,func_code(y[1]).co_firstlineno))
else:
# Python 3.0
f.sort(key=lambda x: func_code(x[1]).co_firstlineno)
# Sort the strings by regular expression length
for s in self.strsym.values():
if sys.version_info[0] < 3:
s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
else:
# Python 3.0
s.sort(key=lambda x: len(x[1]),reverse=True)
# Validate all of the t_rules collected
def validate_rules(self):
for state in self.stateinfo:
# Validate all rules defined by functions
for fname, f in self.funcsym[state]:
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
self.files[file] = 1
tokname = self.toknames[fname]
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = func_code(f).co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
self.error = 1
continue
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
self.error = 1
continue
if not f.__doc__:
self.log.error("%s:%d: No regular expression defined for rule '%s'",file,line,f.__name__)
self.error = 1
continue
try:
c = re.compile("(?P<%s>%s)" % (fname,f.__doc__), re.VERBOSE | self.reflags)
if c.match(""):
self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file,line,f.__name__)
self.error = 1
except re.error:
_etype, e, _etrace = sys.exc_info()
self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file,line,f.__name__,e)
if '#' in f.__doc__:
self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'",file,line, f.__name__)
self.error = 1
# Validate all rules defined by strings
for name,r in self.strsym[state]:
tokname = self.toknames[name]
if tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", name)
self.error = 1
continue
if not tokname in self.tokens and tokname.find("ignore_") < 0:
self.log.error("Rule '%s' defined for an unspecified token %s",name,tokname)
self.error = 1
continue
try:
c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | self.reflags)
if (c.match("")):
self.log.error("Regular expression for rule '%s' matches empty string",name)
self.error = 1
except re.error:
_etype, e, _etrace = sys.exc_info()
self.log.error("Invalid regular expression for rule '%s'. %s",name,e)
if '#' in r:
self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'",name)
self.error = 1
if not self.funcsym[state] and not self.strsym[state]:
self.log.error("No rules defined for state '%s'",state)
self.error = 1
# Validate the error function
efunc = self.errorf.get(state,None)
if efunc:
f = efunc
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
self.files[file] = 1
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = func_code(f).co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
self.error = 1
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
self.error = 1
for f in self.files:
self.validate_file(f)
# -----------------------------------------------------------------------------
# validate_file()
#
# This checks to see if there are duplicated t_rulename() functions or strings
# in the parser input file. This is done using a simple regular expression
# match on each line in the given file.
# -----------------------------------------------------------------------------
def validate_file(self,filename):
import os.path
base,ext = os.path.splitext(filename)
if ext != '.py': return # No idea what the file is. Return OK
try:
f = open(filename)
lines = f.readlines()
f.close()
except IOError:
return # Couldn't find the file. Don't worry about it
fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
counthash = { }
linen = 1
for l in lines:
m = fre.match(l)
if not m:
m = sre.match(l)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
self.log.error("%s:%d: Rule %s redefined. Previously defined on line %d",filename,linen,name,prev)
self.error = 1
linen += 1
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
global lexer
ldict = None
stateinfo = { 'INITIAL' : 'inclusive'}
lexobj = Lexer()
lexobj.lexoptimize = optimize
global token,input
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
if debug:
if debuglog is None:
debuglog = PlyLogger(sys.stderr)
# Get the module dictionary used for the lexer
if object: module = object
if module:
_items = [(k,getattr(module,k)) for k in dir(module)]
ldict = dict(_items)
else:
ldict = get_caller_module_dict(2)
# Collect parser information from the dictionary
linfo = LexerReflect(ldict,log=errorlog,reflags=reflags)
linfo.get_all()
if not optimize:
if linfo.validate_all():
raise SyntaxError("Can't build lexer")
if optimize and lextab:
try:
lexobj.readtab(lextab,ldict)
token = lexobj.token
input = lexobj.input
lexer = lexobj
return lexobj
except ImportError:
pass
# Dump some basic debugging information
if debug:
debuglog.info("lex: tokens = %r", linfo.tokens)
debuglog.info("lex: literals = %r", linfo.literals)
debuglog.info("lex: states = %r", linfo.stateinfo)
# Build a dictionary of valid token names
lexobj.lextokens = { }
for n in linfo.tokens:
lexobj.lextokens[n] = 1
# Get literals specification
if isinstance(linfo.literals,(list,tuple)):
lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
else:
lexobj.lexliterals = linfo.literals
# Get the stateinfo dictionary
stateinfo = linfo.stateinfo
regexs = { }
# Build the master regular expressions
for state in stateinfo:
regex_list = []
# Add rules defined by functions first
for fname, f in linfo.funcsym[state]:
line = func_code(f).co_firstlineno
file = func_code(f).co_filename
regex_list.append("(?P<%s>%s)" % (fname,f.__doc__))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",fname,f.__doc__, state)
# Now add all of the simple rules
for name,r in linfo.strsym[state]:
regex_list.append("(?P<%s>%s)" % (name,r))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",name,r, state)
regexs[state] = regex_list
# Build the master regular expressions
if debug:
debuglog.info("lex: ==== MASTER REGEXS FOLLOW ====")
for state in regexs:
lexre, re_text, re_names = _form_master_re(regexs[state],reflags,ldict,linfo.toknames)
lexobj.lexstatere[state] = lexre
lexobj.lexstateretext[state] = re_text
lexobj.lexstaterenames[state] = re_names
if debug:
for i in range(len(re_text)):
debuglog.info("lex: state '%s' : regex[%d] = '%s'",state, i, re_text[i])
# For inclusive states, we need to add the regular expressions from the INITIAL state
for state,stype in stateinfo.items():
if state != "INITIAL" and stype == 'inclusive':
lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
lexobj.lexstateinfo = stateinfo
lexobj.lexre = lexobj.lexstatere["INITIAL"]
lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
lexobj.lexreflags = reflags
# Set up ignore variables
lexobj.lexstateignore = linfo.ignore
lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
# Set up error functions
lexobj.lexstateerrorf = linfo.errorf
lexobj.lexerrorf = linfo.errorf.get("INITIAL",None)
if not lexobj.lexerrorf:
errorlog.warning("No t_error rule is defined")
# Check state information for ignore and error rules
for s,stype in stateinfo.items():
if stype == 'exclusive':
if not s in linfo.errorf:
errorlog.warning("No error rule is defined for exclusive state '%s'", s)
if not s in linfo.ignore and lexobj.lexignore:
errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
elif stype == 'inclusive':
if not s in linfo.errorf:
linfo.errorf[s] = linfo.errorf.get("INITIAL",None)
if not s in linfo.ignore:
linfo.ignore[s] = linfo.ignore.get("INITIAL","")
# Create global versions of the token() and input() functions
token = lexobj.token
input = lexobj.input
lexer = lexobj
# If in optimize mode, we write the lextab
if lextab and optimize:
lexobj.writetab(lextab,outputdir)
return lexobj
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None,data=None):
if not data:
try:
filename = sys.argv[1]
f = open(filename)
data = f.read()
f.close()
except IndexError:
sys.stdout.write("Reading from standard input (type EOF to end):\n")
data = sys.stdin.read()
if lexer:
_input = lexer.input
else:
_input = input
_input(data)
if lexer:
_token = lexer.token
else:
_token = token
while 1:
tok = _token()
if not tok: break
sys.stdout.write("(%s,%r,%d,%d)\n" % (tok.type, tok.value, tok.lineno,tok.lexpos))
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to set the regex expression on a function
# when its docstring might need to be set in an alternative way
# -----------------------------------------------------------------------------
def TOKEN(r):
def set_doc(f):
if hasattr(r,"__call__"):
f.__doc__ = r.__doc__
else:
f.__doc__ = r
return f
return set_doc
# Alternative spelling of the TOKEN decorator
Token = TOKEN
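# -----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of ply): a token list,
# one function rule, one string rule, and the input()/token() loop that the
# Lexer section above documents.
#
#   tokens = ('NUMBER', 'PLUS')
#   t_PLUS = r'\+'
#   t_ignore = ' \t'
#
#   def t_NUMBER(t):
#       r'\d+'
#       t.value = int(t.value)
#       return t
#
#   def t_error(t):
#       t.lexer.skip(1)
#
#   mylexer = lex()
#   mylexer.input("1 + 2")
#   for tok in mylexer:
#       print(tok.type, tok.value)
# -----------------------------------------------------------------------------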
|
mpl-2.0
|
tomjelinek/pcs
|
pcs_test/tier0/lib/test_stonith_agent.py
|
3
|
7009
|
from unittest import mock, TestCase
from lxml import etree
from pcs_test.tools.assertions import assert_report_item_list_equal
from pcs.common.reports import ReportItemSeverity as severity
from pcs.common.reports import codes as report_codes
from pcs.lib import resource_agent as lib_ra
from pcs.lib.external import CommandRunner
class ValidateParameters(TestCase):
def setUp(self):
self.agent = lib_ra.StonithAgent(
mock.MagicMock(spec_set=CommandRunner), "fence_dummy"
)
self.metadata = etree.XML(
"""
<resource-agent>
<parameters>
<parameter name="test_param" required="0">
<longdesc>Long description</longdesc>
<shortdesc>short description</shortdesc>
<content type="string" default="default_value" />
</parameter>
<parameter name="required_param" required="1">
<content type="boolean" />
</parameter>
<parameter name="action">
<content type="string" default="reboot" />
<shortdesc>Fencing action</shortdesc>
</parameter>
</parameters>
</resource-agent>
"""
)
patcher = mock.patch.object(lib_ra.StonithAgent, "_get_metadata")
self.addCleanup(patcher.stop)
self.get_metadata = patcher.start()
self.get_metadata.return_value = self.metadata
patcher_fenced = mock.patch.object(
lib_ra.FencedMetadata, "_get_metadata"
)
self.addCleanup(patcher_fenced.stop)
self.get_fenced_metadata = patcher_fenced.start()
self.get_fenced_metadata.return_value = etree.XML(
"""
<resource-agent>
<parameters />
</resource-agent>
"""
)
self.report_error = (
severity.ERROR,
report_codes.DEPRECATED_OPTION,
{
"option_name": "action",
"option_type": "stonith",
"replaced_by": ["pcmk_off_action", "pcmk_reboot_action"],
},
report_codes.FORCE_OPTIONS,
)
self.report_warning = (
severity.WARNING,
report_codes.DEPRECATED_OPTION,
{
"option_name": "action",
"option_type": "stonith",
"replaced_by": ["pcmk_off_action", "pcmk_reboot_action"],
},
None,
)
class ValidateParametersCreate(ValidateParameters):
def test_action_is_deprecated(self):
assert_report_item_list_equal(
self.agent.validate_parameters_create(
{
"action": "reboot",
"required_param": "value",
}
),
[
self.report_error,
],
)
def test_action_is_deprecated_forced(self):
assert_report_item_list_equal(
self.agent.validate_parameters_create(
{
"action": "reboot",
"required_param": "value",
},
force=True,
),
[
self.report_warning,
],
)
def test_action_not_reported_deprecated_when_empty(self):
assert_report_item_list_equal(
self.agent.validate_parameters_create(
{
"action": "",
"required_param": "value",
}
),
[],
)
class ValidateParametersUpdate(ValidateParameters):
def test_action_is_deprecated(self):
assert_report_item_list_equal(
self.agent.validate_parameters_update(
{
"required_param": "value",
},
{
"action": "reboot",
},
),
[
self.report_error,
],
)
def test_action_not_reported_when_not_updated(self):
assert_report_item_list_equal(
self.agent.validate_parameters_update(
{
"required_param": "value",
"action": "reboot",
},
{
"required_param": "value2",
},
),
[],
)
def test_action_is_deprecated_when_set_already(self):
assert_report_item_list_equal(
self.agent.validate_parameters_update(
{
"required_param": "value",
"action": "off",
},
{
"action": "reboot",
},
),
[
self.report_error,
],
)
def test_action_is_deprecated_forced(self):
assert_report_item_list_equal(
self.agent.validate_parameters_update(
{
"required_param": "value",
},
{
"action": "reboot",
},
force=True,
),
[
self.report_warning,
],
)
def test_action_not_reported_deprecated_when_empty(self):
assert_report_item_list_equal(
self.agent.validate_parameters_update(
{
"required_param": "value",
"action": "reboot",
},
{
"action": "",
},
),
[],
)
@mock.patch.object(lib_ra.StonithAgent, "get_actions")
class StonithAgentMetadataGetCibDefaultActions(TestCase):
fixture_actions = [
{"name": "custom1", "timeout": "40s"},
{"name": "custom2", "interval": "25s", "timeout": "60s"},
{"name": "meta-data"},
{"name": "monitor", "interval": "10s", "timeout": "30s"},
{"name": "start", "interval": "40s"},
{"name": "status", "interval": "15s", "timeout": "20s"},
{"name": "validate-all"},
]
def setUp(self):
self.agent = lib_ra.StonithAgent(
mock.MagicMock(spec_set=CommandRunner), "fence_dummy"
)
def test_select_only_actions_for_cib(self, get_actions):
get_actions.return_value = self.fixture_actions
self.assertEqual(
[{"name": "monitor", "interval": "10s", "timeout": "30s"}],
self.agent.get_cib_default_actions(),
)
def test_select_only_necessary_actions_for_cib(self, get_actions):
get_actions.return_value = self.fixture_actions
self.assertEqual(
[{"name": "monitor", "interval": "10s", "timeout": "30s"}],
self.agent.get_cib_default_actions(necessary_only=True),
)
|
gpl-2.0
|
tjth/lotterycoin
|
qa/rpc-tests/replace-by-fee.py
|
24
|
22010
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test replace by fee code
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
import binascii
COIN = 100000000
MAX_REPLACEMENT_LIMIT = 100
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
def txToHex(tx):
return binascii.hexlify(tx.serialize()).decode('utf-8')
def make_utxo(node, amount, confirmed=True, scriptPubKey=CScript([1])):
"""Create a txout with a given amount and scriptPubKey
Mines coins as needed.
confirmed - txouts created will be confirmed in the blockchain;
unconfirmed otherwise.
"""
fee = 1*COIN
while node.getbalance() < satoshi_round((amount + fee)/COIN):
node.generate(100)
#print (node.getbalance(), amount, fee)
new_addr = node.getnewaddress()
#print new_addr
txid = node.sendtoaddress(new_addr, satoshi_round((amount+fee)/COIN))
tx1 = node.getrawtransaction(txid, 1)
txid = int(txid, 16)
i = None
for i, txout in enumerate(tx1['vout']):
#print i, txout['scriptPubKey']['addresses']
if txout['scriptPubKey']['addresses'] == [new_addr]:
#print i
break
assert i is not None
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(txid, i))]
tx2.vout = [CTxOut(amount, scriptPubKey)]
tx2.rehash()
signed_tx = node.signrawtransaction(txToHex(tx2))
txid = node.sendrawtransaction(signed_tx['hex'], True)
# If requested, ensure txouts are confirmed.
if confirmed:
mempool_size = len(node.getrawmempool())
while mempool_size > 0:
node.generate(1)
new_size = len(node.getrawmempool())
# Error out if we have something stuck in the mempool, as this
# would likely be a bug.
assert(new_size < mempool_size)
mempool_size = new_size
return COutPoint(int(txid, 16), 0)
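# Worked instance of the replacement rule these tests exercise (an
# illustrative summary; the precise policy lives in the node, not here): a
# replacement must pay strictly more total fee than what it evicts. Spending
# a 1.1*COIN outpoint, tx1a paying out 1*COIN leaves a 0.1*COIN fee; a
# conflicting tx1b paying out 0.9*COIN (0.2*COIN fee) may replace it, while
# another 1*COIN-output double-spend may not.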
class ReplaceByFeeTest(BitcoinTestFramework):
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000", "-debug",
"-relaypriority=0", "-whitelist=127.0.0.1",
"-limitancestorcount=50",
"-limitancestorsize=101",
"-limitdescendantcount=200",
"-limitdescendantsize=101"
]))
self.is_network_split = False
def run_test(self):
make_utxo(self.nodes[0], 1*COIN)
print "Running test simple doublespend..."
self.test_simple_doublespend()
print "Running test doublespend chain..."
self.test_doublespend_chain()
print "Running test doublespend tree..."
self.test_doublespend_tree()
print "Running test replacement feeperkb..."
self.test_replacement_feeperkb()
print "Running test spends of conflicting outputs..."
self.test_spends_of_conflicting_outputs()
print "Running test new unconfirmed inputs..."
self.test_new_unconfirmed_inputs()
print "Running test too many replacements..."
self.test_too_many_replacements()
print "Running test opt-in..."
self.test_opt_in()
print "Running test prioritised transactions..."
self.test_prioritised_transactions()
print "Passed\n"
def test_simple_doublespend(self):
"""Simple doublespend"""
tx0_outpoint = make_utxo(self.nodes[0], 1.1*COIN)
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Should fail because we haven't changed the fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(1*COIN, CScript([b'b']))]
tx1b_hex = txToHex(tx1b)
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False)
# Extra 0.1 BTC fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(0.9*COIN, CScript([b'b']))]
tx1b_hex = txToHex(tx1b)
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
mempool = self.nodes[0].getrawmempool()
assert (tx1a_txid not in mempool)
assert (tx1b_txid in mempool)
assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid))
def test_doublespend_chain(self):
"""Doublespend of a long chain"""
initial_nValue = 50*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
prevout = tx0_outpoint
remaining_value = initial_nValue
chain_txids = []
while remaining_value > 10*COIN:
remaining_value -= 1*COIN
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = [CTxOut(remaining_value, CScript([1]))]
tx_hex = txToHex(tx)
txid = self.nodes[0].sendrawtransaction(tx_hex, True)
chain_txids.append(txid)
prevout = COutPoint(int(txid, 16), 0)
# Whether the double-spend is allowed is evaluated by including all
# child fees - 40 BTC - so this attempt is rejected.
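        # (Worked numbers: the loop above creates 50 - 10 = 40 transactions,
        # each paying 1 BTC in fees, so a replacement must beat 40 BTC in
        # total fees; this dbl_tx pays only 50 - 20 = 30 BTC.)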
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 30*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
try:
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False) # transaction mistakenly accepted!
# Accepted with sufficient fee
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(1*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
mempool = self.nodes[0].getrawmempool()
for doublespent_txid in chain_txids:
assert(doublespent_txid not in mempool)
def test_doublespend_tree(self):
"""Doublespend of a big tree of transactions"""
initial_nValue = 50*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _total_txs=None):
if _total_txs is None:
_total_txs = [0]
if _total_txs[0] >= max_txs:
return
txout_value = (initial_value - fee) // tree_width
if txout_value < fee:
return
vout = [CTxOut(txout_value, CScript([i+1]))
for i in range(tree_width)]
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = vout
tx_hex = txToHex(tx)
assert(len(tx.serialize()) < 100000)
txid = self.nodes[0].sendrawtransaction(tx_hex, True)
yield tx
_total_txs[0] += 1
txid = int(txid, 16)
for i, txout in enumerate(tx.vout):
for x in branch(COutPoint(txid, i), txout_value,
max_txs,
tree_width=tree_width, fee=fee,
_total_txs=_total_txs):
yield x
fee = 0.0001*COIN
n = MAX_REPLACEMENT_LIMIT
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
# Attempt double-spend, will fail because too little fee paid
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - fee*n, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
try:
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False)
# 1 BTC fee is enough
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - fee*n - 1*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
mempool = self.nodes[0].getrawmempool()
for tx in tree_txs:
tx.rehash()
assert (tx.hash not in mempool)
# Try again, but with more total transactions than the "max txs
# double-spent at once" anti-DoS limit.
for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2):
fee = 0.0001*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 2*fee*n, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
try:
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
assert_equal("too many potential replacements" in exp.error['message'], True)
else:
assert(False)
for tx in tree_txs:
tx.rehash()
self.nodes[0].getrawtransaction(tx.hash)
def test_replacement_feeperkb(self):
"""Replacement requires fee-per-KB to be higher"""
tx0_outpoint = make_utxo(self.nodes[0], 1.1*COIN)
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Higher fee, but the fee per KB is much lower, so the replacement is
# rejected.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(0.001*COIN, CScript([b'a'*999000]))]
tx1b_hex = txToHex(tx1b)
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False)
def test_spends_of_conflicting_outputs(self):
"""Replacements that spend conflicting tx outputs are rejected"""
utxo1 = make_utxo(self.nodes[0], 1.2*COIN)
utxo2 = make_utxo(self.nodes[0], 3.0*COIN)
tx1a = CTransaction()
tx1a.vin = [CTxIn(utxo1, nSequence=0)]
tx1a.vout = [CTxOut(1.1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
tx1a_txid = int(tx1a_txid, 16)
# Direct spend an output of the transaction we're replacing.
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)]
tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0))
tx2.vout = tx1a.vout
tx2_hex = txToHex(tx2)
try:
tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Spend tx1a's output to test the indirect case.
tx1b = CTransaction()
tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
tx1b.vout = [CTxOut(1.0*COIN, CScript([b'a']))]
tx1b_hex = txToHex(tx1b)
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
tx1b_txid = int(tx1b_txid, 16)
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0),
CTxIn(COutPoint(tx1b_txid, 0))]
tx2.vout = tx1a.vout
tx2_hex = txToHex(tx2)
try:
tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
def test_new_unconfirmed_inputs(self):
"""Replacements that add new unconfirmed inputs are rejected"""
confirmed_utxo = make_utxo(self.nodes[0], 1.1*COIN)
unconfirmed_utxo = make_utxo(self.nodes[0], 0.1*COIN, False)
tx1 = CTransaction()
tx1.vin = [CTxIn(confirmed_utxo)]
tx1.vout = [CTxOut(1.0*COIN, CScript([b'a']))]
tx1_hex = txToHex(tx1)
tx1_txid = self.nodes[0].sendrawtransaction(tx1_hex, True)
tx2 = CTransaction()
tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)]
tx2.vout = tx1.vout
tx2_hex = txToHex(tx2)
try:
tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
def test_too_many_replacements(self):
"""Replacements that evict too many transactions are rejected"""
# Try directly replacing more than MAX_REPLACEMENT_LIMIT
# transactions
# Start by creating a single transaction with many outputs
initial_nValue = 10*COIN
utxo = make_utxo(self.nodes[0], initial_nValue)
fee = 0.0001*COIN
split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1))
actual_fee = initial_nValue - split_value*(MAX_REPLACEMENT_LIMIT+1)
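        # (split_value divides the 10 BTC pot, minus a small fee, across
        # MAX_REPLACEMENT_LIMIT+1 = 101 outputs; actual_fee is whatever
        # remainder the integer division leaves for the miner.)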
outputs = []
for i in range(MAX_REPLACEMENT_LIMIT+1):
outputs.append(CTxOut(split_value, CScript([1])))
splitting_tx = CTransaction()
splitting_tx.vin = [CTxIn(utxo, nSequence=0)]
splitting_tx.vout = outputs
splitting_tx_hex = txToHex(splitting_tx)
txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, True)
txid = int(txid, 16)
# Now spend each of those outputs individually
for i in range(MAX_REPLACEMENT_LIMIT+1):
tx_i = CTransaction()
tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)]
tx_i.vout = [CTxOut(split_value-fee, CScript([b'a']))]
tx_i_hex = txToHex(tx_i)
self.nodes[0].sendrawtransaction(tx_i_hex, True)
# Now create doublespend of the whole lot; should fail.
# Need a big enough fee to cover all spending transactions and have
# a higher fee rate
double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1)
inputs = []
for i in range(MAX_REPLACEMENT_LIMIT+1):
inputs.append(CTxIn(COutPoint(txid, i), nSequence=0))
double_tx = CTransaction()
double_tx.vin = inputs
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = txToHex(double_tx)
try:
self.nodes[0].sendrawtransaction(double_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
assert_equal("too many potential replacements" in exp.error['message'], True)
else:
assert(False)
# If we remove an input, it should pass
double_tx = CTransaction()
double_tx.vin = inputs[0:-1]
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = txToHex(double_tx)
self.nodes[0].sendrawtransaction(double_tx_hex, True)
def test_opt_in(self):
""" Replacing should only work if orig tx opted in """
tx0_outpoint = make_utxo(self.nodes[0], 1.1*COIN)
# Create a non-opting in transaction
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Shouldn't be able to double-spend
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(0.9*COIN, CScript([b'b']))]
tx1b_hex = txToHex(tx1b)
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
print tx1b_txid
assert(False)
tx1_outpoint = make_utxo(self.nodes[0], 1.1*COIN)
# Create a different non-opting in transaction
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)]
tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx2a_hex = txToHex(tx2a)
tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)
# Still shouldn't be able to double-spend
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2b.vout = [CTxOut(0.9*COIN, CScript([b'b']))]
tx2b_hex = txToHex(tx2b)
try:
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Now create a new transaction that spends from tx1a and tx2a
# opt-in on one of the inputs
# Transaction should be replaceable on either input
tx1a_txid = int(tx1a_txid, 16)
tx2a_txid = int(tx2a_txid, 16)
tx3a = CTransaction()
tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff),
CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)]
tx3a.vout = [CTxOut(0.9*COIN, CScript([b'c'])), CTxOut(0.9*COIN, CScript([b'd']))]
tx3a_hex = txToHex(tx3a)
self.nodes[0].sendrawtransaction(tx3a_hex, True)
tx3b = CTransaction()
tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
tx3b.vout = [CTxOut(0.5*COIN, CScript([b'e']))]
tx3b_hex = txToHex(tx3b)
tx3c = CTransaction()
tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)]
tx3c.vout = [CTxOut(0.5*COIN, CScript([b'f']))]
tx3c_hex = txToHex(tx3c)
self.nodes[0].sendrawtransaction(tx3b_hex, True)
# If tx3b was accepted, tx3c won't look like a replacement,
# but make sure it is accepted anyway
self.nodes[0].sendrawtransaction(tx3c_hex, True)
def test_prioritised_transactions(self):
# Ensure that fee deltas used via prioritisetransaction are
# correctly used by replacement logic
# 1. Check that feeperkb uses modified fees
tx0_outpoint = make_utxo(self.nodes[0], 1.1*COIN)
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Higher fee, but the actual fee per KB is much lower.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(0.001*COIN, CScript([b'a'*740000]))]
tx1b_hex = txToHex(tx1b)
# Verify tx1b cannot replace tx1a.
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Use prioritisetransaction to set tx1a's fee to 0.
self.nodes[0].prioritisetransaction(tx1a_txid, 0, int(-0.1*COIN))
# Now tx1b should be able to replace tx1a
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
assert(tx1b_txid in self.nodes[0].getrawmempool())
# 2. Check that absolute fee checks use modified fee.
tx1_outpoint = make_utxo(self.nodes[0], 1.1*COIN)
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx2a_hex = txToHex(tx2a)
tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)
# Lower fee, but we'll prioritise it
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2b.vout = [CTxOut(1.01*COIN, CScript([b'a']))]
tx2b.rehash()
tx2b_hex = txToHex(tx2b)
# Verify tx2b cannot replace tx2a.
try:
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Now prioritise tx2b to have a higher modified fee
self.nodes[0].prioritisetransaction(tx2b.hash, 0, int(0.1*COIN))
# tx2b should now be accepted
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
assert(tx2b_txid in self.nodes[0].getrawmempool())
if __name__ == '__main__':
ReplaceByFeeTest().main()
|
mit
|
ThreatConnect-Inc/tcex
|
tcex/sessions/external_session.py
|
2
|
12578
|
"""ThreatConnect Requests Session"""
# standard library
import logging
import time
from typing import Callable, Optional
# third-party
import urllib3
from requests import Response, Session, adapters, exceptions
from requests.adapters import DEFAULT_POOLBLOCK, DEFAULT_POOLSIZE, DEFAULT_RETRIES
from urllib3.util.retry import Retry
from ..utils import Utils
from .rate_limit_handler import RateLimitHandler
# disable ssl warning message
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def default_too_many_requests_handler(response: Response) -> float:
"""Implement 429 response handling that uses the Retry-After header.
Will return the value in Retry-After. See: https://tools.ietf.org/html/rfc6585#page-3.
Assumptions:
- Response has a Retry-After header.
Args:
response: The 429 response.
Returns:
The number of seconds to wait before sending the next request, from the Retry-After header.
"""
utils = Utils()
retry_after = response.headers.get('Retry-After', 0)
try:
seconds = (
float(utils.datetime.format_datetime(float(retry_after), date_format='%s'))
- time.time()
)
except RuntimeError:
# retry_after must be in seconds
seconds = retry_after
return float(seconds)
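# A minimal sketch of a custom handler (illustrative, not part of the
# library): any callable that takes the 429 Response and returns the number
# of seconds to wait can be assigned to too_many_requests_handler.
#
#   def fixed_backoff_handler(response: Response) -> float:  # hypothetical
#       """Always wait 30 seconds on a 429 response."""
#       return 30.0
#
#   session = ExternalSession(base_url='https://api.example.com')
#   session.too_many_requests_handler = fixed_backoff_handler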
class CustomAdapter(adapters.HTTPAdapter):
"""Custom Adapter to properly handle retries."""
def __init__(
self,
rate_limit_handler: Optional[RateLimitHandler] = None,
pool_connections=DEFAULT_POOLSIZE,
pool_maxsize=DEFAULT_POOLSIZE,
max_retries=DEFAULT_RETRIES,
pool_block=DEFAULT_POOLBLOCK,
):
"""Initialize CustomAdapter.
Args:
rate_limit_handler: RateLimitHandler responsible for throttling.
pool_connections: passed to super
pool_maxsize: passed to super
max_retries: passed to super
pool_block: passed to super
"""
super().__init__(pool_connections, pool_maxsize, max_retries, pool_block)
self._rate_limit_handler = rate_limit_handler
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
"""Send PreparedRequest object. Returns Response object."""
if self.rate_limit_handler:
self.rate_limit_handler.pre_send(request)
try:
response = super().send(request, stream, timeout, verify, cert, proxies)
except exceptions.RetryError:
# store current retries configuration
max_retries = self.max_retries
# temporarily disable retries and make one last request
self.max_retries = Retry(0, read=False)
# make request with max_retries turned off
response = super().send(request, stream, timeout, verify, cert, proxies)
# reset retries configuration
self.max_retries = max_retries
if self.rate_limit_handler:
self.rate_limit_handler.post_send(response)
return response
@property
def rate_limit_handler(self) -> RateLimitHandler:
"""Get the RateLimitHandler."""
return self._rate_limit_handler
@rate_limit_handler.setter
def rate_limit_handler(self, rate_limit_handler: RateLimitHandler) -> None:
"""Set the RateLimitHandler."""
self._rate_limit_handler = rate_limit_handler
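# Sketch (an assumption, not shown in the original source): CustomAdapter can
# also be mounted on a plain requests.Session to get the same retry-exhaustion
# behaviour without the rest of ExternalSession:
#
#   import requests
#   s = requests.Session()
#   s.mount('https://', CustomAdapter(max_retries=Retry(3, backoff_factor=0.3)))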
class ExternalSession(Session):
"""ThreatConnect REST API Requests Session for external requests
Args:
base_url (Optional[str] = None): The base URL for all requests.
logger (Optional[object] = None): An instance of Logger.
"""
__attrs__ = [
'adapters',
'auth',
'cert',
'cookies',
'headers',
'hooks',
'max_redirects',
'proxies',
'params',
'stream',
'verify',
'trust_env',
# custom attrs
'_base_url',
'_mask_headers',
'_mask_patterns',
'log',
'utils',
]
def __init__(self, base_url: Optional[str] = None, logger: Optional[object] = None):
"""Initialize the Class properties."""
super().__init__()
self._base_url: str = base_url
self.log: object = logger or logging.getLogger('session')
self._custom_adapter: Optional[CustomAdapter] = None
self.utils: object = Utils()
# properties
self._log_curl: bool = False
self._mask_body = False
self._mask_headers = True
self._mask_patterns = None
self._rate_limit_handler = RateLimitHandler()
self._too_many_requests_handler = None
# Add default Retry
self.retry()
@property
def base_url(self) -> str:
"""Return the base url."""
return self._base_url
@base_url.setter
def base_url(self, url):
"""Set base_url."""
self._base_url = url.strip('/')
@property
def log_curl(self) -> bool:
"""Return whether or not requests will be logged as a curl command."""
return self._log_curl
@log_curl.setter
def log_curl(self, log_curl: bool):
"""Enable or disable logging curl commands."""
self._log_curl = log_curl
@property
def mask_body(self) -> bool:
"""Return property"""
return self._mask_body
@mask_body.setter
def mask_body(self, mask_bool: bool):
"""Set property"""
self._mask_body = mask_bool
@property
def mask_headers(self) -> bool:
"""Return property"""
return self._mask_headers
@mask_headers.setter
def mask_headers(self, mask_bool: bool):
"""Set property"""
self._mask_headers = mask_bool
@property
def mask_patterns(self) -> list:
"""Return property"""
return self._mask_patterns
@mask_patterns.setter
def mask_patterns(self, patterns: list):
"""Set property"""
self._mask_patterns = patterns
@property
def too_many_requests_handler(self) -> Callable[[Response], float]:
"""Get the too_many_requests_handler.
The too_many_requests_handler is responsible for determining how long to sleep (in seconds)
on a 429 response. The default returns the value in the `Retry-After` header.
"""
if not self._too_many_requests_handler:
self._too_many_requests_handler = default_too_many_requests_handler
return self._too_many_requests_handler
@too_many_requests_handler.setter
def too_many_requests_handler(self, too_many_requests_handler: Callable[[Response], float]):
"""Set the too_many_requests_handler.
The too_many_requests_handler is responsible for determining how long to sleep (in seconds)
on a 429 response. The default returns the value in the `Retry-After` header.
Args:
too_many_requests_handler: callable that returns the number of seconds to wait on a
429 response.
"""
self._too_many_requests_handler = too_many_requests_handler
@property
def rate_limit_handler(self) -> RateLimitHandler:
"""Return the RateLimitHandler.
The RateLimitHandler is responsible for throttling request frequency. The default
implementation uses X-RateLimit-Remaining and X-RateLimit-Reset headers.
"""
return self._rate_limit_handler
@rate_limit_handler.setter
def rate_limit_handler(self, rate_limit_handler: RateLimitHandler):
"""Set the RateLimitHandler.
The RateLimitHandler is responsible for throttling request frequency. The default
implementation uses X-RateLimit-Remaining and X-RateLimit-Reset headers.
Args:
rate_limit_handler: the RateLimitHandler object to use.
"""
self._rate_limit_handler = rate_limit_handler
if self._custom_adapter:
self._custom_adapter.rate_limit_handler = rate_limit_handler
def request( # pylint: disable=arguments-differ
self, method: str, url: str, **kwargs
) -> object:
"""Override request method disabling verify on token renewal if disabled on session.
Args:
method (str): The HTTP method
url (str): The URL or path for the request.
Returns:
            object: The requests Response object.
"""
if self.base_url is not None and not url.startswith('https'):
url = f'{self.base_url}{url}'
# this kwargs value is used to signal 429 handling that this is a retry, but the super
# method doesn't expect it so it needs to be removed.
tc_is_retry = kwargs.pop('tc_is_retry', False)
response: Response = super().request(method, url, **kwargs)
if response.status_code == 429 and not tc_is_retry:
too_many_requests_handler = self.too_many_requests_handler
time.sleep(too_many_requests_handler(response))
kwargs['tc_is_retry'] = True
return self.request(method, url, **kwargs)
# APP-79 - adding logging of request as curl commands
if not response.ok or self.log_curl:
try:
self.log.debug(
self.utils.requests_to_curl(
response.request,
mask_body=self.mask_body,
mask_headers=self.mask_headers,
mask_patterns=self.mask_patterns,
proxies=self.proxies,
verify=self.verify,
)
)
except Exception: # nosec
pass # logging curl command is best effort
self.log.debug(
f'feature=external-session, request-url={response.request.url}, '
f'status_code={response.status_code}, elapsed={response.elapsed}'
)
return response
def rate_limit_config(
self,
limit_remaining_header: str = 'X-RateLimit-Remaining',
limit_reset_header: str = 'X-RateLimit-Reset',
remaining_threshold: int = 0,
):
"""Configure rate-limiting.
Configures the RateLimitHandler to use the given headers and remaining requests threshold.
Args:
limit_remaining_header: The header containing the number of requests remaining.
limit_reset_header: The header that specifies when the rate limit period will reset.
remaining_threshold: When the value in the limit_remaining_header is this value or
lower, sleep until the time from the limit_reset_header.
"""
self.rate_limit_handler.limit_remaining_header = limit_remaining_header
self.rate_limit_handler.limit_reset_header = limit_reset_header
self.rate_limit_handler.remaining_threshold = remaining_threshold
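    # Example call (hedged: the header names below are the defaults, and the
    # right names depend on the target API). Given an ExternalSession bound
    # as `session`, this sleeps once fewer than 5 requests remain:
    #
    #   session.rate_limit_config(
    #       limit_remaining_header='X-RateLimit-Remaining',
    #       limit_reset_header='X-RateLimit-Reset',
    #       remaining_threshold=5,
    #   )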
def retry(
self,
retries: Optional[int] = 3,
backoff_factor: Optional[float] = 0.3,
status_forcelist: Optional[list] = None,
**kwargs,
):
"""Add retry to Requests Session
https://urllib3.readthedocs.io/en/latest/reference/urllib3.util.html#urllib3.util.retry.Retry
Args:
retries (Optional[int] = 3): The number of retry attempts.
backoff_factor (Optional[float] = 0.3): The backoff factor for retries.
            status_forcelist (Optional[list] = [500, 502, 504]): A list of status codes to retry on.
            urls (list, kwargs): Optional URL prefixes to apply the retry to. If not provided, the
                retry applies to all requests with "https://".
"""
retry_object: object = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist or [500, 502, 504],
)
urls = kwargs.get('urls') or ['https://']
if self._custom_adapter:
self._custom_adapter.max_retries = retry_object
else:
self._custom_adapter = CustomAdapter(
rate_limit_handler=self.rate_limit_handler, max_retries=retry_object
)
# mount the custom adapter
for url in urls:
self.log.info(
f'feature=external-session, action=applying-retry, retries={retries}, '
f'backoff-factor={backoff_factor}, status-forcelist={status_forcelist}, url={url}'
)
self.mount(url, self._custom_adapter)
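# End-to-end usage sketch (illustrative; the base URL and endpoint are made up):
#
#   session = ExternalSession(base_url='https://api.example.com')
#   session.retry(retries=5, backoff_factor=1.0)  # replaces the default Retry
#   session.log_curl = True                       # also log requests as curl
#   response = session.request('GET', '/v1/items')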
|
apache-2.0
|
weimingtom/python-for-android
|
python3-alpha/python3-src/Lib/encodings/cp1252.py
|
272
|
13511
|
""" Python Character Mapping Codec cp1252 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1252.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1252',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u20ac' # 0x80 -> EURO SIGN
'\ufffe' # 0x81 -> UNDEFINED
'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u2020' # 0x86 -> DAGGER
'\u2021' # 0x87 -> DOUBLE DAGGER
'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
'\u2030' # 0x89 -> PER MILLE SIGN
'\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON
'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
'\ufffe' # 0x8D -> UNDEFINED
'\u017d' # 0x8E -> LATIN CAPITAL LETTER Z WITH CARON
'\ufffe' # 0x8F -> UNDEFINED
'\ufffe' # 0x90 -> UNDEFINED
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\u02dc' # 0x98 -> SMALL TILDE
'\u2122' # 0x99 -> TRADE MARK SIGN
'\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON
'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
'\ufffe' # 0x9D -> UNDEFINED
'\u017e' # 0x9E -> LATIN SMALL LETTER Z WITH CARON
'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\xa4' # 0xA4 -> CURRENCY SIGN
'\xa5' # 0xA5 -> YEN SIGN
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\xaf' # 0xAF -> MACRON
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\xb8' # 0xB8 -> CEDILLA
'\xb9' # 0xB9 -> SUPERSCRIPT ONE
'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
'\xbf' # 0xBF -> INVERTED QUESTION MARK
'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH
'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH
'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0xFE -> LATIN SMALL LETTER THORN
'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
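# Usage sketch (illustrative): the codec is normally reached through the
# standard codecs machinery rather than by importing this module directly.
#
#   'caf\xe9'.encode('cp1252')   # -> b'caf\xe9'
#   b'\x80'.decode('cp1252')     # -> '\u20ac' (0x80 -> EURO SIGN)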
|
apache-2.0
|
GraemeFulton/job-search
|
docutils-0.12/docutils/writers/latex2e/__init__.py
|
84
|
124479
|
# .. coding: utf-8
# $Id: __init__.py 7745 2014-02-28 14:15:59Z milde $
# Author: Engelbert Gruber, Günter Milde
# Maintainer: docutils-develop@lists.sourceforge.net
# Copyright: This module has been placed in the public domain.
"""LaTeX2e document tree Writer."""
__docformat__ = 'reStructuredText'
# code contributions from several people included, thanks to all.
# some named: David Abrahams, Julien Letessier, Lele Gaifax, and others.
#
# convention: deactivate code by two '#' characters, i.e. ##.
import sys
import os
import time
import re
import string
import urllib
try:
import roman
except ImportError:
import docutils.utils.roman as roman
from docutils import frontend, nodes, languages, writers, utils, io
from docutils.utils.error_reporting import SafeString
from docutils.transforms import writer_aux
from docutils.utils.math import pick_math_environment, unichar2tex
class Writer(writers.Writer):
supported = ('latex','latex2e')
"""Formats this writer supports."""
default_template = 'default.tex'
default_template_path = os.path.dirname(__file__)
default_preamble = '\n'.join([r'% PDF Standard Fonts',
r'\usepackage{mathptmx} % Times',
r'\usepackage[scaled=.90]{helvet}',
r'\usepackage{courier}'])
settings_spec = (
'LaTeX-Specific Options',
None,
(('Specify documentclass. Default is "article".',
['--documentclass'],
{'default': 'article', }),
('Specify document options. Multiple options can be given, '
'separated by commas. Default is "a4paper".',
['--documentoptions'],
{'default': 'a4paper', }),
('Footnotes with numbers/symbols by Docutils. (default)',
['--docutils-footnotes'],
{'default': True, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Alias for --docutils-footnotes (deprecated)',
['--use-latex-footnotes'],
{'action': 'store_true',
'validator': frontend.validate_boolean}),
('Use figure floats for footnote text (deprecated)',
['--figure-footnotes'],
{'action': 'store_true',
'validator': frontend.validate_boolean}),
('Format for footnote references: one of "superscript" or '
'"brackets". Default is "superscript".',
['--footnote-references'],
{'choices': ['superscript', 'brackets'], 'default': 'superscript',
'metavar': '<format>',
'overrides': 'trim_footnote_reference_space'}),
('Use \\cite command for citations. ',
['--use-latex-citations'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Use figure floats for citations '
'(might get mixed with real figures). (default)',
['--figure-citations'],
{'dest': 'use_latex_citations', 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Format for block quote attributions: one of "dash" (em-dash '
'prefix), "parentheses"/"parens", or "none". Default is "dash".',
['--attribution'],
{'choices': ['dash', 'parentheses', 'parens', 'none'],
'default': 'dash', 'metavar': '<format>'}),
('Specify LaTeX packages/stylesheets. '
' A style is referenced with \\usepackage if extension is '
'".sty" or omitted and with \\input else. '
' Overrides previous --stylesheet and --stylesheet-path settings.',
['--stylesheet'],
{'default': '', 'metavar': '<file[,file,...]>',
'overrides': 'stylesheet_path',
'validator': frontend.validate_comma_separated_list}),
('Comma separated list of LaTeX packages/stylesheets. '
'Relative paths are expanded if a matching file is found in '
'the --stylesheet-dirs. With --link-stylesheet, '
'the path is rewritten relative to the output *.tex file. ',
['--stylesheet-path'],
{'metavar': '<file[,file,...]>', 'overrides': 'stylesheet',
'validator': frontend.validate_comma_separated_list}),
('Link to the stylesheet(s) in the output file. (default)',
['--link-stylesheet'],
{'dest': 'embed_stylesheet', 'action': 'store_false'}),
('Embed the stylesheet(s) in the output file. '
'Stylesheets must be accessible during processing. ',
['--embed-stylesheet'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Comma-separated list of directories where stylesheets are found. '
'Used by --stylesheet-path when expanding relative path arguments. '
'Default: "."',
['--stylesheet-dirs'],
{'metavar': '<dir[,dir,...]>',
'validator': frontend.validate_comma_separated_list,
'default': ['.']}),
('Customization by LaTeX code in the preamble. '
'Default: select PDF standard fonts (Times, Helvetica, Courier).',
['--latex-preamble'],
{'default': default_preamble}),
('Specify the template file. Default: "%s".' % default_template,
['--template'],
{'default': default_template, 'metavar': '<file>'}),
('Table of contents by LaTeX. (default) ',
['--use-latex-toc'],
{'default': 1, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Table of contents by Docutils (without page numbers). ',
['--use-docutils-toc'],
{'dest': 'use_latex_toc', 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Add parts on top of the section hierarchy.',
['--use-part-section'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Attach author and date to the document info table. (default) ',
['--use-docutils-docinfo'],
{'dest': 'use_latex_docinfo', 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Attach author and date to the document title.',
['--use-latex-docinfo'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
("Typeset abstract as topic. (default)",
['--topic-abstract'],
{'dest': 'use_latex_abstract', 'action': 'store_false',
'validator': frontend.validate_boolean}),
("Use LaTeX abstract environment for the document's abstract. ",
['--use-latex-abstract'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Color of any hyperlinks embedded in text '
'(default: "blue", "false" to disable).',
['--hyperlink-color'], {'default': 'blue'}),
('Additional options to the "hyperref" package '
'(default: "").',
['--hyperref-options'], {'default': ''}),
('Enable compound enumerators for nested enumerated lists '
'(e.g. "1.2.a.ii"). Default: disabled.',
['--compound-enumerators'],
{'default': None, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable compound enumerators for nested enumerated lists. '
'This is the default.',
['--no-compound-enumerators'],
{'action': 'store_false', 'dest': 'compound_enumerators'}),
('Enable section ("." subsection ...) prefixes for compound '
         'enumerators. This has no effect without --compound-enumerators. '
'Default: disabled.',
['--section-prefix-for-enumerators'],
{'default': None, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable section prefixes for compound enumerators. '
'This is the default.',
['--no-section-prefix-for-enumerators'],
{'action': 'store_false', 'dest': 'section_prefix_for_enumerators'}),
('Set the separator between section number and enumerator '
'for compound enumerated lists. Default is "-".',
['--section-enumerator-separator'],
{'default': '-', 'metavar': '<char>'}),
        ('When possible, use the specified environment for literal-blocks. '
'Default is quoting of whitespace and special chars.',
['--literal-block-env'],
{'default': ''}),
        ('When possible, use verbatim for literal-blocks. '
'Compatibility alias for "--literal-block-env=verbatim".',
['--use-verbatim-when-possible'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Table style. "standard" with horizontal and vertical lines, '
'"booktabs" (LaTeX booktabs style) only horizontal lines '
'above and below the table and below the header or "borderless". '
'Default: "standard"',
['--table-style'],
{'choices': ['standard', 'booktabs','nolines', 'borderless'],
'default': 'standard',
'metavar': '<format>'}),
('LaTeX graphicx package option. '
'Possible values are "dvips", "pdftex". "auto" includes LaTeX code '
'to use "pdftex" if processing with pdf(la)tex and dvips otherwise. '
'Default is no option.',
['--graphicx-option'],
{'default': ''}),
('LaTeX font encoding. '
'Possible values are "", "T1" (default), "OT1", "LGR,T1" or '
'any other combination of options to the `fontenc` package. ',
['--font-encoding'],
{'default': 'T1'}),
        ('By default the latex-writer puts the reference title into '
'hyperreferences. Specify "ref*" or "pageref*" to get the section '
'number or the page number.',
['--reference-label'],
{'default': None, }),
('Specify style and database for bibtex, for example '
'"--use-bibtex=mystyle,mydb1,mydb2".',
['--use-bibtex'],
{'default': None, }),
),)
settings_defaults = {'sectnum_depth': 0 # updated by SectNum transform
}
config_section = 'latex2e writer'
config_section_dependencies = ('writers',)
head_parts = ('head_prefix', 'requirements', 'latex_preamble',
'stylesheet', 'fallbacks', 'pdfsetup',
'title', 'subtitle', 'titledata')
visitor_attributes = head_parts + ('body_pre_docinfo', 'docinfo',
'dedication', 'abstract', 'body')
output = None
"""Final translated form of `document`."""
def __init__(self):
writers.Writer.__init__(self)
self.translator_class = LaTeXTranslator
# Override parent method to add latex-specific transforms
def get_transforms(self):
return writers.Writer.get_transforms(self) + [
# Convert specific admonitions to generic one
writer_aux.Admonitions,
# TODO: footnote collection transform
]
def translate(self):
visitor = self.translator_class(self.document)
self.document.walkabout(visitor)
# copy parts
for part in self.visitor_attributes:
setattr(self, part, getattr(visitor, part))
# get template string from file
try:
template_file = open(self.document.settings.template, 'rb')
except IOError:
template_file = open(os.path.join(self.default_template_path,
self.document.settings.template), 'rb')
template = string.Template(unicode(template_file.read(), 'utf-8'))
template_file.close()
# fill template
self.assemble_parts() # create dictionary of parts
self.output = template.substitute(self.parts)
def assemble_parts(self):
"""Assemble the `self.parts` dictionary of output fragments."""
writers.Writer.assemble_parts(self)
for part in self.visitor_attributes:
lines = getattr(self, part)
if part in self.head_parts:
if lines:
lines.append('') # to get a trailing newline
self.parts[part] = '\n'.join(lines)
else:
# body contains inline elements, so join without newline
self.parts[part] = ''.join(lines)
class Babel(object):
"""Language specifics for LaTeX."""
# TeX (babel) language names:
# ! not all of these are supported by Docutils!
#
# based on LyX' languages file with adaptions to `BCP 47`_
# (http://www.rfc-editor.org/rfc/bcp/bcp47.txt) and
# http://www.tug.org/TUGboat/Articles/tb29-3/tb93miklavec.pdf
# * the key without subtags is the default
# * case is ignored
# cf. http://docutils.sourceforge.net/docs/howto/i18n.html
# http://www.w3.org/International/articles/language-tags/
# and http://www.iana.org/assignments/language-subtag-registry
language_codes = {
# code TeX/Babel-name comment
'af': 'afrikaans',
'ar': 'arabic',
# 'be': 'belarusian',
'bg': 'bulgarian',
'br': 'breton',
'ca': 'catalan',
# 'cop': 'coptic',
'cs': 'czech',
'cy': 'welsh',
'da': 'danish',
'de': 'ngerman', # new spelling (de_1996)
'de-1901': 'german', # old spelling
'de-AT': 'naustrian',
'de-AT-1901': 'austrian',
'dsb': 'lowersorbian',
'el': 'greek', # monotonic (el-monoton)
'el-polyton': 'polutonikogreek',
'en': 'english', # TeX' default language
'en-AU': 'australian',
'en-CA': 'canadian',
'en-GB': 'british',
'en-NZ': 'newzealand',
'en-US': 'american',
'eo': 'esperanto',
'es': 'spanish',
'et': 'estonian',
'eu': 'basque',
# 'fa': 'farsi',
'fi': 'finnish',
'fr': 'french',
'fr-CA': 'canadien',
'ga': 'irish', # Irish Gaelic
# 'grc': # Ancient Greek
'grc-ibycus': 'ibycus', # Ibycus encoding
'gl': 'galician',
'he': 'hebrew',
'hr': 'croatian',
'hsb': 'uppersorbian',
'hu': 'magyar',
'ia': 'interlingua',
'id': 'bahasai', # Bahasa (Indonesian)
'is': 'icelandic',
'it': 'italian',
'ja': 'japanese',
'kk': 'kazakh',
'la': 'latin',
'lt': 'lithuanian',
'lv': 'latvian',
'mn': 'mongolian', # Mongolian, Cyrillic script (mn-cyrl)
'ms': 'bahasam', # Bahasa (Malay)
'nb': 'norsk', # Norwegian Bokmal
'nl': 'dutch',
'nn': 'nynorsk', # Norwegian Nynorsk
'no': 'norsk', # Norwegian (Bokmal)
'pl': 'polish',
'pt': 'portuges',
'pt-BR': 'brazil',
'ro': 'romanian',
'ru': 'russian',
'se': 'samin', # North Sami
'sh-Cyrl': 'serbianc', # Serbo-Croatian, Cyrillic script
'sh-Latn': 'serbian', # Serbo-Croatian, Latin script see also 'hr'
'sk': 'slovak',
'sl': 'slovene',
'sq': 'albanian',
'sr': 'serbianc', # Serbian, Cyrillic script (contributed)
'sr-Latn': 'serbian', # Serbian, Latin script
'sv': 'swedish',
# 'th': 'thai',
'tr': 'turkish',
'uk': 'ukrainian',
'vi': 'vietnam',
# zh-Latn: Chinese Pinyin
}
# normalize (downcase) keys
language_codes = dict([(k.lower(), v) for (k,v) in language_codes.items()])
warn_msg = 'Language "%s" not supported by LaTeX (babel)'
# "Active characters" are shortcuts that start a LaTeX macro and may need
    # escaping for literal use. Characters that prevent literal use (e.g.
# starting accent macros like "a -> ä) will be deactivated if one of the
# defining languages is used in the document.
# Special cases:
# ~ (tilde) -- used in estonian, basque, galician, and old versions of
# spanish -- cannot be deactivated as it denotes a no-break space macro,
# " (straight quote) -- used in albanian, austrian, basque
# brazil, bulgarian, catalan, czech, danish, dutch, estonian,
# finnish, galician, german, icelandic, italian, latin, naustrian,
# ngerman, norsk, nynorsk, polish, portuges, russian, serbian, slovak,
# slovene, spanish, swedish, ukrainian, and uppersorbian --
# is escaped as ``\textquotedbl``.
active_chars = {# TeX/Babel-name: active characters to deactivate
# 'breton': ':;!?' # ensure whitespace
# 'esperanto': '^',
# 'estonian': '~"`',
# 'french': ':;!?' # ensure whitespace
'galician': '.<>', # also '~"'
# 'magyar': '`', # for special hyphenation cases
'spanish': '.<>', # old versions also '~'
# 'turkish': ':!=' # ensure whitespace
}
def __init__(self, language_code, reporter=None):
self.reporter = reporter
self.language = self.language_name(language_code)
self.otherlanguages = {}
def __call__(self):
"""Return the babel call with correct options and settings"""
languages = sorted(self.otherlanguages.keys())
languages.append(self.language or 'english')
self.setup = [r'\usepackage[%s]{babel}' % ','.join(languages)]
# Deactivate "active characters"
shorthands = []
for c in ''.join([self.active_chars.get(l, '') for l in languages]):
if c not in shorthands:
shorthands.append(c)
if shorthands:
self.setup.append(r'\AtBeginDocument{\shorthandoff{%s}}'
% ''.join(shorthands))
# Including '~' in shorthandoff prevents its use as no-break space
if 'galician' in languages:
self.setup.append(r'\deactivatetilden % restore ~ in Galician')
if 'estonian' in languages:
self.setup.extend([r'\makeatletter',
r' \addto\extrasestonian{\bbl@deactivate{~}}',
r'\makeatother'])
if 'basque' in languages:
self.setup.extend([r'\makeatletter',
r' \addto\extrasbasque{\bbl@deactivate{~}}',
r'\makeatother'])
if (languages[-1] == 'english' and
'french' in self.otherlanguages.keys()):
self.setup += ['% Prevent side-effects if French hyphenation '
'patterns are not loaded:',
r'\frenchbsetup{StandardLayout}',
r'\AtBeginDocument{\selectlanguage{%s}'
r'\noextrasfrench}' % self.language]
return '\n'.join(self.setup)
def language_name(self, language_code):
"""Return TeX language name for `language_code`"""
for tag in utils.normalize_language_tag(language_code):
try:
return self.language_codes[tag]
except KeyError:
pass
if self.reporter is not None:
self.reporter.warning(self.warn_msg % language_code)
return ''
def get_language(self):
# Obsolete, kept for backwards compatibility with Sphinx
return self.language
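# Usage sketch (illustrative, not part of the original module): Babel maps
# BCP 47-style codes to babel option names and renders the setup code, e.g.
#
#   >>> Babel('de')()
#   '\\usepackage[ngerman]{babel}'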
# Building blocks for the latex preamble
# --------------------------------------
class SortableDict(dict):
"""Dictionary with additional sorting methods
    Tip: use keys starting with '_' for sorting before small letters
and with '~' for sorting after small letters.
"""
def sortedkeys(self):
"""Return sorted list of keys"""
keys = self.keys()
keys.sort()
return keys
def sortedvalues(self):
"""Return list of values sorted by keys"""
return [self[key] for key in self.sortedkeys()]
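# Sketch of the intended sorting trick (illustrative):
#
#   d = SortableDict({'_first': 1, 'b': 2, '~last': 3})
#   d.sortedkeys()  # -> ['_first', 'b', '~last']  ('_' < letters < '~' in ASCII)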
# PreambleCmds
# `````````````
# A container for LaTeX code snippets that can be
# inserted into the preamble if required in the document.
#
# .. The package 'makecmds' would enable shorter definitions using the
# \providelength and \provideenvironment commands.
# However, it is pretty non-standard (texlive-latex-extra).
class PreambleCmds(object):
"""Building blocks for the latex preamble."""
PreambleCmds.abstract = r"""
% abstract title
\providecommand*{\DUtitleabstract}[1]{\centering\textbf{#1}}"""
PreambleCmds.admonition = r"""
% admonition (specially marked topic)
\providecommand{\DUadmonition}[2][class-arg]{%
% try \DUadmonition#1{#2}:
\ifcsname DUadmonition#1\endcsname%
\csname DUadmonition#1\endcsname{#2}%
\else
\begin{center}
\fbox{\parbox{0.9\textwidth}{#2}}
\end{center}
\fi
}"""
PreambleCmds.align_center = r"""
\makeatletter
\@namedef{DUrolealign-center}{\centering}
\makeatother
"""
## PreambleCmds.caption = r"""% configure caption layout
## \usepackage{caption}
## \captionsetup{singlelinecheck=false}% no exceptions for one-liners"""
PreambleCmds.color = r"""\usepackage{color}"""
PreambleCmds.docinfo = r"""
% docinfo (width of docinfo table)
\DUprovidelength{\DUdocinfowidth}{0.9\textwidth}"""
# PreambleCmds.docinfo._depends = 'providelength'
PreambleCmds.dedication = r"""
% dedication topic
\providecommand{\DUtopicdedication}[1]{\begin{center}#1\end{center}}"""
PreambleCmds.error = r"""
% error admonition title
\providecommand*{\DUtitleerror}[1]{\DUtitle{\color{red}#1}}"""
# PreambleCmds.errortitle._depends = 'color'
PreambleCmds.fieldlist = r"""
% fieldlist environment
\ifthenelse{\isundefined{\DUfieldlist}}{
\newenvironment{DUfieldlist}%
{\quote\description}
{\enddescription\endquote}
}{}"""
PreambleCmds.float_settings = r"""\usepackage{float} % float configuration
\floatplacement{figure}{H} % place figures here definitely"""
PreambleCmds.footnotes = r"""% numeric or symbol footnotes with hyperlinks
\providecommand*{\DUfootnotemark}[3]{%
\raisebox{1em}{\hypertarget{#1}{}}%
\hyperlink{#2}{\textsuperscript{#3}}%
}
\providecommand{\DUfootnotetext}[4]{%
\begingroup%
\renewcommand{\thefootnote}{%
\protect\raisebox{1em}{\protect\hypertarget{#1}{}}%
\protect\hyperlink{#2}{#3}}%
\footnotetext{#4}%
\endgroup%
}"""
PreambleCmds.footnote_floats = r"""% settings for footnotes as floats:
\setlength{\floatsep}{0.5em}
\setlength{\textfloatsep}{\fill}
\addtolength{\textfloatsep}{3em}
\renewcommand{\textfraction}{0.5}
\renewcommand{\topfraction}{0.5}
\renewcommand{\bottomfraction}{0.5}
\setcounter{totalnumber}{50}
\setcounter{topnumber}{50}
\setcounter{bottomnumber}{50}"""
PreambleCmds.graphicx_auto = r"""% Check output format
\ifx\pdftexversion\undefined
\usepackage{graphicx}
\else
\usepackage[pdftex]{graphicx}
\fi"""
PreambleCmds.highlight_rules = r"""% basic code highlight:
\providecommand*\DUrolecomment[1]{\textcolor[rgb]{0.40,0.40,0.40}{#1}}
\providecommand*\DUroledeleted[1]{\textcolor[rgb]{0.40,0.40,0.40}{#1}}
\providecommand*\DUrolekeyword[1]{\textbf{#1}}
\providecommand*\DUrolestring[1]{\textit{#1}}"""
PreambleCmds.inline = r"""
% inline markup (custom roles)
% \DUrole{#1}{#2} tries \DUrole#1{#2}
\providecommand*{\DUrole}[2]{%
\ifcsname DUrole#1\endcsname%
\csname DUrole#1\endcsname{#2}%
\else% backwards compatibility: try \docutilsrole#1{#2}
\ifcsname docutilsrole#1\endcsname%
\csname docutilsrole#1\endcsname{#2}%
\else%
#2%
\fi%
\fi%
}"""
PreambleCmds.legend = r"""
% legend environment
\ifthenelse{\isundefined{\DUlegend}}{
\newenvironment{DUlegend}{\small}{}
}{}"""
PreambleCmds.lineblock = r"""
% lineblock environment
\DUprovidelength{\DUlineblockindent}{2.5em}
\ifthenelse{\isundefined{\DUlineblock}}{
\newenvironment{DUlineblock}[1]{%
\list{}{\setlength{\partopsep}{\parskip}
\addtolength{\partopsep}{\baselineskip}
\setlength{\topsep}{0pt}
\setlength{\itemsep}{0.15\baselineskip}
\setlength{\parsep}{0pt}
\setlength{\leftmargin}{#1}}
\raggedright
}
{\endlist}
}{}"""
# PreambleCmds.lineblock._depends = 'providelength'
PreambleCmds.linking = r"""
%% hyperlinks:
\ifthenelse{\isundefined{\hypersetup}}{
\usepackage[%s]{hyperref}
\urlstyle{same} %% normal text font (alternatives: tt, rm, sf)
}{}"""
PreambleCmds.minitoc = r"""%% local table of contents
\usepackage{minitoc}"""
PreambleCmds.optionlist = r"""
% optionlist environment
\providecommand*{\DUoptionlistlabel}[1]{\bf #1 \hfill}
\DUprovidelength{\DUoptionlistindent}{3cm}
\ifthenelse{\isundefined{\DUoptionlist}}{
\newenvironment{DUoptionlist}{%
\list{}{\setlength{\labelwidth}{\DUoptionlistindent}
\setlength{\rightmargin}{1cm}
\setlength{\leftmargin}{\rightmargin}
\addtolength{\leftmargin}{\labelwidth}
\addtolength{\leftmargin}{\labelsep}
\renewcommand{\makelabel}{\DUoptionlistlabel}}
}
{\endlist}
}{}"""
# PreambleCmds.optionlist._depends = 'providelength'
PreambleCmds.providelength = r"""
% providelength (provide a length variable and set default, if it is new)
\providecommand*{\DUprovidelength}[2]{
\ifthenelse{\isundefined{#1}}{\newlength{#1}\setlength{#1}{#2}}{}
}"""
PreambleCmds.rubric = r"""
% rubric (informal heading)
\providecommand*{\DUrubric}[2][class-arg]{%
\subsubsection*{\centering\textit{\textmd{#2}}}}"""
PreambleCmds.sidebar = r"""
% sidebar (text outside the main text flow)
\providecommand{\DUsidebar}[2][class-arg]{%
\begin{center}
\colorbox[gray]{0.80}{\parbox{0.9\textwidth}{#2}}
\end{center}
}"""
PreambleCmds.subtitle = r"""
% subtitle (for topic/sidebar)
\providecommand*{\DUsubtitle}[2][class-arg]{\par\emph{#2}\smallskip}"""
PreambleCmds.documentsubtitle = r"""
% subtitle (in document title)
\providecommand*{\DUdocumentsubtitle}[1]{{\large #1}}"""
PreambleCmds.table = r"""\usepackage{longtable,ltcaption,array}
\setlength{\extrarowheight}{2pt}
\newlength{\DUtablewidth} % internal use in tables"""
# Options [force,almostfull] prevent spurious error messages, see
# de.comp.text.tex/2005-12/msg01855
PreambleCmds.textcomp = """\
\\usepackage{textcomp} % text symbol macros"""
PreambleCmds.titlereference = r"""
% titlereference role
\providecommand*{\DUroletitlereference}[1]{\textsl{#1}}"""
PreambleCmds.title = r"""
% title for topics, admonitions, unsupported section levels, and sidebar
\providecommand*{\DUtitle}[2][class-arg]{%
% call \DUtitle#1{#2} if it exists:
\ifcsname DUtitle#1\endcsname%
\csname DUtitle#1\endcsname{#2}%
\else
\smallskip\noindent\textbf{#2}\smallskip%
\fi
}"""
PreambleCmds.topic = r"""
% topic (quote with heading)
\providecommand{\DUtopic}[2][class-arg]{%
\ifcsname DUtopic#1\endcsname%
\csname DUtopic#1\endcsname{#2}%
\else
\begin{quote}#2\end{quote}
\fi
}"""
PreambleCmds.transition = r"""
% transition (break, fancybreak, anonymous section)
\providecommand*{\DUtransition}[1][class-arg]{%
\hspace*{\fill}\hrulefill\hspace*{\fill}
\vskip 0.5\baselineskip
}"""
# LaTeX encoding maps
# -------------------
# ::
class CharMaps(object):
"""LaTeX representations for active and Unicode characters."""
# characters that always need escaping:
special = {
ord('#'): ur'\#',
ord('$'): ur'\$',
ord('%'): ur'\%',
ord('&'): ur'\&',
ord('~'): ur'\textasciitilde{}',
ord('_'): ur'\_',
ord('^'): ur'\textasciicircum{}',
ord('\\'): ur'\textbackslash{}',
ord('{'): ur'\{',
ord('}'): ur'\}',
# straight double quotes are 'active' in many languages
ord('"'): ur'\textquotedbl{}',
# Square brackets are ordinary chars and cannot be escaped with '\',
# so we put them in a group '{[}'. (Alternative: ensure that all
# macros with optional arguments are terminated with {} and text
# inside any optional argument is put in a group ``[{text}]``).
# Commands with optional args inside an optional arg must be put in a
# group, e.g. ``\item[{\hyperref[label]{text}}]``.
ord('['): ur'{[}',
ord(']'): ur'{]}',
# the soft hyphen is unknown in 8-bit text
# and not properly handled by XeTeX
0x00AD: ur'\-', # SOFT HYPHEN
}
# Unicode chars that are not recognized by LaTeX's utf8 encoding
unsupported_unicode = {
0x00A0: ur'~', # NO-BREAK SPACE
# TODO: ensure white space also at the beginning of a line?
# 0x00A0: ur'\leavevmode\nobreak\vadjust{}~'
0x2008: ur'\,', # PUNCTUATION SPACE
0x2011: ur'\hbox{-}', # NON-BREAKING HYPHEN
0x202F: ur'\,', # NARROW NO-BREAK SPACE
0x21d4: ur'$\Leftrightarrow$',
# Docutils footnote symbols:
0x2660: ur'$\spadesuit$',
0x2663: ur'$\clubsuit$',
}
# Unicode chars that are recognized by LaTeX's utf8 encoding
utf8_supported_unicode = {
0x00AB: ur'\guillemotleft', # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bb: ur'\guillemotright', # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x200C: ur'\textcompwordmark', # ZERO WIDTH NON-JOINER
0x2013: ur'\textendash{}',
0x2014: ur'\textemdash{}',
0x2018: ur'\textquoteleft{}',
0x2019: ur'\textquoteright{}',
0x201A: ur'\quotesinglbase{}', # SINGLE LOW-9 QUOTATION MARK
0x201C: ur'\textquotedblleft{}',
0x201D: ur'\textquotedblright{}',
0x201E: ur'\quotedblbase{}', # DOUBLE LOW-9 QUOTATION MARK
0x2030: ur'\textperthousand{}', # PER MILLE SIGN
0x2031: ur'\textpertenthousand{}', # PER TEN THOUSAND SIGN
0x2039: ur'\guilsinglleft{}',
0x203A: ur'\guilsinglright{}',
0x2423: ur'\textvisiblespace{}', # OPEN BOX
0x2020: ur'\dag{}',
0x2021: ur'\ddag{}',
0x2026: ur'\dots{}',
0x2122: ur'\texttrademark{}',
}
# recognized with 'utf8', if textcomp is loaded
textcomp = {
# Latin-1 Supplement
0x00a2: ur'\textcent{}', # ¢ CENT SIGN
0x00a4: ur'\textcurrency{}', # ¤ CURRENCY SYMBOL
0x00a5: ur'\textyen{}', # ¥ YEN SIGN
0x00a6: ur'\textbrokenbar{}', # ¦ BROKEN BAR
0x00a7: ur'\textsection{}', # § SECTION SIGN
0x00a8: ur'\textasciidieresis{}', # ¨ DIAERESIS
0x00a9: ur'\textcopyright{}', # © COPYRIGHT SIGN
0x00aa: ur'\textordfeminine{}', # ª FEMININE ORDINAL INDICATOR
0x00ac: ur'\textlnot{}', # ¬ NOT SIGN
0x00ae: ur'\textregistered{}', # ® REGISTERED SIGN
0x00af: ur'\textasciimacron{}', # ¯ MACRON
0x00b0: ur'\textdegree{}', # ° DEGREE SIGN
0x00b1: ur'\textpm{}', # ± PLUS-MINUS SIGN
0x00b2: ur'\texttwosuperior{}', # ² SUPERSCRIPT TWO
0x00b3: ur'\textthreesuperior{}', # ³ SUPERSCRIPT THREE
0x00b4: ur'\textasciiacute{}', # ´ ACUTE ACCENT
0x00b5: ur'\textmu{}', # µ MICRO SIGN
0x00b6: ur'\textparagraph{}', # ¶ PILCROW SIGN # != \textpilcrow
0x00b9: ur'\textonesuperior{}', # ¹ SUPERSCRIPT ONE
0x00ba: ur'\textordmasculine{}', # º MASCULINE ORDINAL INDICATOR
0x00bc: ur'\textonequarter{}', # ¼ VULGAR FRACTION ONE QUARTER
0x00bd: ur'\textonehalf{}', # ½ VULGAR FRACTION ONE HALF
0x00be: ur'\textthreequarters{}', # ¾ VULGAR FRACTION THREE QUARTERS
0x00d7: ur'\texttimes{}', # × MULTIPLICATION SIGN
0x00f7: ur'\textdiv{}', # ÷ DIVISION SIGN
# others
0x0192: ur'\textflorin{}', # LATIN SMALL LETTER F WITH HOOK
0x02b9: ur'\textasciiacute{}', # MODIFIER LETTER PRIME
0x02ba: ur'\textacutedbl{}', # MODIFIER LETTER DOUBLE PRIME
0x2016: ur'\textbardbl{}', # DOUBLE VERTICAL LINE
0x2022: ur'\textbullet{}', # BULLET
0x2032: ur'\textasciiacute{}', # PRIME
0x2033: ur'\textacutedbl{}', # DOUBLE PRIME
0x2035: ur'\textasciigrave{}', # REVERSED PRIME
0x2036: ur'\textgravedbl{}', # REVERSED DOUBLE PRIME
0x203b: ur'\textreferencemark{}', # REFERENCE MARK
0x203d: ur'\textinterrobang{}', # INTERROBANG
0x2044: ur'\textfractionsolidus{}', # FRACTION SLASH
0x2045: ur'\textlquill{}', # LEFT SQUARE BRACKET WITH QUILL
0x2046: ur'\textrquill{}', # RIGHT SQUARE BRACKET WITH QUILL
0x2052: ur'\textdiscount{}', # COMMERCIAL MINUS SIGN
0x20a1: ur'\textcolonmonetary{}', # COLON SIGN
0x20a3: ur'\textfrenchfranc{}', # FRENCH FRANC SIGN
0x20a4: ur'\textlira{}', # LIRA SIGN
0x20a6: ur'\textnaira{}', # NAIRA SIGN
0x20a9: ur'\textwon{}', # WON SIGN
0x20ab: ur'\textdong{}', # DONG SIGN
0x20ac: ur'\texteuro{}', # EURO SIGN
0x20b1: ur'\textpeso{}', # PESO SIGN
0x20b2: ur'\textguarani{}', # GUARANI SIGN
0x2103: ur'\textcelsius{}', # DEGREE CELSIUS
0x2116: ur'\textnumero{}', # NUMERO SIGN
0x2117: ur'\textcircledP{}', # SOUND RECORDING COPYRIGHT
0x211e: ur'\textrecipe{}', # PRESCRIPTION TAKE
0x2120: ur'\textservicemark{}', # SERVICE MARK
0x2122: ur'\texttrademark{}', # TRADE MARK SIGN
0x2126: ur'\textohm{}', # OHM SIGN
0x2127: ur'\textmho{}', # INVERTED OHM SIGN
0x212e: ur'\textestimated{}', # ESTIMATED SYMBOL
0x2190: ur'\textleftarrow{}', # LEFTWARDS ARROW
0x2191: ur'\textuparrow{}', # UPWARDS ARROW
0x2192: ur'\textrightarrow{}', # RIGHTWARDS ARROW
0x2193: ur'\textdownarrow{}', # DOWNWARDS ARROW
0x2212: ur'\textminus{}', # MINUS SIGN
0x2217: ur'\textasteriskcentered{}', # ASTERISK OPERATOR
0x221a: ur'\textsurd{}', # SQUARE ROOT
0x2422: ur'\textblank{}', # BLANK SYMBOL
0x25e6: ur'\textopenbullet{}', # WHITE BULLET
0x25ef: ur'\textbigcircle{}', # LARGE CIRCLE
0x266a: ur'\textmusicalnote{}', # EIGHTH NOTE
0x26ad: ur'\textmarried{}', # MARRIAGE SYMBOL
0x26ae: ur'\textdivorced{}', # DIVORCE SYMBOL
0x27e8: ur'\textlangle{}', # MATHEMATICAL LEFT ANGLE BRACKET
0x27e9: ur'\textrangle{}', # MATHEMATICAL RIGHT ANGLE BRACKET
}
# Unicode chars that require a feature/package to render
pifont = {
0x2665: ur'\ding{170}', # black heartsuit
0x2666: ur'\ding{169}', # black diamondsuit
0x2713: ur'\ding{51}', # check mark
0x2717: ur'\ding{55}', # ballot x (cross mark)
}
# TODO: greek alphabet ... ?
# see also LaTeX codec
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/252124
# and unimap.py from TeXML
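# Illustrative sketch (added example; assumes the CharMaps tables above):
# the maps are plain {ord: replacement} dicts, so they plug directly
# into unicode.translate(), e.g.
#   >>> u'50% of $x_i$'.translate(CharMaps.special)
#   u'50\\% of \\$x\\_i\\$'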
class DocumentClass(object):
"""Details of a LaTeX document class."""
def __init__(self, document_class, with_part=False):
self.document_class = document_class
self._with_part = with_part
self.sections = ['section', 'subsection', 'subsubsection',
'paragraph', 'subparagraph']
if self.document_class in ('book', 'memoir', 'report',
'scrbook', 'scrreprt'):
self.sections.insert(0, 'chapter')
if self._with_part:
self.sections.insert(0, 'part')
def section(self, level):
"""Return the LaTeX section name for section `level`.
The name depends on the specific document class.
Level is 1,2,3..., as level 0 is the title.
"""
if level <= len(self.sections):
return self.sections[level-1]
else: # unsupported levels
return 'DUtitle[section%s]' % roman.toRoman(level)
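# Illustrative examples (added for clarity, not in the original source):
#   >>> DocumentClass('article').section(1)
#   'section'
#   >>> DocumentClass('book').section(1)
#   'chapter'
#   >>> DocumentClass('book', with_part=True).section(1)
#   'part'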
class Table(object):
"""Manage a table while traversing.
Maybe change to a mixin defining the visit/departs, but then
the Table class's internal variables would live in the Translator.
Table style might be
:standard: horizontal and vertical lines
:booktabs: only horizontal lines (requires "booktabs" LaTeX package)
:borderless: no borders around table cells
:nolines: alias for borderless
"""
def __init__(self,translator,latex_type,table_style):
self._translator = translator
self._latex_type = latex_type
self._table_style = table_style
self._open = False
# miscellaneous attributes
self._attrs = {}
self._col_width = []
self._rowspan = []
self.stubs = []
self._in_thead = 0
def open(self):
self._open = True
self._col_specs = []
self.caption = []
self._attrs = {}
self._in_head = False # maybe context with search
def close(self):
self._open = False
self._col_specs = None
self.caption = []
self._attrs = {}
self.stubs = []
def is_open(self):
return self._open
def set_table_style(self, table_style):
if not table_style in ('standard','booktabs','borderless','nolines'):
return
self._table_style = table_style
def get_latex_type(self):
if self._latex_type == 'longtable' and not self.caption:
# do not advance the "table" counter (requires "ltcaption" package)
return('longtable*')
return self._latex_type
def set(self,attr,value):
self._attrs[attr] = value
def get(self,attr):
if attr in self._attrs:
return self._attrs[attr]
return None
def get_vertical_bar(self):
if self._table_style == 'standard':
return '|'
return ''
# horizontal lines are drawn below a row.
def get_opening(self):
return '\n'.join([r'\setlength{\DUtablewidth}{\linewidth}',
r'\begin{%s}[c]' % self.get_latex_type()])
def get_closing(self):
closing = []
if self._table_style == 'booktabs':
closing.append(r'\bottomrule')
# elif self._table_style == 'standard':
# closing.append(r'\hline')
closing.append(r'\end{%s}' % self.get_latex_type())
return '\n'.join(closing)
def visit_colspec(self, node):
self._col_specs.append(node)
# "stubs" list is an attribute of the tgroup element:
self.stubs.append(node.attributes.get('stub'))
def get_colspecs(self):
"""Return column specification for longtable.
Assumes a reST line length of 80 characters.
Table width is hairy.
=== ===
ABC DEF
=== ===
usually gets too narrow; therefore we add 1 (fiddle factor).
"""
width = 80
total_width = 0.0
# first see if we get too wide.
for node in self._col_specs:
colwidth = float(node['colwidth']+1) / width
total_width += colwidth
self._col_width = []
self._rowspan = []
# do not make it full linewidth
factor = 0.93
if total_width > 1.0:
factor /= total_width
bar = self.get_vertical_bar()
latex_table_spec = ''
for node in self._col_specs:
colwidth = factor * float(node['colwidth']+1) / width
self._col_width.append(colwidth+0.005)
self._rowspan.append(0)
latex_table_spec += '%sp{%.3f\\DUtablewidth}' % (bar, colwidth+0.005)
return latex_table_spec+bar
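# Worked example (illustrative): two colspecs with reST widths 10 and 30
# give (10+1)/80 = 0.1375 and (30+1)/80 = 0.3875; total 0.525 <= 1.0, so
# factor stays 0.93 and the returned spec (with 'standard' style) is
# roughly '|p{0.133\DUtablewidth}|p{0.365\DUtablewidth}|'.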
def get_column_width(self):
"""Return columnwidth for current cell (not multicell)."""
return '%.2f\\DUtablewidth' % self._col_width[self._cell_in_row-1]
def get_multicolumn_width(self, start, len_):
"""Return sum of columnwidths for multicell."""
mc_width = sum([width
for width in ([self._col_width[start + co - 1]
for co in range (len_)])])
return '%.2f\\DUtablewidth' % mc_width
def get_caption(self):
if not self.caption:
return ''
caption = ''.join(self.caption)
if 1 == self._translator.thead_depth():
return r'\caption{%s}\\' '\n' % caption
return r'\caption[]{%s (... continued)}\\' '\n' % caption
def need_recurse(self):
if self._latex_type == 'longtable':
return 1 == self._translator.thead_depth()
return 0
def visit_thead(self):
self._in_thead += 1
if self._table_style == 'standard':
return ['\\hline\n']
elif self._table_style == 'booktabs':
return ['\\toprule\n']
return []
def depart_thead(self):
a = []
#if self._table_style == 'standard':
# a.append('\\hline\n')
if self._table_style == 'booktabs':
a.append('\\midrule\n')
if self._latex_type == 'longtable':
if 1 == self._translator.thead_depth():
a.append('\\endfirsthead\n')
else:
a.append('\\endhead\n')
a.append(r'\multicolumn{%d}{c}' % len(self._col_specs) +
r'{\hfill ... continued on next page} \\')
a.append('\n\\endfoot\n\\endlastfoot\n')
# for longtable one could add firsthead, foot and lastfoot
self._in_thead -= 1
return a
def visit_row(self):
self._cell_in_row = 0
def depart_row(self):
res = [' \\\\\n']
self._cell_in_row = None # remove cell counter
for i in range(len(self._rowspan)):
if (self._rowspan[i]>0):
self._rowspan[i] -= 1
if self._table_style == 'standard':
rowspans = [i+1 for i in range(len(self._rowspan))
if (self._rowspan[i]<=0)]
if len(rowspans)==len(self._rowspan):
res.append('\\hline\n')
else:
cline = ''
rowspans.reverse()
# TODO merge clines
while True:
try:
c_start = rowspans.pop()
except:
break
cline += '\\cline{%d-%d}\n' % (c_start,c_start)
res.append(cline)
return res
def set_rowspan(self,cell,value):
try:
self._rowspan[cell] = value
except:
pass
def get_rowspan(self,cell):
try:
return self._rowspan[cell]
except:
return 0
def get_entry_number(self):
return self._cell_in_row
def visit_entry(self):
self._cell_in_row += 1
def is_stub_column(self):
if len(self.stubs) >= self._cell_in_row:
return self.stubs[self._cell_in_row-1]
return False
class LaTeXTranslator(nodes.NodeVisitor):
# When options are given to the documentclass, latex will pass them
# to other packages, as done with babel.
# Dummy settings might be taken from document settings
# Write code for typesetting with 8-bit tex/pdftex (vs. xetex/luatex) engine
# overwritten by the XeTeX writer
is_xetex = False
# Config setting defaults
# -----------------------
# TODO: use mixins for different implementations.
# use a list environment for docinfo, else tabularx
## use_optionlist_for_docinfo = False # TODO: NOT YET IN USE
# Use compound enumerations (1.A.1.)
compound_enumerators = False
# If using compound enumerations, include section information.
section_prefix_for_enumerators = False
# This is the character that separates the section ("." subsection ...)
# prefix from the regular list enumerator.
section_enumerator_separator = '-'
# Auxiliary variables
# -------------------
has_latex_toc = False # is there a toc in the doc? (needed by minitoc)
is_toc_list = False # is the current bullet_list a ToC?
section_level = 0
# Flags to encode():
# inside citation reference labels, underscores don't need to be escaped
inside_citation_reference_label = False
verbatim = False # do not encode
insert_non_breaking_blanks = False # replace blanks by "~"
insert_newline = False # add latex newline commands
literal = False # literal text (block or inline)
def __init__(self, document, babel_class=Babel):
nodes.NodeVisitor.__init__(self, document)
# Reporter
# ~~~~~~~~
self.warn = self.document.reporter.warning
self.error = self.document.reporter.error
# Settings
# ~~~~~~~~
self.settings = settings = document.settings
self.latex_encoding = self.to_latex_encoding(settings.output_encoding)
self.use_latex_toc = settings.use_latex_toc
self.use_latex_docinfo = settings.use_latex_docinfo
self._use_latex_citations = settings.use_latex_citations
self._reference_label = settings.reference_label
self.hyperlink_color = settings.hyperlink_color
self.compound_enumerators = settings.compound_enumerators
self.font_encoding = getattr(settings, 'font_encoding', '')
self.section_prefix_for_enumerators = (
settings.section_prefix_for_enumerators)
self.section_enumerator_separator = (
settings.section_enumerator_separator.replace('_', r'\_'))
# literal blocks:
self.literal_block_env = ''
self.literal_block_options = ''
if settings.literal_block_env != '':
(none,
self.literal_block_env,
self.literal_block_options,
none ) = re.split('(\w+)(.*)', settings.literal_block_env)
elif settings.use_verbatim_when_possible:
self.literal_block_env = 'verbatim'
#
if self.settings.use_bibtex:
self.bibtex = self.settings.use_bibtex.split(',',1)
# TODO avoid errors on not declared citations.
else:
self.bibtex = None
# language module for Docutils-generated text
# (labels, bibliographic_fields, and author_separators)
self.language_module = languages.get_language(settings.language_code,
document.reporter)
self.babel = babel_class(settings.language_code, document.reporter)
self.author_separator = self.language_module.author_separators[0]
d_options = [self.settings.documentoptions]
if self.babel.language not in ('english', ''):
d_options.append(self.babel.language)
self.documentoptions = ','.join(filter(None, d_options))
self.d_class = DocumentClass(settings.documentclass,
settings.use_part_section)
# graphic package options:
if self.settings.graphicx_option == '':
self.graphicx_package = r'\usepackage{graphicx}'
elif self.settings.graphicx_option.lower() == 'auto':
self.graphicx_package = PreambleCmds.graphicx_auto
else:
self.graphicx_package = (r'\usepackage[%s]{graphicx}' %
self.settings.graphicx_option)
# footnotes:
self.docutils_footnotes = settings.docutils_footnotes
if settings.use_latex_footnotes:
self.docutils_footnotes = True
self.warn('`use_latex_footnotes` is deprecated. '
'The setting has been renamed to `docutils_footnotes` '
'and the alias will be removed in a future version.')
self.figure_footnotes = settings.figure_footnotes
if self.figure_footnotes:
self.docutils_footnotes = True
self.warn('The "figure footnotes" workaround/setting is strongly '
'deprecated and will be removed in a future version.')
# Output collection stacks
# ~~~~~~~~~~~~~~~~~~~~~~~~
# Document parts
self.head_prefix = [r'\documentclass[%s]{%s}' %
(self.documentoptions, self.settings.documentclass)]
self.requirements = SortableDict() # made a list in depart_document()
self.requirements['__static'] = r'\usepackage{ifthen}'
self.latex_preamble = [settings.latex_preamble]
self.fallbacks = SortableDict() # made a list in depart_document()
self.pdfsetup = [] # PDF properties (hyperref package)
self.title = []
self.subtitle = []
self.titledata = [] # \title, \author, \date
## self.body_prefix = ['\\begin{document}\n']
self.body_pre_docinfo = [] # \maketitle
self.docinfo = []
self.dedication = []
self.abstract = []
self.body = []
## self.body_suffix = ['\\end{document}\n']
# A heterogeneous stack used in conjunction with the tree traversal.
# Make sure that the pops correspond to the pushes:
self.context = []
# Title metadata:
self.title_labels = []
self.subtitle_labels = []
# (if use_latex_docinfo: collects lists of
# author/organization/contact/address lines)
self.author_stack = []
self.date = []
# PDF properties: pdftitle, pdfauthor
# TODO?: pdfcreator, pdfproducer, pdfsubject, pdfkeywords
self.pdfinfo = []
self.pdfauthor = []
# Stack of section counters so that we can number sections without use_latex_toc.
# This will grow and shrink as processing occurs.
# Initialized for potential first-level sections.
self._section_number = [0]
# The current stack of enumerations so that we can expand
# them into a compound enumeration.
self._enumeration_counters = []
# The maximum number of enumeration counters we've used.
# If we go beyond this number, we need to create a new
# counter; otherwise, just reuse an old one.
self._max_enumeration_counters = 0
self._bibitems = []
# object for a table while processing.
self.table_stack = []
self.active_table = Table(self, 'longtable', settings.table_style)
# Where to collect the output of visitor methods (default: body)
self.out = self.body
self.out_stack = [] # stack of output collectors
# Process settings
# ~~~~~~~~~~~~~~~~
# Encodings:
# Docutils' output-encoding => TeX input encoding
if self.latex_encoding != 'ascii':
self.requirements['_inputenc'] = (r'\usepackage[%s]{inputenc}'
% self.latex_encoding)
# TeX font encoding
if not self.is_xetex:
if self.font_encoding:
self.requirements['_fontenc'] = (r'\usepackage[%s]{fontenc}' %
self.font_encoding)
# ensure \textquotedbl is defined:
for enc in self.font_encoding.split(','):
enc = enc.strip()
if enc == 'OT1':
self.requirements['_textquotedblOT1'] = (
r'\DeclareTextSymbol{\textquotedbl}{OT1}{`\"}')
elif enc not in ('T1', 'T2A', 'T2B', 'T2C', 'T4', 'T5'):
self.requirements['_textquotedbl'] = (
r'\DeclareTextSymbolDefault{\textquotedbl}{T1}')
# page layout with typearea (if there are relevant document options)
if (settings.documentclass.find('scr') == -1 and
(self.documentoptions.find('DIV') != -1 or
self.documentoptions.find('BCOR') != -1)):
self.requirements['typearea'] = r'\usepackage{typearea}'
# Stylesheets
# (the name `self.stylesheet` is singular because only one
# stylesheet was supported before Docutils 0.6).
self.stylesheet = [self.stylesheet_call(path)
for path in utils.get_stylesheet_list(settings)]
# PDF setup
if self.hyperlink_color in ('0', 'false', 'False', ''):
self.hyperref_options = ''
else:
self.hyperref_options = 'colorlinks=true,linkcolor=%s,urlcolor=%s' % (
self.hyperlink_color, self.hyperlink_color)
if settings.hyperref_options:
self.hyperref_options += ',' + settings.hyperref_options
# LaTeX Toc
# include all supported sections in toc and PDF bookmarks
# (or use documentclass-default (as currently))?
## if self.use_latex_toc:
## self.requirements['tocdepth'] = (r'\setcounter{tocdepth}{%d}' %
## len(self.d_class.sections))
# Section numbering
if settings.sectnum_xform: # section numbering by Docutils
PreambleCmds.secnumdepth = r'\setcounter{secnumdepth}{0}'
else: # section numbering by LaTeX:
secnumdepth = settings.sectnum_depth
# Possible values of settings.sectnum_depth:
# None "sectnum" directive without depth arg -> LaTeX default
# 0 no "sectnum" directive -> no section numbers
# >0 value of "depth" argument -> translate to LaTeX levels:
# -1 part (0 with "article" document class)
# 0 chapter (missing in "article" document class)
# 1 section
# 2 subsection
# 3 subsubsection
# 4 paragraph
# 5 subparagraph
if secnumdepth is not None:
# limit to supported levels
secnumdepth = min(secnumdepth, len(self.d_class.sections))
# adjust to document class and use_part_section settings
if 'chapter' in self.d_class.sections:
secnumdepth -= 1
if self.d_class.sections[0] == 'part':
secnumdepth -= 1
PreambleCmds.secnumdepth = \
r'\setcounter{secnumdepth}{%d}' % secnumdepth
# start with specified number:
if (hasattr(settings, 'sectnum_start') and
settings.sectnum_start != 1):
self.requirements['sectnum_start'] = (
r'\setcounter{%s}{%d}' % (self.d_class.sections[0],
settings.sectnum_start-1))
# TODO: currently ignored (configure in a stylesheet):
## settings.sectnum_prefix
## settings.sectnum_suffix
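# Worked example (illustrative): with documentclass 'book' and
# sectnum_depth 3, secnumdepth = min(3, 6) = 3, reduced by 1 for the
# 'chapter' level, i.e. \setcounter{secnumdepth}{2}; with 'article'
# the result is \setcounter{secnumdepth}{3}.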
# Auxiliary Methods
# -----------------
def stylesheet_call(self, path):
"""Return code to reference or embed stylesheet file `path`"""
# is it a package (no extension or *.sty) or "normal" tex code:
(base, ext) = os.path.splitext(path)
is_package = ext in ['.sty', '']
# Embed content of style file:
if self.settings.embed_stylesheet:
if is_package:
path = base + '.sty' # ensure extension
try:
content = io.FileInput(source_path=path,
encoding='utf-8').read()
self.settings.record_dependencies.add(path)
except IOError, err:
msg = u"Cannot embed stylesheet '%s':\n %s." % (
path, SafeString(err.strerror))
self.document.reporter.error(msg)
return '% ' + msg.replace('\n', '\n% ')
if is_package:
content = '\n'.join([r'\makeatletter',
content,
r'\makeatother'])
return '%% embedded stylesheet: %s\n%s' % (path, content)
# Link to style file:
if is_package:
path = base # drop extension
cmd = r'\usepackage{%s}'
else:
cmd = r'\input{%s}'
if self.settings.stylesheet_path:
# adapt path relative to output (cf. config.html#stylesheet-path)
path = utils.relative_path(self.settings._destination, path)
return cmd % path
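# Illustrative sketch (hypothetical file names): without
# embed_stylesheet, 'mystyle.sty' (or plain 'mystyle') is linked as
# \usepackage{mystyle} and 'extras.tex' as \input{extras.tex}; with
# embed_stylesheet, the file content is copied into the output
# (packages wrapped in \makeatletter ... \makeatother).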
def to_latex_encoding(self,docutils_encoding):
"""Translate docutils encoding name into LaTeX's.
The default is to remove "-" and "_" chars from docutils_encoding.
"""
tr = { 'iso-8859-1': 'latin1', # west european
'iso-8859-2': 'latin2', # east european
'iso-8859-3': 'latin3', # esperanto, maltese
'iso-8859-4': 'latin4', # north european, scandinavian, baltic
'iso-8859-5': 'iso88595', # cyrillic (ISO)
'iso-8859-9': 'latin5', # turkish
'iso-8859-15': 'latin9', # latin9, update to latin1.
'mac_cyrillic': 'maccyr', # cyrillic (on Mac)
'windows-1251': 'cp1251', # cyrillic (on Windows)
'koi8-r': 'koi8-r', # cyrillic (Russian)
'koi8-u': 'koi8-u', # cyrillic (Ukrainian)
'windows-1250': 'cp1250', #
'windows-1252': 'cp1252', #
'us-ascii': 'ascii', # ASCII (US)
# unmatched encodings
#'': 'applemac',
#'': 'ansinew', # windows 3.1 ansi
#'': 'ascii', # ASCII encoding for the range 32--127.
#'': 'cp437', # dos latin us
#'': 'cp850', # dos latin 1
#'': 'cp852', # dos latin 2
#'': 'decmulti',
#'': 'latin10',
#'iso-8859-6': '' # arabic
#'iso-8859-7': '' # greek
#'iso-8859-8': '' # hebrew
#'iso-8859-10': '' # latin6, more complete iso-8859-4
}
encoding = docutils_encoding.lower()
if encoding in tr:
return tr[encoding]
# drop hyphen or low-line from "latin-1", "latin_1", "utf-8" and similar
encoding = encoding.replace('_', '').replace('-', '')
# strip the error handler
return encoding.split(':')[0]
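# Illustrative examples (assuming `translator` is a LaTeXTranslator
# instance; added for clarity, not part of the original source):
#   >>> translator.to_latex_encoding('iso-8859-1')
#   'latin1'
#   >>> translator.to_latex_encoding('UTF-8')          # fallback rule
#   'utf8'
#   >>> translator.to_latex_encoding('utf-8:replace')  # handler stripped
#   'utf8'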
def language_label(self, docutil_label):
return self.language_module.labels[docutil_label]
def encode(self, text):
"""Return text with 'problematic' characters escaped.
* Escape the ten special printing characters ``# $ % & ~ _ ^ \ { }``,
square brackets ``[ ]``, double quotes and (in OT1) ``< | >``.
* Translate non-supported Unicode characters.
* Separate ``-`` (and more in literal text) to prevent input ligatures.
"""
if self.verbatim:
return text
# Set up the translation table:
table = CharMaps.special.copy()
# keep the underscore in citation references
if self.inside_citation_reference_label:
del(table[ord('_')])
# Workarounds for OT1 font-encoding
if self.font_encoding in ['OT1', ''] and not self.is_xetex:
# * out-of-order characters in cmtt
if self.literal:
# replace underscore by underlined blank,
# because this has correct width.
table[ord('_')] = u'\\underline{~}'
# the backslash doesn't work, so we use a mirrored slash.
# \reflectbox is provided by graphicx:
self.requirements['graphicx'] = self.graphicx_package
table[ord('\\')] = ur'\reflectbox{/}'
# * ``< | >`` come out as different chars (except for cmtt):
else:
table[ord('|')] = ur'\textbar{}'
table[ord('<')] = ur'\textless{}'
table[ord('>')] = ur'\textgreater{}'
if self.insert_non_breaking_blanks:
table[ord(' ')] = ur'~'
# Unicode replacements for 8-bit tex engines (not required with XeTeX/LuaTeX):
if not self.is_xetex:
table.update(CharMaps.unsupported_unicode)
if not self.latex_encoding.startswith('utf8'):
table.update(CharMaps.utf8_supported_unicode)
table.update(CharMaps.textcomp)
table.update(CharMaps.pifont)
# Characters that require a feature/package to render
if [True for ch in text if ord(ch) in CharMaps.textcomp]:
self.requirements['textcomp'] = PreambleCmds.textcomp
if [True for ch in text if ord(ch) in CharMaps.pifont]:
self.requirements['pifont'] = '\\usepackage{pifont}'
text = text.translate(table)
# Break up input ligatures e.g. '--' to '-{}-'.
if not self.is_xetex: # Not required with xetex/luatex
separate_chars = '-'
# In monospace font, we also separate ',,', '``' and "''" and some
# other characters which can't occur in non-literal text.
if self.literal:
separate_chars += ',`\'"<>'
for char in separate_chars * 2:
# Do it twice ("* 2") because otherwise we would replace
# '---' by '-{}--'.
text = text.replace(char + char, char + '{}' + char)
# Literal line breaks (in address or literal blocks):
if self.insert_newline:
lines = text.split('\n')
# Add a protected space to blank lines (except the last)
# to avoid ``! LaTeX Error: There's no line here to end.``
for i, line in enumerate(lines[:-1]):
if not line.lstrip():
lines[i] += '~'
text = (r'\\' + '\n').join(lines)
if self.literal and not self.insert_non_breaking_blanks:
# preserve runs of spaces but allow wrapping
text = text.replace(' ', ' ~')
return text
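# Illustrative examples (assuming a LaTeXTranslator `translator` with
# default flags; added for clarity):
#   >>> translator.encode(u'10% of $100')
#   u'10\\% of \\$100'
#   >>> translator.encode(u'pages 1--3')  # input ligature '--' broken up
#   u'pages 1-{}-3'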
def attval(self, text,
whitespace=re.compile('[\n\r\t\v\f]')):
"""Cleanse, encode, and return attribute value text."""
return self.encode(whitespace.sub(' ', text))
# TODO: is this used anywhere? -> update (use template) or delete
## def astext(self):
## """Assemble document parts and return as string."""
## head = '\n'.join(self.head_prefix + self.stylesheet + self.head)
## body = ''.join(self.body_prefix + self.body + self.body_suffix)
## return head + '\n' + body
def is_inline(self, node):
"""Check whether a node represents an inline or block-level element"""
return isinstance(node.parent, nodes.TextElement)
def append_hypertargets(self, node):
"""Append hypertargets for all ids of `node`"""
# hypertarget places the anchor at the target's baseline,
# so we raise it explicitly
self.out.append('%\n'.join(['\\raisebox{1em}{\\hypertarget{%s}{}}' %
id for id in node['ids']]))
def ids_to_labels(self, node, set_anchor=True):
"""Return list of label definitions for all ids of `node`
If `set_anchor` is True, an anchor is set with \phantomsection.
"""
labels = ['\\label{%s}' % id for id in node.get('ids', [])]
if set_anchor and labels:
labels.insert(0, '\\phantomsection')
return labels
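# Illustrative example (hypothetical ids): for a node with
# ids == ['intro', 'sec-1'] this returns
# ['\\phantomsection', '\\label{intro}', '\\label{sec-1}'].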
def push_output_collector(self, new_out):
self.out_stack.append(self.out)
self.out = new_out
def pop_output_collector(self):
self.out = self.out_stack.pop()
# Visitor methods
# ---------------
def visit_Text(self, node):
self.out.append(self.encode(node.astext()))
def depart_Text(self, node):
pass
def visit_abbreviation(self, node):
node['classes'].insert(0, 'abbreviation')
self.visit_inline(node)
def depart_abbreviation(self, node):
self.depart_inline(node)
def visit_acronym(self, node):
node['classes'].insert(0, 'acronym')
self.visit_inline(node)
def depart_acronym(self, node):
self.depart_inline(node)
def visit_address(self, node):
self.visit_docinfo_item(node, 'address')
def depart_address(self, node):
self.depart_docinfo_item(node)
def visit_admonition(self, node):
self.fallbacks['admonition'] = PreambleCmds.admonition
if 'error' in node['classes']:
self.fallbacks['error'] = PreambleCmds.error
# strip the generic 'admonition' from the list of classes
node['classes'] = [cls for cls in node['classes']
if cls != 'admonition']
self.out.append('\n\\DUadmonition[%s]{\n' % ','.join(node['classes']))
def depart_admonition(self, node=None):
self.out.append('}\n')
def visit_author(self, node):
self.visit_docinfo_item(node, 'author')
def depart_author(self, node):
self.depart_docinfo_item(node)
def visit_authors(self, node):
# not used: visit_author is called anyway for each author.
pass
def depart_authors(self, node):
pass
def visit_block_quote(self, node):
self.out.append( '%\n\\begin{quote}\n')
if node['classes']:
self.visit_inline(node)
def depart_block_quote(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append( '\n\\end{quote}\n')
def visit_bullet_list(self, node):
if self.is_toc_list:
self.out.append( '%\n\\begin{list}{}{}\n' )
else:
self.out.append( '%\n\\begin{itemize}\n' )
# if node['classes']:
# self.visit_inline(node)
def depart_bullet_list(self, node):
# if node['classes']:
# self.depart_inline(node)
if self.is_toc_list:
self.out.append( '\n\\end{list}\n' )
else:
self.out.append( '\n\\end{itemize}\n' )
def visit_superscript(self, node):
self.out.append(r'\textsuperscript{')
if node['classes']:
self.visit_inline(node)
def depart_superscript(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('}')
def visit_subscript(self, node):
self.out.append(r'\textsubscript{') # requires `fixltx2e`
if node['classes']:
self.visit_inline(node)
def depart_subscript(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('}')
def visit_caption(self, node):
self.out.append('\n\\caption{')
def depart_caption(self, node):
self.out.append('}\n')
def visit_title_reference(self, node):
self.fallbacks['titlereference'] = PreambleCmds.titlereference
self.out.append(r'\DUroletitlereference{')
if node['classes']:
self.visit_inline(node)
def depart_title_reference(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append( '}' )
def visit_citation(self, node):
# TODO maybe use cite bibitems
if self._use_latex_citations:
self.push_output_collector([])
else:
# TODO: do we need these?
## self.requirements['~fnt_floats'] = PreambleCmds.footnote_floats
self.out.append(r'\begin{figure}[b]')
self.append_hypertargets(node)
def depart_citation(self, node):
if self._use_latex_citations:
label = self.out[0]
text = ''.join(self.out[1:])
self._bibitems.append([label, text])
self.pop_output_collector()
else:
self.out.append('\\end{figure}\n')
def visit_citation_reference(self, node):
if self._use_latex_citations:
if not self.inside_citation_reference_label:
self.out.append(r'\cite{')
self.inside_citation_reference_label = 1
else:
assert self.body[-1] in (' ', '\n'),\
'unexpected non-whitespace while in reference label'
del self.body[-1]
else:
href = ''
if 'refid' in node:
href = node['refid']
elif 'refname' in node:
href = self.document.nameids[node['refname']]
self.out.append('\\hyperlink{%s}{[' % href)
def depart_citation_reference(self, node):
if self._use_latex_citations:
followup_citation = False
# check for a following citation separated by a space or newline
next_siblings = node.traverse(descend=False, siblings=True,
include_self=False)
if len(next_siblings) > 1:
next = next_siblings[0]
if (isinstance(next, nodes.Text) and
next.astext() in (' ', '\n')):
if next_siblings[1].__class__ == node.__class__:
followup_citation = True
if followup_citation:
self.out.append(',')
else:
self.out.append('}')
self.inside_citation_reference_label = False
else:
self.out.append(']}')
def visit_classifier(self, node):
self.out.append( '(\\textbf{' )
def depart_classifier(self, node):
self.out.append( '})\n' )
def visit_colspec(self, node):
self.active_table.visit_colspec(node)
def depart_colspec(self, node):
pass
def visit_comment(self, node):
# Precede every line with a comment sign, wrap in newlines
self.out.append('\n%% %s\n' % node.astext().replace('\n', '\n% '))
raise nodes.SkipNode
def depart_comment(self, node):
pass
def visit_compound(self, node):
pass
def depart_compound(self, node):
pass
def visit_contact(self, node):
self.visit_docinfo_item(node, 'contact')
def depart_contact(self, node):
self.depart_docinfo_item(node)
def visit_container(self, node):
pass
def depart_container(self, node):
pass
def visit_copyright(self, node):
self.visit_docinfo_item(node, 'copyright')
def depart_copyright(self, node):
self.depart_docinfo_item(node)
def visit_date(self, node):
self.visit_docinfo_item(node, 'date')
def depart_date(self, node):
self.depart_docinfo_item(node)
def visit_decoration(self, node):
# header and footer
pass
def depart_decoration(self, node):
pass
def visit_definition(self, node):
pass
def depart_definition(self, node):
self.out.append('\n')
def visit_definition_list(self, node):
self.out.append( '%\n\\begin{description}\n' )
def depart_definition_list(self, node):
self.out.append( '\\end{description}\n' )
def visit_definition_list_item(self, node):
pass
def depart_definition_list_item(self, node):
pass
def visit_description(self, node):
self.out.append(' ')
def depart_description(self, node):
pass
def visit_docinfo(self, node):
self.push_output_collector(self.docinfo)
def depart_docinfo(self, node):
self.pop_output_collector()
# Some items (e.g. author) end up at other places
if self.docinfo:
# tabularx: automatic width of columns, no page breaks allowed.
self.requirements['tabularx'] = r'\usepackage{tabularx}'
self.fallbacks['_providelength'] = PreambleCmds.providelength
self.fallbacks['docinfo'] = PreambleCmds.docinfo
#
self.docinfo.insert(0, '\n% Docinfo\n'
'\\begin{center}\n'
'\\begin{tabularx}{\\DUdocinfowidth}{lX}\n')
self.docinfo.append('\\end{tabularx}\n'
'\\end{center}\n')
def visit_docinfo_item(self, node, name):
if name == 'author':
self.pdfauthor.append(self.attval(node.astext()))
if self.use_latex_docinfo:
if name in ('author', 'organization', 'contact', 'address'):
# We attach these to the last author. If any of them precedes
# the first author, put them in a separate "author" group
# (for lack of better semantics).
if name == 'author' or not self.author_stack:
self.author_stack.append([])
if name == 'address': # newlines are meaningful
self.insert_newline = True
text = self.encode(node.astext())
self.insert_newline = False
else:
text = self.attval(node.astext())
self.author_stack[-1].append(text)
raise nodes.SkipNode
elif name == 'date':
self.date.append(self.attval(node.astext()))
raise nodes.SkipNode
self.out.append('\\textbf{%s}: &\n\t' % self.language_label(name))
if name == 'address':
self.insert_newline = 1
self.out.append('{\\raggedright\n')
self.context.append(' } \\\\\n')
else:
self.context.append(' \\\\\n')
def depart_docinfo_item(self, node):
self.out.append(self.context.pop())
# for address we did set insert_newline
self.insert_newline = False
def visit_doctest_block(self, node):
self.visit_literal_block(node)
def depart_doctest_block(self, node):
self.depart_literal_block(node)
def visit_document(self, node):
# titled document?
if (self.use_latex_docinfo or len(node) and
isinstance(node[0], nodes.title)):
self.title_labels += self.ids_to_labels(node, set_anchor=False)
def depart_document(self, node):
# Complete header with information gained from walkabout
# * language setup
if (self.babel.otherlanguages or
self.babel.language not in ('', 'english')):
self.requirements['babel'] = self.babel()
# * conditional requirements (before style sheet)
self.requirements = self.requirements.sortedvalues()
# * conditional fallback definitions (after style sheet)
self.fallbacks = self.fallbacks.sortedvalues()
# * PDF properties
self.pdfsetup.append(PreambleCmds.linking % self.hyperref_options)
if self.pdfauthor:
authors = self.author_separator.join(self.pdfauthor)
self.pdfinfo.append(' pdfauthor={%s}' % authors)
if self.pdfinfo:
self.pdfsetup += [r'\hypersetup{'] + self.pdfinfo + ['}']
# Complete body
# * document title (with "use_latex_docinfo" also
# 'author', 'organization', 'contact', 'address' and 'date')
if self.title or (
self.use_latex_docinfo and (self.author_stack or self.date)):
# with the default template, titledata is written to the preamble
self.titledata.append('%%% Title Data')
# \title (empty \title prevents error with \maketitle)
if self.title:
self.title.insert(0, '\phantomsection%\n ')
title = [''.join(self.title)] + self.title_labels
if self.subtitle:
title += [r'\\ % subtitle',
r'\DUdocumentsubtitle{%s}' % ''.join(self.subtitle)
] + self.subtitle_labels
self.titledata.append(r'\title{%s}' % '%\n '.join(title))
# \author (empty \author prevents warning with \maketitle)
authors = ['\\\\\n'.join(author_entry)
for author_entry in self.author_stack]
self.titledata.append(r'\author{%s}' %
' \\and\n'.join(authors))
# \date (empty \date prevents defaulting to \today)
self.titledata.append(r'\date{%s}' % ', '.join(self.date))
# \maketitle in the body formats title with LaTeX
self.body_pre_docinfo.append('\\maketitle\n')
# * bibliography
# TODO insertion point of bibliography should be configurable.
if self._use_latex_citations and len(self._bibitems)>0:
if not self.bibtex:
widest_label = ''
for bi in self._bibitems:
if len(widest_label)<len(bi[0]):
widest_label = bi[0]
self.out.append('\n\\begin{thebibliography}{%s}\n' %
widest_label)
for bi in self._bibitems:
# cite_key: underscores must not be escaped
cite_key = bi[0].replace(r'\_','_')
self.out.append('\\bibitem[%s]{%s}{%s}\n' %
(bi[0], cite_key, bi[1]))
self.out.append('\\end{thebibliography}\n')
else:
self.out.append('\n\\bibliographystyle{%s}\n' %
self.bibtex[0])
self.out.append('\\bibliography{%s}\n' % self.bibtex[1])
# * make sure to generate a toc file if needed for local contents:
if 'minitoc' in self.requirements and not self.has_latex_toc:
self.out.append('\n\\faketableofcontents % for local ToCs\n')
def visit_emphasis(self, node):
self.out.append('\\emph{')
if node['classes']:
self.visit_inline(node)
def depart_emphasis(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('}')
def visit_entry(self, node):
self.active_table.visit_entry()
# cell separation
# BUG: the following fails with more than one multirow
# starting in the second column (or later); see
# ../../../test/functional/input/data/latex.txt
if self.active_table.get_entry_number() == 1:
# if the first row is a multirow, this actually is the second row.
# this gets hairy if rowspans follow each other.
if self.active_table.get_rowspan(0):
count = 0
while self.active_table.get_rowspan(count):
count += 1
self.out.append(' & ')
self.active_table.visit_entry() # increment cell count
else:
self.out.append(' & ')
# multirow, multicolumn
# IN WORK BUG TODO HACK continues here
# multirow in LaTeX simply will enlarge the cell over several rows
# (the following n if n is positive, the former if negative).
if 'morerows' in node and 'morecols' in node:
raise NotImplementedError('Cells that '
'span multiple rows *and* columns are not supported, sorry.')
if 'morerows' in node:
self.requirements['multirow'] = r'\usepackage{multirow}'
count = node['morerows'] + 1
self.active_table.set_rowspan(
self.active_table.get_entry_number()-1,count)
# TODO why does multirow end on % ? needs to be checked for below
self.out.append('\\multirow{%d}{%s}{%%' %
(count,self.active_table.get_column_width()))
self.context.append('}')
elif 'morecols' in node:
# the vertical bar before the column is only written for the first
# column; the bar after the column is always written.
if self.active_table.get_entry_number() == 1:
bar1 = self.active_table.get_vertical_bar()
else:
bar1 = ''
count = node['morecols'] + 1
self.out.append('\\multicolumn{%d}{%sp{%s}%s}{' %
(count, bar1,
self.active_table.get_multicolumn_width(
self.active_table.get_entry_number(),
count),
self.active_table.get_vertical_bar()))
self.context.append('}')
else:
self.context.append('')
# header / not header
if isinstance(node.parent.parent, nodes.thead):
if self.out[-1].endswith("%"):
self.out.append("\n")
self.out.append('\\textbf{%')
self.context.append('}')
elif self.active_table.is_stub_column():
if self.out[-1].endswith("%"):
self.out.append("\n")
self.out.append('\\textbf{')
self.context.append('}')
else:
self.context.append('')
def depart_entry(self, node):
self.out.append(self.context.pop()) # header / not header
self.out.append(self.context.pop()) # multirow/column
# if following row is spanned from above.
if self.active_table.get_rowspan(self.active_table.get_entry_number()):
self.out.append(' & ')
self.active_table.visit_entry() # increment cell count
def visit_row(self, node):
self.active_table.visit_row()
def depart_row(self, node):
self.out.extend(self.active_table.depart_row())
def visit_enumerated_list(self, node):
# We create our own enumeration list environment.
# This allows setting the style and starting value
# and supports unlimited nesting.
enum_style = {'arabic':'arabic',
'loweralpha':'alph',
'upperalpha':'Alph',
'lowerroman':'roman',
'upperroman':'Roman' }
enum_suffix = ''
if 'suffix' in node:
enum_suffix = node['suffix']
enum_prefix = ''
if 'prefix' in node:
enum_prefix = node['prefix']
if self.compound_enumerators:
pref = ''
if self.section_prefix_for_enumerators and self.section_level:
for i in range(self.section_level):
pref += '%d.' % self._section_number[i]
pref = pref[:-1] + self.section_enumerator_separator
enum_prefix += pref
for ctype, cname in self._enumeration_counters:
enum_prefix += '\\%s{%s}.' % (ctype, cname)
enum_type = 'arabic'
if 'enumtype' in node:
enum_type = node['enumtype']
if enum_type in enum_style:
enum_type = enum_style[enum_type]
counter_name = 'listcnt%d' % len(self._enumeration_counters)
self._enumeration_counters.append((enum_type, counter_name))
# If we haven't used this counter name before, then create a
# new counter; otherwise, reset & reuse the old counter.
if len(self._enumeration_counters) > self._max_enumeration_counters:
self._max_enumeration_counters = len(self._enumeration_counters)
self.out.append('\\newcounter{%s}\n' % counter_name)
else:
self.out.append('\\setcounter{%s}{0}\n' % counter_name)
self.out.append('\\begin{list}{%s\\%s{%s}%s}\n' %
(enum_prefix,enum_type,counter_name,enum_suffix))
self.out.append('{\n')
self.out.append('\\usecounter{%s}\n' % counter_name)
# set start after usecounter, because it initializes to zero.
if 'start' in node:
self.out.append('\\addtocounter{%s}{%d}\n' %
(counter_name,node['start']-1))
## set rightmargin equal to leftmargin
self.out.append('\\setlength{\\rightmargin}{\\leftmargin}\n')
self.out.append('}\n')
def depart_enumerated_list(self, node):
self.out.append('\\end{list}\n')
self._enumeration_counters.pop()
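# Illustrative sketch of the generated LaTeX (hypothetical input: a
# lowerroman list with suffix '.' and start 3):
#   \newcounter{listcnt0}
#   \begin{list}{\roman{listcnt0}.}
#   {
#   \usecounter{listcnt0}
#   \addtocounter{listcnt0}{2}
#   \setlength{\rightmargin}{\leftmargin}
#   }
#   \item ...
#   \end{list}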
def visit_field(self, node):
# real output is done in siblings: _argument, _body, _name
pass
def depart_field(self, node):
self.out.append('\n')
##self.out.append('%[depart_field]\n')
def visit_field_argument(self, node):
self.out.append('%[visit_field_argument]\n')
def depart_field_argument(self, node):
self.out.append('%[depart_field_argument]\n')
def visit_field_body(self, node):
pass
def depart_field_body(self, node):
if self.out is self.docinfo:
self.out.append(r'\\')
def visit_field_list(self, node):
if self.out is not self.docinfo:
self.fallbacks['fieldlist'] = PreambleCmds.fieldlist
self.out.append('%\n\\begin{DUfieldlist}\n')
def depart_field_list(self, node):
if self.out is not self.docinfo:
self.out.append('\\end{DUfieldlist}\n')
def visit_field_name(self, node):
if self.out is self.docinfo:
self.out.append('\\textbf{')
else:
# Commands with optional args inside an optional arg must be put
# in a group, e.g. ``\item[{\hyperref[label]{text}}]``.
self.out.append('\\item[{')
def depart_field_name(self, node):
if self.out is self.docinfo:
self.out.append('}: &')
else:
self.out.append(':}]')
def visit_figure(self, node):
self.requirements['float_settings'] = PreambleCmds.float_settings
# The 'align' attribute sets the "outer alignment",
# for "inner alignment" use LaTeX default alignment (similar to HTML)
alignment = node.attributes.get('align', 'center')
if alignment != 'center':
# The LaTeX "figure" environment always uses the full textwidth,
# so "outer alignment" is ignored. Just write a comment.
# TODO: use the wrapfigure environment?
self.out.append('\n\\begin{figure} %% align = "%s"\n' % alignment)
else:
self.out.append('\n\\begin{figure}\n')
if node.get('ids'):
self.out += self.ids_to_labels(node) + ['\n']
def depart_figure(self, node):
self.out.append('\\end{figure}\n')
def visit_footer(self, node):
self.push_output_collector([])
self.out.append(r'\newcommand{\DUfooter}{')
def depart_footer(self, node):
self.out.append('}')
self.requirements['~footer'] = ''.join(self.out)
self.pop_output_collector()
def visit_footnote(self, node):
try:
backref = node['backrefs'][0]
except IndexError:
backref = node['ids'][0] # no backref, use self-ref instead
if self.settings.figure_footnotes:
self.requirements['~fnt_floats'] = PreambleCmds.footnote_floats
self.out.append('\\begin{figure}[b]')
self.append_hypertargets(node)
if node.get('id') == node.get('name'): # explicit label
self.out += self.ids_to_labels(node)
elif self.docutils_footnotes:
self.fallbacks['footnotes'] = PreambleCmds.footnotes
num,text = node.astext().split(None,1)
if self.settings.footnote_references == 'brackets':
num = '[%s]' % num
self.out.append('%%\n\\DUfootnotetext{%s}{%s}{%s}{' %
(node['ids'][0], backref, self.encode(num)))
if node['ids'] == node['names']:
self.out += self.ids_to_labels(node)
# mask newline to prevent spurious whitespace:
self.out.append('%')
## else: # TODO: "real" LaTeX \footnote{}s
def depart_footnote(self, node):
if self.figure_footnotes:
self.out.append('\\end{figure}\n')
else:
self.out.append('}\n')
def visit_footnote_reference(self, node):
href = ''
if 'refid' in node:
href = node['refid']
elif 'refname' in node:
href = self.document.nameids[node['refname']]
# if not self.docutils_footnotes:
# TODO: insert footnote content at (or near) this place
# print "footnote-ref to", node['refid']
# footnotes = (self.document.footnotes +
# self.document.autofootnotes +
# self.document.symbol_footnotes)
# for footnote in footnotes:
# # print footnote['ids']
# if node.get('refid', '') in footnote['ids']:
# print 'matches', footnote['ids']
format = self.settings.footnote_references
if format == 'brackets':
self.append_hypertargets(node)
self.out.append('\\hyperlink{%s}{[' % href)
self.context.append(']}')
else:
self.fallbacks['footnotes'] = PreambleCmds.footnotes
self.out.append(r'\DUfootnotemark{%s}{%s}{' %
(node['ids'][0], href))
self.context.append('}')
def depart_footnote_reference(self, node):
self.out.append(self.context.pop())
# footnote/citation label
def label_delim(self, node, bracket, superscript):
if isinstance(node.parent, nodes.footnote):
if not self.figure_footnotes:
raise nodes.SkipNode
if self.settings.footnote_references == 'brackets':
self.out.append(bracket)
else:
self.out.append(superscript)
else:
assert isinstance(node.parent, nodes.citation)
if not self._use_latex_citations:
self.out.append(bracket)
def visit_label(self, node):
"""footnote or citation label: in brackets or as superscript"""
self.label_delim(node, '[', '\\textsuperscript{')
def depart_label(self, node):
self.label_delim(node, ']', '}')
# elements generated by the framework e.g. section numbers.
def visit_generated(self, node):
pass
def depart_generated(self, node):
pass
def visit_header(self, node):
self.push_output_collector([])
self.out.append(r'\newcommand{\DUheader}{')
def depart_header(self, node):
self.out.append('}')
self.requirements['~header'] = ''.join(self.out)
self.pop_output_collector()
def to_latex_length(self, length_str, pxunit=None):
"""Convert `length_str` with rst lenght to LaTeX length
"""
if pxunit is not None:
sys.stderr.write('deprecation warning: LaTeXTranslator.to_latex_length()'
' option `pxunit` will be removed.')
match = re.match('(\d*\.?\d*)\s*(\S*)', length_str)
if not match:
return length_str
value, unit = match.groups()[:2]
# no unit or "DTP" points (called 'bp' in TeX):
if unit in ('', 'pt'):
length_str = '%sbp' % value
# percentage: relate to current line width
elif unit == '%':
length_str = '%.3f\\linewidth' % (float(value)/100.0)
elif self.is_xetex and unit == 'px':
# XeTeX does not know the length unit px.
# Use \pdfpxdimen, the macro to set the value of 1 px in pdftex.
# This way, configuring works the same for pdftex and xetex.
self.fallbacks['_providelength'] = PreambleCmds.providelength
self.fallbacks['px'] = '\n\\DUprovidelength{\\pdfpxdimen}{1bp}\n'
length_str = r'%s\pdfpxdimen' % value
return length_str
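# Illustrative examples (assuming a LaTeXTranslator `translator`;
# added for clarity, not part of the original source):
#   >>> translator.to_latex_length('30')    # no unit -> DTP points
#   '30bp'
#   >>> translator.to_latex_length('50%')   # relative to line width
#   '0.500\\linewidth'
#   >>> translator.to_latex_length('2em')   # known units pass through
#   '2em'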
def visit_image(self, node):
self.requirements['graphicx'] = self.graphicx_package
attrs = node.attributes
# Convert image URI to a local file path
imagepath = urllib.url2pathname(attrs['uri']).replace('\\', '/')
# alignment defaults:
if not 'align' in attrs:
# Set default align of image in a figure to 'center'
if isinstance(node.parent, nodes.figure):
attrs['align'] = 'center'
# query 'align-*' class argument
for cls in node['classes']:
if cls.startswith('align-'):
attrs['align'] = cls.split('-')[1]
# pre- and postfix (prefix inserted in reverse order)
pre = []
post = []
include_graphics_options = []
align_codes = {
# inline images: by default latex aligns the bottom.
'bottom': ('', ''),
'middle': (r'\raisebox{-0.5\height}{', '}'),
'top': (r'\raisebox{-\height}{', '}'),
# block level images:
'center': (r'\noindent\makebox[\textwidth][c]{', '}'),
'left': (r'\noindent{', r'\hfill}'),
'right': (r'\noindent{\hfill', '}'),}
if 'align' in attrs:
# TODO: warn or ignore non-applicable alignment settings?
try:
align_code = align_codes[attrs['align']]
pre.append(align_code[0])
post.append(align_code[1])
except KeyError:
pass # TODO: warn?
if 'height' in attrs:
include_graphics_options.append('height=%s' %
self.to_latex_length(attrs['height']))
if 'scale' in attrs:
include_graphics_options.append('scale=%f' %
(attrs['scale'] / 100.0))
if 'width' in attrs:
include_graphics_options.append('width=%s' %
self.to_latex_length(attrs['width']))
if not (self.is_inline(node) or
isinstance(node.parent, nodes.figure)):
pre.append('\n')
post.append('\n')
pre.reverse()
self.out.extend(pre)
options = ''
if include_graphics_options:
options = '[%s]' % (','.join(include_graphics_options))
self.out.append('\\includegraphics%s{%s}' % (options, imagepath))
self.out.extend(post)
def depart_image(self, node):
if node.get('ids'):
self.out += self.ids_to_labels(node) + ['\n']
def visit_inline(self, node): # <span>, i.e. custom roles
self.context.append('}' * len(node['classes']))
for cls in node['classes']:
if cls == 'align-center':
self.fallbacks['align-center'] = PreambleCmds.align_center
if cls.startswith('language-'):
language = self.babel.language_name(cls[9:])
if language:
self.babel.otherlanguages[language] = True
self.out.append(r'\foreignlanguage{%s}{' % language)
else:
self.fallbacks['inline'] = PreambleCmds.inline
self.out.append(r'\DUrole{%s}{' % cls)
def depart_inline(self, node):
self.out.append(self.context.pop())
def visit_interpreted(self, node):
# @@@ Incomplete, pending a proper implementation on the
# Parser/Reader end.
self.visit_literal(node)
def depart_interpreted(self, node):
self.depart_literal(node)
def visit_legend(self, node):
self.fallbacks['legend'] = PreambleCmds.legend
self.out.append('\\begin{DUlegend}')
def depart_legend(self, node):
self.out.append('\\end{DUlegend}\n')
def visit_line(self, node):
self.out.append('\item[] ')
def depart_line(self, node):
self.out.append('\n')
def visit_line_block(self, node):
self.fallbacks['_providelength'] = PreambleCmds.providelength
self.fallbacks['lineblock'] = PreambleCmds.lineblock
if isinstance(node.parent, nodes.line_block):
self.out.append('\\item[]\n'
'\\begin{DUlineblock}{\\DUlineblockindent}\n')
else:
self.out.append('\n\\begin{DUlineblock}{0em}\n')
if node['classes']:
self.visit_inline(node)
self.out.append('\n')
def depart_line_block(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('\n')
self.out.append('\\end{DUlineblock}\n')
def visit_list_item(self, node):
self.out.append('\n\\item ')
def depart_list_item(self, node):
pass
def visit_literal(self, node):
self.literal = True
if 'code' in node['classes'] and (
self.settings.syntax_highlight != 'none'):
self.requirements['color'] = PreambleCmds.color
self.fallbacks['code'] = PreambleCmds.highlight_rules
self.out.append('\\texttt{')
if node['classes']:
self.visit_inline(node)
def depart_literal(self, node):
self.literal = False
if node['classes']:
self.depart_inline(node)
self.out.append('}')
# Literal blocks are used for '::'-prefixed literal-indented
# blocks of text, where the inline markup is not recognized,
# but are also the product of the "parsed-literal" directive,
# where the markup is respected.
#
# In both cases, we want to use a typewriter/monospaced typeface.
# For "real" literal-blocks, we can use \verbatim, while for all
# the others we must use \mbox or \alltt.
#
# We can distinguish between the two kinds by the number of
# children that compose this node: if it is composed of a
# single element, it's either
# * a real one,
# * a parsed-literal that does not contain any markup, or
# * a parsed-literal containing just one markup construct.
def is_plaintext(self, node):
"""Check whether a node can be typeset verbatim"""
return (len(node) == 1) and isinstance(node[0], nodes.Text)
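    # For example (illustrative): a plain '::' literal block parses to
    # literal_block(Text), so is_plaintext() is True and a verbatim
    # environment can be used; a parsed-literal containing *emphasis*
    # parses to literal_block(Text, emphasis, Text) and must take the
    # non-verbatim branch below.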
def visit_literal_block(self, node):
"""Render a literal block."""
# environments and packages to typeset literal blocks
packages = {'listing': r'\usepackage{moreverb}',
'lstlisting': r'\usepackage{listings}',
'Verbatim': r'\usepackage{fancyvrb}',
# 'verbatim': '',
'verbatimtab': r'\usepackage{moreverb}'}
if not self.active_table.is_open():
# no quote inside tables, to avoid vertical space between
# table border and literal block.
# BUG: fails if normal text precedes the literal block.
self.out.append('%\n\\begin{quote}')
self.context.append('\n\\end{quote}\n')
else:
self.out.append('\n')
self.context.append('\n')
if self.literal_block_env != '' and self.is_plaintext(node):
self.requirements['literal_block'] = packages.get(
self.literal_block_env, '')
self.verbatim = True
self.out.append('\\begin{%s}%s\n' % (self.literal_block_env,
self.literal_block_options))
else:
self.literal = True
self.insert_newline = True
self.insert_non_breaking_blanks = True
if 'code' in node['classes'] and (
self.settings.syntax_highlight != 'none'):
self.requirements['color'] = PreambleCmds.color
self.fallbacks['code'] = PreambleCmds.highlight_rules
self.out.append('{\\ttfamily \\raggedright \\noindent\n')
def depart_literal_block(self, node):
if self.verbatim:
self.out.append('\n\\end{%s}\n' % self.literal_block_env)
self.verbatim = False
else:
self.out.append('\n}')
self.insert_non_breaking_blanks = False
self.insert_newline = False
self.literal = False
self.out.append(self.context.pop())
## def visit_meta(self, node):
## self.out.append('[visit_meta]\n')
# TODO: set keywords for pdf?
# But:
# The reStructuredText "meta" directive creates a "pending" node,
# which contains knowledge that the embedded "meta" node can only
# be handled by HTML-compatible writers. The "pending" node is
# resolved by the docutils.transforms.components.Filter transform,
# which checks that the calling writer supports HTML; if it doesn't,
# the "pending" node (and enclosed "meta" node) is removed from the
# document.
# --- docutils/docs/peps/pep-0258.html#transformer
## def depart_meta(self, node):
## self.out.append('[depart_meta]\n')
def visit_math(self, node, math_env='$'):
"""math role"""
if node['classes']:
self.visit_inline(node)
self.requirements['amsmath'] = r'\usepackage{amsmath}'
math_code = node.astext().translate(unichar2tex.uni2tex_table)
if node.get('ids'):
math_code = '\n'.join([math_code] + self.ids_to_labels(node))
if math_env == '$':
wrapper = u'$%s$'
else:
wrapper = u'\n'.join(['%%',
r'\begin{%s}' % math_env,
'%s',
r'\end{%s}' % math_env])
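        # e.g. (illustrative): inline math yields '$x^2$', while for
        # math_env='equation*' the result is
        # '%\n\begin{equation*}\nx^2\n\end{equation*}'.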
# print repr(wrapper), repr(math_code)
self.out.append(wrapper % math_code)
if node['classes']:
self.depart_inline(node)
# Content already processed:
raise nodes.SkipNode
def depart_math(self, node):
pass # never reached
def visit_math_block(self, node):
math_env = pick_math_environment(node.astext())
self.visit_math(node, math_env=math_env)
def depart_math_block(self, node):
pass # never reached
def visit_option(self, node):
if self.context[-1]:
# this is not the first option
self.out.append(', ')
def depart_option(self, node):
# flag that the first option is done.
self.context[-1] += 1
def visit_option_argument(self, node):
"""Append the delimiter betweeen an option and its argument to body."""
self.out.append(node.get('delimiter', ' '))
def depart_option_argument(self, node):
pass
def visit_option_group(self, node):
self.out.append('\n\\item[')
# flag for first option
self.context.append(0)
def depart_option_group(self, node):
self.context.pop() # the flag
self.out.append('] ')
def visit_option_list(self, node):
self.fallbacks['_providelength'] = PreambleCmds.providelength
self.fallbacks['optionlist'] = PreambleCmds.optionlist
self.out.append('%\n\\begin{DUoptionlist}\n')
def depart_option_list(self, node):
self.out.append('\n\\end{DUoptionlist}\n')
def visit_option_list_item(self, node):
pass
def depart_option_list_item(self, node):
pass
def visit_option_string(self, node):
##self.out.append(self.starttag(node, 'span', '', CLASS='option'))
pass
def depart_option_string(self, node):
##self.out.append('</span>')
pass
def visit_organization(self, node):
self.visit_docinfo_item(node, 'organization')
def depart_organization(self, node):
self.depart_docinfo_item(node)
def visit_paragraph(self, node):
        # Insert a blank line, unless the paragraph is the first in a list
        # item or follows a non-paragraph node in a compound.
index = node.parent.index(node)
if (index == 0 and (isinstance(node.parent, nodes.list_item) or
isinstance(node.parent, nodes.description))):
pass
elif (index > 0 and isinstance(node.parent, nodes.compound) and
not isinstance(node.parent[index - 1], nodes.paragraph) and
not isinstance(node.parent[index - 1], nodes.compound)):
pass
else:
self.out.append('\n')
if node.get('ids'):
self.out += self.ids_to_labels(node) + ['\n']
if node['classes']:
self.visit_inline(node)
def depart_paragraph(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('\n')
def visit_problematic(self, node):
self.requirements['color'] = PreambleCmds.color
self.out.append('%\n')
self.append_hypertargets(node)
self.out.append(r'\hyperlink{%s}{\textbf{\color{red}' % node['refid'])
def depart_problematic(self, node):
self.out.append('}}')
def visit_raw(self, node):
        if 'latex' not in node.get('format', '').split():
raise nodes.SkipNode
if not self.is_inline(node):
self.out.append('\n')
if node['classes']:
self.visit_inline(node)
# append "as-is" skipping any LaTeX-encoding
self.verbatim = True
def depart_raw(self, node):
self.verbatim = False
if node['classes']:
self.depart_inline(node)
if not self.is_inline(node):
self.out.append('\n')
def has_unbalanced_braces(self, string):
"""Test whether there are unmatched '{' or '}' characters."""
level = 0
for ch in string:
if ch == '{':
level += 1
if ch == '}':
level -= 1
if level < 0:
return True
return level != 0
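    # A few illustrative cases (added for clarity):
    #   has_unbalanced_braces('{a}')  -> False  (balanced)
    #   has_unbalanced_braces('{{a}') -> True   (one '{' left open)
    #   has_unbalanced_braces('}a{')  -> True   (closes before opening)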
def visit_reference(self, node):
# We need to escape #, \, and % if we use the URL in a command.
special_chars = {ord('#'): ur'\#',
ord('%'): ur'\%',
ord('\\'): ur'\\',
}
# external reference (URL)
if 'refuri' in node:
href = unicode(node['refuri']).translate(special_chars)
            # problematic characters: double caret and unbalanced braces
if href.find('^^') != -1 or self.has_unbalanced_braces(href):
self.error(
'External link "%s" not supported by LaTeX.\n'
' (Must not contain "^^" or unbalanced braces.)' % href)
if node['refuri'] == node.astext():
self.out.append(r'\url{%s}' % href)
raise nodes.SkipNode
self.out.append(r'\href{%s}{' % href)
return
# internal reference
if 'refid' in node:
href = node['refid']
elif 'refname' in node:
href = self.document.nameids[node['refname']]
else:
raise AssertionError('Unknown reference.')
if not self.is_inline(node):
self.out.append('\n')
self.out.append('\\hyperref[%s]{' % href)
if self._reference_label:
self.out.append('\\%s{%s}}' %
(self._reference_label, href.replace('#', '')))
raise nodes.SkipNode
def depart_reference(self, node):
self.out.append('}')
if not self.is_inline(node):
self.out.append('\n')
def visit_revision(self, node):
self.visit_docinfo_item(node, 'revision')
def depart_revision(self, node):
self.depart_docinfo_item(node)
def visit_section(self, node):
self.section_level += 1
# Initialize counter for potential subsections:
self._section_number.append(0)
# Counter for this section's level (initialized by parent section):
self._section_number[self.section_level - 1] += 1
def depart_section(self, node):
# Remove counter for potential subsections:
self._section_number.pop()
self.section_level -= 1
def visit_sidebar(self, node):
self.requirements['color'] = PreambleCmds.color
self.fallbacks['sidebar'] = PreambleCmds.sidebar
self.out.append('\n\\DUsidebar{\n')
def depart_sidebar(self, node):
self.out.append('}\n')
attribution_formats = {'dash': (u'—', ''), # EM DASH
'parentheses': ('(', ')'),
'parens': ('(', ')'),
'none': ('', '')}
def visit_attribution(self, node):
prefix, suffix = self.attribution_formats[self.settings.attribution]
self.out.append('\\nopagebreak\n\n\\raggedleft ')
self.out.append(prefix)
self.context.append(suffix)
def depart_attribution(self, node):
self.out.append(self.context.pop() + '\n')
def visit_status(self, node):
self.visit_docinfo_item(node, 'status')
def depart_status(self, node):
self.depart_docinfo_item(node)
def visit_strong(self, node):
self.out.append('\\textbf{')
if node['classes']:
self.visit_inline(node)
def depart_strong(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('}')
def visit_substitution_definition(self, node):
raise nodes.SkipNode
def visit_substitution_reference(self, node):
self.unimplemented_visit(node)
def visit_subtitle(self, node):
if isinstance(node.parent, nodes.document):
self.push_output_collector(self.subtitle)
self.fallbacks['documentsubtitle'] = PreambleCmds.documentsubtitle
self.subtitle_labels += self.ids_to_labels(node, set_anchor=False)
# section subtitle: "starred" (no number, not in ToC)
elif isinstance(node.parent, nodes.section):
self.out.append(r'\%s*{' %
self.d_class.section(self.section_level + 1))
else:
self.fallbacks['subtitle'] = PreambleCmds.subtitle
self.out.append('\n\\DUsubtitle[%s]{' % node.parent.tagname)
def depart_subtitle(self, node):
if isinstance(node.parent, nodes.document):
self.pop_output_collector()
else:
self.out.append('}\n')
def visit_system_message(self, node):
self.requirements['color'] = PreambleCmds.color
self.fallbacks['title'] = PreambleCmds.title
node['classes'] = ['system-message']
self.visit_admonition(node)
self.out.append('\\DUtitle[system-message]{system-message}\n')
self.append_hypertargets(node)
try:
line = ', line~%s' % node['line']
except KeyError:
line = ''
        self.out.append('\n\n{\\color{red}%s/%s} in \\texttt{%s}%s\n' %
(node['type'], node['level'],
self.encode(node['source']), line))
if len(node['backrefs']) == 1:
self.out.append('\n\\hyperlink{%s}{' % node['backrefs'][0])
self.context.append('}')
else:
backrefs = ['\\hyperlink{%s}{%d}' % (href, i+1)
for (i, href) in enumerate(node['backrefs'])]
self.context.append('backrefs: ' + ' '.join(backrefs))
def depart_system_message(self, node):
self.out.append(self.context.pop())
self.depart_admonition()
def visit_table(self, node):
self.requirements['table'] = PreambleCmds.table
if self.active_table.is_open():
self.table_stack.append(self.active_table)
# nesting longtable does not work (e.g. 2007-04-18)
        self.active_table = Table(self, 'tabular', self.settings.table_style)
# A longtable moves before \paragraph and \subparagraph
# section titles if it immediately follows them:
if (self.active_table._latex_type == 'longtable' and
isinstance(node.parent, nodes.section) and
node.parent.index(node) == 1 and
self.d_class.section(self.section_level).find('paragraph') != -1):
self.out.append('\\leavevmode')
self.active_table.open()
for cls in node['classes']:
self.active_table.set_table_style(cls)
if self.active_table._table_style == 'booktabs':
self.requirements['booktabs'] = r'\usepackage{booktabs}'
self.push_output_collector([])
def depart_table(self, node):
# wrap content in the right environment:
content = self.out
self.pop_output_collector()
self.out.append('\n' + self.active_table.get_opening())
self.out += content
self.out.append(self.active_table.get_closing() + '\n')
self.active_table.close()
        if len(self.table_stack) > 0:
self.active_table = self.table_stack.pop()
else:
self.active_table.set_table_style(self.settings.table_style)
# Insert hyperlabel after (long)table, as
# other places (beginning, caption) result in LaTeX errors.
if node.get('ids'):
self.out += self.ids_to_labels(node, set_anchor=False) + ['\n']
def visit_target(self, node):
# Skip indirect targets:
if ('refuri' in node # external hyperlink
or 'refid' in node # resolved internal link
or 'refname' in node): # unresolved internal link
## self.out.append('%% %s\n' % node) # for debugging
return
self.out.append('%\n')
# do we need an anchor (\phantomsection)?
        set_anchor = not isinstance(node.parent, (nodes.caption, nodes.title))
# TODO: where else can/must we omit the \phantomsection?
self.out += self.ids_to_labels(node, set_anchor)
def depart_target(self, node):
pass
def visit_tbody(self, node):
        # BUG workaround: write the table preamble (colspecs) if it was not
        # yet written, as happens for tables without a head.
if not self.active_table.get('preamble written'):
self.visit_thead(None)
self.depart_thead(None)
def depart_tbody(self, node):
pass
def visit_term(self, node):
"""definition list term"""
# Commands with optional args inside an optional arg must be put
# in a group, e.g. ``\item[{\hyperref[label]{text}}]``.
self.out.append('\\item[{')
def depart_term(self, node):
# \leavevmode results in a line break if the
# term is followed by an item list.
        self.out.append('}] \\leavevmode ')
def visit_tgroup(self, node):
#self.out.append(self.starttag(node, 'colgroup'))
#self.context.append('</colgroup>\n')
pass
def depart_tgroup(self, node):
pass
_thead_depth = 0
    def thead_depth(self):
return self._thead_depth
def visit_thead(self, node):
self._thead_depth += 1
if 1 == self.thead_depth():
self.out.append('{%s}\n' % self.active_table.get_colspecs())
            self.active_table.set('preamble written', 1)
self.out.append(self.active_table.get_caption())
self.out.extend(self.active_table.visit_thead())
def depart_thead(self, node):
if node is not None:
self.out.extend(self.active_table.depart_thead())
if self.active_table.need_recurse():
node.walkabout(self)
self._thead_depth -= 1
def visit_title(self, node):
"""Append section and other titles."""
# Document title
if node.parent.tagname == 'document':
self.push_output_collector(self.title)
self.context.append('')
self.pdfinfo.append(' pdftitle={%s},' %
self.encode(node.astext()))
# Topic titles (topic, admonition, sidebar)
elif (isinstance(node.parent, nodes.topic) or
isinstance(node.parent, nodes.admonition) or
isinstance(node.parent, nodes.sidebar)):
self.fallbacks['title'] = PreambleCmds.title
classes = ','.join(node.parent['classes'])
if not classes:
classes = node.tagname
self.out.append('\\DUtitle[%s]{' % classes)
self.context.append('}\n')
# Table caption
elif isinstance(node.parent, nodes.table):
self.push_output_collector(self.active_table.caption)
self.context.append('')
# Section title
else:
if hasattr(PreambleCmds, 'secnumdepth'):
self.requirements['secnumdepth'] = PreambleCmds.secnumdepth
section_name = self.d_class.section(self.section_level)
self.out.append('\n\n')
# System messages heading in red:
if ('system-messages' in node.parent['classes']):
self.requirements['color'] = PreambleCmds.color
section_title = self.encode(node.astext())
                self.out.append(r'\%s[%s]{\color{red}' % (
                    section_name, section_title))
else:
self.out.append(r'\%s{' % section_name)
if self.section_level > len(self.d_class.sections):
# section level not supported by LaTeX
self.fallbacks['title'] = PreambleCmds.title
# self.out.append('\\phantomsection%\n ')
# label and ToC entry:
bookmark = ['']
# add sections with unsupported level to toc and pdfbookmarks?
## if self.section_level > len(self.d_class.sections):
## section_title = self.encode(node.astext())
## bookmark.append(r'\addcontentsline{toc}{%s}{%s}' %
## (section_name, section_title))
bookmark += self.ids_to_labels(node.parent, set_anchor=False)
self.context.append('%\n '.join(bookmark) + '%\n}\n')
        # MAYBE postfix paragraph and subparagraph with \leavevmode to
# ensure floats stay in the section and text starts on a new line.
def depart_title(self, node):
self.out.append(self.context.pop())
if (isinstance(node.parent, nodes.table) or
node.parent.tagname == 'document'):
self.pop_output_collector()
def minitoc(self, node, title, depth):
"""Generate a local table of contents with LaTeX package minitoc"""
section_name = self.d_class.section(self.section_level)
# name-prefix for current section level
minitoc_names = {'part': 'part', 'chapter': 'mini'}
if 'chapter' not in self.d_class.sections:
minitoc_names['section'] = 'sect'
try:
minitoc_name = minitoc_names[section_name]
except KeyError: # minitoc only supports part- and toplevel
self.warn('Skipping local ToC at %s level.\n' % section_name +
' Feature not supported with option "use-latex-toc"',
base_node=node)
return
# Requirements/Setup
self.requirements['minitoc'] = PreambleCmds.minitoc
self.requirements['minitoc-'+minitoc_name] = (r'\do%stoc' %
minitoc_name)
# depth: (Docutils defaults to unlimited depth)
maxdepth = len(self.d_class.sections)
self.requirements['minitoc-%s-depth' % minitoc_name] = (
r'\mtcsetdepth{%stoc}{%d}' % (minitoc_name, maxdepth))
# Process 'depth' argument (!Docutils stores a relative depth while
# minitoc expects an absolute depth!):
offset = {'sect': 1, 'mini': 0, 'part': 0}
if 'chapter' in self.d_class.sections:
offset['part'] = -1
if depth:
self.out.append('\\setcounter{%stocdepth}{%d}' %
(minitoc_name, depth + offset[minitoc_name]))
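        # e.g. (illustrative): in a book class ('chapter' in sections), a
        # local ToC at part level with depth=2 gives
        # \setcounter{parttocdepth}{1}, because minitoc counts depth from
        # the document top while Docutils counts relative to the section.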
# title:
self.out.append('\\mtcsettitle{%stoc}{%s}\n' % (minitoc_name, title))
# the toc-generating command:
self.out.append('\\%stoc\n' % minitoc_name)
def visit_topic(self, node):
# Topic nodes can be generic topic, abstract, dedication, or ToC.
# table of contents:
if 'contents' in node['classes']:
self.out.append('\n')
self.out += self.ids_to_labels(node)
# add contents to PDF bookmarks sidebar
if isinstance(node.next_node(), nodes.title):
self.out.append('\n\\pdfbookmark[%d]{%s}{%s}\n' %
(self.section_level+1,
node.next_node().astext(),
node.get('ids', ['contents'])[0]
))
if self.use_latex_toc:
title = ''
if isinstance(node.next_node(), nodes.title):
title = self.encode(node.pop(0).astext())
depth = node.get('depth', 0)
if 'local' in node['classes']:
self.minitoc(node, title, depth)
self.context.append('')
return
if depth:
self.out.append('\\setcounter{tocdepth}{%d}\n' % depth)
if title != 'Contents':
self.out.append('\\renewcommand{\\contentsname}{%s}\n' %
title)
self.out.append('\\tableofcontents\n\n')
self.has_latex_toc = True
else: # Docutils generated contents list
# set flag for visit_bullet_list() and visit_title()
self.is_toc_list = True
self.context.append('')
elif ('abstract' in node['classes'] and
self.settings.use_latex_abstract):
self.push_output_collector(self.abstract)
self.out.append('\\begin{abstract}')
self.context.append('\\end{abstract}\n')
if isinstance(node.next_node(), nodes.title):
node.pop(0) # LaTeX provides its own title
else:
self.fallbacks['topic'] = PreambleCmds.topic
# special topics:
if 'abstract' in node['classes']:
self.fallbacks['abstract'] = PreambleCmds.abstract
self.push_output_collector(self.abstract)
if 'dedication' in node['classes']:
self.fallbacks['dedication'] = PreambleCmds.dedication
self.push_output_collector(self.dedication)
self.out.append('\n\\DUtopic[%s]{\n' % ','.join(node['classes']))
self.context.append('}\n')
def depart_topic(self, node):
self.out.append(self.context.pop())
self.is_toc_list = False
if ('abstract' in node['classes'] or
'dedication' in node['classes']):
self.pop_output_collector()
def visit_rubric(self, node):
self.fallbacks['rubric'] = PreambleCmds.rubric
self.out.append('\n\\DUrubric{')
self.context.append('}\n')
def depart_rubric(self, node):
self.out.append(self.context.pop())
def visit_transition(self, node):
self.fallbacks['transition'] = PreambleCmds.transition
self.out.append('\n\n')
self.out.append('%' + '_' * 75 + '\n')
self.out.append(r'\DUtransition')
self.out.append('\n\n')
def depart_transition(self, node):
pass
def visit_version(self, node):
self.visit_docinfo_item(node, 'version')
def depart_version(self, node):
self.depart_docinfo_item(node)
def unimplemented_visit(self, node):
raise NotImplementedError('visiting unimplemented node type: %s' %
node.__class__.__name__)
# def unknown_visit(self, node):
# def default_visit(self, node):
# vim: set ts=4 et ai :
|
gpl-2.0
|
40023154/final0627
|
static/Brython3.1.1-20150328-091302/Lib/xml/dom/minicompat.py
|
781
|
3228
|
"""Python version compatibility support for minidom."""
# This module should only be imported using "import *".
#
# The following names are defined:
#
# NodeList -- lightest possible NodeList implementation
#
# EmptyNodeList -- lightest possible NodeList that is guaranteed to
# remain empty (immutable)
#
# StringTypes -- tuple of defined string types
#
# defproperty -- function used in conjunction with GetattrMagic;
# using these together is needed to make them work
# as efficiently as possible in both Python 2.2+
# and older versions. For example:
#
# class MyClass(GetattrMagic):
# def _get_myattr(self):
# return something
#
# defproperty(MyClass, "myattr",
# "return some value")
#
# For Python 2.2 and newer, this will construct a
# property object on the class, which avoids
# needing to override __getattr__(). It will only
# work for read-only attributes.
#
# For older versions of Python, inheriting from
# GetattrMagic will use the traditional
# __getattr__() hackery to achieve the same effect,
# but less efficiently.
#
# defproperty() should be used for each version of
# the relevant _get_<property>() function.
__all__ = ["NodeList", "EmptyNodeList", "StringTypes", "defproperty"]
import xml.dom
StringTypes = (str,)
class NodeList(list):
__slots__ = ()
def item(self, index):
if 0 <= index < len(self):
return self[index]
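        # Out-of-range indices fall through and return None, matching the
        # W3C DOM contract for NodeList.item().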
def _get_length(self):
return len(self)
def _set_length(self, value):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute 'length'")
length = property(_get_length, _set_length,
doc="The number of nodes in the NodeList.")
def __getstate__(self):
return list(self)
def __setstate__(self, state):
self[:] = state
class EmptyNodeList(tuple):
__slots__ = ()
def __add__(self, other):
NL = NodeList()
NL.extend(other)
return NL
def __radd__(self, other):
NL = NodeList()
NL.extend(other)
return NL
def item(self, index):
return None
def _get_length(self):
return 0
def _set_length(self, value):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute 'length'")
length = property(_get_length, _set_length,
doc="The number of nodes in the NodeList.")
def defproperty(klass, name, doc):
get = getattr(klass, ("_get_" + name))
def set(self, value, name=name):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute " + repr(name))
assert not hasattr(klass, "_set_" + name), \
"expected not to find _set_" + name
prop = property(get, set, doc=doc)
setattr(klass, name, prop)
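# A minimal usage sketch (hypothetical class, added for illustration):
#
#   class Answer:
#       def _get_value(self):
#           return 42
#
#   defproperty(Answer, "value", doc="The answer (read-only).")
#   Answer().value       # -> 42
#   Answer().value = 0   # raises xml.dom.NoModificationAllowedErr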
|
gpl-3.0
|
nielsbuwen/ilastik
|
tests/nanshe/testOpMaxProjection.py
|
3
|
3034
|
###############################################################################
# lazyflow: data flow based lazy parallel computation framework
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Lesser GNU General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# See the files LICENSE.lgpl2 and LICENSE.lgpl3 for full text of the
# GNU Lesser General Public License version 2.1 and 3 respectively.
# This information is also available on the ilastik web site at:
# http://ilastik.org/license/
###############################################################################
__author__ = "John Kirkham <kirkhamj@janelia.hhmi.org>"
__date__ = "$Dec 19, 2014 15:35:22 EST$"
import numpy
import vigra
from lazyflow.graph import Graph
from lazyflow.operators import OpArrayPiper
import ilastik
import ilastik.applets
import ilastik.applets.nanshe
import ilastik.applets.nanshe.opMaxProjection
from ilastik.applets.nanshe.opMaxProjection import OpMaxProjection, OpMaxProjectionCached
class TestOpMaxProjection(object):
def testBasic1(self):
a = numpy.zeros((2,2,2,))
a[1,1,1] = 1
a[0,0,0] = 1
a = a[..., None]
a = vigra.taggedView(a, "tyxc")
expected_b = a.max(axis=0)
expected_b = vigra.taggedView(expected_b, "yxc")
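        # With 1s at opposite corners of the 2x2x2 volume, the max over the
        # t axis is a 2x2 plane with 1s at (0, 0) and (1, 1).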
graph = Graph()
op = OpMaxProjection(graph=graph)
opPrep = OpArrayPiper(graph=graph)
opPrep.Input.setValue(a)
op.Input.connect(opPrep.Output)
op.Axis.setValue(0)
b = op.Output[...].wait()
b = vigra.taggedView(b, "yxc")
assert((b == expected_b).all())
def testBasic2(self):
a = numpy.zeros((2,2,2,))
a[1,1,1] = 1
a[0,0,0] = 1
a = a[..., None]
a = vigra.taggedView(a, "tyxc")
expected_b = a.max(axis=0)
expected_b = vigra.taggedView(expected_b, "yxc")
graph = Graph()
op = OpMaxProjectionCached(graph=graph)
opPrep = OpArrayPiper(graph=graph)
opPrep.Input.setValue(a)
op.Input.connect(opPrep.Output)
op.Axis.setValue(0)
b = op.Output[...].wait()
b = vigra.taggedView(b, "yxc")
assert((b == expected_b).all())
if __name__ == "__main__":
import sys
import nose
sys.argv.append("--nocapture") # Don't steal stdout. Show it on the console as usual.
sys.argv.append("--nologcapture") # Don't set the logging level to DEBUG. Leave it alone.
ret = nose.run(defaultTest=__file__)
    if not ret:
        sys.exit(1)
|
gpl-3.0
|
yexihu/volatility
|
volatility/plugins/mac/session_hash_table.py
|
45
|
2308
|
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
import volatility.plugins.mac.pslist as pslist
import volatility.obj as obj
import volatility.plugins.mac.common as common
class mac_list_sessions(pslist.mac_pslist):
""" Enumerates sessions """
def calculate(self):
common.set_plugin_members(self)
shash_addr = self.addr_space.profile.get_symbol("_sesshash")
shash = obj.Object("unsigned long", offset = shash_addr, vm = self.addr_space)
shashtbl_addr = self.addr_space.profile.get_symbol("_sesshashtbl")
shashtbl_ptr = obj.Object("Pointer", offset = shashtbl_addr, vm = self.addr_space)
shash_array = obj.Object(theType = "Array", targetType = "sesshashhead", count = shash + 1, vm = self.addr_space, offset = shashtbl_ptr)
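        # Walk every bucket of the session hash table; entries within a
        # bucket form a linked list chained through s_hash.le_next.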
for sess in shash_array:
s = sess.lh_first
while s:
yield s
s = s.s_hash.le_next
def render_text(self, outfd, data):
self.table_header(outfd, [("Leader (Pid)", "8"),
("Leader (Name)", "20"),
("Login Name", "25")])
for sess in data:
if sess.s_leader:
pid = sess.s_leader.p_pid
pname = sess.s_leader.p_comm
else:
pid = -1
pname = "<INVALID LEADER>"
self.table_row(outfd, pid, pname, sess.s_login)
|
gpl-2.0
|
jalexvig/tensorflow
|
tensorflow/contrib/factorization/python/ops/clustering_ops.py
|
30
|
35921
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Clustering Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.factorization.python.ops import gen_clustering_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.factorization.python.ops.gen_clustering_ops import *
# pylint: enable=wildcard-import
from tensorflow.contrib.util import loader
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.embedding_ops import embedding_lookup
from tensorflow.python.platform import resource_loader
_clustering_ops = loader.load_op_library(
resource_loader.get_path_to_datafile('_clustering_ops.so'))
# Euclidean distance between vectors U and V is defined as \\(||U - V||_F\\),
# the square root of the sum of squared element-wise differences; the metric
# below uses its square.
SQUARED_EUCLIDEAN_DISTANCE = 'squared_euclidean'
# Cosine distance between vectors U and V is defined as
# \\(1 - (U \dot V) / (||U||_F ||V||_F)\\)
COSINE_DISTANCE = 'cosine'
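# For example (illustrative): for U = (1, 0) and V = (0, 1), the squared
# Euclidean distance is 2 and the cosine distance is 1 - 0 = 1.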
RANDOM_INIT = 'random'
KMEANS_PLUS_PLUS_INIT = 'kmeans_plus_plus'
KMC2_INIT = 'kmc2'
# The name of the variable holding the cluster centers. Used by the Estimator.
CLUSTERS_VAR_NAME = 'clusters'
class KMeans(object):
"""Creates the graph for k-means clustering."""
def __init__(self,
inputs,
num_clusters,
initial_clusters=RANDOM_INIT,
distance_metric=SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=False,
mini_batch_steps_per_iteration=1,
random_seed=0,
kmeans_plus_plus_num_retries=2,
kmc2_chain_length=200):
"""Creates an object for generating KMeans clustering graph.
This class implements the following variants of K-means algorithm:
If use_mini_batch is False, it runs standard full batch K-means. Each step
runs a single iteration of K-Means. This step can be run sharded across
multiple workers by passing a list of sharded inputs to this class. Note
however that a single step needs to process the full input at once.
If use_mini_batch is True, it runs a generalization of the mini-batch
K-means algorithm. It runs multiple iterations, where each iteration is
composed of mini_batch_steps_per_iteration steps. Two copies of cluster
centers are maintained: one that is updated at the end of each iteration,
and one that is updated every step. The first copy is used to compute
cluster allocations for each step, and for inference, while the second copy
is the one updated each step using the mini-batch update rule. After each
    iteration is complete, this second copy is copied back to the first copy.
Note that for use_mini_batch=True, when mini_batch_steps_per_iteration=1,
the algorithm reduces to the standard mini-batch algorithm. Also by setting
mini_batch_steps_per_iteration = num_inputs / batch_size, the algorithm
becomes an asynchronous version of the full-batch algorithm. Note however
that there is no guarantee by this implementation that each input is seen
exactly once per iteration. Also, different updates are applied
asynchronously without locking. So this asynchronous version may not behave
exactly like a full-batch version.
Args:
inputs: An input tensor or list of input tensors. It is assumed that the
data points have been previously randomly permuted.
num_clusters: An integer tensor specifying the number of clusters. This
argument is ignored if initial_clusters is a tensor or numpy array.
initial_clusters: Specifies the clusters used during initialization. One
of the following:
- a tensor or numpy array with the initial cluster centers.
- a function f(inputs, k) that returns up to k centers from `inputs`.
- "random": Choose centers randomly from `inputs`.
- "kmeans_plus_plus": Use kmeans++ to choose centers from `inputs`.
- "kmc2": Use the fast k-MC2 algorithm to choose centers from `inputs`.
In the last three cases, one batch of `inputs` may not yield
`num_clusters` centers, in which case initialization will require
multiple batches until enough centers are chosen. In the case of
"random" or "kmeans_plus_plus", if the input size is <= `num_clusters`
then the entire batch is chosen to be cluster centers.
distance_metric: Distance metric used for clustering. Supported options:
"squared_euclidean", "cosine".
use_mini_batch: If true, use the mini-batch k-means algorithm. Else assume
full batch.
mini_batch_steps_per_iteration: Number of steps after which the updated
cluster centers are synced back to a master copy.
      random_seed: Seed for the PRNG used to initialize the cluster centers.
kmeans_plus_plus_num_retries: For each point that is sampled during
kmeans++ initialization, this parameter specifies the number of
additional points to draw from the current distribution before selecting
the best. If a negative value is specified, a heuristic is used to
sample O(log(num_to_sample)) additional points.
kmc2_chain_length: Determines how many candidate points are used by the
        k-MC2 algorithm to produce one new cluster center. If a (mini-)batch
        contains fewer points, one new cluster center is generated from the
(mini-)batch.
Raises:
ValueError: An invalid argument was passed to initial_clusters or
distance_metric.
"""
if isinstance(initial_clusters, str) and initial_clusters not in [
RANDOM_INIT, KMEANS_PLUS_PLUS_INIT, KMC2_INIT
]:
raise ValueError(
"Unsupported initialization algorithm '%s'" % initial_clusters)
if distance_metric not in [SQUARED_EUCLIDEAN_DISTANCE, COSINE_DISTANCE]:
raise ValueError("Unsupported distance metric '%s'" % distance_metric)
self._inputs = inputs if isinstance(inputs, list) else [inputs]
self._num_clusters = num_clusters
self._initial_clusters = initial_clusters
self._distance_metric = distance_metric
self._use_mini_batch = use_mini_batch
self._mini_batch_steps_per_iteration = int(mini_batch_steps_per_iteration)
self._random_seed = random_seed
self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
self._kmc2_chain_length = kmc2_chain_length
@classmethod
def _distance_graph(cls, inputs, clusters, distance_metric):
"""Computes distance between each input and each cluster center.
Args:
inputs: list of input Tensors.
clusters: cluster Tensor.
distance_metric: distance metric used for clustering
Returns:
list of Tensors, where each element corresponds to each element in inputs.
The value is the distance of each row to all the cluster centers.
Currently only Euclidean distance and cosine distance are supported.
"""
assert isinstance(inputs, list)
if distance_metric == SQUARED_EUCLIDEAN_DISTANCE:
return cls._compute_euclidean_distance(inputs, clusters)
elif distance_metric == COSINE_DISTANCE:
return cls._compute_cosine_distance(
inputs, clusters, inputs_normalized=True)
else:
assert False, str(distance_metric)
@classmethod
def _compute_euclidean_distance(cls, inputs, clusters):
"""Computes Euclidean distance between each input and each cluster center.
Args:
inputs: list of input Tensors.
clusters: cluster Tensor.
Returns:
list of Tensors, where each element corresponds to each element in inputs.
The value is the distance of each row to all the cluster centers.
"""
output = []
for inp in inputs:
with ops.colocate_with(inp, ignore_existing=True):
# Computes Euclidean distance. Note the first and third terms are
# broadcast additions.
squared_distance = (
math_ops.reduce_sum(math_ops.square(inp), 1, keepdims=True) -
2 * math_ops.matmul(inp, clusters, transpose_b=True) +
array_ops.transpose(
math_ops.reduce_sum(
math_ops.square(clusters), 1, keepdims=True)))
output.append(squared_distance)
return output
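  # The computation above uses the standard expansion
  # \\(||x - c||^2 = ||x||^2 - 2 x . c + ||c||^2\\), evaluated for all
  # (row, center) pairs at once through the matmul term.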
@classmethod
def _compute_cosine_distance(cls, inputs, clusters, inputs_normalized=True):
"""Computes cosine distance between each input and each cluster center.
Args:
      inputs: list of input Tensors.
      clusters: cluster Tensor.
      inputs_normalized: if True, assumes that inputs and clusters are already
      L2-normalized, so the cosine distance reduces to 1 minus the dot
      product. Else it L2-normalizes the inputs first.
Returns:
list of Tensors, where each element corresponds to each element in inp.
The value is the distance of each row to all the cluster centers.
"""
output = []
if not inputs_normalized:
with ops.colocate_with(clusters, ignore_existing=True):
clusters = nn_impl.l2_normalize(clusters, dim=1)
for inp in inputs:
with ops.colocate_with(inp, ignore_existing=True):
if not inputs_normalized:
inp = nn_impl.l2_normalize(inp, dim=1)
output.append(1 - math_ops.matmul(inp, clusters, transpose_b=True))
return output
def _infer_graph(self, inputs, clusters):
"""Maps input to closest cluster and the score.
Args:
inputs: list of input Tensors.
clusters: Tensor of cluster centers.
Returns:
      List of tuples, where each value in a tuple corresponds to a value in
      inputs. Each tuple has the following three elements:
all_scores: distance of each input to each cluster center.
score: distance of each input to closest cluster center.
cluster_idx: index of cluster center closest to the corresponding input.
"""
assert isinstance(inputs, list)
# Pairwise distances are used only by transform(). In all other cases, this
# sub-graph is not evaluated.
scores = self._distance_graph(inputs, clusters, self._distance_metric)
output = []
if (self._distance_metric == COSINE_DISTANCE and
not self._clusters_l2_normalized()):
# The cosine distance between normalized vectors x and y is the same as
# 2 * squared_euclidean_distance. We are using this fact and reusing the
# nearest_neighbors op.
# TODO(ands): Support COSINE distance in nearest_neighbors and remove
# this.
with ops.colocate_with(clusters, ignore_existing=True):
clusters = nn_impl.l2_normalize(clusters, dim=1)
for inp, score in zip(inputs, scores):
with ops.colocate_with(inp, ignore_existing=True):
(indices, distances) = gen_clustering_ops.nearest_neighbors(
inp, clusters, 1)
if self._distance_metric == COSINE_DISTANCE:
distances *= 0.5
output.append((score, array_ops.squeeze(distances, [-1]),
array_ops.squeeze(indices, [-1])))
return zip(*output)
def _clusters_l2_normalized(self):
"""Returns True if clusters centers are kept normalized."""
return (self._distance_metric == COSINE_DISTANCE and
(not self._use_mini_batch or
self._mini_batch_steps_per_iteration > 1))
def _create_variables(self, num_clusters):
"""Creates variables.
Args:
num_clusters: an integer Tensor providing the number of clusters.
Returns:
Tuple with following elements:
- cluster_centers: a Tensor for storing cluster centers
- cluster_centers_initialized: bool Variable indicating whether clusters
are initialized.
- cluster_counts: a Tensor for storing counts of points assigned to this
cluster. This is used by mini-batch training.
- cluster_centers_updated: Tensor representing copy of cluster centers
that are updated every step.
- update_in_steps: numbers of steps left before we sync
cluster_centers_updated back to cluster_centers.
"""
init_value = array_ops.constant([], dtype=dtypes.float32)
cluster_centers = variable_scope.variable(
init_value, name=CLUSTERS_VAR_NAME, validate_shape=False)
cluster_centers_initialized = variable_scope.variable(
False, dtype=dtypes.bool, name='initialized')
if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1:
# Copy of cluster centers actively updated each step according to
# mini-batch update rule.
cluster_centers_updated = variable_scope.variable(
init_value, name='clusters_updated', validate_shape=False)
# How many steps till we copy the updated clusters to cluster_centers.
update_in_steps = variable_scope.variable(
self._mini_batch_steps_per_iteration,
dtype=dtypes.int64,
name='update_in_steps')
# Count of points assigned to cluster_centers_updated.
cluster_counts = variable_scope.variable(
array_ops.zeros([num_clusters], dtype=dtypes.int64))
else:
cluster_centers_updated = cluster_centers
update_in_steps = None
cluster_counts = (
variable_scope.variable(
array_ops.ones([num_clusters], dtype=dtypes.int64))
if self._use_mini_batch else None)
return (cluster_centers, cluster_centers_initialized, cluster_counts,
cluster_centers_updated, update_in_steps)
@classmethod
def _l2_normalize_data(cls, inputs):
"""Normalized the input data."""
output = []
for inp in inputs:
with ops.colocate_with(inp, ignore_existing=True):
output.append(nn_impl.l2_normalize(inp, dim=1))
return output
def training_graph(self):
"""Generate a training graph for kmeans algorithm.
This returns, among other things, an op that chooses initial centers
(init_op), a boolean variable that is set to True when the initial centers
are chosen (cluster_centers_initialized), and an op to perform either an
entire Lloyd iteration or a mini-batch of a Lloyd iteration (training_op).
The caller should use these components as follows. A single worker should
execute init_op multiple times until cluster_centers_initialized becomes
True. Then multiple workers may execute training_op any number of times.
Returns:
A tuple consisting of:
all_scores: A matrix (or list of matrices) of dimensions (num_input,
num_clusters) where the value is the distance of an input vector and a
cluster center.
cluster_idx: A vector (or list of vectors). Each element in the vector
corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
scores: Similar to cluster_idx but specifies the distance to the
assigned cluster instead.
cluster_centers_initialized: scalar indicating whether clusters have been
initialized.
init_op: an op to initialize the clusters.
training_op: an op that runs an iteration of training.
"""
# Implementation of kmeans.
if (isinstance(self._initial_clusters, str) or
callable(self._initial_clusters)):
initial_clusters = self._initial_clusters
num_clusters = ops.convert_to_tensor(self._num_clusters)
else:
initial_clusters = ops.convert_to_tensor(self._initial_clusters)
num_clusters = array_ops.shape(initial_clusters)[0]
inputs = self._inputs
(cluster_centers_var, cluster_centers_initialized, total_counts,
cluster_centers_updated,
update_in_steps) = self._create_variables(num_clusters)
init_op = _InitializeClustersOpFactory(
self._inputs, num_clusters, initial_clusters, self._distance_metric,
self._random_seed, self._kmeans_plus_plus_num_retries,
self._kmc2_chain_length, cluster_centers_var, cluster_centers_updated,
cluster_centers_initialized).op()
cluster_centers = cluster_centers_var
if self._distance_metric == COSINE_DISTANCE:
inputs = self._l2_normalize_data(inputs)
if not self._clusters_l2_normalized():
cluster_centers = nn_impl.l2_normalize(cluster_centers, dim=1)
all_scores, scores, cluster_idx = self._infer_graph(inputs, cluster_centers)
if self._use_mini_batch:
sync_updates_op = self._mini_batch_sync_updates_op(
update_in_steps, cluster_centers_var, cluster_centers_updated,
total_counts)
assert sync_updates_op is not None
with ops.control_dependencies([sync_updates_op]):
training_op = self._mini_batch_training_op(
inputs, cluster_idx, cluster_centers_updated, total_counts)
else:
assert cluster_centers == cluster_centers_var
training_op = self._full_batch_training_op(
inputs, num_clusters, cluster_idx, cluster_centers_var)
return (all_scores, cluster_idx, scores, cluster_centers_initialized,
init_op, training_op)
def _mini_batch_sync_updates_op(self, update_in_steps, cluster_centers_var,
cluster_centers_updated, total_counts):
if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1:
assert update_in_steps is not None
with ops.colocate_with(update_in_steps, ignore_existing=True):
def _f():
          # Note that there is a race condition here, so we only make
          # best-effort updates. We reset update_in_steps first so that
          # other workers don't duplicate the updates. Also we update
          # cluster_center_vars
# before resetting total_counts to avoid large updates to
# cluster_centers_updated based on partially updated
# cluster_center_vars.
with ops.control_dependencies([
state_ops.assign(update_in_steps,
self._mini_batch_steps_per_iteration - 1)
]):
with ops.colocate_with(
cluster_centers_updated, ignore_existing=True):
if self._distance_metric == COSINE_DISTANCE:
cluster_centers = nn_impl.l2_normalize(
cluster_centers_updated, dim=1)
else:
cluster_centers = cluster_centers_updated
with ops.colocate_with(cluster_centers_var, ignore_existing=True):
with ops.control_dependencies(
[state_ops.assign(cluster_centers_var, cluster_centers)]):
with ops.colocate_with(None, ignore_existing=True):
with ops.control_dependencies([
state_ops.assign(total_counts,
array_ops.zeros_like(total_counts))
]):
return array_ops.identity(update_in_steps)
return control_flow_ops.cond(
update_in_steps <= 0, _f,
lambda: state_ops.assign_sub(update_in_steps, 1))
else:
return control_flow_ops.no_op()
def _mini_batch_training_op(self, inputs, cluster_idx_list, cluster_centers,
total_counts):
"""Creates an op for training for mini batch case.
Args:
inputs: list of input Tensors.
cluster_idx_list: A vector (or list of vectors). Each element in the
vector corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
cluster_centers: Tensor Ref of cluster centers.
total_counts: Tensor Ref of cluster counts.
Returns:
An op for doing an update of mini-batch k-means.
"""
update_ops = []
for inp, cluster_idx in zip(inputs, cluster_idx_list):
with ops.colocate_with(inp, ignore_existing=True):
assert total_counts is not None
cluster_idx = array_ops.reshape(cluster_idx, [-1])
# Dedupe the unique ids of cluster_centers being updated so that updates
# can be locally aggregated.
unique_ids, unique_idx = array_ops.unique(cluster_idx)
num_unique_cluster_idx = array_ops.size(unique_ids)
# Fetch the old values of counts and cluster_centers.
with ops.colocate_with(total_counts, ignore_existing=True):
old_counts = array_ops.gather(total_counts, unique_ids)
# TODO(agarwal): This colocation seems to run into problems. Fix it.
with ops.colocate_with(cluster_centers, ignore_existing=True):
old_cluster_centers = array_ops.gather(cluster_centers, unique_ids)
# Locally aggregate the increment to counts.
count_updates = math_ops.unsorted_segment_sum(
array_ops.ones_like(unique_idx, dtype=total_counts.dtype),
unique_idx, num_unique_cluster_idx)
# Locally compute the sum of inputs mapped to each id.
# For a cluster with old cluster value x, old count n, and with data
# d_1,...d_k newly assigned to it, we recompute the new value as
# \\(x += (sum_i(d_i) - k * x) / (n + k)\\).
# Compute \\(sum_i(d_i)\\), see comment above.
cluster_center_updates = math_ops.unsorted_segment_sum(
inp, unique_idx, num_unique_cluster_idx)
# Shape to enable broadcasting count_updates and learning_rate to inp.
# It extends the shape with 1's to match the rank of inp.
broadcast_shape = array_ops.concat([
array_ops.reshape(num_unique_cluster_idx, [1]),
array_ops.ones(
array_ops.reshape(array_ops.rank(inp) - 1, [1]),
dtype=dtypes.int32)
], 0)
# Subtract k * x, see comment above.
cluster_center_updates -= math_ops.cast(
array_ops.reshape(count_updates, broadcast_shape),
inp.dtype) * old_cluster_centers
learning_rate = math_ops.reciprocal(
math_ops.cast(old_counts + count_updates, inp.dtype))
learning_rate = array_ops.reshape(learning_rate, broadcast_shape)
# scale by 1 / (n + k), see comment above.
cluster_center_updates *= learning_rate
# Apply the updates.
update_counts = state_ops.scatter_add(total_counts, unique_ids,
count_updates)
update_cluster_centers = state_ops.scatter_add(
cluster_centers, unique_ids, cluster_center_updates)
update_ops.extend([update_counts, update_cluster_centers])
return control_flow_ops.group(*update_ops)
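  # Sanity check of the update rule (illustrative): a center at x = 0 with
  # count n = 1 that receives one point d = 3 moves to 0 + (3 - 1*0)/(1+1)
  # = 1.5, i.e. the running mean of the points seen so far.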
def _full_batch_training_op(self, inputs, num_clusters, cluster_idx_list,
cluster_centers):
"""Creates an op for training for full batch case.
Args:
inputs: list of input Tensors.
num_clusters: an integer Tensor providing the number of clusters.
cluster_idx_list: A vector (or list of vectors). Each element in the
vector corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
cluster_centers: Tensor Ref of cluster centers.
Returns:
      An op for doing an update of full-batch k-means.
"""
cluster_sums = []
cluster_counts = []
epsilon = constant_op.constant(1e-6, dtype=inputs[0].dtype)
for inp, cluster_idx in zip(inputs, cluster_idx_list):
with ops.colocate_with(inp, ignore_existing=True):
cluster_sums.append(
math_ops.unsorted_segment_sum(inp, cluster_idx, num_clusters))
cluster_counts.append(
math_ops.unsorted_segment_sum(
array_ops.reshape(
array_ops.ones(
array_ops.reshape(array_ops.shape(inp)[0], [-1])),
[-1, 1]), cluster_idx, num_clusters))
with ops.colocate_with(cluster_centers, ignore_existing=True):
new_clusters_centers = math_ops.add_n(cluster_sums) / (
math_ops.cast(math_ops.add_n(cluster_counts), cluster_sums[0].dtype) +
epsilon)
if self._clusters_l2_normalized():
new_clusters_centers = nn_impl.l2_normalize(new_clusters_centers, dim=1)
return state_ops.assign(cluster_centers, new_clusters_centers)
class _InitializeClustersOpFactory(object):
"""Internal class to create the op to initialize the clusters.
The op performs this algorithm (see constructor args):
num_remaining = num_clusters - length(cluster_centers)
if num_remaining == 0:
assert that cluster_centers_initialized is true
else:
assert that num_remaining > 0
new_centers = choose up to num_remaining initial centers
l2-normalize new_centers if using cosine distance
all_centers = concat(cluster_centers, new_centers)
cluster_centers := all_centers
if there is a cluster_centers_updated variable:
cluster_centers_updated := cluster_centers
num_now_remaining = num_clusters - length(cluster_centers)
if num_now_remaining == 0:
cluster_centers_initialized := true
"""
# TODO(ccolby): Refactor this class so that kmc2 isn't so much a special case.
def __init__(self, inputs, num_clusters, initial_clusters, distance_metric,
random_seed, kmeans_plus_plus_num_retries, kmc2_chain_length,
cluster_centers, cluster_centers_updated,
cluster_centers_initialized):
"""Creates an op factory.
Args:
inputs: See KMeans constructor.
num_clusters: An integer Tensor providing the number of clusters.
initial_clusters: See KMeans constructor.
distance_metric: See KMeans constructor.
random_seed: See KMeans constructor.
kmeans_plus_plus_num_retries: See KMeans constructor.
kmc2_chain_length: See KMeans constructor.
cluster_centers: The TF variable holding the initial centers. It may
already contain some centers when the op is executed.
      cluster_centers_updated: A second TF variable to hold a copy of the
        initial centers, used when mini-batch mode keeps two copies of the
        centers (mini_batch_steps_per_iteration > 1); otherwise it is the
        same variable as cluster_centers.
cluster_centers_initialized: A boolean TF variable that will be set
to true when all the initial centers have been chosen.
"""
# All of these instance variables are constants.
self._inputs = inputs
self._num_clusters = num_clusters
self._initial_clusters = initial_clusters
self._distance_metric = distance_metric
self._random_seed = random_seed
self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
self._kmc2_chain_length = kmc2_chain_length
self._cluster_centers = cluster_centers
self._cluster_centers_updated = cluster_centers_updated
self._cluster_centers_initialized = cluster_centers_initialized
self._num_selected = array_ops.shape(self._cluster_centers)[0]
self._num_remaining = self._num_clusters - self._num_selected
self._num_data = math_ops.add_n(
[array_ops.shape(i)[0] for i in self._inputs])
def _random(self):
indices = random_ops.random_uniform(
array_ops.reshape(self._num_remaining, [-1]),
minval=0,
maxval=math_ops.cast(self._num_data, dtypes.int64),
seed=self._random_seed,
dtype=dtypes.int64)
return embedding_lookup(self._inputs, indices, partition_strategy='div')
def _kmeans_plus_plus(self):
# Points from only the first shard are used for initializing centers.
# TODO(ands): Use all points.
inp = self._inputs[0]
if self._distance_metric == COSINE_DISTANCE:
inp = nn_impl.l2_normalize(inp, dim=1)
return gen_clustering_ops.kmeans_plus_plus_initialization(
inp,
math_ops.to_int64(self._num_remaining), self._random_seed,
self._kmeans_plus_plus_num_retries)
def _kmc2_multiple_centers(self):
"""Adds new initial cluster centers using the k-MC2 algorithm.
In each call to the op, the provided batch is split into subsets based on
the specified `kmc2_chain_length`. On each subset, a single Markov chain of
    the k-MC2 algorithm is used to add *one* new cluster center. If there
    are fewer than `kmc2_chain_length` points in the subset, a single center is
added using one Markov chain on the full input. It is assumed that the
provided batch has previously been randomly permuted. Otherwise, k-MC2 may
return suboptimal centers.
Returns:
An op that adds new cluster centers.
"""
# The op only operates on the first shard of data.
first_shard = self._inputs[0]
# Number of points in the input that can be used.
batch_size = array_ops.shape(first_shard)[0]
# Maximum number of subsets such that the size of each subset is at least
# `kmc2_chain_length`. Final subsets may be larger.
max_to_sample = math_ops.cast(
batch_size / self._kmc2_chain_length, dtype=dtypes.int32)
# We sample at least one new center and at most all remaining centers.
num_to_sample = math_ops.maximum(
math_ops.minimum(self._num_remaining, max_to_sample), 1)
def _cond(i, _):
"""Stopping condition for the while loop."""
return math_ops.less(i, num_to_sample)
def _body(i, _):
"""Body that adds a single new center based on a subset."""
def _sample_random():
"""Returns a random point as a cluster center."""
# By assumption the batch is reshuffled and _sample_random is always
# called for i=0. Hence, we simply return the first point.
new_center = array_ops.reshape(first_shard[0], [1, -1])
if self._distance_metric == COSINE_DISTANCE:
new_center = nn_impl.l2_normalize(new_center, dim=1)
return new_center
def _sample_kmc2_chain():
"""Returns previous centers as well as a new center sampled using k-MC2.
"""
# Extract the subset from the underlying batch.
start = i * self._kmc2_chain_length
end = start + self._kmc2_chain_length
subset = first_shard[start:end]
# Compute the distances from points in the subset to previous centers.
_, distances = gen_clustering_ops.nearest_neighbors(
subset, self._cluster_centers, 1)
# Sample index of new center using k-MC2 Markov chain.
new_center_index = gen_clustering_ops.kmc2_chain_initialization(
array_ops.squeeze(distances), self._random_seed)
# Extract actual new center.
newly_sampled_center = array_ops.reshape(subset[new_center_index],
[1, -1])
# Return concatenation with previously sampled centers.
if self._distance_metric == COSINE_DISTANCE:
newly_sampled_center = nn_impl.l2_normalize(
newly_sampled_center, dim=1)
return array_ops.concat([self._cluster_centers, newly_sampled_center],
0)
# Obtain a random point if there are no previously sampled centers.
# Otherwise, construct a k-MC2 Markov chain.
new_centers = control_flow_ops.cond(
math_ops.equal(self._num_selected, 0), _sample_random,
_sample_kmc2_chain)
# Assign new cluster centers to underlying variable.
assigned_centers = state_ops.assign(
self._cluster_centers, new_centers, validate_shape=False)
if self._cluster_centers_updated is not self._cluster_centers:
assigned_centers = state_ops.assign(
self._cluster_centers_updated,
assigned_centers,
validate_shape=False)
return i + 1, self._num_clusters - array_ops.shape(assigned_centers)[0]
# Add num_to_sample new data points.
_, num_remaining = control_flow_ops.while_loop(_cond, _body, [0, 0])
return num_remaining
def _greedy_batch_sampler(self, sampler):
# If the input dataset size is smaller than the number of centers
# remaining, choose the entire input dataset as centers. This can happen
# with mini-batch. Otherwise, sample the batch according to the provided
# sampler.
return control_flow_ops.cond(self._num_data <= self._num_remaining,
lambda: array_ops.concat(self._inputs, 0),
sampler)
def _single_batch_sampler(self, sampler):
# Enforce that there are at least as many data points as centers
# remaining. This gives the provided sampler the chance to select all
# remaining centers from a single batch.
with ops.control_dependencies(
[check_ops.assert_greater_equal(self._num_data, self._num_remaining)]):
return sampler()
def _choose_initial_centers(self):
if isinstance(self._initial_clusters, str):
if self._initial_clusters == RANDOM_INIT:
return self._greedy_batch_sampler(self._random)
else: # self._initial_clusters == KMEANS_PLUS_PLUS_INIT
return self._single_batch_sampler(self._kmeans_plus_plus)
elif callable(self._initial_clusters):
return self._initial_clusters(self._inputs, self._num_remaining)
else:
with ops.control_dependencies([
check_ops.assert_equal(self._num_remaining,
array_ops.shape(self._initial_clusters)[0])
]):
return self._initial_clusters
def _add_new_centers(self):
"""Adds some centers and returns the number of centers remaining."""
new_centers = self._choose_initial_centers()
if self._distance_metric == COSINE_DISTANCE:
new_centers = nn_impl.l2_normalize(new_centers, dim=1)
# If cluster_centers is empty, it doesn't have the right shape for concat.
all_centers = control_flow_ops.cond(
math_ops.equal(self._num_selected, 0), lambda: new_centers,
lambda: array_ops.concat([self._cluster_centers, new_centers], 0))
# TODO(ccolby): De-dupe all_centers?
a = state_ops.assign(
self._cluster_centers, all_centers, validate_shape=False)
if self._cluster_centers_updated is not self._cluster_centers:
a = state_ops.assign(
self._cluster_centers_updated, a, validate_shape=False)
return self._num_clusters - array_ops.shape(a)[0]
def _initialize(self):
with ops.control_dependencies([
check_ops.assert_positive(self._num_remaining),
]):
if self._initial_clusters == KMC2_INIT:
num_now_remaining = self._kmc2_multiple_centers()
else:
num_now_remaining = self._add_new_centers()
return control_flow_ops.cond(
math_ops.equal(num_now_remaining, 0),
lambda: state_ops.assign(self._cluster_centers_initialized, True),
control_flow_ops.no_op)
def op(self):
"""Returns the cluster initializer op."""
return control_flow_ops.cond(
math_ops.equal(self._num_remaining, 0),
lambda: check_ops.assert_equal(self._cluster_centers_initialized, True),
self._initialize)
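  # In miniature, the intended driver pattern (a hedged sketch assuming
  # session-style execution; `sess`, `init_op` and
  # `cluster_centers_initialized` are illustrative names):
  #
  #   while not sess.run(cluster_centers_initialized):
  #     sess.run(init_op)  # adds up to num_to_sample centers per run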
|
apache-2.0
|
AtonLerin/pymel
|
pymel/core/datatypes.py
|
5
|
155210
|
"""Data classes that are returned by functions within ``pymel.core``
A wrap of Maya's Vector, Point, Color, Matrix, TransformationMatrix, Quaternion, EulerRotation types
"""
import sys
import math
import copy
import operator
import colorsys
import pymel.util as util
import pymel.api as _api
from pymel.util.arrays import *
from pymel.util.arrays import _toCompOrArrayInstance
import pymel.internal.factories as _factories
# in python2.6/maya2010 'as' becomes a keyword.
# TODO: add a version check:
if sys.version_info >= (2, 6):
AS_UNITS = 'asUnits'
else:
AS_UNITS = 'as'
# patch some Maya api classes that miss __iter__ to make them iterable / convertible to list
def _patchMVector():
def __len__(self):
""" Number of components in the Maya api Vector, ie 3 """
return 3
type.__setattr__(_api.MVector, '__len__', __len__)
def __iter__(self):
""" Iterates on all components of a Maya api Vector """
for i in xrange(len(self)):
yield _api.MVector.__getitem__(self, i)
type.__setattr__(_api.MVector, '__iter__', __iter__)
def _patchMFloatVector():
def __len__(self):
""" Number of components in the Maya api FloatVector, ie 3 """
return 3
type.__setattr__(_api.MFloatVector, '__len__', __len__)
def __iter__(self):
""" Iterates on all components of a Maya api FloatVector """
for i in xrange(len(self)):
yield _api.MFloatVector.__getitem__(self, i)
type.__setattr__(_api.MFloatVector, '__iter__', __iter__)
def _patchMPoint():
def __len__(self):
""" Number of components in the Maya api Point, ie 4 """
return 4
type.__setattr__(_api.MPoint, '__len__', __len__)
def __iter__(self):
""" Iterates on all components of a Maya api Point """
for i in xrange(len(self)):
yield _api.MPoint.__getitem__(self, i)
type.__setattr__(_api.MPoint, '__iter__', __iter__)
def _patchMFloatPoint():
def __len__(self):
""" Number of components in the Maya api FloatPoint, ie 4 """
return 4
type.__setattr__(_api.MFloatPoint, '__len__', __len__)
def __iter__(self):
""" Iterates on all components of a Maya api FloatPoint """
for i in xrange(len(self)):
yield _api.MFloatPoint.__getitem__(self, i)
type.__setattr__(_api.MFloatPoint, '__iter__', __iter__)
def _patchMColor():
def __len__(self):
""" Number of components in the Maya api Color, ie 4 """
return 4
type.__setattr__(_api.MColor, '__len__', __len__)
def __iter__(self):
""" Iterates on all components of a Maya api Color """
for i in xrange(len(self)):
yield _api.MColor.__getitem__(self, i)
type.__setattr__(_api.MColor, '__iter__', __iter__)
def _patchMMatrix():
def __len__(self):
""" Number of rows in the Maya api Matrix, ie 4.
Not to be confused with the number of components (16) given by the size method """
return 4
type.__setattr__(_api.MMatrix, '__len__', __len__)
def __iter__(self):
""" Iterates on all 4 rows of a Maya api Matrix """
for r in xrange(4):
yield Array([_api.MScriptUtil.getDoubleArrayItem(_api.MMatrix.__getitem__(self, r), c) for c in xrange(4)])
type.__setattr__(_api.MMatrix, '__iter__', __iter__)
def _patchMFloatMatrix():
def __len__(self):
""" Number of rows in the Maya api FloatMatrix, ie 4.
Not to be confused with the number of components (16) given by the size method """
return 4
type.__setattr__(_api.MFloatMatrix, '__len__', __len__)
def __iter__(self):
""" Iterates on all 4 rows of a Maya api FloatMatrix """
for r in xrange(4):
yield Array([_api.MScriptUtil.getFloatArrayItem(_api.MFloatMatrix.__getitem__(self, r), c) for c in xrange(4)])
type.__setattr__(_api.MFloatMatrix, '__iter__', __iter__)
def _patchMTransformationMatrix():
def __len__(self):
""" Number of rows in the Maya api Matrix, ie 4.
Not to be confused with the number of components (16) given by the size method """
return 4
type.__setattr__(_api.MTransformationMatrix, '__len__', __len__)
def __iter__(self):
""" Iterates on all 4 rows of a Maya api TransformationMatrix """
return self.asMatrix().__iter__()
type.__setattr__(_api.MTransformationMatrix, '__iter__', __iter__)
def _patchMQuaternion():
def __len__(self):
""" Number of components in the Maya api Quaternion, ie 4 """
return 4
type.__setattr__(_api.MQuaternion, '__len__', __len__)
def __iter__(self):
""" Iterates on all components of a Maya api Quaternion """
for i in xrange(len(self)):
yield _api.MQuaternion.__getitem__(self, i)
type.__setattr__(_api.MQuaternion, '__iter__', __iter__)
def _patchMEulerRotation():
def __len__(self):
""" Number of components in the Maya api EulerRotation, ie 3 """
return 3
type.__setattr__(_api.MEulerRotation, '__len__', __len__)
def __iter__(self):
""" Iterates on all components of a Maya api EulerRotation """
for i in xrange(len(self)):
yield _api.MEulerRotation.__getitem__(self, i)
type.__setattr__(_api.MEulerRotation, '__iter__', __iter__)
_patchMVector()
_patchMFloatVector()
_patchMPoint()
_patchMFloatPoint()
_patchMColor()
_patchMMatrix()
_patchMFloatMatrix()
_patchMTransformationMatrix()
_patchMQuaternion()
_patchMEulerRotation()
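# A self-contained sketch of the patching technique above on a stand-in
# class (no Maya required); type.__setattr__ is used because assigning
# through the class can be rejected for some wrapped types:
#
# class _FakeMVector(object):
#     def __init__(self, x, y, z):
#         self._v = (x, y, z)
#     def __getitem__(self, i):
#         return self._v[i]
# def _patchLenIter(cls, n):
#     type.__setattr__(cls, '__len__', lambda self: n)
#     type.__setattr__(cls, '__iter__', lambda self: (self[i] for i in xrange(n)))
# _patchLenIter(_FakeMVector, 3)
# assert list(_FakeMVector(1, 2, 3)) == [1, 2, 3]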
# the meta class of metaMayaWrapper
class MetaMayaArrayTypeWrapper(_factories.MetaMayaTypeWrapper):
""" A metaclass to wrap Maya array type classes such as Vector, Matrix """
def __new__(mcl, classname, bases, classdict):
""" Create a new wrapping class for a Maya api type, such as Vector or Matrix """
if 'shape' in classdict:
# fixed shape means also fixed ndim and size
shape = classdict['shape']
ndim = len(shape)
size = reduce(operator.mul, shape, 1)
if 'ndim' not in classdict:
classdict['ndim'] = ndim
elif classdict['ndim'] != ndim:
raise ValueError, "class %s shape definition %s and number of dimensions definition %s do not match" % (classname, shape, ndim)
if 'size' not in classdict:
classdict['size'] = size
elif classdict['size'] != size:
raise ValueError, "class %s shape definition %s and size definition %s do not match" % (classname, shape, size)
# create the new class
newcls = super(MetaMayaArrayTypeWrapper, mcl).__new__(mcl, classname, bases, classdict)
try:
apicls = newcls.apicls
except:
apicls = None
try:
shape = newcls.shape
except:
shape = None
try:
cnames = newcls.cnames
except:
cnames = ()
if shape is not None:
# fixed shape means also fixed ndim and size
ndim = len(shape)
size = reduce(operator.mul, shape, 1)
if cnames:
# definition for component names
type.__setattr__(newcls, 'cnames', cnames)
subsizes = [reduce(operator.mul, shape[i + 1:], 1) for i in xrange(ndim)]
for index, compname in enumerate(cnames):
coords = []
for i in xrange(ndim):
c = index // subsizes[i]
index -= c * subsizes[i]
coords.append(c)
if len(coords) == 1:
coords = coords[0]
else:
coords = tuple(coords)
# def _get(self):
# return self.__getitem__(coords)
# _get.__name__ = '_get_' + compname
#
# # FIXME : the set property does not do anything in python 2.4 !!! It doesn't even get called.
#
# def _set(self, val):
# self.__setitem__(coords, val)
#
# _set.__name__ = '_set_' + compname
#
# p = property( _get, _set, None, 'set and get %s component' % compname )
cmd = "property( lambda self: self.__getitem__(%s) , lambda self, val: self.__setitem__(%s,val) )" % (coords, coords)
p = eval(cmd)
if compname not in classdict:
type.__setattr__(newcls, compname, p)
else:
raise AttributeError, "component name %s clashes with class method %r" % (compname, classdict[compname])
elif cnames:
raise ValueError, "can only define component names for classes with a fixed shape/size"
# constants for shape, ndim, size
if shape is not None:
type.__setattr__(newcls, 'shape', shape)
if ndim is not None:
type.__setattr__(newcls, 'ndim', ndim)
if size is not None:
type.__setattr__(newcls, 'size', size)
#__slots__ = ['_data', '_shape', '_size']
# add component names to read-only list
readonly = newcls.__readonly__
if hasattr(newcls, 'shape'):
readonly['shape'] = None
if hasattr(newcls, 'ndim'):
readonly['ndim'] = None
if hasattr(newcls, 'size'):
readonly['size'] = None
if 'cnames' not in readonly:
readonly['cnames'] = None
type.__setattr__(newcls, '__readonly__', readonly)
# print "created class", newcls
# print "bases", newcls.__bases__
# print "readonly", newcls.__readonly__
# print "slots", newcls.__slots__
return newcls
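# A worked example of the flat-index -> coordinates decomposition above
# (illustrative values): for shape (4, 4), subsizes == [4, 1], so the
# component 'a12' at flat index 6 maps to coords (1, 2):
#
# shape = (4, 4)
# subsizes = [reduce(operator.mul, shape[i + 1:], 1) for i in xrange(len(shape))]
# index, coords = 6, []
# for i in xrange(len(shape)):
#     c = index // subsizes[i]
#     index -= c * subsizes[i]
#     coords.append(c)
# assert tuple(coords) == (1, 2)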
# generic math functions that can operate on Arrays inherited from arrays
# (min, max, sum, prod...)
# Functions that work on vectors will now be inherited from Array and properly defer
# to the class methods
class Vector(VectorN):
"""
A 3 dimensional vector class that wraps Maya's api Vector class
>>> from pymel.all import *
>>> import pymel.core.datatypes as dt
>>>
>>> v = dt.Vector(1, 2, 3)
>>> w = dt.Vector(x=1, z=2)
>>> z = dt.Vector( dt.Vector.xAxis, z=1)
>>> v = dt.Vector(1, 2, 3, unit='meters')
>>> print v
[1.0, 2.0, 3.0]
"""
__metaclass__ = MetaMayaArrayTypeWrapper
__slots__ = ()
# class specific info
apicls = _api.MVector
cnames = ('x', 'y', 'z')
shape = (3,)
unit = None
def __new__(cls, *args, **kwargs):
shape = kwargs.get('shape', None)
ndim = kwargs.get('ndim', None)
size = kwargs.get('size', None)
# will default to class constant shape = (3,), so it's just an error check to catch invalid shapes,
# as no other option is actually possible on Vector, but this method could be used to allow wrapping
# of Maya array classes that can have a variable number of elements
shape, ndim, size = cls._expandshape(shape, ndim, size)
new = cls.apicls.__new__(cls)
cls.apicls.__init__(new)
return new
def __init__(self, *args, **kwargs):
""" __init__ method, valid for Vector, Point and Color classes """
cls = self.__class__
if args:
# allow both forms for arguments
if len(args) == 1 and hasattr(args[0], '__iter__'):
args = args[0]
# shortcut when a direct api init is possible
try:
self.assign(args)
except:
# special exception to the rule that you cannot drop data in Arrays __init__
# to allow all conversion from Vector derived classes (MPoint, MColor) to a base class
# special case for MPoint to cartesianize if necessary
# note : we may want to premultiply MColor by the alpha in a similar way
if isinstance(args, _api.MPoint) and args.w != 1.0:
args = copy.deepcopy(args).cartesianize()
if isinstance(args, _api.MColor) and args.a != 1.0:
# note : we may want to premultiply Color by the alpha in a similar way
pass
if isinstance(args, _api.MVector) or isinstance(args, _api.MPoint) or isinstance(args, _api.MColor):
args = tuple(args)
if len(args) > len(self):
args = args[slice(self.shape[0])]
super(Vector, self).__init__(*args)
if hasattr(cls, 'cnames') and len(set(cls.cnames) & set(kwargs)):
# can also use the form <componentname>=<number>
l = list(self.flat)
setcomp = False
for i, c in enumerate(cls.cnames):
if c in kwargs:
if float(l[i]) != float(kwargs[c]):
l[i] = float(kwargs[c])
setcomp = True
if setcomp:
try:
self.assign(l)
except:
msg = ", ".join(map(lambda x, y: x + "=<" + util.clsname(y) + ">", cls.cnames, l))
raise TypeError, "in %s(%s), at least one of the components is of an invalid type, check help(%s) " % (cls.__name__, msg, cls.__name__)
# units handling
self.unit = kwargs.get('unit', None)
if self.unit is not None:
self.assign([Distance(x, self.unit) for x in self])
def __repr__(self):
if hasattr(self, 'unit') and self.unit:
return "dt.%s(%s, unit='%s')" % (self.__class__.__name__, str(self), self.unit)
else:
return "dt.%s(%s)" % (self.__class__.__name__, str(self))
# for compatibility with base classes Array that actually hold a nested list in their _data attribute
# here, there is no _data attribute as we subclass _api.MVector directly, thus v.data is v
# for wraps
def _getdata(self):
return self.apicls(self)
def _setdata(self, value):
self.assign(value)
def _deldata(self):
if hasattr(self.apicls, 'clear'):
self.apicls.clear(self)
else:
raise TypeError, "cannot clear stored elements of %s" % (self.__class__.__name__)
data = property(_getdata, _setdata, _deldata, "The Vector/FloatVector/Point/FloatPoint/Color data")
# overloads for assign and get though standard way should be to use the data property
# to access stored values
def assign(self, value):
""" Wrap the Vector api assign method """
# don't accept instances as assign works on exact types
if type(value) != self.apicls and type(value) != type(self):
if not hasattr(value, '__iter__'):
value = (value,)
value = self.apicls(*value)
self.apicls.assign(self, value)
return self
# API get, actually not faster than pulling self[i] for such a short structure
def get(self):
""" Wrap the Vector api get method """
        # need to keep a ref to the MScriptUtil alive until
        # the pointers are no longer needed...
ms = _api.MScriptUtil()
l = (0,) * self.size
ms.createFromDouble(*l)
p = ms.asDoublePtr()
self.apicls.get(self, p)
return tuple([ms.getDoubleArrayItem(p, i) for i in xrange(self.size)])
def __len__(self):
""" Number of components in the Vector instance, 3 for Vector, 4 for Point and Color """
return self.apicls.__len__(self)
# __getitem__ / __setitem__ override
    # faster to override __getitem__ because we know Vector only has one dimension
def __getitem__(self, i):
""" Get component i value from self """
if hasattr(i, '__iter__'):
i = list(i)
if len(i) == 1:
i = i[0]
else:
raise IndexError, "class %s instance %s has only %s dimension(s), index %s is out of bounds" % (util.clsname(self), self, self.ndim, i)
if isinstance(i, slice):
return _toCompOrArrayInstance(list(self)[i], VectorN)
try:
return _toCompOrArrayInstance(list(self)[i], VectorN)
except:
raise IndexError, "class %s instance %s is of size %s, index %s is out of bounds" % (util.clsname(self), self, self.size, i)
else:
if i < 0:
i = self.size + i
if i < self.size and not i < 0:
if hasattr(self.apicls, '__getitem__'):
return self.apicls.__getitem__(self, i)
else:
return list(self)[i]
else:
raise IndexError, "class %s instance %s is of size %s, index %s is out of bounds" % (util.clsname(self), self, self.size, i)
    # as _api.MVector has no __setitem__ method, we need to reassign the whole Vector
def __setitem__(self, i, a):
""" Set component i value on self """
v = VectorN(self)
v.__setitem__(i, a)
self.assign(v)
# iterator override
# TODO : support for optional __iter__ arguments
def __iter__(self, *args, **kwargs):
""" Iterate on the api components """
return self.apicls.__iter__(self.data)
def __contains__(self, value):
""" True if at least one of the vector components is equal to the argument """
return value in self.__iter__()
    # common operators without an api equivalent are inherited from VectorN
# operators using the Maya API when applicable, but that can delegate to VectorN
def __eq__(self, other):
""" u.__eq__(v) <==> u == v
Equivalence test """
try:
return bool(self.apicls.__eq__(self, other))
except Exception:
return bool(super(Vector, self).__eq__(other))
def __ne__(self, other):
""" u.__ne__(v) <==> u != v
Equivalence test """
return (not self.__eq__(other))
def __neg__(self):
""" u.__neg__() <==> -u
The unary minus operator. Negates the value of each of the components of u """
return self.__class__(self.apicls.__neg__(self))
def __add__(self, other):
""" u.__add__(v) <==> u+v
Returns the result of the addition of u and v if v is convertible to a VectorN (element-wise addition),
adds v to every component of u if v is a scalar """
try:
return self.__class__._convert(self.apicls.__add__(self, other))
except Exception:
return self.__class__._convert(super(Vector, self).__add__(other))
def __radd__(self, other):
""" u.__radd__(v) <==> v+u
Returns the result of the addition of u and v if v is convertible to a VectorN (element-wise addition),
adds v to every component of u if v is a scalar """
try:
return self.__class__._convert(self.apicls.__radd__(self, other))
except Exception:
return self.__class__._convert(super(Vector, self).__radd__(other))
def __iadd__(self, other):
""" u.__iadd__(v) <==> u += v
In place addition of u and v, see __add__ """
try:
return self.__class__(self.__add__(other))
except Exception:
return NotImplemented
def __sub__(self, other):
""" u.__sub__(v) <==> u-v
        Returns the result of the subtraction of v from u if v is convertible to a VectorN (element-wise subtraction),
        subtracts v from every component of u if v is a scalar """
try:
return self.__class__._convert(self.apicls.__sub__(self, other))
except Exception:
return self.__class__._convert(super(Vector, self).__sub__(other))
def __rsub__(self, other):
""" u.__rsub__(v) <==> v-u
        Returns the result of the subtraction of u from v if v is convertible to a VectorN (element-wise subtraction),
        replaces every component c of u by v-c if v is a scalar """
try:
return self.__class__._convert(self.apicls.__rsub__(self, other))
except Exception:
return self.__class__._convert(super(Vector, self).__rsub__(other))
def __isub__(self, other):
""" u.__isub__(v) <==> u -= v
        In place subtraction of v from u, see __sub__ """
try:
return self.__class__(self.__sub__(other))
except Exception:
return NotImplemented
def __div__(self, other):
""" u.__div__(v) <==> u/v
Returns the result of the division of u by v if v is convertible to a VectorN (element-wise division),
divide every component of u by v if v is a scalar """
try:
return self.__class__._convert(self.apicls.__div__(self, other))
except Exception:
return self.__class__._convert(super(Vector, self).__div__(other))
def __rdiv__(self, other):
""" u.__rdiv__(v) <==> v/u
        Returns the result of the division of v by u if v is convertible to a VectorN (element-wise division),
invert every component of u and multiply it by v if v is a scalar """
try:
return self.__class__._convert(self.apicls.__rdiv__(self, other))
except Exception:
return self.__class__._convert(super(Vector, self).__rdiv__(other))
def __idiv__(self, other):
""" u.__idiv__(v) <==> u /= v
In place division of u by v, see __div__ """
try:
return self.__class__(self.__div__(other))
except Exception:
return NotImplemented
# action depends on second object type
def __mul__(self, other):
""" u.__mul__(v) <==> u*v
The multiply '*' operator is mapped to the dot product when both objects are Vectors,
to the transformation of u by matrix v when v is a MatrixN,
to element wise multiplication when v is a sequence,
and multiplies each component of u by v when v is a numeric type. """
try:
res = self.apicls.__mul__(self, other)
assert res is not NotImplemented
except Exception:
res = super(Vector, self).__mul__(other)
if util.isNumeric(res) or res is NotImplemented:
return res
else:
return self.__class__._convert(res)
def __rmul__(self, other):
""" u.__rmul__(v) <==> v*u
The multiply '*' operator is mapped to the dot product when both objects are Vectors,
to the left side multiplication (pre-multiplication) of u by matrix v when v is a MatrixN,
to element wise multiplication when v is a sequence,
and multiplies each component of u by v when v is a numeric type. """
try:
res = self.apicls.__rmul__(self, other)
except:
res = super(Vector, self).__rmul__(other)
if util.isNumeric(res):
return res
else:
return self.__class__._convert(res)
def __imul__(self, other):
""" u.__imul__(v) <==> u *= v
Valid for Vector * Matrix multiplication, in place transformation of u by Matrix v
or Vector by scalar multiplication only """
try:
return self.__class__(self.__mul__(other))
except:
return NotImplemented
# special operators
def __xor__(self, other):
""" u.__xor__(v) <==> u^v
Defines the cross product operator between two 3D vectors,
if v is a MatrixN, u^v is equivalent to u.transformAsNormal(v) """
if isinstance(other, VectorN):
return self.cross(other)
elif isinstance(other, MatrixN):
return self.transformAsNormal(other)
else:
return NotImplemented
def __ixor__(self, other):
""" u.__xor__(v) <==> u^=v
        In place cross product, or transformation by the inverse transpose of v if v is a MatrixN """
try:
return self.__class__(self.__xor__(other))
except:
return NotImplemented
# wrap of other API MVector methods, we use the api method if possible and delegate to Vector else
def isEquivalent(self, other, tol=None):
""" Returns true if both arguments considered as Vector are equal within the specified tolerance """
if tol is None:
tol = _api.MVector_kTol
try:
nself, nother = coerce(self, other)
except:
return False
if isinstance(nself, Vector):
return bool(nself.apicls.isEquivalent(nself, nother, tol))
else:
return bool(super(Vector, nself).isEquivalent(nother, tol))
def isParallel(self, other, tol=None):
""" Returns true if both arguments considered as Vector are parallel within the specified tolerance """
if tol is None:
tol = _api.MVector_kTol
try:
return bool(self.apicls.isParallel(Vector(self), Vector(other), tol))
except:
return super(Vector, self).isParallel(other, tol)
def distanceTo(self, other):
try:
return self.apicls.distanceTo(Point(self), Point(other))
except:
return super(Vector, self).dist(other)
def length(self):
""" Return the length of the vector """
return Vector.apicls.length(Vector(self))
def sqlength(self):
""" Return the square length of the vector """
return self.dot(self)
def normal(self):
""" Return a normalized copy of self """
return self.__class__(Vector.apicls.normal(Vector(self)))
def normalize(self):
""" Performs an in place normalization of self """
if type(self) is Vector:
Vector.apicls.normalize(self)
else:
self.assign(self.normal())
# additional api methods that work on Vector only, and don't have an equivalent on VectorN
def rotateTo(self, other):
""" u.rotateTo(v) --> Quaternion
Returns the Quaternion that represents the rotation of the Vector u into the Vector v
        around their mutually perpendicular axis. It amounts to rotating u by angle(u, v) around axis(u, v) """
if isinstance(other, Vector):
return Quaternion(Vector.apicls.rotateTo(Vector(self), Vector(other)))
else:
raise TypeError, "%r is not a Vector instance" % other
def rotateBy(self, *args):
""" u.rotateBy(*args) --> Vector
Returns the result of rotating u by the specified arguments.
There are several ways the rotation can be specified:
args is a tuple of one Matrix, TransformationMatrix, Quaternion, EulerRotation
        args is a tuple of 4 arguments: 3 rotation values and an optional rotation order
args is a tuple of one Vector, the axis and one float, the angle to rotate around that axis in radians"""
if args:
if len(args) == 2 and isinstance(args[0], Vector):
return self.__class__(self.apicls.rotateBy(self, Quaternion(Vector(args[0]), float(args[1]))))
elif len(args) == 1 and isinstance(args[0], Matrix):
return self.__class__(self.apicls.rotateBy(self, args[0].rotate))
else:
return self.__class__(self.apicls.rotateBy(self, EulerRotation(unit='radians', *args)))
else:
return self
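    # Illustrative calls for the three accepted forms (inside Maya; results
    # shown approximately):
    #     u = Vector(1, 0, 0)
    #     u.rotateBy(Vector(0, 0, 1), math.pi / 2)  # axis + angle -> ~Vector(0, 1, 0)
    #     u.rotateBy(0, 0, math.pi / 2)             # x, y, z euler values in radians
    #     u.rotateBy(Matrix())                      # by the rotation part of a matrix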
# def asUnit(self, unit) :
# #kUnit = Distance.kUnit(unit)
# return self.__class__( [ Distance(x).asUnit(unit) for x in self ] )
#
# def asUnit(self) :
# return self.asUnit(self.unit)
#
    # def asUIUnit(self) :
# return self.asUnit(Distance.getUIUnit())
#
# def asInternalUnit(self) :
# return self.asUnit(Distance.getInternalUnit())
#
# def asMillimeter(self) :
# return self.asUnit('millimeter')
# def asCentimeters(self) :
# return self.asUnit('centimeters')
# def asKilometers(self) :
# return self.asUnit('kilometers')
# def asMeters(self) :
# return self.asUnit('meters')
#
# def asInches(self) :
# return self.asUnit('inches')
# def asFeet(self) :
# return self.asUnit('feet')
# def asYards(self) :
# return self.asUnit('yards')
# def asMiles(self) :
# return self.asUnit('miles')
# additional api methods that work on Vector only, but can also be delegated to VectorN
def transformAsNormal(self, other):
""" Returns the vector transformed by the matrix as a normal
Normal vectors are not transformed in the same way as position vectors or points.
If this vector is treated as a normal vector then it needs to be transformed by
post multiplying it by the inverse transpose of the transformation matrix.
This method will apply the proper transformation to the vector as if it were a normal. """
if isinstance(other, Matrix):
return self.__class__._convert(Vector.apicls.transformAsNormal(Vector(self), Matrix(other)))
else:
return self.__class__._convert(super(Vector, self).transformAsNormal(other))
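    # A plain NumPy sketch (a hedged illustration, not the api call) of why
    # the inverse transpose is needed: under a non-uniform scale a normal
    # transformed like a position stops being perpendicular to the surface.
    #
    #   import numpy as np
    #   m = np.diag([2.0, 1.0, 1.0])            # scale x by 2
    #   tangent = np.array([1.0, 1.0, 0.0])     # direction in the surface
    #   normal = np.array([1.0, -1.0, 0.0])     # perpendicular to it
    #   n_wrong = np.dot(normal, m)             # transformed as a position
    #   n_right = np.dot(normal, np.linalg.inv(m).T)
    #   np.dot(np.dot(tangent, m), n_wrong)     # 3.0 -> no longer perpendicular
    #   np.dot(np.dot(tangent, m), n_right)     # 0.0 -> still perpendicular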
def dot(self, other):
""" dot product of two vectors """
if isinstance(other, Vector):
return Vector.apicls.__mul__(Vector(self), Vector(other))
else:
return super(Vector, self).dot(other)
def cross(self, other):
""" cross product, only defined for two 3D vectors """
if isinstance(other, Vector):
return self.__class__._convert(Vector.apicls.__xor__(Vector(self), Vector(other)))
else:
return self.__class__._convert(super(Vector, self).cross(other))
def axis(self, other, normalize=False):
""" u.axis(v) <==> angle(u, v) --> Vector
Returns the axis of rotation from u to v as the vector n = u ^ v
if the normalize keyword argument is set to True, n is also normalized """
if isinstance(other, Vector):
if normalize:
return self.__class__._convert(Vector.apicls.__xor__(Vector(self), Vector(other)).normal())
else:
return self.__class__._convert(Vector.apicls.__xor__(Vector(self), Vector(other)))
else:
return self.__class__._convert(super(Vector, self).axis(other, normalize))
def angle(self, other):
""" u.angle(v) <==> angle(u, v) --> float
Returns the angle (in radians) between the two vectors u and v
Note that this angle is not signed, use axis to know the direction of the rotation """
if isinstance(other, Vector):
return Vector.apicls.angle(Vector(self), Vector(other))
else:
return super(Vector, self).angle(other)
# methods without an api equivalent
# cotan on MVectors only takes 2 arguments
def cotan(self, other):
""" u.cotan(v) <==> cotan(u, v) --> float :
cotangent of the a, b angle, a and b should be MVectors"""
return VectorN.cotan(self, other)
# rest derived from VectorN class
class FloatVector(Vector):
""" A 3 dimensional vector class that wraps Maya's api FloatVector class,
It behaves identically to Vector, but it also derives from api's FloatVector
to keep api methods happy
"""
apicls = _api.MFloatVector
# Point specific functions
def planar(p, *args, **kwargs):
""" planar(p[, q, r, s (...), tol=tolerance]) --> bool
Returns True if all provided MPoints are planar within given tolerance """
if not isinstance(p, Point):
try:
p = Point(p)
except:
raise TypeError, "%s is not convertible to type Point, planar is only defined for n MPoints" % (util.clsname(p))
return p.planar(*args, **kwargs)
def center(p, *args):
""" center(p[, q, r, s (...)]) --> Point
Returns the Point that is the center of p, q, r, s (...) """
if not isinstance(p, Point):
try:
p = Point(p)
except:
raise TypeError, "%s is not convertible to type Point, center is only defined for n MPoints" % (util.clsname(p))
return p.center(*args)
def bWeights(p, *args):
""" bWeights(p[, p0, p1, (...), pn]) --> tuple
Returns a tuple of (n0, n1, ...) normalized barycentric weights so that n0*p0 + n1*p1 + ... = p """
if not isinstance(p, Point):
try:
p = Point(p)
except:
raise TypeError, "%s is not convertible to type Point, bWeights is only defined for n MPoints" % (util.clsname(p))
return p.bWeights(*args)
class Point(Vector):
""" A 4 dimensional vector class that wraps Maya's api Point class,
"""
apicls = _api.MPoint
cnames = ('x', 'y', 'z', 'w')
shape = (4,)
def __melobject__(self):
"""Special method for returning a mel-friendly representation. In this case, a cartesian 3D point """
return self.cartesian()
# # base methods are inherited from Vector
# we only show the x, y, z components on an iter
def __len__(self):
l = len(self.data)
if self.w == 1.0:
l -= 1
return l
def __iter__(self, *args, **kwargs):
""" Iterate on the api components """
l = len(self)
for c in list(self.apicls.__iter__(self.data))[:l]:
yield c
    # modified operators: when adding 2 Points, consider the second as a Vector
def __add__(self, other):
""" u.__add__(v) <==> u+v
Returns the result of the addition of u and v if v is convertible to a VectorN (element-wise addition),
adds v to every component of u if v is a scalar """
        # problem with coerce when delegating to VectorN; either redefine coerce for Point or find another fix
# if isinstance(other, Point) :
# other = Vector(other)
try:
other = Vector(other)
except:
pass
try:
return self.__class__._convert(self.apicls.__add__(self, other))
except:
return self.__class__._convert(super(Vector, self).__add__(other))
def __radd__(self, other):
""" u.__radd__(v) <==> v+u
Returns the result of the addition of u and v if v is convertible to a VectorN (element-wise addition),
adds v to every component of u if v is a scalar """
if isinstance(other, Point):
other = Vector(other)
try:
return self.__class__._convert(self.apicls.__radd__(self, other))
except:
return self.__class__._convert(super(Point, self).__radd__(other))
def __iadd__(self, other):
""" u.__iadd__(v) <==> u += v
In place addition of u and v, see __add__ """
try:
return self.__class__(self.__add__(other))
except:
return NotImplemented
# specific api methods
def cartesianize(self):
""" p.cartesianize() --> Point
If the point instance p is of the form P(W*x, W*y, W*z, W), for some scale factor W != 0,
then it is reset to be P(x, y, z, 1).
This will only work correctly if the point is in homogenous form or cartesian form.
If the point is in rational form, the results are not defined. """
return self.__class__(self.apicls.cartesianize(self))
def cartesian(self):
""" p.cartesian() --> Point
Returns the cartesianized version of p, without changing p. """
t = copy.deepcopy(self)
self.apicls.cartesianize(t)
return t
def rationalize(self):
""" p.rationalize() --> Point
If the point instance p is of the form P(W*x, W*y, W*z, W) (ie. is in homogenous or (for W==1) cartesian form),
for some scale factor W != 0, then it is reset to be P(x, y, z, W).
This will only work correctly if the point is in homogenous or cartesian form.
If the point is already in rational form, the results are not defined. """
return self.__class__(self.apicls.rationalize(self))
def rational(self):
""" p.rational() --> Point
Returns the rationalized version of p, without changing p. """
t = copy.deepcopy(self)
self.apicls.rationalize(t)
return t
def homogenize(self):
""" p.homogenize() --> Point
If the point instance p is of the form P(x, y, z, W) (ie. is in rational or (for W==1) cartesian form),
for some scale factor W != 0, then it is reset to be P(W*x, W*y, W*z, W). """
return self.__class__(self.apicls.homogenize(self))
def homogen(self):
""" p.homogen() --> Point
Returns the homogenized version of p, without changing p. """
t = copy.deepcopy(self)
self.apicls.homogenize(t)
return t
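    # Worked example of the three forms (illustrative values):
    #     P(2.0, 4.0, 6.0, 2.0)                  # homogeneous, W == 2
    #         .cartesianize() --> P(1.0, 2.0, 3.0, 1.0)   # divide by W, reset W to 1
    #         .rationalize()  --> P(1.0, 2.0, 3.0, 2.0)   # divide by W, keep W
    #     P(1.0, 2.0, 3.0, 2.0)                  # rational, W == 2
    #         .homogenize()   --> P(2.0, 4.0, 6.0, 2.0)   # multiply back by W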
    # additional methods
def isEquivalent(self, other, tol=None):
""" Returns true if both arguments considered as Point are equal within the specified tolerance """
if tol is None:
tol = _api.MPoint_kTol
try:
nself, nother = coerce(self, other)
except:
return False
if isinstance(nself, Point):
return bool(nself.apicls.isEquivalent(nself, nother, tol))
else:
return bool(super(Point, nself).isEquivalent(nother, tol))
def axis(self, start, end, normalize=False):
""" a.axis(b, c) --> Vector
Returns the axis of rotation from point b to c around a as the vector n = (b-a)^(c-a)
if the normalize keyword argument is set to True, n is also normalized """
return Vector.axis(start - self, end - self, normalize=normalize)
def angle(self, start, end):
""" a.angle(b, c) --> float
Returns the angle (in radians) of rotation from point b to c around a.
Note that this angle is not signed, use axis to know the direction of the rotation """
return Vector.angle(start - self, end - self)
def cotan(self, start, end):
""" a.cotan(b, c) --> float :
cotangent of the (b-a), (c-a) angle, a, b, and c should be MPoints representing points a, b, c"""
return VectorN.cotan(start - self, end - self)
def planar(self, *args, **kwargs):
""" p.planar(q, r, s (...), tol=tolerance) --> bool
Returns True if all provided points are planar within given tolerance """
if len(args) > 2:
tol = kwargs.get('tol', None)
n = (args[0] - self) ^ (args[1] - self)
return reduce(operator.and_, map(lambda x: n.isParallel(x, tol), [(args[0] - self) ^ (a - self) for a in args[2:]]), True)
else:
return True
def center(self, *args):
""" p.center(q, r, s (...)) --> Point
Returns the Point that is the center of p, q, r, s (...) """
return sum((self,) + args) / float(len(args) + 1)
def bWeights(self, *args):
""" p.bWeights(p0, p1, (...), pn) --> tuple
Returns a tuple of (n0, n1, ...) normalized barycentric weights so that n0*p0 + n1*p1 + ... = p.
This method works for n points defining a concave or convex n sided face,
always returns positive normalized weights, and is continuous on the face limits (on the edges),
but the n points must be coplanar, and p must be inside the face delimited by (p0, ..., pn) """
if args:
p = self
q = list(args)
np = len(q)
w = VectorN(0.0, size=np)
weightSum = 0.0
pOnEdge = False
tol = _api.MPoint_kTol
# all args should be MPoints
for i in xrange(np):
if not isinstance(q[i], Point):
try:
q[i] = Point(q[i])
except:
raise TypeError, "cannot convert %s to Point, bWeights is defined for n MPoints" % (util.clsname(q[i]))
            # if p sits on an edge, it's a limit case and there is an easy solution:
            # all weights are 0 except for the 2 edge end points
for i in xrange(np):
next = (i + 1) % np
e = ((q[next] - q[i]) ^ (p - q[i])).sqlength()
l = (q[next] - q[i]).sqlength()
if e <= (tol * l):
if l < tol:
                        # p is on a zero-length edge: point and next point are on top of each other, and so is p
w[i] = 0.5
w[next] = 0.5
else:
# p is somewhere on that edge between point and next point
di = (p - q[i]).length()
w[next] = float(di / sqrt(l))
w[i] = 1.0 - w[next]
                    # in both cases update the weight sum and mark p as being on an edge;
                    # the problem is solved
weightSum += 1.0
pOnEdge = True
break
# If p not on edge, use the cotangents method
if not pOnEdge:
for i in xrange(np):
prev = (i + np - 1) % np
next = (i + 1) % np
lenSq = (p - q[i]).sqlength()
w[i] = (q[i].cotan(p, q[prev]) + q[i].cotan(p, q[next])) / lenSq
weightSum += w[i]
# then normalize result
if abs(weightSum):
w /= weightSum
else:
                raise ValueError, "failed to compute bWeights for %s and %s.\nThe point that bWeights are computed for must be inside the planar face delimited by the n argument points" % (self, args)
return tuple(w)
else:
return ()
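    # A self-contained 2D sketch (plain tuples, not MPoints) of the cotangent
    # weights computed above, checked on the unit square:
    #
    #   def _cotan2d(a, b, c):  # cot of the angle at a between (b-a) and (c-a)
    #       ux, uy = b[0] - a[0], b[1] - a[1]
    #       vx, vy = c[0] - a[0], c[1] - a[1]
    #       return (ux * vx + uy * vy) / abs(ux * vy - uy * vx)
    #   def _bweights2d(p, q):
    #       n = len(q)
    #       w = [(_cotan2d(q[i], p, q[i - 1]) + _cotan2d(q[i], p, q[(i + 1) % n]))
    #            / ((p[0] - q[i][0]) ** 2 + (p[1] - q[i][1]) ** 2)
    #            for i in xrange(n)]
    #       s = sum(w)
    #       return [x / s for x in w]
    #   square = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
    #   _bweights2d((0.5, 0.5), square)   # [0.25, 0.25, 0.25, 0.25]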
class FloatPoint(Point):
""" A 4 dimensional vector class that wraps Maya's api FloatPoint class,
It behaves identically to Point, but it also derives from api's FloatPoint
to keep api methods happy
"""
apicls = _api.MFloatPoint
class Color(Vector):
""" A 4 dimensional vector class that wraps Maya's api Color class,
It stores the r, g, b, a components of the color, as normalized (Python) floats
"""
apicls = _api.MColor
cnames = ('r', 'g', 'b', 'a')
shape = (4,)
# modes = ('rgb', 'hsv', 'cmy', 'cmyk')
modes = ('rgb', 'hsv')
# constants
red = _api.MColor(1.0, 0.0, 0.0)
green = _api.MColor(0.0, 1.0, 0.0)
blue = _api.MColor(0.0, 0.0, 1.0)
white = _api.MColor(1.0, 1.0, 1.0)
black = _api.MColor(0.0, 0.0, 0.0)
opaque = _api.MColor(0.0, 0.0, 0.0, 1.0)
clear = _api.MColor(0.0, 0.0, 0.0, 0.0)
# static methods
@staticmethod
def rgbtohsv(c):
c = tuple(c)
return tuple(colorsys.rgb_to_hsv(*clamp(c[:3])) + c[3:4])
@staticmethod
def hsvtorgb(c):
c = tuple(c)
# return colorsys.hsv_to_rgb(clamp(c[0]), clamp(c[1]), clamp(c[2]))
return tuple(colorsys.hsv_to_rgb(*clamp(c[:3])) + c[3:4])
# TODO : could define rgb and hsv iterators and allow __setitem__ and __getitem__ on these iterators
# like (it's more simple) it's done in ArrayIter
def _getrgba(self):
return tuple(self)
def _setrgba(self, value):
if not hasattr(value, '__iter__'):
# the way api interprets a single value
# value = (None, None, None, value)
value = (value,) * 4
l = list(self)
for i, v in enumerate(value[:4]):
if v is not None:
l[i] = float(v)
        self.assign(l)
    rgba = property(_getrgba, _setrgba, None, "The r,g,b,a Color components")
def _getrgb(self):
return self.rgba[:3]
def _setrgb(self, value):
if not hasattr(value, '__iter__'):
value = (value,) * 3
self.rgba = value[:3]
    rgb = property(_getrgb, _setrgb, None, "The r,g,b Color components")
def _gethsva(self):
return tuple(Color.rgbtohsv(self))
def _sethsva(self, value):
if not hasattr(value, '__iter__'):
# the way api interprets a single value
# value = (None, None, None, value)
value = (value,) * 4
l = list(Color.rgbtohsv(self))
for i, v in enumerate(value[:4]):
if v is not None:
l[i] = float(v)
        self.assign(Color.hsvtorgb(l))
    hsva = property(_gethsva, _sethsva, None, "The h,s,v,a Color components")
def _gethsv(self):
return tuple(Color.rgbtohsv(self))[:3]
def _sethsv(self, value):
if not hasattr(value, '__iter__'):
value = (value,) * 3
self.hsva = value[:3]
    hsv = property(_gethsv, _sethsv, None, "The h,s,v Color components")
def _geth(self):
return self.hsva[0]
def _seth(self, value):
self.hsva = (value, None, None, None)
    h = property(_geth, _seth, None, "The h Color component")
def _gets(self):
return self.hsva[1]
def _sets(self, value):
self.hsva = (None, value, None, None)
    s = property(_gets, _sets, None, "The s Color component")
def _getv(self):
return self.hsva[2]
def _setv(self, value):
self.hsva = (None, None, value, None)
    v = property(_getv, _setv, None, "The v Color component")
    # __new__ is inherited from Point/Vector, need to override __init__ to accept hsv mode though
def __init__(self, *args, **kwargs):
""" Init a Color instance
Can pass one argument being another Color instance , or the color components """
cls = self.__class__
mode = kwargs.get('mode', None)
if mode is not None and mode not in cls.modes:
raise ValueError, "unknown mode %s for %s" % (mode, util.clsname(self))
# can also use the form <componentname>=<number>
# for now supports only rgb and hsv flags
hsvflag = {}
rgbflag = {}
for a in 'hsv':
if a in kwargs:
hsvflag[a] = kwargs[a]
for a in 'rgb':
if a in kwargs:
rgbflag[a] = kwargs[a]
# can't mix them
if hsvflag and rgbflag:
raise ValueError, "can not mix r,g,b and h,s,v keyword arguments in a %s declaration" % util.clsname(self)
        # if no mode specified, guess from which keyword arguments were used, else use 'rgb' as default
if mode is None:
if hsvflag:
mode = 'hsv'
else:
mode = 'rgb'
# can't specify a mode and use keywords of other modes
        if mode != 'hsv' and hsvflag:
            raise ValueError, "Can not use h,s,v keyword arguments while specifying %s mode in %s" % (mode, util.clsname(self))
        elif mode != 'rgb' and rgbflag:
            raise ValueError, "Can not use r,g,b keyword arguments while specifying %s mode in %s" % (mode, util.clsname(self))
# NOTE: do not try to use mode with _api.Color, it seems bugged as of 2008
#import colorsys
#colorsys.rgb_to_hsv(0.0, 0.0, 1.0)
## Result: (0.66666666666666663, 1.0, 1.0) #
#c = _api.Color(_api.Color.kHSV, 0.66666666666666663, 1.0, 1.0)
# print "# Result: ",c[0], c[1], c[2], c[3]," #"
## Result: 1.0 0.666666686535 1.0 1.0 #
#c = _api.Color(_api.Color.kHSV, 0.66666666666666663*360, 1.0, 1.0)
# print "# Result: ",c[0], c[1], c[2], c[3]," #"
## Result: 1.0 240.0 1.0 1.0 #
#colorsys.hsv_to_rgb(0.66666666666666663, 1.0, 1.0)
## Result: (0.0, 0.0, 1.0) #
        # we'll use Color only to store RGB values internally and do the conversion at read/write if desired,
        # which I think makes more sense anyway
        # quantize (255, 65535; no quantize means colors are 0.0-1.0 float values)
        # Initializing the api's Color with int values also does not always behave as expected,
        # so we quantize first and always use a float init
quantize = kwargs.get('quantize', None)
if quantize is not None:
try:
quantize = float(quantize)
except:
raise ValueError, "quantize must be a numeric value, not %s" % (util.clsname(quantize))
        # can be initialized with a single argument (other Color, Vector, VectorN)
if len(args) == 1:
args = args[0]
            # we don't rely much on the Color api as it doesn't seem totally finished, and do some things directly here
if isinstance(args, self.__class__) or isinstance(args, self.apicls):
# alternatively could be just ignored / output as warning
if quantize:
                    raise ValueError, "Can not quantize a Color argument, a Color is always stored internally as float color"
if mode == 'rgb':
args = VectorN(args)
elif mode == 'hsv':
args = VectorN(cls.rgbtohsv(args))
else:
# single alpha value, as understood by api will break coerce behavior in operations
# where other operand is a scalar
# if not hasattr(args, '__iter__') :
# args = VectorN(0.0, 0.0, 0.0, args)
if hasattr(args, '__len__'):
shape = (min(len(args), cls.size),)
else:
shape = cls.shape
args = VectorN(args, shape=shape)
# quantize if needed
if quantize:
args /= quantize
# pad to a full Color size
args.stack(self[len(args):])
# apply keywords arguments, and convert if mode is not rgb
if mode == 'rgb':
if rgbflag:
for i, a in enumerate('rgb'):
if a in rgbflag:
if quantize:
args[i] = float(rgbflag[a]) / quantize
else:
args[i] = float(rgbflag[a])
elif mode == 'hsv':
if hsvflag:
for i, a in enumerate('hsv'):
if a in hsvflag:
if quantize:
args[i] = float(hsvflag[a]) / quantize
else:
args[i] = float(hsvflag[a])
args = VectorN(cls.hsvtorgb(args))
# finally alpha keyword
a = kwargs.get('a', None)
if a is not None:
if quantize:
args[-1] = float(a) / quantize
else:
args[-1] = float(a)
try:
self.assign(args)
except:
msg = ", ".join(map(lambda x, y: x + "=<" + util.clsname(y) + ">", mode, args))
raise TypeError, "in %s(%s), at least one of the components is of an invalid type, check help(%s) " % (util.clsname(self), msg, util.clsname(self))
def __melobject__(self):
"""Special method for returning a mel-friendly representation. In this case, a 3-component color (RGB) """
return [self.r, self.g, self.b]
# overriden operators
# defined for two MColors only
def __add__(self, other):
""" c.__add__(d) <==> c+d
Returns the result of the addition of MColors c and d if d is convertible to a Color,
adds d to every component of c if d is a scalar """
        # problem with coerce when delegating to VectorN; either redefine coerce for Point or find another fix
# if isinstance(other, Point) :
# other = Vector(other)
try:
other = Color(other)
except:
pass
try:
return self.__class__._convert(self.apicls.__add__(self, other))
except:
return self.__class__._convert(super(Vector, self).__add__(other))
def __radd__(self, other):
""" c.__radd__(d) <==> d+c
Returns the result of the addition of MColors c and d if d is convertible to a Color,
adds d to every component of c if d is a scalar """
try:
other = Color(other)
except:
pass
try:
return self.__class__._convert(self.apicls.__radd__(self, other))
except:
return self.__class__._convert(super(Point, self).__radd__(other))
def __iadd__(self, other):
""" c.__iadd__(d) <==> c += d
In place addition of c and d, see __add__ """
try:
return self.__class__(self.__add__(other))
except:
return NotImplemented
    def __sub__(self, other):
        """ c.__sub__(d) <==> c-d
        Returns the result of the subtraction of Color d from c if d is convertible to a Color,
        subtracts d from every component of c if d is a scalar """
try:
other = Color(other)
except:
pass
try:
return self.__class__._convert(self.apicls.__sub__(self, other))
except:
return self.__class__._convert(super(Vector, self).__sub__(other))
def __rsub__(self, other):
""" c.__rsub__(d) <==> d-c
        Returns the result of the subtraction of Color c from d if d is convertible to a Color,
        replaces every component c[i] of c by d-c[i] if d is a scalar """
try:
other = Color(other)
except:
pass
try:
return self.__class__._convert(self.apicls.__rsub__(self, other))
except:
return self.__class__._convert(super(Point, self).__rsub__(other))
def __isub__(self, other):
""" c.__isub__(d) <==> c -= d
        In place subtraction of d from c, see __sub__ """
try:
return self.__class__(self.__sub__(other))
except:
return NotImplemented
# action depends on second object type
# TODO : would be nice to define LUT classes and allow MColor * LUT transform
# overloaded operators
def __mul__(self, other):
""" a.__mul__(b) <==> a*b
If b is a 1D sequence (Array, VectorN, Color), __mul__ is mapped to element-wise multiplication,
If b is a MatrixN, __mul__ is similar to Point a by MatrixN b multiplication (post multiplication or transformation of a by b),
multiplies every component of a by b if b is a single numeric value """
if isinstance(other, MatrixN):
# will defer to MatrixN rmul
return NotImplemented
else:
# will defer to Array.__mul__
return Array.__mul__(self, other)
def __rmul__(self, other):
""" a.__rmul__(b) <==> b*a
        If b is a 1D sequence (Array, VectorN, Color), __rmul__ is mapped to element-wise multiplication,
        If b is a MatrixN, __rmul__ is similar to MatrixN b by Point a matrix multiplication,
multiplies every component of a by b if b is a single numeric value """
if isinstance(other, MatrixN):
# will defer to MatrixN mul
return NotImplemented
else:
# will defer to Array.__rmul__
return Array.__rmul__(self, other)
def __imul__(self, other):
""" a.__imul__(b) <==> a *= b
In place multiplication of VectorN a and b, see __mul__, result must fit a's type """
res = self * other
if isinstance(res, self.__class__):
return self.__class__(res)
else:
            raise TypeError, "result of in place multiplication of %s by %s is not a %s" % (util.clsname(self), util.clsname(other), util.clsname(self))
# additionnal methods, to be extended
    def over(self, other):
        """ c1.over(c2): Composites c1 over c2 using c1's alpha; the resulting color has the alpha of c2 """
if isinstance(other, Color):
a = self.a
return Color(Vector(other).blend(Vector(self), self.a), a=other.a)
else:
raise TypeError, "over is defined for Color instances, not %s" % (util.clsname(other))
# return Vector instead ? Keeping alpha doesn't make much sense
    def premult(self):
        """ Premultiplies Color r, g and b by its alpha and resets alpha to 1.0 """
return self.__class__(Vector(self) * self.a)
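    # A plain-Python sketch of 'over' (assuming straight, non-premultiplied
    # colors): each rgb channel is lerped by the foreground alpha while the
    # background alpha is kept.
    #
    #   def _over(fg, bg):  # fg, bg as (r, g, b, a) tuples
    #       a = fg[3]
    #       return tuple(bg[i] + (fg[i] - bg[i]) * a for i in range(3)) + (bg[3],)
    #   _over((1.0, 0.0, 0.0, 0.5), (0.0, 0.0, 1.0, 1.0))  # (0.5, 0.0, 0.5, 1.0)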
    def gamma(self, g):
        """ c.gamma(g) applies gamma correction g to Color c; g can be a scalar, and then will be applied to r, g, b,
        or an iterable of up to 3 (r, g, b) independent gamma correction values """
if not hasattr(g, '__iter__'):
g = (g,) * 3 + (1.0,)
else:
g = g[:3] + (1.0,) * (4 - len(g[:3]))
return gamma(self, g)
def hsvblend(self, other, weight=0.5):
""" c1.hsvblend(c2) --> Color
Returns the result of blending c1 with c2 in hsv space, using the given weight """
c1 = list(self.hsva)
c2 = list(other.hsva)
if abs(c2[0] - c1[0]) >= 0.5:
if abs(c2[0] - c1[0]) == 0.5:
c1[1], c2[1] = 0.0, 0.0
if c1[0] > 0.5:
c1[0] -= 1.0
if c2[0] > 0.5:
c2[0] -= 1.0
c = blend(c1, c2, weight=weight)
if c[0] < 0.0:
c[0] += 1.0
return self.__class__(c, mode='hsv')
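    # Sketch of the hue wrap-around handled above: blending h=0.9 with h=0.1
    # should pass through red at h=0.0, not through green at h=0.5.
    #
    #   h1, h2 = 0.9, 0.1
    #   if abs(h2 - h1) >= 0.5:         # hues are on opposite sides of 0.0
    #       if h1 > 0.5: h1 -= 1.0
    #       if h2 > 0.5: h2 -= 1.0
    #   h = (h1 + h2) / 2.0             # ~0.0 (wraps back into [0, 1) if < 0)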
# to specify space of transforms
class Space(_api.MSpace):
apicls = _api.MSpace
__metaclass__ = _factories.MetaMayaTypeWrapper
pass
Spaces = Space.Space
def equivalentSpace(space1, space2, rotationOnly=False):
'''Compare the two given space values to see if they are equal
Parameters
----------
space1 : int or str
the first space to compare (may be either the integer enum value, or the
api enum name - ie, "kPostTransform" - or the pymel enum name - ie,
"postTransform" )
space2 : int or str
        the second space to compare (may be either the integer enum value, or
the api enum name - ie, "kPostTransform" - or the pymel enum name - ie,
"postTransform")
rotationOnly : bool
If true, then compare the spaces, assuming we are only considering
rotation - in rotation, transform is the same as preTransform/object
(the reason being that in maya, preTransform means rotation +
translation are both defined in the preTransform/object coordinate
system, while transform means rotation is defined in preTransform/object
coordinates, while translate is given in the postTransform space...
which matches the way maya applies transforms)
'''
translated = []
for space in space1, space2:
space = _factories.ApiArgUtil.castInputEnum('MSpace', 'Space', space)
if rotationOnly:
# for the purposes of rotations, maya treats transform and
# preTransform/object as the same (the reason being that in maya,
# preTransform means both rotation + translation are both defined in
# the preTransform/object coordinate system, while transform means
# rotation is defined in preTransform/object coordinates, while
# translate is given in the postTransform space... which matches the
# way maya applies transforms)
if space == _api.MSpace.kTransform:
space = _api.MSpace.kPreTransform
        translated.append(space)
    return translated[0] == translated[1]
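# Illustrative checks (inside Maya; both enum names and ints are accepted):
#     equivalentSpace('world', _api.MSpace.kWorld)                 # True
#     equivalentSpace('transform', 'object')                       # False
#     equivalentSpace('transform', 'object', rotationOnly=True)    # True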
# kInvalid
# kTransform
# Transform matrix (relative) space
# kPreTransform
# Pre-transform matrix (geometry)
# kPostTransform
# Post-transform matrix (world) space
# kWorld
# transform in world space
# kObject
# Same as pre-transform space
# kLast
# sadly TransformationMatrix.RotationOrder and EulerRotation.RotationOrder don't match
# class MRotationOrder(int):
# pass
# kInvalid
# kXYZ
# kYZX
# kZXY
# kXZY
# kYXZ
# kZYX
# kLast
# kXYZ
# kYZX
# kZXY
# kXZY
# kYXZ
# kZYX
# functions that work on MatrixN (det(), inv(), ...) inherited from arrays
# and properly defer to the class methods
# For row, column order, see the definition of a TransformationMatrix in docs :
# T = | 1 0 0 0 |
# | 0 1 0 0 |
# | 0 0 1 0 |
# | tx ty tz 1 |
# and m(r, c) should return value of cell at r row and c column :
# t = _api.MTransformationMatrix()
# t.setTranslation(_api.MVector(1, 2, 3), _api.MSpace.kWorld)
# m = t.asMatrix()
# m(3,0)
# 1.0
# m(3,1)
# 2.0
# m(3,2)
# 3.0
class Matrix(MatrixN):
"""
A 4x4 transformation matrix based on api Matrix
>>> from pymel.all import *
>>> import pymel.core.datatypes as dt
>>>
>>> i = dt.Matrix()
>>> print i.formated()
[[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0]]
>>> v = dt.Matrix(1, 2, 3)
>>> print v.formated()
[[1.0, 2.0, 3.0, 0.0],
[1.0, 2.0, 3.0, 0.0],
[1.0, 2.0, 3.0, 0.0],
[1.0, 2.0, 3.0, 0.0]]
"""
__metaclass__ = MetaMayaArrayTypeWrapper
apicls = _api.MMatrix
shape = (4, 4)
cnames = ('a00', 'a01', 'a02', 'a03',
'a10', 'a11', 'a12', 'a13',
'a20', 'a21', 'a22', 'a23',
'a30', 'a31', 'a32', 'a33')
# constants
identity = _api.MMatrix()
def __new__(cls, *args, **kwargs):
shape = kwargs.get('shape', None)
ndim = kwargs.get('ndim', None)
size = kwargs.get('size', None)
# will default to class constant shape = (4, 4), so it's just an error check to catch invalid shapes,
# as no other option is actually possible on Matrix, but this method could be used to allow wrapping
# of Maya array classes that can have a variable number of elements
shape, ndim, size = cls._expandshape(shape, ndim, size)
new = cls.apicls.__new__(cls)
cls.apicls.__init__(new)
return new
    def __init__(self, *args, **kwargs):
        """ __init__ method, valid for Matrix and derived classes """
cls = self.__class__
if args:
# allow both forms for arguments
if len(args) == 1 and hasattr(args[0], '__iter__'):
args = args[0]
# shape = kwargs.get('shape', None)
# ndim = kwargs.get('ndim', None)
# size = kwargs.get('size', None)
# if shape is not None or ndim is not None or size is not None :
# shape, ndim, size = cls._expandshape(shape, ndim, size)
# args = MatrixN(args, shape=shape, ndim=ndim, size=size)
# shortcut when a direct api init is possible
try:
self.assign(args)
except:
super(MatrixN, self).__init__(*args)
# value = list(Matrix(value, shape=self.shape).flat)
# data = self.apicls()
# _api.MScriptUtil.createMatrixFromList ( value, data )
if hasattr(cls, 'cnames') and len(set(cls.cnames) & set(kwargs)):
# can also use the form <componentname>=<number>
l = list(self.flat)
setcomp = False
for i, c in enumerate(cls.cnames):
if c in kwargs:
if float(l[i]) != float(kwargs[c]):
l[i] = float(kwargs[c])
setcomp = True
if setcomp:
try:
self.assign(l)
except:
msg = ", ".join(map(lambda x, y: x + "=<" + util.clsname(y) + ">", cls.cnames, l))
raise TypeError, "in %s(%s), at least one of the components is of an invalid type, check help(%s) " % (cls.__name__, msg, cls.__name__)
# for compatibility with base classes Array that actually hold a nested list in their _data attribute
    # here, there is no _data attribute as we subclass _api.MMatrix directly, thus m.data is m
# for wraps
def _getdata(self):
return self
def _setdata(self, value):
self.assign(value)
def _deldata(self):
if hasattr(self.apicls, 'clear'):
self.apicls.clear(self)
else:
raise TypeError, "cannot clear stored elements of %s" % (self.__class__.__name__)
data = property(_getdata, _setdata, _deldata, "The Matrix/FloatMatrix/TransformationMatrix/Quaternion/EulerRotation data")
    # set properties for easy access to translation / rotation / scale of a Matrix or derived class
    # some of these will only yield dependable results if Matrix is a TransformationMatrix and some
    # will always be zero for some classes (ie only rotation has a value on a Quaternion)
def _getTranslate(self):
t = TransformationMatrix(self)
return Vector(t.getTranslation(_api.MSpace.kTransform))
def _setTranslate(self, value):
t = TransformationMatrix(self)
t.setTranslation(Vector(value), _api.MSpace.kTransform)
self.assign(t.asMatrix())
translate = property(_getTranslate, _setTranslate, None, "The translation expressed in this Matrix, in transform space")
def _getRotate(self):
t = TransformationMatrix(self)
return Quaternion(t.apicls.rotation(t))
def _setRotate(self, value):
t = TransformationMatrix(self)
q = Quaternion(value)
t.rotateTo(q)
# values = (q.x, q.y, q.z, q.w)
# t.setRotationQuaternion(q.x, q.y, q.z, q.w)
self.assign(t.asMatrix())
rotate = property(_getRotate, _setRotate, None, "The rotation expressed in this Matrix, in transform space")
def _getScale(self):
t = TransformationMatrix(self)
return Vector(t.getScale(_api.MSpace.kTransform))
def _setScale(self, value):
t = TransformationMatrix(self)
t.setScale(value, _api.MSpace.kTransform)
self.assign(t.asMatrix())
scale = property(_getScale, _setScale, None, "The scale expressed in this Matrix, in transform space")
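    # Illustrative round trip through the properties above (inside Maya):
    #     m = Matrix()
    #     m.translate = (1, 2, 3)
    #     m.translate               # dt.Vector([1.0, 2.0, 3.0])
    #     m.scale = (2.0, 2.0, 2.0)
    #     m.rotate                  # dt.Quaternion([0.0, 0.0, 0.0, 1.0])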
def __melobject__(self):
"""Special method for returning a mel-friendly representation. In this case, a flat list of 16 values """
return [x for x in self.flat]
# some Matrix derived classes can actually be represented as matrix but not stored
# internally as such by the API
def asMatrix(self, percent=None):
"The matrix representation for this Matrix/TransformationMatrix/Quaternion/EulerRotation instance"
if percent is not None and percent != 1.0:
if type(self) is not TransformationMatrix:
self = TransformationMatrix(self)
return Matrix(self.apicls.asMatrix(self, percent))
else:
if type(self) is Matrix:
return self
else:
return Matrix(self.apicls.asMatrix(self))
matrix = property(asMatrix, None, None, "The Matrix representation for this Matrix/TransformationMatrix/Quaternion/EulerRotation instance")
# overloads for assign and get though standard way should be to use the data property
# to access stored values
def assign(self, value):
# don't accept instances as assign works on exact _api.Matrix type
data = None
if type(value) == self.apicls or type(value) == type(self):
data = value
elif hasattr(value, 'asMatrix'):
data = value.asMatrix()
else:
value = list(MatrixN(value).flat)
if len(value) == self.size:
data = self.apicls()
if isinstance(data, _api.MFloatMatrix):
_api.MScriptUtil.createFloatMatrixFromList(value, data)
elif isinstance(data, _api.MMatrix):
_api.MScriptUtil.createMatrixFromList(value, data)
else:
tmp = _api.MMatrix()
_api.MScriptUtil.createMatrixFromList(value, tmp)
data = self.apicls(tmp)
else:
raise TypeError, "cannot assign %s to a %s" % (value, util.clsname(self))
self.apicls.assign(self, data)
return self
# API get, actually not faster than pulling self[i] for such a short structure
def get(self):
""" Wrap the Matrix api get method """
mat = self.matrix
return tuple(tuple(_api.MScriptUtil.getDoubleArrayItem(_api.MMatrix.__getitem__(mat, r), c) for c in xrange(Matrix.shape[1])) for r in xrange(Matrix.shape[0]))
# ptr = _api.Matrix(self.matrix).matrix
# return tuple(tuple(_api.MScriptUtil.getDouble2ArrayItem ( ptr, r, c) for c in xrange(Matrix.shape[1])) for r in xrange(Matrix.shape[0]))
def __len__(self):
""" Number of components in the Matrix instance """
return self.apicls.__len__(self)
# iterator override
    # TODO : support for optional __iter__ arguments
def __iter__(self, *args, **kwargs):
""" Iterate on the Matrix rows """
return self.apicls.__iter__(self.data)
    # contains is inherited from Array contains
# __getitem__ / __setitem__ override
def __getitem__(self, index):
""" m.__getitem__(index) <==> m[index]
Get component index value from self.
index can be a single numeric value or slice, thus one or more rows will be returned,
or a row,column tuple of numeric values / slices """
m = MatrixN(self)
# print list(m)
return m.__getitem__(index)
# return super(MatrixN, self).__getitem__(index)
    # __getslice__ is deprecated; __getitem__ accepts slices anyway
def __getslice__(self, start, end):
return self.__getitem__(slice(start, end))
    # _api.MMatrix has no __setitem__ method, so reassign through MatrixN
def __setitem__(self, index, value):
""" m.__setitem__(index, value) <==> m[index] = value
Set value of component index on self
index can be a single numeric value or slice, thus one or more rows will be returned,
or a row,column tuple of numeric values / slices """
m = MatrixN(self)
m.__setitem__(index, value)
self.assign(m)
    # __setslice__ is deprecated; __setitem__ accepts slices anyway
def __setslice__(self, start, end, value):
self.__setitem__(slice(start, end), value)
def __delitem__(self, index):
""" Cannot delete from a class with a fixed shape """
raise TypeError, "deleting %s from an instance of class %s will make it incompatible with class shape" % (index, clsname(self))
def __delslice__(self, start, end):
self.__delitem__(slice(start, end))
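    # Illustrative indexing sketch (assumes a Maya session): component access is
    # delegated to MatrixN, so rows, (row, column) pairs and slices all work.
    #   m = Matrix()
    #   m[0, 0]        # -> 1.0
    #   m[0:2, 0:3]    # -> a 2x3 sub-array
    #   m[0, 0] = 2.0  # round-trips through MatrixN and assign()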
# TODO : wrap double Matrix:: operator() (unsigned int row, unsigned int col ) const
    # common operators inherited from MatrixN
# operators using the Maya API when applicable
def __eq__(self, other):
""" m.__eq__(v) <==> m == v
Equivalence test """
try:
return bool(self.apicls.__eq__(self, other))
except:
return bool(super(Matrix, self).__eq__(other))
def __ne__(self, other):
""" m.__ne__(v) <==> m != v
Equivalence test """
return (not self.__eq__(other))
def __neg__(self):
""" m.__neg__() <==> -m
The unary minus operator. Negates the value of each of the components of m """
return self.__class__(self.apicls.__neg__(self))
def __add__(self, other):
""" m.__add__(v) <==> m+v
Returns the result of the addition of m and v if v is convertible to a MatrixN (element-wise addition),
adds v to every component of m if v is a scalar """
try:
return self.__class__._convert(self.apicls.__add__(self, other))
except:
return self.__class__._convert(super(Matrix, self).__add__(other))
def __radd__(self, other):
""" m.__radd__(v) <==> v+m
Returns the result of the addition of m and v if v is convertible to a MatrixN (element-wise addition),
adds v to every component of m if v is a scalar """
try:
return self.__class__._convert(self.apicls.__radd__(self, other))
except:
return self.__class__._convert(super(Matrix, self).__radd__(other))
def __iadd__(self, other):
""" m.__iadd__(v) <==> m += v
In place addition of m and v, see __add__ """
try:
return self.__class__(self.__add__(other))
except:
return NotImplemented
def __sub__(self, other):
""" m.__sub__(v) <==> m-v
            Returns the result of the subtraction of v from m if v is convertible to a MatrixN (element-wise subtraction),
            subtracts v from every component of m if v is a scalar """
try:
return self.__class__._convert(self.apicls.__sub__(self, other))
except:
return self.__class__._convert(super(Matrix, self).__sub__(other))
def __rsub__(self, other):
""" m.__rsub__(v) <==> v-m
            Returns the result of the subtraction of m from v if v is convertible to a MatrixN (element-wise subtraction),
            replaces every component c of m by v-c if v is a scalar """
try:
return self.__class__._convert(self.apicls.__rsub__(self, other))
except:
return self.__class__._convert(super(Matrix, self).__rsub__(other))
def __isub__(self, other):
""" m.__isub__(v) <==> m -= v
            In place subtraction of v from m, see __sub__ """
try:
return self.__class__(self.__sub__(other))
except:
return NotImplemented
# action depends on second object type
def __mul__(self, other):
""" m.__mul__(x) <==> m*x
If x is a MatrixN, __mul__ is mapped to matrix multiplication m*x, if x is a VectorN, to MatrixN by VectorN multiplication.
Otherwise, returns the result of the element wise multiplication of m and x if x is convertible to Array,
            multiplies every component of m by x if x is a single numeric value """
try:
return self.__class__._convert(self.apicls.__mul__(self, other))
except:
return self.__class__._convert(super(Matrix, self).__mul__(other))
def __rmul__(self, other):
""" m.__rmul__(x) <==> x*m
If x is a MatrixN, __rmul__ is mapped to matrix multiplication x*m, if x is a VectorN (or Vector or Point or Color),
to transformation, ie VectorN by MatrixN multiplication.
Otherwise, returns the result of the element wise multiplication of m and x if x is convertible to Array,
multiplies every component of m by x if x is a single numeric value """
try:
return self.__class__._convert(self.apicls.__rmul__(self, other))
except:
return self.__class__._convert(super(Matrix, self).__rmul__(other))
def __imul__(self, other):
""" m.__imul__(n) <==> m *= n
Valid for Matrix * Matrix multiplication, in place multiplication of MatrixN m by MatrixN n """
try:
return self.__class__(self.__mul__(other))
except:
return NotImplemented
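    # Illustrative sketch of the dispatch above (assumes a Maya session):
    #   a, b = Matrix(), Matrix()
    #   a * b                 # matrix product, via the api fast path
    #   a * 2.0               # scales every component, via the MatrixN fallback
    #   Vector(1, 2, 3) * a   # transforms the vector by the matrix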
# __xor__ will defer to Vector __xor__
# API added methods
def setToIdentity(self):
""" m.setToIdentity() <==> m = a * b
Sets MatrixN to the identity matrix """
try:
self.apicls.setToIdentity(self)
except:
self.assign(self.__class__())
return self
def setToProduct(self, left, right):
""" m.setToProduct(a, b) <==> m = a * b
Sets MatrixN to the result of the product of MatrixN a and MatrixN b """
try:
            self.apicls.setToProduct(self, self.__class__(left), self.__class__(right))
except:
self.assign(self.__class__(self.__class__(left) * self.__class__(right)))
return self
def transpose(self):
""" Returns the transposed Matrix """
try:
return self.__class__._convert(self.apicls.transpose(self))
except:
return self.__class__._convert(super(Matrix, self).transpose())
def inverse(self):
""" Returns the inverse Matrix """
try:
return self.__class__._convert(self.apicls.inverse(self))
except:
return self.__class__._convert(super(Matrix, self).inverse())
def adjoint(self):
""" Returns the adjoint (adjugate) Matrix """
try:
return self.__class__._convert(self.apicls.adjoint(self))
except:
return self.__class__._convert(super(Matrix, self).adjugate())
def homogenize(self):
""" Returns a homogenized version of the Matrix """
try:
return self.__class__._convert(self.apicls.homogenize(self))
except:
return self.__class__._convert(super(Matrix, self).homogenize())
def det(self):
""" Returns the determinant of this Matrix instance """
try:
return self.apicls.det4x4(self)
except:
return super(Matrix, self).det()
def det4x4(self):
""" Returns the 4x4 determinant of this Matrix instance """
try:
return self.apicls.det4x4(self)
except:
return super(Matrix, self[:4, :4]).det()
def det3x3(self):
""" Returns the determinant of the upper left 3x3 submatrix of this Matrix instance,
it's the same as doing det(m[0:3, 0:3]) """
try:
return self.apicls.det3x3(self)
except:
return super(Matrix, self[:3, :3]).det()
def isEquivalent(self, other, tol=_api.MVector_kTol):
""" Returns true if both arguments considered as Matrix are equal within the specified tolerance """
try:
nself, nother = coerce(self, other)
except:
return False
if isinstance(nself, Matrix):
return bool(nself.apicls.isEquivalent(nself, nother, tol))
else:
return bool(super(MatrixN, nself).isEquivalent(nother, tol))
def isSingular(self):
""" Returns True if the given Matrix is singular """
try:
return bool(self.apicls.isSingular(self))
except:
return super(MatrixN, self).isSingular()
    # additional methods
def blend(self, other, weight=0.5):
""" Returns a 0.0-1.0 scalar weight blend between self and other Matrix,
blend mixes Matrix as transformation matrices """
if isinstance(other, Matrix):
return self.__class__(self.weighted(1.0 - weight) * other.weighted(weight))
else:
return blend(self, other, weight=weight)
def weighted(self, weight):
""" Returns a 0.0-1.0 scalar weighted blend between identity and self """
if type(self) is not TransformationMatrix:
self = TransformationMatrix(self)
return self.__class__._convert(self.asMatrix(weight))
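    # Illustrative sketch (assumes a Maya session; m1/m2 are hypothetical matrices):
    #   half = m1.blend(m2, 0.5)   # 50% mix of m1 and m2 as transformation matrices
    #   m1.weighted(0.25)          # 25% of the way from identity towards m1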
class FloatMatrix(Matrix):
""" A 4x4 matrix class that wraps Maya's api FloatMatrix class,
It behaves identically to Matrix, but it also derives from api's FloatMatrix
to keep api methods happy
"""
apicls = _api.MFloatMatrix
class Quaternion(Matrix):
apicls = _api.MQuaternion
shape = (4,)
cnames = ('x', 'y', 'z', 'w')
def __new__(cls, *args, **kwargs):
shape = kwargs.get('shape', None)
ndim = kwargs.get('ndim', None)
size = kwargs.get('size', None)
# will default to class constant shape = (4,), so it's just an error check to catch invalid shapes,
# as no other option is actually possible on Quaternion, but this method could be used to allow wrapping
# of Maya array classes that can have a variable number of elements
shape, ndim, size = cls._expandshape(shape, ndim, size)
new = cls.apicls.__new__(cls)
cls.apicls.__init__(new)
return new
def __init__(self, *args, **kwargs):
""" __init__ method for Quaternion """
cls = self.__class__
if args:
# allow both forms for arguments
if len(args) == 1 and hasattr(args[0], '__iter__'):
args = args[0]
rotate = getattr(args, 'rotate', None)
# TransformationMatrix, Quaternion, EulerRotation api classes can convert to a rotation Quaternion
if rotate is not None and not callable(rotate):
args = args.rotate
self.unit = 'radians'
elif len(args) == 4 and isinstance(args[3], (basestring, util.EnumValue)): # isinstance(args[3], EulerRotation.RotationOrder) ) :
quat = _api.MQuaternion()
quat.assign(EulerRotation(*args, **kwargs))
args = quat
                # allow initializing directly from 3 rotations and a rotation order
elif len(args) == 2 and isinstance(args[0], VectorN) and isinstance(args[1], float):
                # some special init cases are allowed by the api class; we want to support
                # Quaternion(Vector axis, float angle) as well as Quaternion(float angle, Vector axis)
args = (float(args[1]), Vector(args[0]))
# shortcut when a direct api init is possible
try:
self.assign(args)
except:
super(Array, self).__init__(*args)
if hasattr(cls, 'cnames') and len(set(cls.cnames) & set(kwargs)):
# can also use the form <componentname>=<number>
l = list(self.flat)
setcomp = False
for i, c in enumerate(cls.cnames):
if c in kwargs:
if float(l[i]) != float(kwargs[c]):
l[i] = float(kwargs[c])
setcomp = True
if setcomp:
try:
self.assign(l)
except:
msg = ", ".join(map(lambda x, y: x + "=<" + util.clsname(y) + ">", cls.cnames, l))
raise TypeError, "in %s(%s), at least one of the components is of an invalid type, check help(%s) " % (cls.__name__, msg, cls.__name__)
    # set properties for easy access to translation / rotation / scale of a MMatrix or derived class
    # some of these will only yield dependable results if MMatrix is a MTransformationMatrix and some
    # will always be zero for some classes (i.e. only rotation has a value on a MQuaternion)
def _getTranslate(self):
return Vector(0.0, 0.0, 0.0)
translate = property(_getTranslate, None, None, "The translation expressed in this MMQuaternion, which is always (0.0, 0.0, 0.0)")
def _getRotate(self):
return self
def _setRotate(self, value):
self.assign(Quaternion(value))
rotate = property(_getRotate, _setRotate, None, "The rotation expressed in this Quaternion, in transform space")
def _getScale(self):
return Vector(1.0, 1.0, 1.0)
scale = property(_getScale, None, None, "The scale expressed in this Quaternion, which is always (1.0, 1.0, 1.0)")
# overloads for assign and get though standard way should be to use the data property
# to access stored values
def assign(self, value):
""" Wrap the Quaternion api assign method """
# api Quaternion assign accepts Matrix, Quaternion and EulerRotation
if isinstance(value, Matrix):
value = value.rotate
else:
if not hasattr(value, '__iter__'):
value = (value,)
value = self.apicls(*value)
self.apicls.assign(self, value)
return self
# API get, actually not faster than pulling self[i] for such a short structure
def get(self):
""" Wrap the Quaternion api get method """
# need to keep a ref to the MScriptUtil alive until
        # the pointers are no longer needed...
ms = _api.MScriptUtil()
l = (0,) * self.size
ms.createFromDouble(*l)
p = ms.asDoublePtr()
self.apicls.get(self, p)
return tuple([ms.getDoubleArrayItem(p, i) for i in xrange(self.size)])
def __getitem__(self, i):
return self._getitem(i)
# faster to override __getitem__ cause we know Quaternion only has one dimension
def _getitem(self, i):
""" Get component i value from self """
if hasattr(i, '__iter__'):
i = list(i)
if len(i) == 1:
i = i[0]
else:
raise IndexError, "class %s instance %s has only %s dimension(s), index %s is out of bounds" % (util.clsname(self), self, self.ndim, i)
if isinstance(i, slice):
try:
return list(self)[i]
except:
raise IndexError, "class %s instance %s is of size %s, index %s is out of bounds" % (util.clsname(self), self, self.size, i)
else:
if i < 0:
i = self.size + i
if i < self.size and not i < 0:
if hasattr(self.apicls, '__getitem__'):
res = self.apicls.__getitem__(self, i)
else:
res = list(self)[i]
return res
else:
raise IndexError, "class %s instance %s is of size %s, index %s is out of bounds" % (util.clsname(self), self, self.size, i)
    # _api.MQuaternion has no __setitem__ method, so we need to reassign the whole Quaternion
def __setitem__(self, i, a):
""" Set component i value on self """
v = VectorN(self)
v.__setitem__(i, a)
self.assign(v)
def __iter__(self):
for i in range(self.size):
yield self[i]
def __len__(self):
# api incorrectly returns 4. this might make sense if it did not simply return z a second time as the fourth element
return self.size
#
# # TODO : support for optional __iter__ arguments
# def __iter__(self, *args, **kwargs):
# """ Iterate on the api components """
# return self.apicls.__iter__(self.data)
def __contains__(self, value):
""" True if at least one of the vector components is equal to the argument """
return value in self.__iter__()
class TransformationMatrix(Matrix):
apicls = _api.MTransformationMatrix
def _getTranslate(self):
return Vector(self.getTranslation(_api.MSpace.kTransform))
def _setTranslate(self, value):
self.setTranslation(Vector(value), _api.MSpace.kTransform)
translate = property(_getTranslate, _setTranslate, None, "The translation expressed in this TransformationMatrix, in transform space")
def _getRotate(self):
return Quaternion(self.apicls.rotation(self))
def _setRotate(self, value):
self.rotateTo(Quaternion(value))
rotate = property(_getRotate, _setRotate, None, "The quaternion rotation expressed in this TransformationMatrix, in transform space")
def rotateTo(self, value):
        '''Set to the given rotation (and return self).
        Value may be either a Quaternion, an EulerRotation object, or a list of
        floats; if it is floats, a length of 4 is interpreted as
        a Quaternion and a length of 3 as an EulerRotation.
        '''
if not isinstance(value, (Quaternion, EulerRotation,
_api.MQuaternion, _api.MEulerRotation)):
if len(value) == 3:
value = EulerRotation(value)
elif len(value) == 4:
value = Quaternion(value)
else:
raise ValueError('arg to rotateTo must be a Quaternion, EulerRotation, or an iterable of 3 or 4 floats')
return self.__class__(self.apicls.rotateTo(self, value))
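    # Illustrative sketch (assumes a Maya session):
    #   tm = TransformationMatrix()
    #   tm.rotateTo([0.0, 0.0, 0.0, 1.0])   # 4 floats, read as a Quaternion
    #   tm.rotateTo([0.0, 0.0, 1.57])       # 3 floats, read as an EulerRotation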
def eulerRotation(self):
return EulerRotation(self.apicls.eulerRotation(self))
def _getEuler(self):
return self.eulerRotation()
def _setEuler(self, value):
self.rotateTo(EulerRotation(value))
    euler = property(_getEuler, _setEuler, None, "The euler rotation expressed in this TransformationMatrix, in transform space")
# The apicls getRotation needs a "RotationOrder &" object, which is
# impossible to make in python...
# So instead, wrap eulerRotation
def getRotation(self):
return self.eulerRotation()
def setRotation(self, *args):
self.rotateTo(EulerRotation(*args))
def _getScale(self):
return Vector(self.getScale(_api.MSpace.kTransform))
def _setScale(self, value):
self.setScale(value, _api.MSpace.kTransform)
scale = property(_getScale, _setScale, None, "The scale expressed in this TransformationMatrix, in transform space")
class EulerRotation(Array):
"""
unit handling:
>>> from pymel.all import *
>>> import pymel.core.datatypes as dt
>>>
>>> currentUnit(angle='degree')
u'degree'
>>> e = dt.EulerRotation([math.pi,0,0], unit='radians')
>>> e
dt.EulerRotation([3.14159265359, 0.0, 0.0], unit='radians')
>>> e2 = dt.EulerRotation([180,0,0], unit='degrees')
>>> e2
dt.EulerRotation([180.0, 0.0, 0.0])
>>> e.isEquivalent( e2 )
True
>>> e == e2
True
units are only displayed when they do not match the current ui unit
>>> dt.Angle.getUIUnit() # check current angular unit
'degrees'
>>> e
dt.EulerRotation([3.14159265359, 0.0, 0.0], unit='radians')
>>> dt.Angle.setUIUnit('radians') # change to radians
>>> e
dt.EulerRotation([3.14159265359, 0.0, 0.0])
"""
__metaclass__ = MetaMayaArrayTypeWrapper
apicls = _api.MEulerRotation
shape = (3,)
cnames = ('x', 'y', 'z')
RotationOrder = _factories.apiClassInfo['MEulerRotation']['pymelEnums']['RotationOrder']
def _getorder(self):
return self.RotationOrder[self.apicls.__dict__['order'].__get__(self, self.apicls)]
def _setorder(self, val):
self.apicls.__dict__['order'].__set__(self, self.RotationOrder.getIndex(val))
order = property(_getorder, _setorder)
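    # Illustrative sketch (assumes a Maya session): the order property converts
    # between the api's int enum and the friendlier string form.
    #   e = EulerRotation(0, 0, 0)
    #   e.order            # -> 'XYZ'
    #   e.order = 'ZYX'    # accepts a RotationOrder string or enum value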
def __new__(cls, *args, **kwargs):
# shape = kwargs.get('shape', None)
# ndim = kwargs.get('ndim', None)
# size = kwargs.get('size', None)
#
new = cls.apicls.__new__(cls)
cls.apicls.__init__(new)
return new
def __init__(self, *args, **kwargs):
""" __init__ method for EulerRotation """
self.unit = None
self.assign(*args, **kwargs)
def setDisplayUnit(self, unit):
if unit not in Angle.Unit:
raise TypeError, "%s is not a valid angular unit. See Angle.Unit for the list of valid units"
self.unit = unit
def __repr__(self):
argStrs = [str(self)]
if self.unit != Angle.getUIUnit():
argStrs.append('unit=%r' % self.unit)
if self.order != 'XYZ':
argStrs.append('order=%r' % str(self.order))
return "dt.%s(%s)" % (self.__class__.__name__, ', '.join(argStrs))
def __iter__(self):
for i in range(self.size):
yield self[i]
def __getitem__(self, i):
return Angle(self._getitem(i), 'radians').asUnit(self.unit)
def __setitem__(self, key, val):
kwargs = {}
if key in self.cnames:
kwargs[key] = val
else:
kwargs[self.cnames[key]] = val
self.assign(**kwargs)
    # faster to override __getitem__ because we know EulerRotation only has one dimension
def _getitem(self, i):
""" Get component i value from self """
if hasattr(i, '__iter__'):
i = list(i)
if len(i) == 1:
i = i[0]
else:
raise IndexError, "class %s instance %s has only %s dimension(s), index %s is out of bounds" % (util.clsname(self), self, self.ndim, i)
        if isinstance(i, slice):
            try:
                return _toCompOrArrayInstance(list(self)[i], VectorN)
            except:
                raise IndexError, "class %s instance %s is of size %s, index %s is out of bounds" % (util.clsname(self), self, self.size, i)
else:
if i < 0:
i = self.size + i
if i < self.size and not i < 0:
if hasattr(self.apicls, '__getitem__'):
return self.apicls.__getitem__(self, i)
else:
return list(self)[i]
else:
raise IndexError, "class %s instance %s is of size %s, index %s is out of bounds" % (util.clsname(self), self, self.size, i)
def assign(self, *args, **kwargs):
""" Wrap the Quaternion api assign method """
        # After processing, we want args in a format such that we may do
        # either apicls.assign(self, arg) or apicls.setValue(self, *args).
        # This means that either:
        # - args is a single Matrix, Quaternion or EulerRotation object, or
        # - args is 3 rotation angles, optionally followed by a rotation order
if 'unit' in kwargs:
self.unit = kwargs['unit']
elif self.unit is None:
self.unit = Angle.getUIUnit()
if len(args) == 1 and isinstance(args[0], _api.MTransformationMatrix):
args = [args[0].asMatrix()]
# api MEulerRotation assign accepts Matrix, Quaternion and EulerRotation
validSingleObjs = (_api.MMatrix, _api.MQuaternion, _api.MEulerRotation)
if len(args) == 1 and isinstance(args[0], validSingleObjs):
self.unit = 'radians'
self.apicls.assign(self, args[0])
elif args:
if len(args) == 1:
args = list(args[0])
elif len(args) == 2 and isinstance(args[1], (basestring, util.EnumValue)):
args = list(args[0]) + [args[1]]
else:
# convert to list, as we may have to do modifications
args = list(args)
# If only 3 rotation angles supplied, and current order is
# not default, make sure we maintain it
if self.order != 'XYZ' and len(args) == 3:
args.append(self.apicls.__dict__['order'].__get__(self, self.apicls))
elif len(args) == 4 and isinstance(args[3], (basestring, util.EnumValue)):
                # allow initializing directly from 3 rotations and a rotation order as a string
args[3] = self.RotationOrder.getIndex(args[3])
# In case they do something like pass in a mix of Angle objects and
# float numbers, convert to correct unit one-by-one...
for i in xrange(3):
if isinstance(args[i], Angle):
args[i] = args[i].asUnit('radians')
elif self.unit != 'radians' and not isinstance(args[i], Angle):
args[i] = Angle(args[i], self.unit).asUnit('radians')
self.apicls.setValue(self, *args)
# We do kwargs as a separate step after args, instead of trying to combine
# them, in case they do something like pass in a EulerRotation(myMatrix, y=2)
if hasattr(self, 'cnames') and len(set(self.cnames) & set(kwargs)):
# can also use the form <componentname>=<number>
l = list(self.flat)
setcomp = False
for i, c in enumerate(self.cnames):
if c in kwargs:
if float(l[i]) != float(kwargs[c]):
l[i] = float(kwargs[c])
setcomp = True
if setcomp:
try:
self.assign(l)
except:
msg = ", ".join(map(lambda x, y: x + "=<" + util.clsname(y) + ">", cls.cnames, l))
raise TypeError, "in %s(%s), at least one of the components is of an invalid type, check help(%s) " % (cls.__name__, msg, cls.__name__)
return self
# API get, actually not faster than pulling self[i] for such a short structure
def get(self):
""" Wrap the MEulerRotation api get method """
# need to keep a ref to the MScriptUtil alive until
# all pointers aren't needed...
ms = _api.MScriptUtil()
l = (0,) * self.size
ms.createFromDouble(*l)
p = ms.asDoublePtr()
self.apicls.get(self, p)
return tuple([ms.getDoubleArrayItem(p, i) for i in xrange(self.size)])
def __contains__(self, value):
""" True if at least one of the vector components is equal to the argument """
return value in self.__iter__()
def __len__(self):
return self.apicls.__len__(self)
    # common operators without an api equivalent are inherited from VectorN
    # operators using the Maya API when applicable, but that can delegate to VectorN
def __eq__(self, other):
""" u.__eq__(v) <==> u == v
Equivalence test """
if isinstance(other, self.apicls):
return bool(self.apicls.__eq__(self, other))
else:
return bool(super(EulerRotation, self).__eq__(other))
def __ne__(self, other):
""" u.__ne__(v) <==> u != v
Equivalence test """
return (not self.__eq__(other))
def __neg__(self):
""" u.__neg__() <==> -u
The unary minus operator. Negates the value of each of the components of u """
return self.__class__(self.apicls.__neg__(self))
def __add__(self, other):
""" u.__add__(v) <==> u+v
Returns the result of the addition of u and v if v is convertible to a VectorN (element-wise addition),
adds v to every component of u if v is a scalar """
try:
return self.__class__._convert(self.apicls.__add__(self, other))
except:
return self.__class__._convert(super(EulerRotation, self).__add__(other))
def __radd__(self, other):
""" u.__radd__(v) <==> v+u
Returns the result of the addition of u and v if v is convertible to a VectorN (element-wise addition),
adds v to every component of u if v is a scalar """
try:
return self.__class__._convert(self.apicls.__radd__(self, other))
except:
return self.__class__._convert(super(EulerRotation, self).__radd__(other))
def __iadd__(self, other):
""" u.__iadd__(v) <==> u += v
In place addition of u and v, see __add__ """
try:
return self.__class__(self.__add__(other))
except:
return NotImplemented
def __sub__(self, other):
""" u.__sub__(v) <==> u-v
            Returns the result of the subtraction of v from u if v is convertible to a VectorN (element-wise subtraction),
            subtracts v from every component of u if v is a scalar """
try:
return self.__class__._convert(self.apicls.__sub__(self, other))
except:
return self.__class__._convert(super(EulerRotation, self).__sub__(other))
def __rsub__(self, other):
""" u.__rsub__(v) <==> v-u
            Returns the result of the subtraction of u from v if v is convertible to a VectorN (element-wise subtraction),
            replaces every component c of u by v-c if v is a scalar """
try:
return self.__class__._convert(self.apicls.__rsub__(self, other))
except:
return self.__class__._convert(super(EulerRotation, self).__rsub__(other))
def __isub__(self, other):
""" u.__isub__(v) <==> u -= v
            In place subtraction of v from u, see __sub__ """
try:
return self.__class__(self.__sub__(other))
except:
return NotImplemented
def __div__(self, other):
""" u.__div__(v) <==> u/v
Returns the result of the division of u by v if v is convertible to a VectorN (element-wise division),
divide every component of u by v if v is a scalar """
try:
return self.__class__._convert(self.apicls.__div__(self, other))
except:
return self.__class__._convert(super(EulerRotation, self).__div__(other))
def __rdiv__(self, other):
""" u.__rdiv__(v) <==> v/u
            Returns the result of the division of v by u if v is convertible to a VectorN (element-wise division),
invert every component of u and multiply it by v if v is a scalar """
try:
return self.__class__._convert(self.apicls.__rdiv__(self, other))
except:
return self.__class__._convert(super(EulerRotation, self).__rdiv__(other))
def __idiv__(self, other):
""" u.__idiv__(v) <==> u /= v
In place division of u by v, see __div__ """
try:
return self.__class__(self.__div__(other))
except:
return NotImplemented
# action depends on second object type
def __mul__(self, other):
""" u.__mul__(v) <==> u*v
The multiply '*' operator is mapped to the dot product when both objects are Vectors,
to the transformation of u by matrix v when v is a MatrixN,
to element wise multiplication when v is a sequence,
and multiplies each component of u by v when v is a numeric type. """
try:
res = self.apicls.__mul__(self, other)
except:
res = super(EulerRotation, self).__mul__(other)
if util.isNumeric(res):
return res
else:
return self.__class__._convert(res)
def __rmul__(self, other):
""" u.__rmul__(v) <==> v*u
The multiply '*' operator is mapped to the dot product when both objects are Vectors,
to the left side multiplication (pre-multiplication) of u by matrix v when v is a MatrixN,
to element wise multiplication when v is a sequence,
and multiplies each component of u by v when v is a numeric type. """
try:
res = self.apicls.__rmul__(self, other)
except:
res = super(EulerRotation, self).__rmul__(other)
if util.isNumeric(res):
return res
else:
return self.__class__._convert(res)
def __imul__(self, other):
""" u.__imul__(v) <==> u *= v
Valid for EulerRotation * Matrix multiplication, in place transformation of u by Matrix v
or EulerRotation by scalar multiplication only """
try:
return self.__class__(self.__mul__(other))
except:
return NotImplemented
# special operators
# def __xor__(self, other):
# """ u.__xor__(v) <==> u^v
# Defines the cross product operator between two 3D vectors,
# if v is a MatrixN, u^v is equivalent to u.transformAsNormal(v) """
# if isinstance(other, VectorN) :
# return self.cross(other)
# elif isinstance(other, MatrixN) :
# return self.transformAsNormal(other)
# else :
# return NotImplemented
# def __ixor__(self, other):
# """ u.__xor__(v) <==> u^=v
# Inplace cross product or transformation by inverse transpose of v is v is a MatrixN """
# try :
# return self.__class__(self.__xor__(other))
# except :
# return NotImplemented
class Unit(float):
__slots__ = ['unit', 'data', 'value', '_unit']
@classmethod
def getUIUnit(cls):
"""
Returns the global UI units currently in use for that type
"""
return cls.sUnit(cls.apicls.uiUnit())
@classmethod
def setUIUnit(cls, unit=None):
"""
        Sets the global UI unit to use for that type
"""
if unit is None:
cls.apicls.setUIUnit(cls.apicls.internalUnit())
else:
cls.apicls.setUIUnit(cls.kUnit(unit))
@classmethod
def getInternalUnit(cls):
"""
        Returns the internal units currently in use for that type
"""
return cls.sUnit(cls.apicls.internalUnit())
@classmethod
def uiToInternal(cls, value):
d = cls(value, cls.getUIUnit())
return d.asInternalUnit()
@classmethod
def kUnit(cls, unit=None):
"""
Converts a string unit name to the internal int unit enum representation
"""
if unit:
return cls.Unit.getIndex(unit)
else:
return cls.apicls.uiUnit()
@classmethod
def sUnit(cls, unit=None):
"""
        Converts an internal int unit enum representation to the string unit name
"""
if unit:
return cls.Unit.getKey(unit)
else:
            return str(cls.Unit.getKey(cls.apicls.uiUnit()))
def getUnit(self):
"""
Returns the units currently in effect for this instance
"""
return self.__class__.sUnit(self._unit)
# def setUnit(self, unit=None) :
# """
# Sets the units currently in effect for this instance
# """
# self._unit = self.__class__.kUnit(unit)
unit = property(getUnit, None, None, "The units currently in effect for this instance")
def __new__(cls, value, unit=None):
unit = cls.kUnit(unit)
if isinstance(value, cls.apicls):
value = getattr(value, AS_UNITS)(unit)
elif isinstance(value, cls):
value = value.asUnit(unit)
#data = cls.apicls(value, unit)
# the float representation uses internal units so that arithmetics work
#newobj = float.__new__(cls, data.asUnit(cls.apicls.internalUnit()))
#newobj = float.__new__(cls, data.asUnit(unit))
newobj = float.__new__(cls, value)
        #newobj._data = data
newobj._unit = unit
newobj._data = cls.apicls(value, unit)
return newobj
    def assign(self, *args):
        # a single Unit instance may be passed in; unpack its data and unit
        if len(args) == 1 and isinstance(args[0], self.__class__):
            args = (args[0]._data, args[0]._unit)
        self._data.assign(*args)
def __repr__(self):
return 'dt.%s(%s, unit=%r)' % (self.__class__.__name__, self, self.unit)
def asUnit(self, unit):
# in python2.6/maya2010 'as' becomes a keyword.
return getattr(self._data, AS_UNITS)(self.__class__.kUnit(unit))
# def asUnit(self) :
# return self.asUnit(self.unit)
def asUIUnit(self):
return self.asUnit(self.__class__.getUIUnit())
def asInternalUnit(self):
return self.asUnit(self.__class__.getInternalUnit())
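    # Illustrative sketch (assumes a Maya session; values are hypothetical):
    # a Unit subclass is a plain float tagged with a unit, so arithmetic works
    # but produces untagged floats.
    #   a = Angle(90, 'degrees')
    #   a.asUnit('radians')    # -> 1.5707963267948966
    #   float(a)               # -> 90.0
    #   a + 10                 # -> 100.0, a plain float (the unit tag is lost)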
class Time(Unit):
apicls = _api.MTime
Unit = _factories.apiClassInfo['MTime']['pymelEnums']['Unit']
@classmethod
def _inCast(cls, x):
return cls(x)._data
class Distance(Unit):
"""
>>> from pymel.core import *
>>> import pymel.core.datatypes as dt
>>>
>>> dt.Distance.getInternalUnit()
'centimeters'
>>> dt.Distance.setUIUnit('meters')
>>> dt.Distance.getUIUnit()
'meters'
>>> d = dt.Distance(12)
>>> d.unit
'meters'
>>> print d
12.0
>>> print repr(d)
dt.Distance(12.0, unit='meters')
>>> print d.asUIUnit()
12.0
>>> print d.asInternalUnit()
1200.0
>>> dt.Distance.setUIUnit('centimeters')
>>> dt.Distance.getUIUnit()
'centimeters'
>>> e = dt.Distance(12)
>>> e.unit
'centimeters'
>>> print e
12.0
>>> str(e)
'12.0'
>>> print repr(e)
dt.Distance(12.0, unit='centimeters')
>>> print e.asUIUnit()
12.0
>>> print e.asInternalUnit()
12.0
>>> f = dt.Distance(12, 'feet')
>>> print f
12.0
>>> print repr(f)
dt.Distance(12.0, unit='feet')
>>> f.unit
'feet'
>>> print f.asUIUnit()
365.76
>>> dt.Distance.setUIUnit('meters')
>>> dt.Distance.getUIUnit()
'meters'
>>> print f.asUIUnit()
3.6576
>>> dt.Distance.getInternalUnit()
'centimeters'
>>> print f.asInternalUnit()
365.76
>>> print f.asFeet()
12.0
>>> print f.asMeters()
3.6576
>>> print f.asCentimeters()
365.76
>>> dt.Distance.setUIUnit()
>>> dt.Distance.getUIUnit()
'centimeters'
"""
apicls = _api.MDistance
Unit = _factories.apiClassInfo['MDistance']['pymelEnums']['Unit']
    def asMillimeter(self):
        return self.asUnit('millimeters')
def asCentimeters(self):
return self.asUnit('centimeters')
def asKilometers(self):
return self.asUnit('kilometers')
def asMeters(self):
return self.asUnit('meters')
def asInches(self):
return self.asUnit('inches')
def asFeet(self):
return self.asUnit('feet')
def asYards(self):
return self.asUnit('yards')
def asMiles(self):
return self.asUnit('miles')
@classmethod
def _outCast(cls, instance, result):
return cls(result, 'centimeters').asUIUnit()
class Angle(Unit):
apicls = _api.MAngle
Unit = _factories.apiClassInfo['MAngle']['pymelEnums']['Unit']
def asRadians(self):
return self.asUnit('radians')
def asDegrees(self):
return self.asUnit('degrees')
def asAngMinutes(self):
return self.asUnit('angMinutes')
def asAngSeconds(self):
return self.asUnit('angSeconds')
@classmethod
def _outCast(cls, instance, result):
return cls(result, 'radians').asUIUnit()
class BoundingBox(_api.MBoundingBox):
apicls = _api.MBoundingBox
__metaclass__ = _factories.MetaMayaTypeWrapper
def __init__(self, *args):
if len(args) == 2:
args = list(args)
if not isinstance(args[0], _api.MPoint):
args[0] = Point(args[0])
if not isinstance(args[1], _api.MPoint):
args[1] = Point(args[1])
_api.MBoundingBox.__init__(self, *args)
def __str__(self):
return 'dt.%s(%s,%s)' % (self.__class__.__name__, self.min(), self.max())
def __repr__(self):
return str(self)
def __getitem__(self, item):
if item == 0:
return self.min()
elif item == 1:
return self.max()
raise IndexError, "Index out of range"
def __melobject__(self):
"""A flat list of 6 values [minx, miny, minz, maxx, maxy, maxz]"""
return list(self.min()) + list(self.max())
repr = __str__
w = property(_factories.wrapApiMethod(_api.MBoundingBox, 'width'))
h = property(_factories.wrapApiMethod(_api.MBoundingBox, 'height'))
d = property(_factories.wrapApiMethod(_api.MBoundingBox, 'depth'))
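    # Illustrative sketch (assumes a Maya session):
    #   bb = BoundingBox(Point(0, 0, 0), Point(1, 2, 3))
    #   bb[0], bb[1]         # min and max corners
    #   bb.w, bb.h, bb.d     # -> 1.0, 2.0, 3.0
    #   bb.__melobject__()   # -> [0.0, 0.0, 0.0, 1.0, 2.0, 3.0]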
#_factories.ApiTypeRegister.register( 'MVector', Vector )
#_factories.ApiTypeRegister.register( 'MMatrix', Matrix )
#_factories.ApiTypeRegister.register( 'MPoint', Point )
#_factories.ApiTypeRegister.register( 'MColor', Color )
#_factories.ApiTypeRegister.register( 'MQuaternion', Quaternion )
#_factories.ApiTypeRegister.register( 'MEulerRotation', EulerRotation )
_factories.ApiTypeRegister.register('MTime', Time, inCast=Time._inCast)
_factories.ApiTypeRegister.register('MDistance', Distance, outCast=Distance._outCast)
_factories.ApiTypeRegister.register('MAngle', Angle, outCast=Angle._outCast)
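# Illustrative sketch of the registered casts (assumes a Maya session): they convert
# at the api boundary, e.g. a raw MDistance result is returned in the current UI unit.
#   Distance._outCast(None, 180.0)   # 180.0 internal centimeters -> value in the UI unit
#   Time._inCast(1.0)                # wraps a plain float as api MTime data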
#_floatUpConvertDict = {_api.MFloatArray:_api.MDoubleArray,
# _api.MFloatMatrix:_api.MMatrix,
# _api.MFloatPoint:_api.MPoint,
# _api.MFloatPointArray:_api.MPointArray,
# _api.MFloatVector:_api.MVector,
# _api.MFloatVectorArray:_api.MVectorArray,
# FloatMatrix:Matrix,
# FloatPoint:Point,
# FloatVector:Vector
# }
# def _floatUpConvert(input):
# """Will convert various Float* objects to their corresponding double object
#
# ie, api.MFloatMatrix => api.MMatrix, FloatPoint => Point
# """
# newClass = _floatUpConvertDict.get(input.__class__)
# if newClass:
# return newClass(input)
# else:
# return input
def getPlugValue(plug):
"""given an MPlug, get its value as a pymel-style object"""
# if plug.isArray():
# raise TypeError, "array plugs of this type are not supported"
obj = plug.attribute()
apiType = obj.apiType()
# Float Pairs
if apiType in [_api.MFn.kAttribute2Double, _api.MFn.kAttribute2Float]:
res = []
for i in range(plug.numChildren()):
res.append(getPlugValue(plug.child(i)))
if isinstance(res[0], Distance):
return Vector(res)
return res
# Integer Groups
elif apiType in [_api.MFn.kAttribute2Short, _api.MFn.kAttribute2Int, _api.MFn.kAttribute3Short, _api.MFn.kAttribute3Int]:
res = []
for i in range(plug.numChildren()):
res.append(getPlugValue(plug.child(i)))
return res
# Float Groups
elif apiType in [_api.MFn.kAttribute3Double, _api.MFn.kAttribute3Float, _api.MFn.kAttribute4Double]:
res = []
for i in range(plug.numChildren()):
res.append(getPlugValue(plug.child(i)))
if isinstance(res[0], Distance):
return Vector(res)
elif _api.MFnAttribute(obj).isUsedAsColor():
return Color(res)
return res
# Compound
elif apiType in [_api.MFn.kCompoundAttribute]:
res = []
for i in range(plug.numChildren()):
res.append(getPlugValue(plug.child(i)))
return tuple(res)
# Distance
elif apiType in [_api.MFn.kDoubleLinearAttribute, _api.MFn.kFloatLinearAttribute]:
val = plug.asMDistance()
unit = _api.MDistance.uiUnit()
# as becomes a keyword in python 2.6
return Distance(getattr(val, AS_UNITS)(unit), unit)
# Angle
elif apiType in [_api.MFn.kDoubleAngleAttribute, _api.MFn.kFloatAngleAttribute]:
val = plug.asMAngle()
unit = _api.MAngle.uiUnit()
# as becomes a keyword in python 2.6
        return Angle(getattr(val, AS_UNITS)(unit), unit)
# Time
elif apiType == _api.MFn.kTimeAttribute:
val = plug.asMTime()
unit = _api.MTime.uiUnit()
# as becomes a keyword in python 2.6
        return Time(getattr(val, AS_UNITS)(unit), unit)
elif apiType == _api.MFn.kNumericAttribute:
nAttr = _api.MFnNumericAttribute(obj)
dataType = nAttr.unitType()
if dataType == _api.MFnNumericData.kBoolean:
return plug.asBool()
elif dataType in [_api.MFnNumericData.kShort, _api.MFnNumericData.kInt, _api.MFnNumericData.kLong, _api.MFnNumericData.kByte]:
return plug.asInt()
elif dataType in [_api.MFnNumericData.kFloat, _api.MFnNumericData.kDouble, _api.MFnNumericData.kAddr]:
return plug.asDouble()
raise "%s: unknown numeric attribute type: %s" % (plug.partialName(True, True, True, False, True, True), dataType)
elif apiType == _api.MFn.kEnumAttribute:
# TODO : use EnumValue class?
return plug.asInt()
elif apiType == _api.MFn.kTypedAttribute:
tAttr = _api.MFnTypedAttribute(obj)
dataType = tAttr.attrType()
if dataType == _api.MFnData.kInvalid: # 0
return None
elif dataType == _api.MFnData.kNumeric: # 1
            # all of the dynamic mental ray attributes fail here, but I have no idea why they are numeric attrs and not message attrs.
# cmds.getAttr returns None, so we will too.
try:
dataObj = plug.asMObject()
except:
return
try:
numFn = _api.MFnNumericData(dataObj)
except RuntimeError:
if plug.isArray():
raise TypeError, "%s: numeric arrays are not supported" % plug.partialName(True, True, True, False, True, True)
else:
raise TypeError, "%s: attribute type is numeric, but its data cannot be interpreted numerically" % plug.partialName(True, True, True, False, True, True)
dataType = numFn.numericType()
if dataType == _api.MFnNumericData.kBoolean:
return plug.asBool()
elif dataType in [_api.MFnNumericData.kShort, _api.MFnNumericData.kInt, _api.MFnNumericData.kLong, _api.MFnNumericData.kByte]:
return plug.asInt()
elif dataType in [_api.MFnNumericData.kFloat, _api.MFnNumericData.kDouble, _api.MFnNumericData.kAddr]:
return plug.asDouble()
elif dataType == _api.MFnNumericData.k2Short:
ptr1 = _api.SafeApiPtr('short')
ptr2 = _api.SafeApiPtr('short')
numFn.getData2Short(ptr1(), ptr2())
return (ptr1.get(), ptr2.get())
elif dataType in [_api.MFnNumericData.k2Int, _api.MFnNumericData.k2Long]:
ptr1 = _api.SafeApiPtr('int')
ptr2 = _api.SafeApiPtr('int')
numFn.getData2Int(ptr1(), ptr2())
return (ptr1.get(), ptr2.get())
elif dataType == _api.MFnNumericData.k2Float:
ptr1 = _api.SafeApiPtr('float')
ptr2 = _api.SafeApiPtr('float')
numFn.getData2Float(ptr1(), ptr2())
return (ptr1.get(), ptr2.get())
elif dataType == _api.MFnNumericData.k2Double:
ptr1 = _api.SafeApiPtr('double')
ptr2 = _api.SafeApiPtr('double')
numFn.getData2Double(ptr1(), ptr2())
return (ptr1.get(), ptr2.get())
elif dataType == _api.MFnNumericData.k3Float:
ptr1 = _api.SafeApiPtr('float')
ptr2 = _api.SafeApiPtr('float')
ptr3 = _api.SafeApiPtr('float')
numFn.getData3Float(ptr1(), ptr2(), ptr3())
return (ptr1.get(), ptr2.get(), ptr3.get())
elif dataType == _api.MFnNumericData.k3Double:
ptr1 = _api.SafeApiPtr('double')
ptr2 = _api.SafeApiPtr('double')
ptr3 = _api.SafeApiPtr('double')
numFn.getData3Double(ptr1(), ptr2(), ptr3())
return (ptr1.get(), ptr2.get(), ptr3.get())
elif dataType == _api.MFnNumericData.kChar:
return plug.asChar()
raise TypeError, "%s: Unsupported numeric attribute: %s" % (plug.partialName(True, True, True, False, True, True), dataType)
elif dataType == _api.MFnData.kString: # 4
return plug.asString()
elif dataType == _api.MFnData.kMatrix: # 5
return Matrix(_api.MFnMatrixData(plug.asMObject()).matrix())
elif dataType == _api.MFnData.kStringArray: # 6
try:
dataObj = plug.asMObject()
except RuntimeError:
return []
array = _api.MFnStringArrayData(dataObj).array()
return [array[i] for i in range(array.length())]
elif dataType == _api.MFnData.kDoubleArray: # 7
try:
dataObj = plug.asMObject()
except RuntimeError:
return []
array = _api.MFnDoubleArrayData(dataObj).array()
return [array[i] for i in range(array.length())]
elif dataType == _api.MFnData.kIntArray: # 8
try:
dataObj = plug.asMObject()
except RuntimeError:
return []
array = _api.MFnIntArrayData(dataObj).array()
return [array[i] for i in range(array.length())]
elif dataType == _api.MFnData.kPointArray: # 9
try:
dataObj = plug.asMObject()
except RuntimeError:
return []
array = _api.MFnPointArrayData(dataObj).array()
return [Point(array[i]) for i in range(array.length())]
elif dataType == _api.MFnData.kVectorArray: # 10
try:
dataObj = plug.asMObject()
except RuntimeError:
return []
array = _api.MFnVectorArrayData(dataObj).array()
return [Vector(array[i]) for i in range(array.length())]
# this block crashes maya under certain circumstances
# elif dataType == _api.MFnData.kComponentList : # 11
# try:
# dataObj = plug.asMObject()
# except RuntimeError:
# return []
# array = _api.MFnComponentListData( dataObj )
# return array
# #return [ Vector(array[i]) for i in range(array.length()) ]
raise TypeError, "%s: Unsupported typed attribute: %s" % (plug.partialName(True, True, True, False, True, True), dataType)
raise TypeError, "%s: Unsupported Type: %s" % (plug.partialName(True, True, True, False, True, True), _factories.apiEnumsToApiTypes.get(apiType, apiType))
def _testMVector():
print "Vector class:", dir(Vector)
u = Vector()
print u
print "Vector instance:", dir(u)
print repr(u)
print Vector.__readonly__
print Vector.__slots__
print Vector.shape
print Vector.ndim
print Vector.size
print u.shape
print u.ndim
print u.size
# should fail
u.shape = 2
u.assign(Vector(4, 5, 6))
print repr(u)
#Vector([4.0, 5.0, 6.0])
u = Vector(1, 2, 3)
print repr(u)
# Vector([1.0, 2.0, 3.0])
print len(u)
# 3
# inherits from VectorN --> Array
print isinstance(u, VectorN)
# True
print isinstance(u, Array)
# True
# as well as _api.Vector
print isinstance(u, _api.MVector)
# True
# accepted directly by API methods
M = _api.MTransformationMatrix()
M.setTranslation(u, _api.MSpace.kWorld)
# need conversion on the way back though
u = Vector(M.getTranslation(_api.MSpace.kWorld))
print repr(u)
# Vector([1.0, 2.0, 3.0])
u = Vector(x=1, y=2, z=3)
print repr(u)
# Vector([1.0, 2.0, 3.0])
u = Vector([1, 2], z=3)
print repr(u)
# Vector([1.0, 2.0, 3.0])
u = Vector(_api.MPoint(1, 2, 3))
print repr(u)
# Vector([1.0, 2.0, 3.0])
print "u = Vector(VectorN(1, 2, 3))"
u = Vector(VectorN(1, 2, 3))
print repr(u)
# Vector([1.0, 2.0, 3.0])
u = Vector(1)
print repr(u)
# Vector([1.0, 1.0, 1.0])
u = Vector(1, 2)
print repr(u)
# Vector([1.0, 2.0, 0.0])
u = Vector(VectorN(1, shape=(2,)))
print repr(u)
# Vector([1.0, 1.0, 0.0])
u = Vector(Point(1, 2, 3))
print repr(u)
# Vector([1.0, 2.0, 3.0])
u = Vector(Point(1, 2, 3, 1), y=20, z=30)
print repr(u)
# Vector([1.0, 20.0, 30.0])
# should fail
print "Vector(VectorN(1, 2, 3, 4))"
try:
u = Vector(VectorN(1, 2, 3, 4))
except:
print "will raise ValueError: could not cast [1, 2, 3, 4] to Vector of size 3, some data would be lost"
print u.get()
# (1.0, 20.0, 30.0)
print u[0]
    # 1.0
u[0] = 10
print repr(u)
# Vector([10.0, 20.0, 30.0])
print (10 in u)
# True
print list(u)
# [10.0, 20.0, 30.0]
u = Vector.xAxis
v = Vector.yAxis
print Vector.xAxis
print str(Vector.xAxis)
print unicode(Vector.xAxis)
print repr(Vector.xAxis)
print "u = Vector.xAxis:"
print repr(u)
# Vector([1.0, 0.0, 0.0])
print "v = Vector.yAxis:"
print repr(v)
# Vector([0.0, 1.0, 0.0])
n = u ^ v
print "n = u ^ v:"
print repr(n)
# Vector([0.0, 0.0, 1.0])
print "n.x=%s, n.y=%s, n.z=%s" % (n.x, n.y, n.z)
# n.x=0.0, n.y=0.0, n.z=1.0
n = u ^ VectorN(v)
print "n = u ^ VectorN(v):"
print repr(n)
# Vector([0.0, 0.0, 1.0])
n = u ^ [0, 1, 0]
print "n = u ^ [0, 1, 0]:"
print repr(n)
# Vector([0.0, 0.0, 1.0])
n[0:2] = [1, 1]
print "n[0:2] = [1, 1]:"
print repr(n)
# Vector([1.0, 1.0, 1.0])
print "n = n * 2 :"
n = n * 2
print repr(n)
# Vector([2.0, 2.0, 2.0])
print "n = n * [0.5, 1.0, 2.0]:"
n = n * [0.5, 1.0, 2.0]
print repr(n)
# Vector([1.0, 2.0, 4.0])
print "n * n :"
print n * n
# 21.0
print repr(n.clamp(1.0, 2.0))
# Vector([1.0, 2.0, 2.0])
print repr(-n)
# Vector([-1.0, -2.0, -4.0])
w = u + v
print repr(w)
# Vector([1.0, 1.0, 0.0])
p = Point(1, 2, 3)
q = u + p
print repr(q)
# Point([2.0, 2.0, 3.0, 1.0])
q = p + u
print repr(q)
# Point([2.0, 2.0, 3.0, 1.0])
print repr(p + q)
# Point([3.0, 4.0, 6.0, 1.0])
w = u + VectorN(1, 2, 3, 4)
print repr(w)
# VectorN([2.0, 2.0, 3.0, 4])
print repr(u + 2)
# Vector([3.0, 2.0, 2.0])
print repr(2 + u)
# Vector([3.0, 2.0, 2.0])
print repr(p + 2)
# Point([3.0, 4.0, 5.0, 1.0])
print repr(2 + p)
# Point([3.0, 4.0, 5.0, 1.0])
print repr(p + u)
# Point([2.0, 2.0, 3.0, 1.0])
print repr(VectorN(1, 2, 3, 4) + u)
# VectorN([2.0, 2.0, 3.0, 4])
print repr([1, 2, 3] + u)
# Vector([2.0, 2.0, 3.0])
u = Vector(1, 2, 3)
print repr(u)
# Vector([1.0, 2.0, 3.0])
print u.length()
# 3.74165738677
print length(u)
# 3.74165738677
print length([1, 2, 3])
# 3.74165738677
print length(VectorN(1, 2, 3))
# 3.74165738677
print VectorN(1, 2, 3).length()
# 3.74165738677
print length(VectorN(1, 2, 3, 4))
# 5.47722557505
print VectorN(1, 2, 3, 4).length()
# 5.47722557505
print length(1)
# 1.0
print length([1, 2])
# 2.2360679775
print length([1, 2, 3])
# 3.74165738677
print length([1, 2, 3, 4])
# 5.47722557505
print length([1, 2, 3, 4], 0)
# 5.47722557505
print length([1, 2, 3, 4], (0,))
# 5.47722557505
print length([[1, 2], [3, 4]], 1)
# [3.16227766017, 4.472135955]
# should fail
try:
print length([1, 2, 3, 4], 1)
except:
print "Will raise ValueError, \"axis 0 is the only valid axis for a Vector, 1 invalid\""
u = Vector(1, 2, 3)
print repr(u)
# Vector([1.0, 2.0, 3.0])
print u.sqlength()
# 14
print repr(u.normal())
# Vector([0.267261241912, 0.534522483825, 0.801783725737])
u.normalize()
print repr(u)
# Vector([0.267261241912, 0.534522483825, 0.801783725737])
u = Vector(1, 2, 3)
print repr(u)
# Vector([1.0, 2.0, 3.0])
w = u + [0.01, 0.01, 0.01]
print repr(w)
# Vector([1.01, 2.01, 3.01])
print (u == u)
# True
print (u == w)
# False
print (u == Vector(1.0, 2.0, 3.0))
# True
print (u == [1.0, 2.0, 3.0])
# False
print (u == Point(1.0, 2.0, 3.0))
# False
print u.isEquivalent([1.0, 2.0, 3.0])
# True
print u.isEquivalent(Vector(1.0, 2.0, 3.0))
# True
print u.isEquivalent(Point(1.0, 2.0, 3.0))
# True
print u.isEquivalent(w)
# False
print u.isEquivalent(w, 0.1)
# True
u = Vector(1, 0, 0)
print repr(u)
# Vector([1.0, 0.0, 0.0])
v = Vector(0.707, 0, -0.707)
print repr(v)
# Vector([0.707, 0.0, -0.707])
print repr(axis(u, v))
# Vector([-0.0, 0.707, 0.0])
print repr(u.axis(v))
# Vector([-0.0, 0.707, 0.0])
print repr(axis(VectorN(u), VectorN(v)))
# VectorN([-0.0, 0.707, 0.0])
print repr(axis(u, v, normalize=True))
# Vector([-0.0, 1.0, 0.0])
print repr(v.axis(u, normalize=True))
# Vector([-0.0, -1.0, 0.0])
print repr(axis(VectorN(u), VectorN(v), normalize=True))
# VectorN([-0.0, 1.0, 0.0])
print angle(u, v)
# 0.785398163397
print v.angle(u)
# 0.785398163397
print angle(VectorN(u), VectorN(v))
# 0.785398163397
print cotan(u, v)
# 1.0
print repr(u.rotateTo(v))
# Quaternion([-0.0, 0.382683432365, 0.0, 0.923879532511])
print repr(u.rotateBy(u.axis(v), u.angle(v)))
# Vector([0.707106781187, 0.0, -0.707106781187])
q = Quaternion([-0.0, 0.382683432365, 0.0, 0.923879532511])
print repr(u.rotateBy(q))
# Vector([0.707106781187, 0.0, -0.707106781187])
print u.distanceTo(v)
# 0.765309087885
print u.isParallel(v)
# False
print u.isParallel(2 * u)
# True
print repr(u.blend(v))
# Vector([0.8535, 0.0, -0.3535])
print "end tests Vector"
def _testMPoint():
print "Point class", dir(Point)
print hasattr(Point, 'data')
p = Point()
print repr(p)
# Point([0.0, 0.0, 0.0])
print "Point instance", dir(p)
print hasattr(p, 'data')
print repr(p.data)
# <maya.OpenMaya.Point; proxy of <Swig Object of type 'Point *' at 0x84a1270> >
p = Point(1, 2, 3)
print repr(p)
# Point([1.0, 2.0, 3.0])
v = Vector(p)
print repr(v)
# Vector([1.0, 2.0, 3.0])
V = VectorN(p)
print repr(V)
# VectorN([1.0, 2.0, 3.0, 1.0])
print list(p)
# [1.0, 2.0, 3.0]
print len(p)
# 3
print p.size
# 4
print p.x, p.y, p.z, p.w
# 1.0 2.0 3.0 1.0
print p[0], p[1], p[2], p[3]
# 1.0 2.0 3.0 1.0
    print p.get()
    # (1.0, 2.0, 3.0, 1.0)
# accepted by api
q = _api.MPoint()
print q.distanceTo(p)
# 3.74165738677
# support for non cartesian points still there
p = Point(1, 2, 3, 2)
print repr(p)
# Point([1.0, 2.0, 3.0, 2.0])
v = Vector(p)
print repr(v)
# Vector([0.5, 1.0, 1.5])
V = VectorN(p)
print repr(V)
# VectorN([1.0, 2.0, 3.0, 2.0])
print list(p)
# [1.0, 2.0, 3.0, 2.0]
print len(p)
# 4
print p.size
# 4
print p.x, p.y, p.z, p.w
# 1.0 2.0 3.0 2.0
print p[0], p[1], p[2], p[3]
# 1.0 2.0 3.0 2.0
    print p.get()
    # (1.0, 2.0, 3.0, 2.0)
# accepted by api
q = _api.MPoint()
print q.distanceTo(p)
# 1.87082869339
p = Point(_api.MPoint())
print repr(p)
# Point([0.0, 0.0, 0.0])
p = Point(1)
print repr(p)
# Point([1.0, 1.0, 1.0])
p = Point(1, 2)
print repr(p)
# Point([1.0, 2.0, 0.0])
p = Point(1, 2, 3)
print repr(p)
# Point([1.0, 2.0, 3.0])
p = Point(_api.MPoint(1, 2, 3))
print repr(p)
# Point([1.0, 2.0, 3.0])
p = Point(VectorN(1, 2))
print repr(p)
# Point([1.0, 2.0, 0.0])
p = Point(Vector(1, 2, 3))
print repr(p)
# Point([1.0, 2.0, 3.0])
p = Point(_api.MVector(1, 2, 3))
print repr(p)
# Point([1.0, 2.0, 3.0])
p = Point(VectorN(1, 2, 3, 4))
print repr(p)
# Point([1.0, 2.0, 3.0, 4.0])
print repr(Vector(p))
# Vector([0.25, 0.5, 0.75])
print repr(VectorN(p))
# VectorN([1.0, 2.0, 3.0, 4.0])
p = Point(p, w=1)
print repr(p)
# Point([1.0, 2.0, 3.0])
print repr(Vector(p))
# Vector([1.0, 2.0, 3.0])
print repr(VectorN(p))
# VectorN([1.0, 2.0, 3.0, 1.0])
p = Point.origin
print repr(p)
# Point([0.0, 0.0, 0.0])
p = Point.xAxis
print repr(p)
# Point([1.0, 0.0, 0.0])
p = Point(1, 2, 3)
print repr(p)
# Point([1.0, 2.0, 3.0])
print repr(p + Vector([1, 2, 3]))
# Point([2.0, 4.0, 6.0])
print repr(p + Point([1, 2, 3]))
# Point([2.0, 4.0, 6.0])
print repr(p + [1, 2, 3])
# Point([2.0, 4.0, 6.0])
print repr(p + [1, 2, 3, 1])
# Point([2.0, 4.0, 6.0])
print repr(p + Point([1, 2, 3, 1]))
# Point([2.0, 4.0, 6.0])
print repr(p + [1, 2, 3, 2])
# Point([2.0, 4.0, 6.0, 3.0]) TODO : convert to Point always?
print repr(p + Point([1, 2, 3, 2]))
# Point([1.5, 3.0, 4.5])
print repr(Vector([1, 2, 3]) + p)
# Point([2.0, 4.0, 6.0])
print repr(Point([1, 2, 3]) + p)
# Point([2.0, 4.0, 6.0])
print repr([1, 2, 3] + p)
# Point([2.0, 4.0, 6.0])
print repr([1, 2, 3, 1] + p)
# Point([2.0, 4.0, 6.0])
print repr(Point([1, 2, 3, 1]) + p)
# Point([2.0, 4.0, 6.0])
print repr([1, 2, 3, 2] + p)
# Point([2.0, 4.0, 6.0, 3.0])
print repr(Point([1, 2, 3, 2]) + p)
# Point([1.5, 3.0, 4.5])
    # various operations, on cartesian and non-cartesian points
print "p = Point(1, 2, 3)"
p = Point(1, 2, 3)
print repr(p)
# Point([1.0, 2.0, 3.0])
print "p/2"
print repr(p / 2)
# Point([0.5, 1.0, 1.5])
print "p*2"
print repr(p * 2)
# Point([2.0, 4.0, 6.0])
print "q = Point(0.25, 0.5, 1.0)"
q = Point(0.25, 0.5, 1.0)
print repr(q)
# Point([0.25, 0.5, 1.0])
print repr(q + 2)
# Point([2.25, 2.5, 3.0])
print repr(q / 2)
# Point([0.125, 0.25, 0.5])
print repr(p + q)
# Point([1.25, 2.5, 4.0])
print repr(p - q)
# Vector([0.75, 1.5, 2.0])
print repr(q - p)
# Vector([-0.75, -1.5, -2.0])
print repr(p - (p - q))
# Point([0.25, 0.5, 1.0])
print repr(Vector(p) * Vector(q))
# 4.25
print repr(p * q)
# 4.25
print repr(p / q)
# Point([4.0, 4.0, 3.0])
print "p = Point(1, 2, 3)"
p = Point(1, 2, 3)
print repr(p)
# Point([1.0, 2.0, 3.0])
print "p/2"
print repr(p / 2)
# Point([0.5, 1.0, 1.5])
print "p*2"
print repr(p * 2)
# Point([2.0, 4.0, 6.0])
print "q = Point(0.25, 0.5, 1.0, 0.5)"
q = Point(0.25, 0.5, 1.0, 0.5)
print repr(q)
# Point([0.25, 0.5, 1.0, 0.5])
r = q.deepcopy()
print repr(r)
# Point([0.25, 0.5, 1.0, 0.5])
print repr(r.cartesianize())
# Point([0.5, 1.0, 2.0])
print repr(r)
# Point([0.5, 1.0, 2.0])
print repr(q)
# Point([0.25, 0.5, 1.0, 0.5])
print repr(q.cartesian())
# Point([0.5, 1.0, 2.0])
r = q.deepcopy()
print repr(r)
# Point([0.25, 0.5, 1.0, 0.5])
print repr(r.rationalize())
# Point([0.5, 1.0, 2.0, 0.5])
print repr(r)
# Point([0.5, 1.0, 2.0, 0.5])
print repr(q.rational())
# Point([0.5, 1.0, 2.0, 0.5])
r = q.deepcopy()
print repr(r.homogenize())
# Point([0.125, 0.25, 0.5, 0.5])
print repr(r)
# Point([0.125, 0.25, 0.5, 0.5])
print repr(q.homogen())
# Point([0.125, 0.25, 0.5, 0.5])
print repr(q)
# Point([0.25, 0.5, 1.0, 0.5])
print Vector(q)
# [0.5, 1.0, 2.0]
print Vector(q.cartesian())
# [0.5, 1.0, 2.0]
# ignore w
print "q/2"
print repr(q / 2)
# Point([0.125, 0.25, 0.5, 0.5])
print "q*2"
print repr(q * 2)
# Point([0.5, 1.0, 2.0, 0.5])
print repr(q + 2) # cartesianize is done by Vector add
# Point([2.5, 3.0, 4.0])
print repr(q)
# Point([0.25, 0.5, 1.0, 0.5])
print repr(p + Vector(1, 2, 3))
# Point([2.0, 4.0, 6.0])
print repr(q + Vector(1, 2, 3))
# Point([1.5, 3.0, 5.0])
print repr(q.cartesian() + Vector(1, 2, 3))
# Point([1.5, 3.0, 5.0])
print repr(p - q)
# Vector([0.5, 1.0, 1.0])
print repr(p - q.cartesian())
# Vector([0.5, 1.0, 1.0])
print repr(q - p)
# Vector([-0.5, -1.0, -1.0])
print repr(p - (p - q))
# Point([0.5, 1.0, 2.0])
print repr(Vector(p) * Vector(q))
# 4.25
print repr(p * q)
# 4.25
print repr(p / q) # need explicit homogenize as division not handled by api
# Point([4.0, 4.0, 3.0, 2.0]) TODO : what do we want here ?
# Vector([2.0, 2.0, 1.5])
# additionnal methods
print "p = Point(x=1, y=2, z=3)"
p = Point(x=1, y=2, z=3)
print p.length()
# 3.74165738677
print p[:1].length()
# 1.0
print p[:2].length()
# 2.2360679775
print p[:3].length()
# 3.74165738677
p = Point(1.0, 0.0, 0.0)
q = Point(0.707, 0.0, -0.707)
print repr(p)
# Point([1.0, 0.0, 0.0, 1.0])
print repr(q)
# Point([0.707, 0.0, -0.707, 1.0])
print repr(q - p)
# Vector([-0.293, 0.0, -0.707])
print repr(axis(Point.origin, p, q))
# Vector([-0.0, 0.707, 0.0])
print repr(Point.origin.axis(p, q))
# Vector([-0.0, 0.707, 0.0])
print repr(Point.origin.axis(q, p))
# Vector([0.0, -0.707, 0.0])
print angle(Point.origin, p, q)
# 0.785398163397
print angle(Point.origin, q, p)
# 0.785398163397
print Point.origin.angle(p, q)
# 0.785398163397
print p.distanceTo(q)
# 0.765309087885
print (q - p).length()
# 0.765309087885
print cotan(Point.origin, p, q)
# 1.0
# obviously True
print planar(Point.origin, p, q)
# True
r = center(Point.origin, p, q)
print repr(r)
# Point([0.569, 0.0, -0.235666666667, 1.0])
print planar(Point.origin, p, q, r)
# True
print planar(Point.origin, p, q, r + Vector(0.0, 0.1, 0.0))
# False
print bWeights(r, Point.origin, p, q)
# (0.33333333333333337, 0.33333333333333331, 0.33333333333333343)
p = Point([0.33333, 0.66666, 1.333333, 0.33333])
print repr(round(p, 3))
# Point([0.333, 0.667, 1.333, 0.333])
print "end tests Point"
def _testMColor():
print "Color class", dir(Color)
print hasattr(Color, 'data')
c = Color()
print repr(c)
# Color([0.0, 0.0, 0.0, 1.0])
print "Color instance", dir(c)
print hasattr(c, 'data')
print repr(c.data)
# Color([0.0, 0.0, 0.0, 1.0])
c = Color(_api.MColor())
print repr(c)
# Color([0.0, 0.0, 0.0, 1.0])
    # using the api convention, a single value would mean alpha,
    # instead of the VectorN convention of filling all components with the value
    # (which yields Color([0.5, 0.5, 0.5, 0.5]) as shown below);
    # the api convention would break coerce behavior for Color
print "c = Color(0.5)"
c = Color(0.5)
print repr(c)
# Color([0.5, 0.5, 0.5, 0.5])
print "c = round(Color(128, quantize=255), 2)"
c = Color(128, quantize=255)
print repr(c)
# Color([0.501999974251, 0.501999974251, 0.501999974251, 0.501999974251])
c = Color(255, 128, b=64, a=32, quantize=255)
print repr(c)
# Color([1.0 0.501999974251 0.250999987125 0.125490196078])
print "c = Color(1, 1, 1)"
c = Color(1, 1, 1)
print repr(c)
# Color([1.0, 1.0, 1.0, 1.0])
print "c = round(Color(255, 0, 255, g=128, quantize=255, mode='rgb'), 2)"
c = round(Color(255, 0, 255, g=128, quantize=255, mode='rgb'), 2)
print repr(c)
# Color([1.0, 0.5, 1.0, 1.0])
print "c = round(Color(255, b=128, quantize=255, mode='rgb'), 2)"
c = round(Color(255, b=128, quantize=255, mode='rgb'), 2)
print repr(c)
# Color([1.0, 1.0, 0.5, 1.0])
print "c = Color(1, 0.5, 2, 0.5)"
c = Color(1, 0.5, 2, 0.5)
print repr(c)
# Color([1.0, 0.5, 2.0, 0.5])
print "c = Color(0, 65535, 65535, quantize=65535, mode='hsv')"
c = Color(0, 65535, 65535, quantize=65535, mode='hsv')
print repr(c)
# Color([1.0, 0.0, 0.0, 1.0])
print "c.rgb"
print repr(c.rgb)
# (1.0, 0.0, 0.0)
print "c.hsv"
print repr(c.hsv)
# (0.0, 1.0, 1.0)
d = Color(c, v=0.5, mode='hsv')
print repr(d)
# Color([0.5, 0.0, 0.0, 1.0])
print repr(d.hsv)
# (0.0, 1.0, 0.5)
print "c = Color(Color.blue, v=0.5)"
c = Color(Color.blue, v=0.5)
print repr(c)
# Color([0.0, 0.0, 0.5, 1.0])
print "c.hsv"
print c.hsv
# (0.66666666666666663, 1.0, 0.5)
c.r = 1.0
print repr(c)
# Color([1.0, 0.0, 0.5, 1.0])
print "c.hsv"
print c.hsv
# (0.91666666666666663, 1.0, 1.0)
print "c = Color(1, 0.5, 2, 0.5).clamp()"
c = Color(1, 0.5, 2, 0.5).clamp()
print repr(c)
# Color([1.0, 0.5, 1.0, 0.5])
print c.hsv
# (0.83333333333333337, 0.5, 1.0)
print "Color(c, v=0.5)"
d = Color(c, v=0.5)
print repr(d)
# Color([0.5, 0.25, 0.5, 0.5])
print "d.hsv"
print d.hsv
# (0.83333333333333337, 0.5, 0.5)
print "c = Color(0.0, 0.5, 1.0, 0.5)"
c = Color(0.0, 0.5, 1.0, 0.5)
print repr(c)
    # Color([0.0, 0.5, 1.0, 0.5])
print "d = c.gamma(2.0)"
d = c.gamma(2.0)
print repr(d)
# Color([0.0, 0.25, 1.0, 0.5])
print "c = Color.red.blend(Color.blue, 0.5)"
c = Color.red.blend(Color.blue, 0.5)
print repr(c)
# Color([0.5, 0.0, 0.5, 1.0])
print c.hsv
# (0.83333333333333337, 1.0, 0.5)
c = Color.red.hsvblend(Color.blue, 0.5)
print repr(c)
# Color([1.0, 0.0, 1.0, 1.0])
print c.hsv
# (0.83333333333333337, 1.0, 1.0)
print "c = Color(0.25, 0.5, 0.75, 0.5)"
c = Color(0.25, 0.5, 0.75, 0.5)
print repr(c)
# Color([0.25, 0.5, 0.75, 0.5])
print "d = Color.black"
d = Color.black
print repr(d)
# Color([0.0, 0.0, 0.0, 1.0])
print "c.over(d)"
print repr(c.over(d))
# Color([0.125, 0.25, 0.375, 1.0])
print "d.over(c)"
print repr(d.over(c))
# Color([0.0, 0.0, 0.0, 0.5])
print "c.premult()"
print repr(c.premult())
# Color([0.125, 0.25, 0.375, 1.0])
    # inherited from Vector
print "c = Color(0.25, 0.5, 1.0, 1.0)"
c = Color(0.25, 0.5, 1.0, 1.0)
print repr(c)
# Color([0.25, 0.5, 1.0, 1.0])
print "d = Color(2.0, 1.0, 0.5, 0.25)"
d = Color(2.0, 1.0, 0.5, 0.25)
print repr(d)
# Color([2.0, 1.0, 0.5, 0.25])
print "-c"
print repr(-c)
# Color([-0.25, -0.5, -1.0, 1.0])
print "e = c*d"
e = c * d
print repr(e)
# Color([0.5, 0.5, 0.5, 0.25])
print "e + 2"
print repr(e + 2)
# Color([2.5, 2.5, 2.5, 0.25])
print "e * 2.0" # mult by scalar float is defined in api for colors and also multiplies alpha
print repr(e * 2.0)
# Color([1.0, 1.0, 1.0, 0.5])
print "e / 2.0" # as is divide, that ignores alpha now for some reason
print repr(e / 2.0)
# Color([0.25, 0.25, 0.25, 0.25])
print "e+Vector(1, 2, 3)"
print repr(e + Vector(1, 2, 3))
# Color([1.5, 2.5, 3.5, 0.25])
    # how should operations on colors be handled?
    # this behaves like the API, but does it make any sense
    # for colors as currently implemented?
print "c+c"
print repr(c + c)
# Color([0.5, 1.0, 2.0, 1.0])
print "c+d"
print repr(c + d)
# Color([2.25, 1.5, 1.5, 1.0])
print "d-c"
print repr(d - c)
# Color([1.75, 0.5, -0.5, 0.25])
print "end tests Color"
def _testMMatrix():
print "Matrix class", dir(Matrix)
m = Matrix()
print m.formated()
#[[1.0, 0.0, 0.0, 0.0],
# [0.0, 1.0, 0.0, 0.0],
# [0.0, 0.0, 1.0, 0.0],
# [0.0, 0.0, 0.0, 1.0]]
print m[0, 0]
# 1.0
print repr(m[0:2, 0:3])
# [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
print m(0, 0)
# 1.0
print "Matrix instance:", dir(m)
print Matrix.__readonly__
print Matrix.__slots__
print Matrix.shape
print Matrix.ndim
print Matrix.size
print m.shape
print m.ndim
print m.size
    # should fail, Matrix shape is fixed
    try:
        m.shape = (4, 4)
        m.shape = 2
    except:
        print "will raise an error: Matrix shape is read-only"
print dir(Space)
m = Matrix.identity
# inherits from MatrixN --> Array
print isinstance(m, MatrixN)
# True
print isinstance(m, Array)
# True
# as well as _api.Matrix
print isinstance(m, _api.MMatrix)
# True
# accepted directly by API methods
n = _api.MMatrix()
m = n.setToProduct(m, m)
print repr(m)
print repr(n)
# inits
m = Matrix(range(16))
print m.formated()
#[[0.0, 1.0, 2.0, 3.0],
# [4.0, 5.0, 6.0, 7.0],
# [8.0, 9.0, 10.0, 11.0],
# [12.0, 13.0, 14.0, 15.0]]
M = Array(range(16), shape=(8, 2))
m = Matrix(M)
print m.formated()
#[[0.0, 1.0, 2.0, 3.0],
# [4.0, 5.0, 6.0, 7.0],
# [8.0, 9.0, 10.0, 11.0],
# [12.0, 13.0, 14.0, 15.0]]
M = MatrixN(range(9), shape=(3, 3))
m = Matrix(M)
print m.formated()
#[[0.0, 1.0, 2.0, 0.0],
# [3.0, 4.0, 5.0, 0.0],
# [6.0, 7.0, 8.0, 0.0],
# [0.0, 0.0, 0.0, 1.0]]
# inherits from MatrixN --> Array
print isinstance(m, MatrixN)
# True
print isinstance(m, Array)
# True
# as well as _api.Matrix
print isinstance(m, _api.MMatrix)
# True
# accepted directly by API methods
n = _api.MMatrix()
m = n.setToProduct(m, m)
print repr(m)
print repr(n)
t = _api.MTransformationMatrix()
t.setTranslation(Vector(1, 2, 3), _api.MSpace.kWorld)
m = Matrix(t)
print m.formated()
#[[1.0, 0.0, 0.0, 0.0],
# [0.0, 1.0, 0.0, 0.0],
# [0.0, 0.0, 1.0, 0.0],
# [1.0, 2.0, 3.0, 1.0]]
m = Matrix(m, a30=10)
print m.formated()
#[[1.0, 0.0, 0.0, 0.0],
# [0.0, 1.0, 0.0, 0.0],
# [0.0, 0.0, 1.0, 0.0],
# [10.0, 2.0, 3.0, 1.0]]
# should fail
print "Matrix(range(20)"
try:
m = Matrix(range(20))
print m.formated()
except:
print "will raise ValueError: cannot initialize a Matrix of shape (4, 4) from (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19), some information would be lost, use an explicit resize or trim"
m = Matrix.identity
M = m.trimmed(shape=(3, 3))
print repr(M)
# MatrixN([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
print M.formated()
#[[1.0, 0.0, 0.0],
# [0.0, 1.0, 0.0],
# [0.0, 0.0, 1.0]]
try:
m.trim(shape=(3, 3))
except:
print "will raise TypeError: new shape (3, 3) is not compatible with class Matrix"
print m.nrow
# 4
print m.ncol
# 4
# should fail
try:
m.nrow = 3
except:
print "will raise TypeError: new shape (3, 4) is not compatible with class Matrix"
print list(m.row)
# [Array([1.0, 0.0, 0.0, 0.0]), Array([0.0, 1.0, 0.0, 0.0]), Array([0.0, 0.0, 1.0, 0.0]), Array([0.0, 0.0, 0.0, 1.0])]
print list(m.col)
# [Array([1.0, 0.0, 0.0, 0.0]), Array([0.0, 1.0, 0.0, 0.0]), Array([0.0, 0.0, 1.0, 0.0]), Array([0.0, 0.0, 0.0, 1.0])]
m = Matrix(MatrixN(range(9), shape=(3, 3)).trimmed(shape=(4, 4), value=10))
print m.formated()
#[[0.0, 1.0, 2.0, 10.0],
# [3.0, 4.0, 5.0, 10.0],
# [6.0, 7.0, 8.0, 10.0],
# [10.0, 10.0, 10.0, 10.0]]
print m.get()
# ((0.0, 1.0, 2.0, 10.0), (3.0, 4.0, 5.0, 10.0), (6.0, 7.0, 8.0, 10.0), (10.0, 10.0, 10.0, 10.0))
print repr(m[0])
# [0.0, 1.0, 2.0, 10.0]
m[0] = 10
print m.formated()
#[[10.0, 10.0, 10.0, 10.0],
# [3.0, 4.0, 5.0, 10.0],
# [6.0, 7.0, 8.0, 10.0],
# [10.0, 10.0, 10.0, 10.0]]
print (10 in m)
# True
print list(m)
# [Array([10.0, 10.0, 10.0, 10.0]), Array([3.0, 4.0, 5.0, 10.0]), Array([6.0, 7.0, 8.0, 10.0]), Array([10.0, 10.0, 10.0, 10.0])]
print list(m.flat)
# [10.0, 10.0, 10.0, 10.0, 3.0, 4.0, 5.0, 10.0, 6.0, 7.0, 8.0, 10.0, 10.0, 10.0, 10.0, 10.0]
u = Vector.xAxis
v = Vector.yAxis
print Vector.xAxis
print str(Vector.xAxis)
print unicode(Vector.xAxis)
print repr(Vector.xAxis)
print "u = Vector.xAxis:"
print repr(u)
# trans matrix : t: 1, 2, 3, r: 45, 90, 30, s: 0.5, 1.0, 2.0
m = Matrix([0.0, 4.1633363423443383e-17, -0.5, 0.0, 0.25881904510252079, 0.96592582628906831, 1.3877787807814459e-16, 0.0, 1.9318516525781366, -0.51763809020504159, 0.0, 0.0, 1.0, 2.0, 3.0, 1.0])
print "m:"
print round(m, 2).formated()
#[[0.0, 0.0, -0.5, 0.0],
# [0.26, 0.97, 0.0, 0.0],
# [1.93, -0.52, 0.0, 0.0],
# [1.0, 2.0, 3.0, 1.0]]
x = Vector.xAxis
y = Vector.yAxis
z = Vector.zAxis
u = Vector(1, 2, 3)
print "u:"
print repr(u)
# Vector([1, 2, 3])
print "u*m"
print repr(u * m)
# Vector([6.31319304794, 0.378937381963, -0.5])
print "m*u"
print repr(m * u)
# Vector([-1.5, 2.19067069768, 0.896575472168])
p = Point(1, 10, 100, 1)
print "p:"
print repr(p)
# Point([1.0, 10.0, 100.0, 1.0])
print "p*m"
print repr(p * m)
# Point([196.773355709, -40.1045507576, 2.5, 1.0])
print "m*p"
print repr(m * p)
# Point([-50.0, 9.91807730799, -3.24452924947, 322.0])
print "v = [1, 2, 3]*m"
v = VectorN([1, 2, 3]) * m
print repr(v)
# VectorN([6.31319304794, 0.378937381963, -0.5])
print "v = [1, 2, 3, 1]*m"
v = VectorN([1, 2, 3, 1]) * m
print repr(v)
# VectorN([7.31319304794, 2.37893738196, 2.5, 1.0])
# should fail
print "VectorN([1, 2, 3, 4, 5])*m"
try:
v = VectorN([1, 2, 3, 4, 5]) * m
except:
print "Will raise ValueError: vector of size 5 and matrix of shape (4, 4) are not conformable for a VectorN * MatrixN multiplication"
    # inherited
print "m = Matrix(range(1, 17))"
m = Matrix(range(1, 17))
print m.formated()
#[[1.0, 2.0, 3.0, 4.0],
# [5.0, 6.0, 7.0, 8.0],
# [9.0, 10.0, 11.0, 12.0],
# [13.0, 14.0, 15.0, 16.0]]
# element wise
print "[1, 10, 100]*m"
print repr([1, 10, 100] * m)
# Matrix([[1.0, 20.0, 300.0, 0.0], [5.0, 60.0, 700.0, 0.0], [9.0, 100.0, 1100.0, 0.0], [13.0, 140.0, 1500.0, 0.0]])
print "M = MatrixN(range(20), shape=(4, 5))"
M = MatrixN(range(1, 21), shape=(4, 5))
print M.formated()
#[[1, 2, 3, 4, 5],
# [6, 7, 8, 9, 10],
# [11, 12, 13, 14, 15],
# [16, 17, 18, 19, 20]]
print "m*M"
n = m * M
print (n).formated()
#[[110.0, 120.0, 130.0, 140.0, 150.0],
# [246.0, 272.0, 298.0, 324.0, 350.0],
# [382.0, 424.0, 466.0, 508.0, 550.0],
# [518.0, 576.0, 634.0, 692.0, 750.0]]
print util.clsname(n)
# MatrixN
print "m*2"
n = m * 2
print (n).formated()
#[[2.0, 4.0, 6.0, 8.0],
# [10.0, 12.0, 14.0, 16.0],
# [18.0, 20.0, 22.0, 24.0],
# [26.0, 28.0, 30.0, 32.0]]
print util.clsname(n)
# Matrix
print "2*m"
n = 2 * m
print (n).formated()
#[[2.0, 4.0, 6.0, 8.0],
# [10.0, 12.0, 14.0, 16.0],
# [18.0, 20.0, 22.0, 24.0],
# [26.0, 28.0, 30.0, 32.0]]
print util.clsname(n)
# Matrix
print "m+2"
n = m + 2
print (n).formated()
#[[3.0, 4.0, 5.0, 6.0],
# [7.0, 8.0, 9.0, 10.0],
# [11.0, 12.0, 13.0, 14.0],
# [15.0, 16.0, 17.0, 18.0]]
print util.clsname(n)
# Matrix
print "2+m"
n = 2 + m
print (n).formated()
#[[3.0, 4.0, 5.0, 6.0],
# [7.0, 8.0, 9.0, 10.0],
# [11.0, 12.0, 13.0, 14.0],
# [15.0, 16.0, 17.0, 18.0]]
print util.clsname(n)
# Matrix
try:
m.setToProduct(m, M)
except:
print """Will raise TypeError: cannot initialize a Matrix of shape (4, 4) from (Array([0, 1, 2, 3, 4]), Array([5, 6, 7, 8, 9]), Array([10, 11, 12, 13, 14]), Array([15, 16, 17, 18, 19])) of shape (4, 5),
as it would truncate data or reduce the number of dimensions"""
print m.isEquivalent(m * M)
# False
# trans matrix : t: 1, 2, 3, r: 45, 90, 30, s: 0.5, 1.0, 2.0
m = Matrix([0.0, 4.1633363423443383e-17, -0.5, 0.0, 0.25881904510252079, 0.96592582628906831, 1.3877787807814459e-16, 0.0, 1.9318516525781366, -0.51763809020504159, 0.0, 0.0, 1.0, 2.0, 3.0, 1.0])
print "m:"
print round(m, 2).formated()
#[[0.0, 0.0, -0.5, 0.0],
# [0.26, 0.97, 0.0, 0.0],
# [1.93, -0.52, 0.0, 0.0],
# [1.0, 2.0, 3.0, 1.0]]
print "m.transpose():"
print round(m.transpose(), 2).formated()
#[[0.0, 0.26, 1.93, 1.0],
# [0.0, 0.97, -0.52, 2.0],
# [-0.5, 0.0, 0.0, 3.0],
# [0.0, 0.0, 0.0, 1.0]]
print "m.isSingular():"
print m.isSingular()
# False
print "m.inverse():"
print round(m.inverse(), 2).formated()
#[[0.0, 0.26, 0.48, 0.0],
# [0.0, 0.97, -0.13, 0.0],
# [-2.0, 0.0, 0.0, 0.0],
# [6.0, -2.19, -0.22, 1.0]]
print "m.adjoint():"
print round(m.adjoint(), 2).formated()
#[[0.0, 0.26, 0.48, 0.0],
# [0.0, 0.97, -0.13, 0.0],
# [-2.0, 0.0, -0.0, 0.0],
# [6.0, -2.19, -0.22, 1.0]]
print "m.adjugate():"
print round(m.adjugate(), 2).formated()
#[[0.0, 0.26, 0.48, 0.0],
# [0.0, 0.97, -0.13, 0.0],
# [-2.0, 0.0, -0.0, 0.0],
# [6.0, -2.19, -0.22, 1.0]]
print "m.homogenize():"
print round(m.homogenize(), 2).formated()
#[[0.0, 0.0, -1.0, 0.0],
# [0.26, 0.97, 0.0, 0.0],
# [0.97, -0.26, -0.0, 0.0],
# [1.0, 2.0, 3.0, 1.0]]
print "m.det():"
print m.det()
# 1.0
print "m.det4x4():"
print m.det4x4()
# 1.0
print "m.det3x3():"
print m.det3x3()
# 1.0
print "m.weighted(0.5):"
print round(m.weighted(0.5), 2).formated()
#[[0.53, 0.0, -0.53, 0.0],
# [0.09, 0.99, 0.09, 0.0],
# [1.05, -0.2, 1.05, 0.0],
# [0.5, 1.0, 1.5, 1.0]]
print "m.blend(Matrix.identity, 0.5):"
print round(m.blend(Matrix.identity, 0.5), 2).formated()
#[[0.53, 0.0, -0.53, 0.0],
# [0.09, 0.99, 0.09, 0.0],
# [1.05, -0.2, 1.05, 0.0],
# [0.5, 1.0, 1.5, 1.0]]
print "end tests Matrix"
def _testMTransformationMatrix():
q = Quaternion()
print repr(q)
# Quaternion([0.0, 0.0, 0.0, 1.0])
q = Quaternion(1, 2, 3, 0.5)
print repr(q)
# Quaternion([1.0, 2.0, 3.0, 0.5])
q = Quaternion(0.785, 0.785, 0.785, "xyz")
print repr(q)
# Quaternion([0.191357439088, 0.461717715523, 0.191357439088, 0.844737481223])
m = Matrix()
m.rotate = q
print repr(m)
# Matrix([[0.500398163355, 0.499999841466, -0.706825181105, 0.0], [-0.146587362969, 0.853529322022, 0.499999841466, 0.0], [0.853295859083, -0.146587362969, 0.500398163355, 0.0], [0.0, 0.0, 0.0, 1.0]])
print "TransformationMatrix class", dir(TransformationMatrix)
m = TransformationMatrix()
print m.formated()
#[[1.0, 0.0, 0.0, 0.0],
# [0.0, 1.0, 0.0, 0.0],
# [0.0, 0.0, 1.0, 0.0],
# [0.0, 0.0, 0.0, 1.0]]
print m[0, 0]
# 1.0
print m[0:2, 0:3]
# [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
print "TransformationMatrix instance:", dir(m)
print TransformationMatrix.__readonly__
print TransformationMatrix.__slots__
print TransformationMatrix.shape
print TransformationMatrix.ndim
print TransformationMatrix.size
print m.shape
print m.ndim
print m.size
    # should fail, TransformationMatrix shape is fixed
    try:
        m.shape = (4, 4)
        m.shape = 2
    except:
        print "will raise an error: TransformationMatrix shape is read-only"
print dir(Space)
m = TransformationMatrix.identity
# inherits from MatrixN --> Array
print isinstance(m, MatrixN)
# True
print isinstance(m, Array)
# True
# as well as _api.TransformationMatrix and _api.Matrix
print isinstance(m, _api.MTransformationMatrix)
# True
print isinstance(m, _api.MMatrix)
# True
# accepted directly by API methods
n = _api.MMatrix()
n = n.setToProduct(m, m)
print repr(n)
n = _api.MTransformationMatrix()
n = n.assign(m)
print repr(n)
m = TransformationMatrix.identity
m.rotation = Quaternion()
print repr(m)
print m.formated()
n = TransformationMatrix.identity
n.translation = Vector(1, 2, 3)
print n.formated()
print repr(n)
o = m * n
print repr(o)
print o.formated()
print "end tests TransformationMatrix"
if __name__ == '__main__':
print Distance.getInternalUnit()
# centimeters
print Distance.getUIUnit()
# centimeters
Distance.setUIUnit('meters')
print Distance.getUIUnit()
# meters
d = Distance(12)
print d.unit
# meters
    print d
    # 1200.0
    print repr(d)
    # Distance(12.0, unit='meters')
    print d.asUnit()
    # 12.0
    print d.asInternalUnit()
    # 1200.0
import doctest
doctest.testmod(verbose=True)
_testMVector()
_testMPoint()
_testMColor()
_testMMatrix()
_testMTransformationMatrix()
|
bsd-3-clause
|
sunqm/pyscf
|
examples/ao2mo/10-diff_orbs_for_ijkl.py
|
2
|
2084
|
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import tempfile
import numpy
import h5py
from pyscf import gto, scf, ao2mo
'''
Integral transformation for four different orbitals
'''
mol = gto.Mole()
mol.build(
atom = [
["C", (-0.65830719, 0.61123287, -0.00800148)],
["C", ( 0.73685281, 0.61123287, -0.00800148)],
["C", ( 1.43439081, 1.81898387, -0.00800148)],
["C", ( 0.73673681, 3.02749287, -0.00920048)],
["C", (-0.65808819, 3.02741487, -0.00967948)],
["C", (-1.35568919, 1.81920887, -0.00868348)],
["H", (-1.20806619, -0.34108413, -0.00755148)],
["H", ( 1.28636081, -0.34128013, -0.00668648)],
["H", ( 2.53407081, 1.81906387, -0.00736748)],
["H", ( 1.28693681, 3.97963587, -0.00925948)],
["H", (-1.20821019, 3.97969587, -0.01063248)],
["H", (-2.45529319, 1.81939187, -0.00886348)],],
basis = 'ccpvtz'
)
mf = scf.RHF(mol)
mf.conv_tol = 1e-8
e = mf.kernel()
print('E = %.15g, ref -230.776765415' % e)
#
# Given four sets of MOs, compute the MO integrals and save them in the dataset "mp2_bz"
#
eritmp = tempfile.NamedTemporaryFile()
nocc = mol.nelectron // 2
nvir = len(mf.mo_energy) - nocc
co = mf.mo_coeff[:,:nocc]
cv = mf.mo_coeff[:,nocc:]
orbs = (co, cv, co, cv)
# Depending on your hardware and BLAS library, transforming the integrals
# takes about 1 min on a 3 GHz i5 CPU with the MKL library
ao2mo.general(mol, orbs, eritmp.name, dataname='mp2_bz')#, verbose=5)
eia = mf.mo_energy[:nocc,None] - mf.mo_energy[None,nocc:]
f = h5py.File(eritmp.name, 'r')
eri = f['mp2_bz']
print('Note the shape of the transformed integrals (ij|kl) is %s.' % str(eri.shape))
print("It's a 2D array: the first index for compressed ij, the second index for compressed kl")
emp2 = 0
for i in range(nocc):
dajb = eia[i].reshape(-1,1) + eia.reshape(1,-1)
gi = numpy.array(eri[i*nvir:(i+1)*nvir])
t2 = gi.flatten() / dajb.flatten()
gi = gi.reshape(nvir,nocc,nvir)
theta = gi*2 - gi.transpose(2,1,0)
emp2 += numpy.dot(t2, theta.flatten())
print('E_MP2 = %.15g, ref = -1.0435476768' % emp2)
f.close()
|
apache-2.0
|
akaminsky/ghost_blog
|
node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/pygments/formatters/other.py
|
363
|
3811
|
# -*- coding: utf-8 -*-
"""
pygments.formatters.other
~~~~~~~~~~~~~~~~~~~~~~~~~
Other formatters: NullFormatter, RawTokenFormatter.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import OptionError, get_choice_opt, b
from pygments.token import Token
from pygments.console import colorize
__all__ = ['NullFormatter', 'RawTokenFormatter']
class NullFormatter(Formatter):
"""
Output the text unchanged without any formatting.
"""
name = 'Text only'
aliases = ['text', 'null']
filenames = ['*.txt']
def format(self, tokensource, outfile):
enc = self.encoding
for ttype, value in tokensource:
if enc:
outfile.write(value.encode(enc))
else:
outfile.write(value)
class RawTokenFormatter(Formatter):
r"""
Format tokens as a raw representation for storing token streams.
The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later
be converted to a token stream with the `RawTokenLexer`, described in the
`lexer list <lexers.txt>`_.
Only two options are accepted:
`compress`
If set to ``'gz'`` or ``'bz2'``, compress the output with the given
compression algorithm after encoding (default: ``''``).
`error_color`
If set to a color name, highlight error tokens using that color. If
set but with no value, defaults to ``'red'``.
*New in Pygments 0.11.*
"""
name = 'Raw tokens'
aliases = ['raw', 'tokens']
filenames = ['*.raw']
unicodeoutput = False
def __init__(self, **options):
Formatter.__init__(self, **options)
if self.encoding:
raise OptionError('the raw formatter does not support the '
'encoding option')
self.encoding = 'ascii' # let pygments.format() do the right thing
self.compress = get_choice_opt(options, 'compress',
['', 'none', 'gz', 'bz2'], '')
self.error_color = options.get('error_color', None)
if self.error_color is True:
self.error_color = 'red'
if self.error_color is not None:
try:
colorize(self.error_color, '')
except KeyError:
raise ValueError("Invalid color %r specified" %
self.error_color)
def format(self, tokensource, outfile):
try:
outfile.write(b(''))
except TypeError:
raise TypeError('The raw tokens formatter needs a binary '
'output file')
if self.compress == 'gz':
import gzip
outfile = gzip.GzipFile('', 'wb', 9, outfile)
def write(text):
outfile.write(text.encode())
flush = outfile.flush
elif self.compress == 'bz2':
import bz2
compressor = bz2.BZ2Compressor(9)
def write(text):
outfile.write(compressor.compress(text.encode()))
def flush():
outfile.write(compressor.flush())
outfile.flush()
else:
def write(text):
outfile.write(text.encode())
flush = outfile.flush
if self.error_color:
for ttype, value in tokensource:
line = "%s\t%r\n" % (ttype, value)
if ttype is Token.Error:
write(colorize(self.error_color, line))
else:
write(line)
else:
for ttype, value in tokensource:
write("%s\t%r\n" % (ttype, value))
flush()
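# A minimal usage sketch (added illustration, not part of the original
# module); assumes a full pygments install so that a lexer is importable:
if __name__ == '__main__':
    import io
    from pygments.lexers import PythonLexer
    buf = io.BytesIO()
    RawTokenFormatter().format(PythonLexer().get_tokens('x = 1\n'), buf)
    print(buf.getvalue())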
|
mit
|
Mixser/django
|
django/conf/locale/en_AU/formats.py
|
504
|
2117
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j M Y' # '25 Oct 2006'
TIME_FORMAT = 'P' # '2:30 p.m.'
DATETIME_FORMAT = 'j M Y, P' # '25 Oct 2006, 2:30 p.m.'
YEAR_MONTH_FORMAT = 'F Y' # 'October 2006'
MONTH_DAY_FORMAT = 'j F' # '25 October'
SHORT_DATE_FORMAT = 'd/m/Y' # '25/10/2006'
SHORT_DATETIME_FORMAT = 'd/m/Y P' # '25/10/2006 2:30 p.m.'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
# '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
# '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
# '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
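# A small usage sketch (added illustration; assumes a configured Django
# settings module with LANGUAGE_CODE = 'en-au' and USE_L10N = True):
# >>> import datetime
# >>> from django.utils import formats
# >>> formats.date_format(datetime.date(2006, 10, 25))
# '25 Oct 2006'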
|
bsd-3-clause
|
jordan-developer/pyOCNI
|
pyocni/TDD/Tests/queryInterface_Tests.py
|
2
|
5753
|
# Copyright 2010-2012 Institut Mines-Telecom
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jun 27, 2012
@author: Bilel Msekni
@contact: bilel.msekni@telecom-sudparis.eu
@author: Houssem Medhioub
@contact: houssem.medhioub@it-sudparis.eu
@organization: Institut Mines-Telecom - Telecom SudParis
@license: Apache License, Version 2.0
"""
from multiprocessing import Process
from unittest import TestLoader, TextTestRunner, TestCase
from pyocni.TDD.fake_Data.server_Mock import ocni_server
import pycurl
import time
import StringIO
from pyocni.TDD.fake_Data.initialize_fakeDB import init_fakeDB
import pyocni.TDD.fake_Data.categories as f_categories
import pyocni.pyocni_tools.config as config
def start_server():
ocni_server_instance = ocni_server()
ocni_server_instance.run_server()
class test_get(TestCase):
"""
Tests GET request scenarios
"""
def setUp(self):
"""
Set up the test environment
"""
self.p = Process(target=start_server)
self.p.start()
time.sleep(0.5)
#init_fakeDB()
time.sleep(0.5)
def tearDown(self):
#config.purge_PyOCNI_db()
self.p.terminate()
def test_get_categories(self):
"""
Get all kinds,mixins and actions
"""
storage = StringIO.StringIO()
c = pycurl.Curl()
c.setopt(c.URL, 'http://127.0.0.1:8090/-/')
        c.setopt(c.HTTPHEADER, ['Accept: application/occi+json', 'Content-Type: application/occi+json'])
c.setopt(c.VERBOSE, True)
c.setopt(c.POSTFIELDS,f_categories.kind)
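        # note (added): a body is sent even though the verb is GET; the
        # pyOCNI query interface presumably uses the kind description in the
        # payload as a category filter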
c.setopt(c.CUSTOMREQUEST, 'GET')
c.setopt(c.WRITEFUNCTION, storage.write)
c.perform()
content = storage.getvalue()
print " ========== Body content ==========\n " + content + " \n ==========\n"
class test_delete(TestCase):
"""
Tests DELETE request scenarios
"""
def setUp(self):
"""
Set up the test environment
"""
self.p = Process(target=start_server)
self.p.start()
time.sleep(0.5)
init_fakeDB()
time.sleep(0.5)
def tearDown(self):
#config.purge_PyOCNI_db()
self.p.terminate()
def test_delete_categories(self):
"""
delete a mixin
"""
storage = StringIO.StringIO()
c = pycurl.Curl()
c.setopt(c.CUSTOMREQUEST, 'DELETE')
c.setopt(c.URL, 'http://127.0.0.1:8090/-/')
c.setopt(c.HTTPHEADER, ['Accept: application/occi+json', 'Content-Type: application/occi+json'])
c.setopt(c.POSTFIELDS, f_categories.kind)
c.setopt(c.VERBOSE, True)
c.setopt(c.WRITEFUNCTION, storage.write)
c.perform()
content = storage.getvalue()
print " ===== Body content =====\n " + content + " ==========\n"
class test_post(TestCase):
"""
Tests POST request scenarios
"""
def setUp(self):
"""
Set up the test environment
"""
self.p = Process(target=start_server)
self.p.start()
time.sleep(0.5)
#init_fakeDB()
time.sleep(0.5)
def tearDown(self):
self.p.terminate()
#config.purge_PyOCNI_db()
def test_register_categories(self):
"""
        register kinds, mixins or actions
"""
c = pycurl.Curl()
storage = StringIO.StringIO()
c.setopt(c.URL, 'http://127.0.0.1:8090/-/')
c.setopt(c.HTTPHEADER, ['Content-Type: application/occi+json', 'Accept: application/occi+json'])
c.setopt(c.POSTFIELDS, f_categories.kind)
c.setopt(c.CUSTOMREQUEST, 'POST')
c.setopt(c.WRITEFUNCTION, storage.write)
c.perform()
content = storage.getvalue()
print " ===== Body content =====\n " + content + " ==========\n"
class test_put(TestCase):
"""
Tests PUT request scenarios
"""
def setUp(self):
"""
Set up the test environment
"""
self.p = Process(target=start_server)
self.p.start()
time.sleep(0.5)
#init_fakeDB()
time.sleep(0.5)
def tearDown(self):
self.p.terminate()
#config.purge_PyOCNI_db()
def test_update_categories(self):
"""
        update kinds, mixins or actions
"""
storage = StringIO.StringIO()
c = pycurl.Curl()
c.setopt(c.URL, 'http://127.0.0.1:8090/-/')
c.setopt(c.HTTPHEADER, ['Content-Type: application/occi+json', 'Accept: application/occi+json'])
c.setopt(c.CUSTOMREQUEST, 'PUT')
c.setopt(c.POSTFIELDS, f_categories.put_provider)
c.setopt(c.WRITEFUNCTION, storage.write)
c.perform()
content = storage.getvalue()
print " ===== Body content =====\n " + content + " ==========\n"
if __name__ == '__main__':
#Create the testing tools
loader = TestLoader()
runner = TextTestRunner(verbosity=2)
#Create the testing suites
get_suite = loader.loadTestsFromTestCase(test_get)
delete_suite = loader.loadTestsFromTestCase(test_delete)
post_suite = loader.loadTestsFromTestCase(test_post)
put_suite = loader.loadTestsFromTestCase(test_put)
#Run tests
runner.run(delete_suite)
|
apache-2.0
|
rmackay9/rmackay9-ardupilot
|
libraries/SITL/examples/Morse/rover.py
|
22
|
1997
|
'''
This is an example builder script that sets up a rover in Morse to
be driven by ArduPilot.
The rover has the basic set of sensors that ArduPilot needs
To start the simulation use this:
morse run rover.py
Then connect with ArduPilot like this:
sim_vehicle.py --model morse --console --map
This model assumes you will set up a steering/throttle rover
SERVO1_FUNCTION 26
SERVO3_FUNCTION 70
'''
from morse.builder import *
# use the Hummer
vehicle = Hummer()
vehicle.properties(Object = True, Graspable = False, Label = "Vehicle")
vehicle.translate(x=0.0, z=0.0)
# add a camera
camera = SemanticCamera(name="Camera")
camera.translate(x=0.2, y=0.3, z=0.9)
vehicle.append(camera)
camera.properties(cam_far=800)
camera.properties(Vertical_Flip=True)
# we could optionally stream the video to a port
#camera.add_stream('socket')
# add sensors needed for ArduPilot operation to a vehicle
pose = Pose()
vehicle.append(pose)
imu = IMU()
vehicle.append(imu)
gps = GPS()
gps.alter('UTM')
vehicle.append(gps)
velocity = Velocity()
vehicle.append(velocity)
# create a compound sensor of all of the individual sensors and stream it
all_sensors = CompoundSensor([imu, gps, velocity, pose])
all_sensors.add_stream('socket')
vehicle.append(all_sensors)
# make the vehicle controllable with steer and force
# this will be available on port 60001 by default
motion = SteerForce()
vehicle.append(motion)
motion.add_stream('socket')
# this would allow us to control the vehicle with a keyboard
# we don't enable it as it causes issues with sensor consistency
#keyboard = Keyboard()
#keyboard.properties(Speed=3.0)
#vehicle.append(keyboard)
# Environment
env = Environment('land-1/trees')
env.set_camera_location([10.0, -10.0, 10.0])
env.set_camera_rotation([1.0470, 0, 0.7854])
env.select_display_camera(camera)
env.set_camera_clip(clip_end=1000)
# startup at CMAC. A location is needed for the magnetometer
env.properties(longitude = 149.165230, latitude = -35.363261, altitude = 584.0)
|
gpl-3.0
|
petebachant/scipy
|
scipy/weave/examples/dict_sort.py
|
100
|
3235
|
# Borrowed from Alex Martelli's sort from Python cookbook using inlines
# 2x over fastest Python version -- again, maybe not worth the effort...
# Then again, 2x is 2x...
#
# C:\home\eric\wrk\scipy\weave\examples>python dict_sort.py
# Dict sort of 1000 items for 300 iterations:
# speed in python: 0.250999927521
# [0, 1, 2, 3, 4]
# speed in c: 0.110000014305
# speed up: 2.28
# [0, 1, 2, 3, 4]
# speed in c (scxx): 0.200000047684
# speed up: 1.25
# [0, 1, 2, 3, 4]
from __future__ import absolute_import, print_function
import sys
sys.path.insert(0,'..')
import inline_tools
def c_sort(adict):
assert(type(adict) is dict)
code = """
#line 24 "dict_sort.py"
py::list keys = adict.keys();
py::list items(keys.length());
keys.sort();
PyObject* item = NULL;
int N = keys.length();
for(int i = 0; i < N;i++)
{
item = PyList_GetItem(keys,i);
item = PyDict_GetItem(adict,item);
Py_XINCREF(item);
PyList_SetItem(items,i,item);
}
return_val = items;
"""
return inline_tools.inline(code,['adict'])
def c_sort2(adict):
assert(type(adict) is dict)
code = """
#line 44 "dict_sort.py"
py::list keys = adict.keys();
py::list items(keys.len());
keys.sort();
int N = keys.length();
for(int i = 0; i < N;i++)
{
items[i] = adict[int( keys[i] )];
}
return_val = items;
"""
return inline_tools.inline(code,['adict'],verbose=1)
# (IMHO) the simplest approach:
def sortedDictValues1(adict):
items = adict.items()
items.sort()
return [value for key, value in items]
# an alternative implementation, which
# happens to run a bit faster for large
# dictionaries on my machine:
def sortedDictValues2(adict):
keys = adict.keys()
keys.sort()
return [adict[key] for key in keys]
# a further slight speed-up on my box
# is to map a bound-method:
def sortedDictValues3(adict):
keys = adict.keys()
keys.sort()
return map(adict.get, keys)
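# quick sanity check of the pure-Python variants (added illustration):
assert sortedDictValues1({2: 'b', 1: 'a'}) == ['a', 'b']
assert sortedDictValues2({2: 'b', 1: 'a'}) == ['a', 'b']
assert sortedDictValues3({2: 'b', 1: 'a'}) == ['a', 'b']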
import time
def sort_compare(a,n):
print('Dict sort of %d items for %d iterations:' % (len(a),n))
t1 = time.time()
for i in range(n):
b = sortedDictValues3(a)
t2 = time.time()
py = (t2-t1)
print(' speed in python:', (t2 - t1))
print(b[:5])
b = c_sort(a)
t1 = time.time()
for i in range(n):
b = c_sort(a)
t2 = time.time()
print(' speed in c (Python API):',(t2 - t1))
print(' speed up: %3.2f' % (py/(t2-t1)))
print(b[:5])
b = c_sort2(a)
t1 = time.time()
for i in range(n):
b = c_sort2(a)
t2 = time.time()
print(' speed in c (scxx):',(t2 - t1))
print(' speed up: %3.2f' % (py/(t2-t1)))
print(b[:5])
def setup_dict(m):
" does insertion order matter?"
import random
a = range(m)
d = {}
for i in range(m):
key = random.choice(a)
a.remove(key)
d[key] = key
return d
if __name__ == "__main__":
m = 1000
a = setup_dict(m)
n = 3000
sort_compare(a,n)
|
bsd-3-clause
|
denisbalyko/checkio-solution
|
network-attack.py
|
1
|
1278
|
from operator import itemgetter
def capture(matrix):
    # matrix[i][i] holds the time needed to capture node i; a non-zero
    # off-diagonal entry marks a link between nodes. Spread from node 0,
    # always expanding from the cheapest captured node first (Dijkstra-style).
    queue, watched = [[0, 0]], [0]
    while len(queue):
        min_point = min(queue, key = itemgetter(1))
        queue.pop(queue.index(min_point))
        point, old_value = min_point
        for i, value in enumerate(matrix[point]):
            if value and not i == point and not i in watched:
                # accumulate capture time along the infection path
                matrix[i][i] = matrix[i][i] + matrix[point][point]
                queue.append([i, matrix[i][i]])
                watched.append(i)
    # the total attack time is the largest accumulated diagonal value
    return max(map(max, matrix))
def test_function():
# These "asserts" using only for self-checking and not necessary for auto-testing
assert capture([[0, 1, 0, 1, 0, 1],
[1, 8, 1, 0, 0, 0],
[0, 1, 2, 0, 0, 1],
[1, 0, 0, 1, 1, 0],
[0, 0, 0, 1, 3, 1],
[1, 0, 1, 0, 1, 2]]) == 8, "Base example"
assert capture([[0, 1, 0, 1, 0, 1],
[1, 1, 1, 0, 0, 0],
[0, 1, 2, 0, 0, 1],
[1, 0, 0, 1, 1, 0],
[0, 0, 0, 1, 3, 1],
[1, 0, 1, 0, 1, 2]]) == 4, "Low security"
assert capture([[0, 1, 1],
[1, 9, 1],
[1, 1, 9]]) == 9, "Small"
|
mit
|
Distrotech/intellij-community
|
python/lib/Lib/site-packages/django/contrib/gis/tests/__init__.py
|
229
|
4968
|
from django.conf import settings
from django.test.simple import build_suite, DjangoTestSuiteRunner
from django.utils import unittest
def run_tests(*args, **kwargs):
from django.test.simple import run_tests as base_run_tests
return base_run_tests(*args, **kwargs)
def run_gis_tests(test_labels, verbosity=1, interactive=True, failfast=False, extra_tests=None):
import warnings
warnings.warn(
'The run_gis_tests() test runner has been deprecated in favor of GeoDjangoTestSuiteRunner.',
DeprecationWarning
)
test_runner = GeoDjangoTestSuiteRunner(verbosity=verbosity, interactive=interactive, failfast=failfast)
return test_runner.run_tests(test_labels, extra_tests=extra_tests)
def geo_apps(namespace=True, runtests=False):
"""
Returns a list of GeoDjango test applications that reside in
`django.contrib.gis.tests` that can be used with the current
database and the spatial libraries that are installed.
"""
from django.db import connection
from django.contrib.gis.geos import GEOS_PREPARE
from django.contrib.gis.gdal import HAS_GDAL
apps = ['geoapp', 'relatedapp']
# No distance queries on MySQL.
if not connection.ops.mysql:
apps.append('distapp')
# Test geography support with PostGIS 1.5+.
if connection.ops.postgis and connection.ops.geography:
apps.append('geogapp')
# The following GeoDjango test apps depend on GDAL support.
if HAS_GDAL:
# 3D apps use LayerMapping, which uses GDAL.
if connection.ops.postgis and GEOS_PREPARE:
apps.append('geo3d')
apps.append('layermap')
if runtests:
return [('django.contrib.gis.tests', app) for app in apps]
elif namespace:
return ['django.contrib.gis.tests.%s' % app
for app in apps]
else:
return apps
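# added illustration: with runtests=True the labels come back as tuples,
# e.g. [('django.contrib.gis.tests', 'geoapp'), ...], the exact list
# depending on the database backend and installed spatial libraries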
def geodjango_suite(apps=True):
"""
Returns a TestSuite consisting only of GeoDjango tests that can be run.
"""
import sys
from django.db.models import get_app
suite = unittest.TestSuite()
# Adding the GEOS tests.
from django.contrib.gis.geos import tests as geos_tests
suite.addTest(geos_tests.suite())
    # Adding the measurement tests.
from django.contrib.gis.tests import test_measure
suite.addTest(test_measure.suite())
# Adding GDAL tests, and any test suite that depends on GDAL, to the
# suite if GDAL is available.
from django.contrib.gis.gdal import HAS_GDAL
if HAS_GDAL:
from django.contrib.gis.gdal import tests as gdal_tests
suite.addTest(gdal_tests.suite())
from django.contrib.gis.tests import test_spatialrefsys, test_geoforms
suite.addTest(test_spatialrefsys.suite())
suite.addTest(test_geoforms.suite())
else:
sys.stderr.write('GDAL not available - no tests requiring GDAL will be run.\n')
# Add GeoIP tests to the suite, if the library and data is available.
from django.contrib.gis.utils import HAS_GEOIP
if HAS_GEOIP and hasattr(settings, 'GEOIP_PATH'):
from django.contrib.gis.tests import test_geoip
suite.addTest(test_geoip.suite())
# Finally, adding the suites for each of the GeoDjango test apps.
if apps:
for app_name in geo_apps(namespace=False):
suite.addTest(build_suite(get_app(app_name)))
return suite
class GeoDjangoTestSuiteRunner(DjangoTestSuiteRunner):
def setup_test_environment(self, **kwargs):
super(GeoDjangoTestSuiteRunner, self).setup_test_environment(**kwargs)
# Saving original values of INSTALLED_APPS, ROOT_URLCONF, and SITE_ID.
self.old_installed = getattr(settings, 'INSTALLED_APPS', None)
self.old_root_urlconf = getattr(settings, 'ROOT_URLCONF', '')
self.old_site_id = getattr(settings, 'SITE_ID', None)
# Constructing the new INSTALLED_APPS, and including applications
# within the GeoDjango test namespace.
new_installed = ['django.contrib.sites',
'django.contrib.sitemaps',
'django.contrib.gis',
]
# Calling out to `geo_apps` to get GeoDjango applications supported
# for testing.
new_installed.extend(geo_apps())
settings.INSTALLED_APPS = new_installed
# SITE_ID needs to be set
settings.SITE_ID = 1
# ROOT_URLCONF needs to be set, else `AttributeErrors` are raised
# when TestCases are torn down that have `urls` defined.
settings.ROOT_URLCONF = ''
def teardown_test_environment(self, **kwargs):
super(GeoDjangoTestSuiteRunner, self).teardown_test_environment(**kwargs)
settings.INSTALLED_APPS = self.old_installed
settings.ROOT_URLCONF = self.old_root_urlconf
settings.SITE_ID = self.old_site_id
def build_suite(self, test_labels, extra_tests=None, **kwargs):
return geodjango_suite()
|
apache-2.0
|
k3nnyfr/s2a_fr-nsis
|
s2a/Python/Lib/ctypes/wintypes.py
|
265
|
5349
|
######################################################################
# This file should be kept compatible with Python 2.3, see PEP 291. #
######################################################################
# The most useful windows datatypes
from ctypes import *
BYTE = c_byte
WORD = c_ushort
DWORD = c_ulong
WCHAR = c_wchar
UINT = c_uint
INT = c_int
DOUBLE = c_double
FLOAT = c_float
BOOLEAN = BYTE
BOOL = c_long
from ctypes import _SimpleCData
class VARIANT_BOOL(_SimpleCData):
_type_ = "v"
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.value)
ULONG = c_ulong
LONG = c_long
USHORT = c_ushort
SHORT = c_short
# in the windows header files, these are structures.
_LARGE_INTEGER = LARGE_INTEGER = c_longlong
_ULARGE_INTEGER = ULARGE_INTEGER = c_ulonglong
LPCOLESTR = LPOLESTR = OLESTR = c_wchar_p
LPCWSTR = LPWSTR = c_wchar_p
LPCSTR = LPSTR = c_char_p
LPCVOID = LPVOID = c_void_p
# WPARAM is defined as UINT_PTR (unsigned type)
# LPARAM is defined as LONG_PTR (signed type)
if sizeof(c_long) == sizeof(c_void_p):
WPARAM = c_ulong
LPARAM = c_long
elif sizeof(c_longlong) == sizeof(c_void_p):
WPARAM = c_ulonglong
LPARAM = c_longlong
ATOM = WORD
LANGID = WORD
COLORREF = DWORD
LGRPID = DWORD
LCTYPE = DWORD
LCID = DWORD
################################################################
# HANDLE types
HANDLE = c_void_p # in the header files: void *
HACCEL = HANDLE
HBITMAP = HANDLE
HBRUSH = HANDLE
HCOLORSPACE = HANDLE
HDC = HANDLE
HDESK = HANDLE
HDWP = HANDLE
HENHMETAFILE = HANDLE
HFONT = HANDLE
HGDIOBJ = HANDLE
HGLOBAL = HANDLE
HHOOK = HANDLE
HICON = HANDLE
HINSTANCE = HANDLE
HKEY = HANDLE
HKL = HANDLE
HLOCAL = HANDLE
HMENU = HANDLE
HMETAFILE = HANDLE
HMODULE = HANDLE
HMONITOR = HANDLE
HPALETTE = HANDLE
HPEN = HANDLE
HRGN = HANDLE
HRSRC = HANDLE
HSTR = HANDLE
HTASK = HANDLE
HWINSTA = HANDLE
HWND = HANDLE
SC_HANDLE = HANDLE
SERVICE_STATUS_HANDLE = HANDLE
################################################################
# Some important structure definitions
class RECT(Structure):
_fields_ = [("left", c_long),
("top", c_long),
("right", c_long),
("bottom", c_long)]
tagRECT = _RECTL = RECTL = RECT
class _SMALL_RECT(Structure):
_fields_ = [('Left', c_short),
('Top', c_short),
('Right', c_short),
('Bottom', c_short)]
SMALL_RECT = _SMALL_RECT
class _COORD(Structure):
_fields_ = [('X', c_short),
('Y', c_short)]
class POINT(Structure):
_fields_ = [("x", c_long),
("y", c_long)]
tagPOINT = _POINTL = POINTL = POINT
class SIZE(Structure):
_fields_ = [("cx", c_long),
("cy", c_long)]
tagSIZE = SIZEL = SIZE
def RGB(red, green, blue):
return red + (green << 8) + (blue << 16)
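# added illustration: RGB packs components as 0x00BBGGRR
assert RGB(255, 0, 0) == 0x0000FF and RGB(0, 0, 255) == 0xFF0000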
class FILETIME(Structure):
_fields_ = [("dwLowDateTime", DWORD),
("dwHighDateTime", DWORD)]
_FILETIME = FILETIME
class MSG(Structure):
_fields_ = [("hWnd", HWND),
("message", c_uint),
("wParam", WPARAM),
("lParam", LPARAM),
("time", DWORD),
("pt", POINT)]
tagMSG = MSG
MAX_PATH = 260
class WIN32_FIND_DATAA(Structure):
_fields_ = [("dwFileAttributes", DWORD),
("ftCreationTime", FILETIME),
("ftLastAccessTime", FILETIME),
("ftLastWriteTime", FILETIME),
("nFileSizeHigh", DWORD),
("nFileSizeLow", DWORD),
("dwReserved0", DWORD),
("dwReserved1", DWORD),
("cFileName", c_char * MAX_PATH),
("cAlternateFileName", c_char * 14)]
class WIN32_FIND_DATAW(Structure):
_fields_ = [("dwFileAttributes", DWORD),
("ftCreationTime", FILETIME),
("ftLastAccessTime", FILETIME),
("ftLastWriteTime", FILETIME),
("nFileSizeHigh", DWORD),
("nFileSizeLow", DWORD),
("dwReserved0", DWORD),
("dwReserved1", DWORD),
("cFileName", c_wchar * MAX_PATH),
("cAlternateFileName", c_wchar * 14)]
__all__ = ['ATOM', 'BOOL', 'BOOLEAN', 'BYTE', 'COLORREF', 'DOUBLE', 'DWORD',
'FILETIME', 'FLOAT', 'HACCEL', 'HANDLE', 'HBITMAP', 'HBRUSH',
'HCOLORSPACE', 'HDC', 'HDESK', 'HDWP', 'HENHMETAFILE', 'HFONT',
'HGDIOBJ', 'HGLOBAL', 'HHOOK', 'HICON', 'HINSTANCE', 'HKEY',
'HKL', 'HLOCAL', 'HMENU', 'HMETAFILE', 'HMODULE', 'HMONITOR',
'HPALETTE', 'HPEN', 'HRGN', 'HRSRC', 'HSTR', 'HTASK', 'HWINSTA',
'HWND', 'INT', 'LANGID', 'LARGE_INTEGER', 'LCID', 'LCTYPE',
'LGRPID', 'LONG', 'LPARAM', 'LPCOLESTR', 'LPCSTR', 'LPCVOID',
'LPCWSTR', 'LPOLESTR', 'LPSTR', 'LPVOID', 'LPWSTR', 'MAX_PATH',
'MSG', 'OLESTR', 'POINT', 'POINTL', 'RECT', 'RECTL', 'RGB',
'SC_HANDLE', 'SERVICE_STATUS_HANDLE', 'SHORT', 'SIZE', 'SIZEL',
'SMALL_RECT', 'UINT', 'ULARGE_INTEGER', 'ULONG', 'USHORT',
'VARIANT_BOOL', 'WCHAR', 'WIN32_FIND_DATAA', 'WIN32_FIND_DATAW',
'WORD', 'WPARAM', '_COORD', '_FILETIME', '_LARGE_INTEGER',
'_POINTL', '_RECTL', '_SMALL_RECT', '_ULARGE_INTEGER', 'tagMSG',
'tagPOINT', 'tagRECT', 'tagSIZE']
|
gpl-3.0
|
cleophasmashiri/oppia
|
core/controllers/base_test.py
|
6
|
8539
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for generic controller behavior."""
__author__ = 'Sean Lip'
import copy
import datetime
import feconf
import re
import types
from core.controllers import base
from core.domain import exp_services
from core.platform import models
current_user_services = models.Registry.import_current_user_services()
from core.tests import test_utils
import main
import webapp2
import webtest
class BaseHandlerTest(test_utils.GenericTestBase):
def test_that_no_get_results_in_500_error(self):
"""Test that no GET request results in a 500 error."""
for route in main.urls:
# This was needed for the Django tests to pass (at the time we had
# a Django branch of the codebase).
if isinstance(route, tuple):
continue
else:
url = route.template
url = re.sub('<([^/^:]+)>', 'abc123', url)
# Some of these will 404 or 302. This is expected.
response = self.testapp.get(url, expect_errors=True)
self.log_line(
'Fetched %s with status code %s' % (url, response.status_int))
self.assertIn(response.status_int, [200, 302, 404])
# TODO(sll): Add similar tests for POST, PUT, DELETE.
# TODO(sll): Set a self.payload attr in the BaseHandler for
# POST, PUT and DELETE. Something needs to regulate what
# the fields in the payload should be.
def test_requests_for_invalid_paths(self):
"""Test that requests for invalid paths result in a 404 error."""
response = self.testapp.get('/gallery/extra', expect_errors=True)
self.assertEqual(response.status_int, 404)
response = self.testapp.get('/gallery/data/extra', expect_errors=True)
self.assertEqual(response.status_int, 404)
response = self.testapp.post('/gallery/extra', {}, expect_errors=True)
self.assertEqual(response.status_int, 404)
response = self.testapp.put('/gallery/extra', {}, expect_errors=True)
self.assertEqual(response.status_int, 404)
class CsrfTokenManagerTest(test_utils.GenericTestBase):
def test_create_and_validate_token(self):
uid = 'user_id'
page = 'page_name'
token = base.CsrfTokenManager.create_csrf_token(uid, page)
self.assertTrue(base.CsrfTokenManager.is_csrf_token_valid(
uid, page, token))
self.assertFalse(
base.CsrfTokenManager.is_csrf_token_valid('bad_user', page, token))
self.assertFalse(base.CsrfTokenManager.is_csrf_token_valid(
uid, 'wrong_page', token))
self.assertFalse(base.CsrfTokenManager.is_csrf_token_valid(
uid, self.UNICODE_TEST_STRING, token))
self.assertFalse(
base.CsrfTokenManager.is_csrf_token_valid(uid, page, 'new_token'))
self.assertFalse(
base.CsrfTokenManager.is_csrf_token_valid(uid, page, 'new/token'))
def test_nondefault_csrf_secret_is_used(self):
base.CsrfTokenManager.create_csrf_token('uid', 'page')
self.assertNotEqual(base.CSRF_SECRET.value, base.DEFAULT_CSRF_SECRET)
def test_token_expiry(self):
# This can be any value.
ORIG_TIME = 100.0
FORTY_EIGHT_HOURS_IN_SECS = 48 * 60 * 60
PADDING = 1
current_time = ORIG_TIME
# Create a fake copy of the CsrfTokenManager class so that its
# _get_current_time() method can be swapped out without affecting the
# original class.
FakeCsrfTokenManager = copy.deepcopy(base.CsrfTokenManager)
def _get_current_time(cls):
return current_time
setattr(
FakeCsrfTokenManager,
_get_current_time.__name__,
types.MethodType(_get_current_time, FakeCsrfTokenManager)
)
# Create a token and check that it expires correctly.
token = FakeCsrfTokenManager.create_csrf_token('uid', 'page')
self.assertTrue(
FakeCsrfTokenManager.is_csrf_token_valid('uid', 'page', token))
current_time = ORIG_TIME + 1
self.assertTrue(
FakeCsrfTokenManager.is_csrf_token_valid('uid', 'page', token))
current_time = ORIG_TIME + FORTY_EIGHT_HOURS_IN_SECS - PADDING
self.assertTrue(
FakeCsrfTokenManager.is_csrf_token_valid('uid', 'page', token))
current_time = ORIG_TIME + FORTY_EIGHT_HOURS_IN_SECS + PADDING
self.assertFalse(
FakeCsrfTokenManager.is_csrf_token_valid('uid', 'page', token))
# Check that the expiry of one token does not cause the other to
# expire.
current_time = ORIG_TIME
token1 = FakeCsrfTokenManager.create_csrf_token('uid', 'page1')
self.assertTrue(
FakeCsrfTokenManager.is_csrf_token_valid('uid', 'page1', token1))
current_time = ORIG_TIME + 100
token2 = FakeCsrfTokenManager.create_csrf_token('uid', 'page2')
self.assertTrue(
FakeCsrfTokenManager.is_csrf_token_valid('uid', 'page2', token2))
current_time = ORIG_TIME + FORTY_EIGHT_HOURS_IN_SECS + PADDING
self.assertFalse(
FakeCsrfTokenManager.is_csrf_token_valid('uid', 'page1', token1))
self.assertTrue(
FakeCsrfTokenManager.is_csrf_token_valid('uid', 'page2', token2))
current_time = ORIG_TIME + 100 + FORTY_EIGHT_HOURS_IN_SECS + PADDING
self.assertFalse(
FakeCsrfTokenManager.is_csrf_token_valid('uid', 'page1', token1))
self.assertFalse(
FakeCsrfTokenManager.is_csrf_token_valid('uid', 'page2', token2))
class EscapingTest(test_utils.GenericTestBase):
class FakeAboutPage(base.BaseHandler):
"""Fake page for testing autoescaping."""
def get(self):
"""Handles GET requests."""
self.values.update({
'ADMIN_EMAIL_ADDRESS': ['<[angular_tag]>'],
'SITE_FORUM_URL': 'x{{51 * 3}}y',
})
self.render_template('pages/about.html')
def post(self):
"""Handles POST requests."""
self.render_json({'big_value': u'\n<script>马={{'})
def setUp(self):
super(EscapingTest, self).setUp()
self.testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route('/fake', self.FakeAboutPage, name='FakePage')],
debug=feconf.DEBUG,
))
def test_jinja_autoescaping(self):
response = self.testapp.get('/fake')
self.assertEqual(response.status_int, 200)
self.assertIn('<[angular_tag]>', response.body)
self.assertNotIn('<[angular_tag]>', response.body)
self.assertIn('x{{51 * 3}}y', response.body)
self.assertNotIn('x153y', response.body)
def test_special_char_escaping(self):
response = self.testapp.post('/fake', {})
self.assertEqual(response.status_int, 200)
self.assertTrue(response.body.startswith(feconf.XSSI_PREFIX))
self.assertIn('\\n\\u003cscript\\u003e\\u9a6c={{', response.body)
self.assertNotIn('<script>', response.body)
self.assertNotIn('马', response.body)
class LogoutPageTest(test_utils.GenericTestBase):
def test_logout_page(self):
"""Tests for logout handler."""
exp_services.load_demo('0')
# Logout with valid query arg. This test only validates that the login
# cookies have expired after hitting the logout url.
current_page = '/explore/0'
response = self.testapp.get(current_page)
self.assertEqual(response.status_int, 200)
response = self.testapp.get(current_user_services.create_logout_url(
current_page))
expiry_date = response.headers['Set-Cookie'].rsplit('=', 1)
self.assertTrue(datetime.datetime.now() > datetime.datetime.strptime(
expiry_date[1], "%a, %d %b %Y %H:%M:%S GMT",))
|
apache-2.0
|
heracek/django-nonrel
|
django/db/backends/postgresql/operations.py
|
229
|
9420
|
import re
from django.db.backends import BaseDatabaseOperations
# This DatabaseOperations class lives in here instead of base.py because it's
# used by both the 'postgresql' and 'postgresql_psycopg2' backends.
class DatabaseOperations(BaseDatabaseOperations):
def __init__(self, connection):
super(DatabaseOperations, self).__init__()
self._postgres_version = None
self.connection = connection
def _get_postgres_version(self):
if self._postgres_version is None:
from django.db.backends.postgresql.version import get_version
cursor = self.connection.cursor()
self._postgres_version = get_version(cursor)
return self._postgres_version
postgres_version = property(_get_postgres_version)
def date_extract_sql(self, lookup_type, field_name):
# http://www.postgresql.org/docs/8.0/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
if lookup_type == 'week_day':
# For consistency across backends, we return Sunday=1, Saturday=7.
return "EXTRACT('dow' FROM %s) + 1" % field_name
else:
return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)
def date_interval_sql(self, sql, connector, timedelta):
"""
        Implements the interval functionality for expressions.
        Format for Postgres:
(datefield + interval '3 days 200 seconds 5 microseconds')
"""
modifiers = []
if timedelta.days:
modifiers.append(u'%s days' % timedelta.days)
if timedelta.seconds:
modifiers.append(u'%s seconds' % timedelta.seconds)
if timedelta.microseconds:
modifiers.append(u'%s microseconds' % timedelta.microseconds)
mods = u' '.join(modifiers)
conn = u' %s ' % connector
return u'(%s)' % conn.join([sql, u'interval \'%s\'' % mods])
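    # added illustration: date_interval_sql('start', '+', timedelta(days=3))
    #   -> u"(start + interval '3 days')"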
def date_trunc_sql(self, lookup_type, field_name):
# http://www.postgresql.org/docs/8.0/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
def deferrable_sql(self):
return " DEFERRABLE INITIALLY DEFERRED"
def lookup_cast(self, lookup_type):
lookup = '%s'
# Cast text lookups to text to allow things like filter(x__contains=4)
if lookup_type in ('iexact', 'contains', 'icontains', 'startswith',
'istartswith', 'endswith', 'iendswith'):
lookup = "%s::text"
# Use UPPER(x) for case-insensitive lookups; it's faster.
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
lookup = 'UPPER(%s)' % lookup
return lookup
def field_cast_sql(self, db_type):
if db_type == 'inet':
return 'HOST(%s)'
return '%s'
def last_insert_id(self, cursor, table_name, pk_name):
# Use pg_get_serial_sequence to get the underlying sequence name
# from the table name and column name (available since PostgreSQL 8)
cursor.execute("SELECT CURRVAL(pg_get_serial_sequence('%s','%s'))" % (
self.quote_name(table_name), pk_name))
return cursor.fetchone()[0]
def no_limit_value(self):
return None
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def sql_flush(self, style, tables, sequences):
if tables:
if self.postgres_version[0:2] >= (8,1):
# Postgres 8.1+ can do 'TRUNCATE x, y, z...;'. In fact, it *has to*
# in order to be able to truncate tables referenced by a foreign
# key in any other table. The result is a single SQL TRUNCATE
# statement.
sql = ['%s %s;' % \
(style.SQL_KEYWORD('TRUNCATE'),
style.SQL_FIELD(', '.join([self.quote_name(table) for table in tables]))
)]
else:
# Older versions of Postgres can't do TRUNCATE in a single call, so
# they must use a simple delete.
sql = ['%s %s %s;' % \
(style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table))
) for table in tables]
# 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements
# to reset sequence indices
for sequence_info in sequences:
table_name = sequence_info['table']
column_name = sequence_info['column']
if not (column_name and len(column_name) > 0):
# This will be the case if it's an m2m using an autogenerated
# intermediate table (see BaseDatabaseIntrospection.sequence_list)
column_name = 'id'
sql.append("%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" % \
(style.SQL_KEYWORD('SELECT'),
style.SQL_TABLE(self.quote_name(table_name)),
style.SQL_FIELD(column_name))
)
return sql
else:
return []
def sequence_reset_sql(self, style, model_list):
from django.db import models
output = []
qn = self.quote_name
for model in model_list:
# Use `coalesce` to set the sequence for each model to the max pk value if there are records,
# or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true
# if there are records (as the max pk value is already in use), otherwise set it to false.
# Use pg_get_serial_sequence to get the underlying sequence name from the table name
# and column name (available since PostgreSQL 8)
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
output.append("%s setval(pg_get_serial_sequence('%s','%s'), coalesce(max(%s), 1), max(%s) %s null) %s %s;" % \
(style.SQL_KEYWORD('SELECT'),
style.SQL_TABLE(qn(model._meta.db_table)),
style.SQL_FIELD(f.column),
style.SQL_FIELD(qn(f.column)),
style.SQL_FIELD(qn(f.column)),
style.SQL_KEYWORD('IS NOT'),
style.SQL_KEYWORD('FROM'),
style.SQL_TABLE(qn(model._meta.db_table))))
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.many_to_many:
if not f.rel.through:
output.append("%s setval(pg_get_serial_sequence('%s','%s'), coalesce(max(%s), 1), max(%s) %s null) %s %s;" % \
(style.SQL_KEYWORD('SELECT'),
style.SQL_TABLE(qn(f.m2m_db_table())),
style.SQL_FIELD('id'),
style.SQL_FIELD(qn('id')),
style.SQL_FIELD(qn('id')),
style.SQL_KEYWORD('IS NOT'),
style.SQL_KEYWORD('FROM'),
style.SQL_TABLE(qn(f.m2m_db_table()))))
return output
def savepoint_create_sql(self, sid):
return "SAVEPOINT %s" % sid
def savepoint_commit_sql(self, sid):
return "RELEASE SAVEPOINT %s" % sid
def savepoint_rollback_sql(self, sid):
return "ROLLBACK TO SAVEPOINT %s" % sid
def prep_for_iexact_query(self, x):
return x
def check_aggregate_support(self, aggregate):
"""Check that the backend fully supports the provided aggregate.
The population and sample statistics (STDDEV_POP, STDDEV_SAMP,
VAR_POP, VAR_SAMP) were first implemented in Postgres 8.2.
The implementation of population statistics (STDDEV_POP and VAR_POP)
under Postgres 8.2 - 8.2.4 is known to be faulty. Raise
NotImplementedError if this is the database in use.
"""
if aggregate.sql_function in ('STDDEV_POP', 'STDDEV_SAMP', 'VAR_POP', 'VAR_SAMP'):
if self.postgres_version[0:2] < (8,2):
raise NotImplementedError('PostgreSQL does not support %s prior to version 8.2. Please upgrade your version of PostgreSQL.' % aggregate.sql_function)
if aggregate.sql_function in ('STDDEV_POP', 'VAR_POP'):
if self.postgres_version[0:2] == (8,2):
if self.postgres_version[2] is None or self.postgres_version[2] <= 4:
raise NotImplementedError('PostgreSQL 8.2 to 8.2.4 is known to have a faulty implementation of %s. Please upgrade your version of PostgreSQL.' % aggregate.sql_function)
def max_name_length(self):
"""
Returns the maximum length of an identifier.
Note that the maximum length of an identifier is 63 by default, but can
be changed by recompiling PostgreSQL after editing the NAMEDATALEN
macro in src/include/pg_config_manual.h .
This implementation simply returns 63, but can easily be overridden by a
custom database backend that inherits most of its behavior from this one.
"""
return 63
|
bsd-3-clause
|
andrefreitas/schwa
|
schwa/extraction/abstract_extractor.py
|
1
|
2040
|
# Copyright (c) 2015 Faculty of Engineering of the University of Porto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" Module for representing Extractors Abstract classes.
If someone wants to add support to a new type of repository e.g. SVN,
it should start here.
"""
import abc
import re
class AbstractExtractor(metaclass=abc.ABCMeta):
    """ An abstract class for a Repository Extractor.
    This class ensures that every extractor exposes the same interface.
    Attributes:
        path: A String representing the local repository path
    """
def __init__(self, path):
self.path = path
@abc.abstractmethod
def extract(self, ignore_regex="^$", max_commits=None):
""" Extracts all the Java commits"""
def is_code_file(path):
result = re.search(".+\.(java|php|py|cpp|c|js|html|css|rb|h|scala|sbt|sh|sql|cs)$", path)
return result
def can_parse_file(path):
result = re.search(".+\.(java)$", path)
return result
class RepositoryExtractionException(Exception):
pass
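# Editor's sketch (hypothetical subclass, not part of the original module): a
# minimal concrete extractor that satisfies the abstract interface above.
class NullExtractor(AbstractExtractor):
    """ An extractor that yields no commits; handy as a stub in tests. """
    def extract(self, ignore_regex="^$", max_commits=None):
        return []  # a real extractor would walk the repository history here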
|
mit
|
sauloal/cufflinksviewer
|
venvlin/lib/python2.7/site-packages/simplejson/tests/test_unicode.py
|
71
|
4687
|
from unittest import TestCase
import simplejson as json
class TestUnicode(TestCase):
def test_encoding1(self):
encoder = json.JSONEncoder(encoding='utf-8')
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
s = u.encode('utf-8')
ju = encoder.encode(u)
js = encoder.encode(s)
self.assertEquals(ju, js)
def test_encoding2(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
s = u.encode('utf-8')
ju = json.dumps(u, encoding='utf-8')
js = json.dumps(s, encoding='utf-8')
self.assertEquals(ju, js)
def test_encoding3(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps(u)
self.assertEquals(j, '"\\u03b1\\u03a9"')
def test_encoding4(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps([u])
self.assertEquals(j, '["\\u03b1\\u03a9"]')
def test_encoding5(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps(u, ensure_ascii=False)
self.assertEquals(j, u'"' + u + u'"')
def test_encoding6(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps([u], ensure_ascii=False)
self.assertEquals(j, u'["' + u + u'"]')
def test_big_unicode_encode(self):
u = u'\U0001d120'
self.assertEquals(json.dumps(u), '"\\ud834\\udd20"')
self.assertEquals(json.dumps(u, ensure_ascii=False), u'"\U0001d120"')
def test_big_unicode_decode(self):
u = u'z\U0001d120x'
self.assertEquals(json.loads('"' + u + '"'), u)
self.assertEquals(json.loads('"z\\ud834\\udd20x"'), u)
def test_unicode_decode(self):
for i in range(0, 0xd7ff):
u = unichr(i)
#s = '"\\u{0:04x}"'.format(i)
s = '"\\u%04x"' % (i,)
self.assertEquals(json.loads(s), u)
def test_object_pairs_hook_with_unicode(self):
s = u'{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}'
p = [(u"xkd", 1), (u"kcw", 2), (u"art", 3), (u"hxm", 4),
(u"qrt", 5), (u"pad", 6), (u"hoy", 7)]
self.assertEqual(json.loads(s), eval(s))
self.assertEqual(json.loads(s, object_pairs_hook=lambda x: x), p)
od = json.loads(s, object_pairs_hook=json.OrderedDict)
self.assertEqual(od, json.OrderedDict(p))
self.assertEqual(type(od), json.OrderedDict)
# the object_pairs_hook takes priority over the object_hook
self.assertEqual(json.loads(s,
object_pairs_hook=json.OrderedDict,
object_hook=lambda x: None),
json.OrderedDict(p))
def test_default_encoding(self):
self.assertEquals(json.loads(u'{"a": "\xe9"}'.encode('utf-8')),
{'a': u'\xe9'})
def test_unicode_preservation(self):
self.assertEquals(type(json.loads(u'""')), unicode)
self.assertEquals(type(json.loads(u'"a"')), unicode)
self.assertEquals(type(json.loads(u'["a"]')[0]), unicode)
def test_ensure_ascii_false_returns_unicode(self):
# http://code.google.com/p/simplejson/issues/detail?id=48
self.assertEquals(type(json.dumps([], ensure_ascii=False)), unicode)
self.assertEquals(type(json.dumps(0, ensure_ascii=False)), unicode)
self.assertEquals(type(json.dumps({}, ensure_ascii=False)), unicode)
self.assertEquals(type(json.dumps("", ensure_ascii=False)), unicode)
def test_ensure_ascii_false_bytestring_encoding(self):
# http://code.google.com/p/simplejson/issues/detail?id=48
doc1 = {u'quux': 'Arr\xc3\xaat sur images'}
doc2 = {u'quux': u'Arr\xeat sur images'}
doc_ascii = '{"quux": "Arr\\u00eat sur images"}'
doc_unicode = u'{"quux": "Arr\xeat sur images"}'
self.assertEquals(json.dumps(doc1), doc_ascii)
self.assertEquals(json.dumps(doc2), doc_ascii)
self.assertEquals(json.dumps(doc1, ensure_ascii=False), doc_unicode)
self.assertEquals(json.dumps(doc2, ensure_ascii=False), doc_unicode)
def test_ensure_ascii_linebreak_encoding(self):
# http://timelessrepo.com/json-isnt-a-javascript-subset
s1 = u'\u2029\u2028'
s2 = s1.encode('utf8')
expect = '"\\u2029\\u2028"'
self.assertEquals(json.dumps(s1), expect)
self.assertEquals(json.dumps(s2), expect)
self.assertEquals(json.dumps(s1, ensure_ascii=False), expect)
self.assertEquals(json.dumps(s2, ensure_ascii=False), expect)
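# Editor's sketch (hypothetical helper, not part of the test suite): the last
# test pins the fact that U+2028/U+2029 are escaped even in unicode output,
# because they are legal in JSON strings but illegal in JavaScript source.
def _demo_linebreak_escaping():
    return json.dumps(u'\u2029\u2028', ensure_ascii=False)  # '"\\u2029\\u2028"'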
|
mit
|
provaleks/o8
|
addons/website_forum_doc/__openerp__.py
|
322
|
1508
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Documentation',
'category': 'Website',
'summary': 'Forum, Documentation',
'version': '1.0',
'description': """
Documentation based on question and pertinent answers of Forum
""",
'author': 'OpenERP SA',
'depends': [
'website_forum'
],
'data': [
'data/doc_data.xml',
'security/ir.model.access.csv',
'views/doc.xml',
'views/website_doc.xml',
],
'demo': [
'data/doc_demo.xml',
],
'installable': True,
}
|
agpl-3.0
|
cloudtools/troposphere
|
tests/test_logs.py
|
1
|
1243
|
import unittest
from troposphere import Retain
from troposphere.logs import Destination, LogGroup
class TestLogs(unittest.TestCase):
def test_loggroup_deletionpolicy_is_preserved(self):
log_group = LogGroup("LogGroupWithDeletionPolicy", DeletionPolicy=Retain)
self.assertIn("DeletionPolicy", log_group.to_dict())
def test_loggroup_retention(self):
for days in [7, "7"]:
LogGroup(
"LogGroupWithDeletionPolicy",
RetentionInDays=days,
)
for days in [6, "6"]:
with self.assertRaises(ValueError):
LogGroup(
"LogGroupWithDeletionPolicy",
RetentionInDays=days,
)
def test_log_destination(self):
log_destination = Destination(
"MyLogDestination",
DestinationName="destination-name",
RoleArn="role-arn",
TargetArn="target-arn",
DestinationPolicy="destination-policy",
)
log_destination_json = log_destination.to_dict()
self.assertIn("Type", log_destination_json)
self.assertIn("Properties", log_destination_json)
if __name__ == "__main__":
unittest.main()
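# Editor's note (illustration, not part of the suite): RetentionInDays is
# validated against the fixed set of CloudFormation retention values
# (1, 3, 5, 7, 14, 30, ...), which is why 7 and "7" pass above while 6 and
# "6" raise ValueError.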
|
bsd-2-clause
|
atruberg/django-custom
|
django/middleware/locale.py
|
98
|
2970
|
"This is the locale selecting middleware that will look at accept headers"
from django.conf import settings
from django.core.urlresolvers import (is_valid_path, get_resolver,
LocaleRegexURLResolver)
from django.http import HttpResponseRedirect
from django.utils.cache import patch_vary_headers
from django.utils import translation
from django.utils.datastructures import SortedDict
class LocaleMiddleware(object):
"""
This is a very simple middleware that parses a request
and decides what translation object to install in the current
thread context. This allows pages to be dynamically
translated to the language the user desires (if the language
is available, of course).
"""
def __init__(self):
self._supported_languages = SortedDict(settings.LANGUAGES)
self._is_language_prefix_patterns_used = False
for url_pattern in get_resolver(None).url_patterns:
if isinstance(url_pattern, LocaleRegexURLResolver):
self._is_language_prefix_patterns_used = True
break
def process_request(self, request):
check_path = self.is_language_prefix_patterns_used()
language = translation.get_language_from_request(
request, check_path=check_path)
translation.activate(language)
request.LANGUAGE_CODE = translation.get_language()
def process_response(self, request, response):
language = translation.get_language()
language_from_path = translation.get_language_from_path(
request.path_info, supported=self._supported_languages
)
if (response.status_code == 404 and not language_from_path
and self.is_language_prefix_patterns_used()):
urlconf = getattr(request, 'urlconf', None)
language_path = '/%s%s' % (language, request.path_info)
path_valid = is_valid_path(language_path, urlconf)
if (not path_valid and settings.APPEND_SLASH
and not language_path.endswith('/')):
path_valid = is_valid_path("%s/" % language_path, urlconf)
if path_valid:
language_url = "%s://%s/%s%s" % (
'https' if request.is_secure() else 'http',
request.get_host(), language, request.get_full_path())
return HttpResponseRedirect(language_url)
if not (self.is_language_prefix_patterns_used()
and language_from_path):
patch_vary_headers(response, ('Accept-Language',))
if 'Content-Language' not in response:
response['Content-Language'] = language
return response
def is_language_prefix_patterns_used(self):
"""
Returns `True` if the `LocaleRegexURLResolver` is used
at root level of the urlpatterns, else it returns `False`.
"""
return self._is_language_prefix_patterns_used
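# Editor's sketch (hypothetical values, not part of Django): on a 404 for a
# prefix-less path, process_response() probes the language-prefixed variant
# with is_valid_path() and, if it resolves, issues a redirect built exactly
# like this.
def _demo_language_url(language='fr', host='example.com', path='/about/', secure=False):
    return "%s://%s/%s%s" % ('https' if secure else 'http', host, language, path)
assert _demo_language_url() == 'http://example.com/fr/about/'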
|
bsd-3-clause
|
ben-ng/swift
|
utils/swift_build_support/swift_build_support/products/ninja.py
|
1
|
1932
|
# swift_build_support/products/ninja.py -------------------------*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
"""
Ninja build
"""
# ----------------------------------------------------------------------------
import os.path
import platform
import sys
from . import product
from .. import cache_util
from .. import shell
class Ninja(product.Product):
@cache_util.reify
def ninja_bin_path(self):
return os.path.join(self.build_dir, 'ninja')
def do_build(self):
if os.path.exists(self.ninja_bin_path):
return
env = None
if platform.system() == "Darwin":
from .. import xcrun
sysroot = xcrun.sdk_path("macosx")
osx_version_min = self.args.darwin_deployment_version_osx
assert sysroot is not None
env = {
"CXX": self.toolchain.cxx,
"CFLAGS": (
"-isysroot {sysroot} -mmacosx-version-min={osx_version}"
).format(sysroot=sysroot, osx_version=osx_version_min),
"LDFLAGS": (
"-mmacosx-version-min={osx_version}"
).format(osx_version=osx_version_min),
}
# Ninja can only be built in-tree. Copy the source tree to the build
# directory.
shell.rmtree(self.build_dir)
shell.copytree(self.source_dir, self.build_dir)
with shell.pushd(self.build_dir):
shell.call([sys.executable, 'configure.py', '--bootstrap'],
env=env)
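# Editor's sketch (hypothetical paths, not part of the build product): on
# Darwin the env dict above expands to compiler flags of this shape, pinning
# both the SDK and the deployment target for the in-tree bootstrap.
def _demo_darwin_flags(sysroot='/tmp/MacOSX.sdk', osx_version='10.9'):
    cflags = "-isysroot {sysroot} -mmacosx-version-min={osx_version}".format(
        sysroot=sysroot, osx_version=osx_version)
    ldflags = "-mmacosx-version-min={osx_version}".format(osx_version=osx_version)
    return cflags, ldflags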
|
apache-2.0
|
PhilHarnish/forge
|
spec/puzzle/examples/mim/p10_1_spec.py
|
1
|
1173
|
import astor
from data import warehouse
from puzzle.examples.mim import p10_1
from puzzle.problems import logic_problem
from puzzle.puzzlepedia import prod_config
from spec.mamba import *
with _description('p10_1'):
with before.all:
warehouse.save()
prod_config.init()
self.puzzle = p10_1.get()
with after.all:
prod_config.reset()
warehouse.restore()
with description('solution'):
with it('scores the source as a LogicProblem'):
expect(logic_problem.LogicProblem.score(
p10_1.SOURCE.split('\n'))).to(equal(1))
with it('identifies puzzle type'):
problems = self.puzzle.problems()
expect(problems).to(have_len(1))
problem = problems[0]
expect(problem).to(be_a(logic_problem.LogicProblem))
with it('parses puzzle'):
node = logic_problem._parse(p10_1.SOURCE.split('\n'))
print(astor.to_source(node))
with it('models puzzle'):
model = logic_problem._model(p10_1.SOURCE.split('\n'))
print(str(model))
with it('exports a solution'):
problem = self.puzzle.problems()[0]
with breakpoints:
expect(problem.solution).to(look_like(p10_1.SOLUTION))
|
mit
|
letama/android_kernel_nozomi
|
tools/perf/scripts/python/syscall-counts.py
|
11181
|
1522
|
# system call counts
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
|
gpl-2.0
|
anchore/anchore-engine
|
anchore_engine/db/db_subscriptions.py
|
1
|
6807
|
import hashlib
import time
from anchore_engine import db
from anchore_engine.db import Subscription
def _compute_subscription_id(userId, subscription_key, subscription_type):
return hashlib.md5(
"+".join([userId, subscription_key, subscription_type]).encode("utf-8")
).hexdigest()
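# Editor's sketch (hypothetical inputs, not part of the original module): the
# subscription id is a stable MD5 digest over the "userId+key+type" triple,
# so identical inputs always map to the same row.
def _demo_subscription_id():
    expected = hashlib.md5(b"admin+docker.io/nginx:latest+policy_eval").hexdigest()
    return _compute_subscription_id("admin", "docker.io/nginx:latest", "policy_eval") == expected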
def _prep_payload(subscription_id, inobj):
# prep the input object
if not inobj:
inobj = {}
inobj["subscription_id"] = subscription_id
inobj.pop("userId", None)
inobj.pop("last_updated", None)
inobj.pop("created_at", None)
return inobj
def _new_subscription_record(
userId, subscription_id, subscription_key, subscription_type, inobj
):
our_result = Subscription(
subscription_id=subscription_id,
userId=userId,
subscription_key=subscription_key,
subscription_type=subscription_type,
)
our_result.update(inobj)
return our_result
def create_without_saving(userId, subscription_key, subscription_type, inobj):
subscription_id = _compute_subscription_id(
userId, subscription_key, subscription_type
)
inobj = _prep_payload(subscription_id, inobj)
our_result = _new_subscription_record(
userId, subscription_id, subscription_key, subscription_type, inobj
)
return our_result.to_dict()
def add(userId, subscription_key, subscription_type, inobj, session=None):
if not session:
session = db.Session
subscription_id = _compute_subscription_id(
userId, subscription_key, subscription_type
)
inobj = _prep_payload(subscription_id, inobj)
our_result = (
session.query(Subscription)
.filter_by(
subscription_id=subscription_id,
userId=userId,
subscription_key=subscription_key,
subscription_type=subscription_type,
)
.first()
)
if not our_result:
our_result = _new_subscription_record(
userId, subscription_id, subscription_key, subscription_type, inobj
)
session.add(our_result)
else:
our_result.update(inobj)
return True
def get_all_byuserId(userId, limit=None, session=None):
if not session:
session = db.Session
ret = []
our_results = session.query(Subscription).filter_by(userId=userId)
if limit:
our_results = our_results.limit(int(limit))
for result in our_results:
ret.append(result.to_dict())
return ret
def get_all(session=None):
if not session:
session = db.Session
ret = []
our_results = session.query(Subscription)
for result in our_results:
ret.append(result.to_dict())
return ret
def get(userId, subscription_id, session=None):
if not session:
session = db.Session
ret = {}
result = (
session.query(Subscription)
.filter_by(userId=userId, subscription_id=subscription_id)
.first()
)
if result:
ret = result.to_dict()
return ret
def is_active(account, subscription_id, session=None):
"""
Returns the subscription id of the record if one exists for the account and subscription id
"""
if not session:
session = db.Session
result = (
session.query(Subscription.subscription_id)
.filter_by(userId=account, subscription_id=subscription_id, active=True)
.scalar()
)
return result
def get_byfilter(userId, session=None, **dbfilter):
if not session:
session = db.Session
ret = []
dbfilter["userId"] = userId
results = session.query(Subscription).filter_by(**dbfilter)
if results:
for result in results:
ret.append(result.to_dict())
return ret
def get_bysubscription_key(userId, subscription_key, session=None):
if not session:
session = db.Session
ret = []
results = session.query(Subscription).filter_by(
userId=userId, subscription_key=subscription_key
)
if results:
for result in results:
obj = dict(
(key, value)
for key, value in vars(result).items()
if not key.startswith("_")
)
ret.append(obj)
return ret
def upsert(userId, subscription_key, subscription_type, inobj, session=None):
return add(userId, subscription_key, subscription_type, inobj, session=session)
def update_subscription_value(
account, subscription_id, subscription_value, session=None
):
"""
Lookup the record and update subscription value only for an existing record
"""
if not session:
session = db.Session
result = (
session.query(Subscription)
.filter_by(subscription_id=subscription_id, userId=account)
.one_or_none()
)
if result:
result.subscription_value = subscription_value
return result
def delete(userId, subscriptionId, remove=False, session=None):
if not session:
session = db.Session
ret = False
dbfilter = {"userId": userId, "subscription_id": subscriptionId}
results = session.query(Subscription).filter_by(**dbfilter)
if results:
for result in results:
if remove:
session.delete(result)
else:
result.update(
{
"record_state_key": "to_delete",
"record_state_val": str(time.time()),
}
)
ret = True
return ret
def delete_bysubscription_key(userId, subscription_key, remove=False, session=None):
if not session:
session = db.Session
ret = False
results = session.query(Subscription).filter_by(
userId=userId, subscription_key=subscription_key
)
if results:
for result in results:
if remove:
session.delete(result)
else:
result.update(
{
"record_state_key": "to_delete",
"record_state_val": str(time.time()),
}
)
ret = True
return ret
def delete_byfilter(userId, remove=False, session=None, **dbfilter):
if not session:
session = db.Session
ret = False
dbfilter["userId"] = userId
results = session.query(Subscription).filter_by(**dbfilter)
if results:
for result in results:
if remove:
session.delete(result)
else:
result.update(
{
"record_state_key": "to_delete",
"record_state_val": str(time.time()),
}
)
ret = True
return ret
|
apache-2.0
|
cuit-zhaxin/quick-ng
|
tools/cocos2d-console/plugins/project_new/project_new.py
|
6
|
26531
|
#!/usr/bin/python
# ----------------------------------------------------------------------------
# cocos "new" plugin
#
# Copyright 2013 (C) cocos2d-x.org
#
# License: MIT
# ----------------------------------------------------------------------------
'''
"new" plugin for cocos command line tool
'''
__docformat__ = 'restructuredtext'
# python
import os
import sys
import getopt
import ConfigParser
import json
import shutil
import cocos
import cocos_project
import re
from collections import OrderedDict
#
# Plugins should be a subclass of cocos.CCPlugin
#
class CCPluginNew(cocos.CCPlugin):
DEFAULT_PROJ_NAME = {
cocos_project.Project.CPP: 'MyCppGame',
cocos_project.Project.LUA: 'MyLuaGame',
cocos_project.Project.JS: 'MyJSGame'
}
@staticmethod
def plugin_name():
return "new"
@staticmethod
def brief_description():
return cocos.MultiLanguage.get_string('NEW_BRIEF')
def init(self, args):
self._projname = args.name
self._projdir = unicode(
os.path.abspath(os.path.join(args.directory, self._projname)), "utf-8")
self._lang = args.language
self._package = args.package
self._tpname = args.template
# new official ways to get the template and cocos paths
self._templates_paths = self.get_templates_paths()
self._cocosroot = self.get_cocos2d_path()
# search for custom paths
if args.engine_path is not None:
self._cocosroot = os.path.abspath(args.engine_path)
self._cocosroot = unicode(self._cocosroot, "utf-8")
tp_path = os.path.join(self._cocosroot, "templates")
if os.path.isdir(tp_path):
self._templates_paths.append(tp_path)
# remove duplicates keeping order
o = OrderedDict.fromkeys(self._templates_paths)
self._templates_paths = o.keys()
self._other_opts = args
self._mac_bundleid = args.mac_bundleid
self._ios_bundleid = args.ios_bundleid
self._templates = Templates(args.language, self._templates_paths, args.template)
if self._templates.none_active():
self._templates.select_one()
# parse arguments
def parse_args(self, argv):
"""Custom and check param list.
"""
from argparse import ArgumentParser
# set the parser to parse input params
# the correspond variable name of "-x, --xxx" is parser.xxx
name = CCPluginNew.plugin_name()
category = CCPluginNew.plugin_category()
parser = ArgumentParser(prog="cocos %s" % self.__class__.plugin_name(),
description=self.__class__.brief_description())
parser.add_argument(
"name", metavar="PROJECT_NAME", nargs='?', help=cocos.MultiLanguage.get_string('NEW_ARG_NAME'))
parser.add_argument(
"-p", "--package", metavar="PACKAGE_NAME", help=cocos.MultiLanguage.get_string('NEW_ARG_PACKAGE'))
parser.add_argument("-l", "--language",
required=True,
choices=["cpp", "lua", "js"],
help=cocos.MultiLanguage.get_string('NEW_ARG_LANG'))
parser.add_argument("-d", "--directory", metavar="DIRECTORY",
help=cocos.MultiLanguage.get_string('NEW_ARG_DIR'))
parser.add_argument("-t", "--template", metavar="TEMPLATE_NAME",
help=cocos.MultiLanguage.get_string('NEW_ARG_TEMPLATE'))
parser.add_argument(
"--ios-bundleid", dest="ios_bundleid", help=cocos.MultiLanguage.get_string('NEW_ARG_IOS_BUNDLEID'))
parser.add_argument(
"--mac-bundleid", dest="mac_bundleid", help=cocos.MultiLanguage.get_string('NEW_ARG_MAC_BUNDLEID'))
parser.add_argument("-e", "--engine-path", dest="engine_path",
help=cocos.MultiLanguage.get_string('NEW_ARG_ENGINE_PATH'))
parser.add_argument("--portrait", action="store_true", dest="portrait",
help=cocos.MultiLanguage.get_string('NEW_ARG_PORTRAIT'))
group = parser.add_argument_group(cocos.MultiLanguage.get_string('NEW_ARG_GROUP_SCRIPT'))
group.add_argument(
"--no-native", action="store_true", dest="no_native",
help=cocos.MultiLanguage.get_string('NEW_ARG_NO_NATIVE'))
# parse the params
args = parser.parse_args(argv)
if args.name is None:
args.name = CCPluginNew.DEFAULT_PROJ_NAME[args.language]
if not args.package:
args.package = "org.cocos2dx.%s" % args.name
if not args.ios_bundleid:
args.ios_bundleid = args.package
if not args.mac_bundleid:
args.mac_bundleid = args.package
if not args.directory:
args.directory = os.getcwd()
if not args.template:
args.template = 'default'
self.init(args)
return args
def _stat_engine_version(self):
try:
ver_str = None
engine_type = None
framework_ver_file = os.path.join(self._cocosroot, 'version')
x_ver_file = os.path.join(self._cocosroot, 'cocos/cocos2d.cpp')
js_ver_file = os.path.join(self._cocosroot, 'frameworks/js-bindings/bindings/manual/ScriptingCore.h')
if os.path.isfile(framework_ver_file):
# the engine is Cocos Framework
f = open(framework_ver_file)
ver_str = f.read()
f.close()
engine_type = 'cocosframework'
else:
ver_file = None
pattern = None
if os.path.isfile(x_ver_file):
# the engine is cocos2d-x
pattern = r".*return[ \t]+\"(.*)\";"
ver_file = x_ver_file
engine_type = 'cocos2d-x'
elif os.path.isfile(js_ver_file):
# the engine is cocos2d-js
pattern = r".*#define[ \t]+ENGINE_VERSION[ \t]+\"(.*)\""
ver_file = js_ver_file
engine_type = 'cocos2d-js'
if ver_file is not None:
f = open(ver_file)
import re
for line in f.readlines():
match = re.match(pattern, line)
if match:
ver_str = match.group(1)
break
f.close()
if ver_str is not None:
# stat the engine version info
cocos.DataStatistic.stat_event('new_engine_ver', ver_str, engine_type)
except:
pass
def _create_from_cmd(self):
# check the dst project dir exists
if os.path.exists(self._projdir):
message = cocos.MultiLanguage.get_string('NEW_ERROR_FOLDER_EXISTED_FMT') % self._projdir
raise cocos.CCPluginError(message)
tp_dir = self._templates.template_path()
creator = TPCreator(self._lang, self._cocosroot, self._projname, self._projdir,
self._tpname, tp_dir, self._package, self._mac_bundleid, self._ios_bundleid)
# do the default creating step
creator.do_default_step()
data = None
cfg_path = os.path.join(self._projdir, cocos_project.Project.CONFIG)
if os.path.isfile(cfg_path):
f = open(cfg_path)
data = json.load(f)
f.close()
if data is None:
data = {}
if cocos_project.Project.KEY_PROJ_TYPE not in data:
data[cocos_project.Project.KEY_PROJ_TYPE] = self._lang
# script project may add native support
if self._lang in (cocos_project.Project.LUA, cocos_project.Project.JS):
if not self._other_opts.no_native:
creator.do_other_step('do_add_native_support')
data[cocos_project.Project.KEY_HAS_NATIVE] = True
else:
data[cocos_project.Project.KEY_HAS_NATIVE] = False
# if --portrait is specified, change the orientation
if self._other_opts.portrait:
creator.do_other_step("change_orientation", not_existed_error=False)
# write config files
with open(cfg_path, 'w') as outfile:
json.dump(data, outfile, sort_keys=True, indent=4)
# main entry point
def run(self, argv, dependencies):
self.parse_args(argv)
action_str = 'new_%s' % (self._lang)
cocos.DataStatistic.stat_event('new', action_str, self._tpname)
self._create_from_cmd()
self._stat_engine_version()
def replace_string(filepath, src_string, dst_string):
""" From file's content replace specified string
Arg:
filepath: Specify a file contains the path
src_string: old string
dst_string: new string
"""
if src_string is None or dst_string is None:
raise TypeError
content = ""
f1 = open(filepath, "rb")
for line in f1:
strline = line.decode('utf8')
if src_string in strline:
content += strline.replace(src_string, dst_string)
else:
content += strline
f1.close()
f2 = open(filepath, "wb")
f2.write(content.encode('utf8'))
f2.close()
# end of replace_string
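# Editor's sketch (hypothetical file, not part of the plugin): replace_string()
# rewrites the file in place, substituting every occurrence line by line.
def _demo_replace_string():
    import tempfile
    path = os.path.join(tempfile.mkdtemp(), "demo.txt")
    f = open(path, "wb")
    f.write(u"project(PROJECT_NAME)\n".encode('utf8'))
    f.close()
    replace_string(path, "PROJECT_NAME", "MyCppGame")
    f = open(path, "rb")
    content = f.read().decode('utf8')
    f.close()
    return content  # u"project(MyCppGame)\n"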
class Templates(object):
def __init__(self, lang, templates_paths, current):
self._lang = lang
self._templates_paths = templates_paths
self._scan()
self._current = None
if current is not None:
if current in self._template_folders:
self._current = current
else:
cocos.Logging.warning(cocos.MultiLanguage.get_string('NEW_TEMPLATE_NOT_FOUND_FMT')
% current)
def _scan(self):
template_pattern = {
"cpp": 'cpp-template-(.+)',
"lua": 'lua-template-(.+)',
"js": 'js-template-(.+)',
}
self._template_folders = {}
for templates_dir in self._templates_paths:
try:
dirs = [name for name in os.listdir(templates_dir) if os.path.isdir(
os.path.join(templates_dir, name))]
except Exception:
continue
pattern = template_pattern[self._lang]
for name in dirs:
match = re.search(pattern, name)
if match is None:
continue
template_name = match.group(1)
if template_name in self._template_folders.keys():
continue
self._template_folders[template_name] = os.path.join(templates_dir, name)
if len(self._template_folders) == 0:
cur_engine = "cocos2d-x" if self._lang == "js" else "cocos2d-js"
need_engine = "cocos2d-js" if self._lang == "js" else "cocos2d-x"
engine_tip = cocos.MultiLanguage.get_string('NEW_ERROR_ENGINE_TIP_FMT') % need_engine
message = cocos.MultiLanguage.get_string('NEW_ERROR_TEMPLATE_NOT_FOUND_FMT') % (self._lang, engine_tip)
raise cocos.CCPluginError(message)
def none_active(self):
return self._current is None
def template_path(self):
if self._current is None:
return None
return self._template_folders[self._current]
def select_one(self):
cocos.Logging.warning(cocos.MultiLanguage.get_string('NEW_SELECT_TEMPLATE_TIP1'))
p = self._template_folders.keys()
for i in range(len(p)):
cocos.Logging.warning('%d %s' % (i + 1, p[i]))
cocos.Logging.warning(cocos.MultiLanguage.get_string('NEW_SELECT_TEMPLATE_TIP2'))
while True:
option = raw_input()
if option.isdigit():
option = int(option) - 1
if option in range(len(p)):
break
self._current = p[option]
class TPCreator(object):
def __init__(self, lang, cocos_root, project_name, project_dir, tp_name, tp_dir, project_package, mac_id, ios_id):
self.lang = lang
self.cocos_root = cocos_root
self.project_dir = project_dir
self.project_name = project_name
self.package_name = project_package
self.mac_bundleid = mac_id
self.ios_bundleid = ios_id
self.tp_name = tp_name
self.tp_dir = tp_dir
self.tp_json = 'cocos-project-template.json'
tp_json_path = os.path.join(tp_dir, self.tp_json)
if not os.path.exists(tp_json_path):
message = cocos.MultiLanguage.get_string('NEW_WARNING_FILE_NOT_FOUND_FMT') % tp_json_path
raise cocos.CCPluginError(message)
f = open(tp_json_path)
# keep the key order
tpinfo = json.load(f, encoding='utf8', object_pairs_hook=OrderedDict)
# read the default creating step
if 'do_default' not in tpinfo:
message = (cocos.MultiLanguage.get_string('NEW_ERROR_DEFAILT_CFG_NOT_FOUND_FMT') % tp_json_path)
raise cocos.CCPluginError(message)
self.tp_default_step = tpinfo.pop('do_default')
# keep the other steps
self.tp_other_step = tpinfo
def cp_self(self, project_dir, exclude_files):
cocos.Logging.info(cocos.MultiLanguage.get_string('NEW_INFO_STEP_COPY_TEMPLATE_FMT') % project_dir)
if not os.path.exists(self.project_dir):
os.makedirs(self.project_dir)
copy_cfg = {
"from": self.tp_dir,
"to": self.project_dir,
"exclude": exclude_files
}
cocos.copy_files_with_config(copy_cfg, self.tp_dir, self.project_dir)
def do_default_step(self):
default_cmds = self.tp_default_step
exclude_files = []
if "exclude_from_template" in default_cmds:
exclude_files = exclude_files + \
default_cmds['exclude_from_template']
default_cmds.pop('exclude_from_template')
        # should ignore the xx-template-xx.json
exclude_files.append(self.tp_json)
self.cp_self(self.project_dir, exclude_files)
self.do_cmds(default_cmds)
def do_other_step(self, step, not_existed_error=True):
if step not in self.tp_other_step:
if not_existed_error:
# handle as error
message = cocos.MultiLanguage.get_string('NEW_ERROR_STEP_NOT_FOUND_FMT') % step
raise cocos.CCPluginError(message)
else:
# handle as warning
cocos.Logging.warning(cocos.MultiLanguage.get_string('NEW_WARNING_STEP_NOT_FOUND_FMT') % step)
return
cmds = self.tp_other_step[step]
self.do_cmds(cmds)
def do_cmds(self, cmds):
for k, v in cmds.iteritems():
# call cmd method by method/cmd name
# get from
# http://stackoverflow.com/questions/3951840/python-how-to-invoke-an-function-on-an-object-dynamically-by-name
try:
cmd = getattr(self, k)
except AttributeError:
raise cocos.CCPluginError(cocos.MultiLanguage.get_string('NEW_ERROR_CMD_NOT_FOUND_FMT') % k)
try:
cmd(v)
except Exception as e:
raise cocos.CCPluginError(str(e))
# cmd methods below
def append_h5_engine(self, v):
src = os.path.join(self.cocos_root, v['from'])
dst = os.path.join(self.project_dir, v['to'])
# check cocos engine exist
moduleConfig = 'moduleConfig.json'
        module_cfg = os.path.join(src, moduleConfig)
        if not os.path.exists(module_cfg):
            message = cocos.MultiLanguage.get_string('NEW_WARNING_FILE_NOT_FOUND_FMT') % module_cfg
            raise cocos.CCPluginError(message)
        f = open(module_cfg)
data = json.load(f, 'utf8')
f.close()
modules = data['module']
# must copy moduleConfig.json & CCBoot.js
file_list = [moduleConfig, data['bootFile']]
for k, v in modules.iteritems():
module = modules[k]
for f in module:
if f[-2:] == 'js':
file_list.append(f)
# begin copy engine
cocos.Logging.info(cocos.MultiLanguage.get_string('NEW_INFO_STEP_COPY_H5'))
for index in range(len(file_list)):
srcfile = os.path.join(src, file_list[index])
dstfile = os.path.join(dst, file_list[index])
srcfile = cocos.add_path_prefix(srcfile)
dstfile = cocos.add_path_prefix(dstfile)
if not os.path.exists(os.path.dirname(dstfile)):
os.makedirs(cocos.add_path_prefix(os.path.dirname(dstfile)))
# copy file or folder
if os.path.exists(srcfile):
if os.path.isdir(srcfile):
if os.path.exists(dstfile):
shutil.rmtree(dstfile)
shutil.copytree(srcfile, dstfile)
else:
if os.path.exists(dstfile):
os.remove(dstfile)
shutil.copy2(srcfile, dstfile)
def append_x_engine(self, v):
# FIXME this is a hack, but in order to fix it correctly the cocos-project-template.json
# file probably will need to be re-designed.
# As a quick (horrible) fix, we check if we are in distro mode.
# If so, we don't do the "append_x_engine" step
if cocos.CCPlugin.get_cocos2d_mode() == 'distro':
return
src = os.path.join(self.cocos_root, v['from'])
dst = os.path.join(self.project_dir, v['to'])
# check cocos engine exist
cocosx_files_json = os.path.join(
src, 'templates', 'cocos2dx_files.json')
if not os.path.exists(cocosx_files_json):
message = cocos.MultiLanguage.get_string('NEW_WARNING_FILE_NOT_FOUND_FMT') % cocosx_files_json
raise cocos.CCPluginError(message)
f = open(cocosx_files_json)
data = json.load(f)
f.close()
fileList = data['common']
if self.lang == 'lua':
fileList = fileList + data['lua']
if self.lang == 'js' and 'js' in data.keys():
fileList = fileList + data['js']
# begin copy engine
cocos.Logging.info(cocos.MultiLanguage.get_string('NEW_INFO_STEP_COPY_X'))
for index in range(len(fileList)):
srcfile = os.path.join(src, fileList[index])
dstfile = os.path.join(dst, fileList[index])
srcfile = cocos.add_path_prefix(srcfile)
dstfile = cocos.add_path_prefix(dstfile)
if not os.path.exists(os.path.dirname(dstfile)):
os.makedirs(cocos.add_path_prefix(os.path.dirname(dstfile)))
# copy file or folder
if os.path.exists(srcfile):
if os.path.isdir(srcfile):
if os.path.exists(dstfile):
shutil.rmtree(dstfile)
shutil.copytree(srcfile, dstfile)
else:
if os.path.exists(dstfile):
os.remove(dstfile)
shutil.copy2(srcfile, dstfile)
def append_from_template(self, v):
cocos.Logging.info(cocos.MultiLanguage.get_string('NEW_INFO_STEP_APPEND_TEMPLATE'))
cocos.copy_files_with_config(v, self.tp_dir, self.project_dir)
def append_dir(self, v):
cocos.Logging.info(cocos.MultiLanguage.get_string('NEW_INFO_STEP_APPEND_DIR'))
for item in v:
cocos.copy_files_with_config(
item, self.cocos_root, self.project_dir)
def append_file(self, v):
cocos.Logging.info(cocos.MultiLanguage.get_string('NEW_INFO_STEP_APPEND_FILE'))
for item in v:
src = os.path.join(self.cocos_root, item['from'])
dst = os.path.join(self.project_dir, item['to'])
src = cocos.add_path_prefix(src)
dst = cocos.add_path_prefix(dst)
shutil.copy2(src, dst)
# project cmd
def project_rename(self, v):
""" will modify the file name of the file
"""
dst_project_dir = self.project_dir
dst_project_name = self.project_name
src_project_name = v['src_project_name']
cocos.Logging.info(cocos.MultiLanguage.get_string('NEW_INFO_STEP_RENAME_PROJ_FMT') %
(src_project_name, dst_project_name))
files = v['files']
for f in files:
src = f.replace("PROJECT_NAME", src_project_name)
dst = f.replace("PROJECT_NAME", dst_project_name)
src_file_path = os.path.join(dst_project_dir, src)
dst_file_path = os.path.join(dst_project_dir, dst)
if os.path.exists(src_file_path):
if os.path.exists(dst_file_path):
os.remove(dst_file_path)
os.rename(src_file_path, dst_file_path)
else:
cocos.Logging.warning(cocos.MultiLanguage.get_string('NEW_WARNING_FILE_NOT_FOUND_FMT')
% os.path.join(dst_project_dir, src))
def project_replace_project_name(self, v):
""" will modify the content of the file
"""
dst_project_dir = self.project_dir
dst_project_name = self.project_name
src_project_name = v['src_project_name']
cocos.Logging.info(cocos.MultiLanguage.get_string('NEW_INFO_STEP_REPLACE_PROJ_FMT') %
(src_project_name, dst_project_name))
files = v['files']
for f in files:
dst = f.replace("PROJECT_NAME", dst_project_name)
if os.path.exists(os.path.join(dst_project_dir, dst)):
replace_string(
os.path.join(dst_project_dir, dst), src_project_name, dst_project_name)
else:
cocos.Logging.warning(cocos.MultiLanguage.get_string('NEW_WARNING_FILE_NOT_FOUND_FMT')
% os.path.join(dst_project_dir, dst))
def project_replace_package_name(self, v):
""" will modify the content of the file
"""
dst_project_dir = self.project_dir
dst_project_name = self.project_name
src_package_name = v['src_package_name']
dst_package_name = self.package_name
cocos.Logging.info(cocos.MultiLanguage.get_string('NEW_INFO_STEP_REPLACE_PKG_FMT') %
(src_package_name, dst_package_name))
files = v['files']
if not dst_package_name:
raise cocos.CCPluginError(cocos.MultiLanguage.get_string('NEW_ERROR_PKG_NAME_NOT_SPECIFIED'))
for f in files:
dst = f.replace("PROJECT_NAME", dst_project_name)
if os.path.exists(os.path.join(dst_project_dir, dst)):
replace_string(
os.path.join(dst_project_dir, dst), src_package_name, dst_package_name)
else:
cocos.Logging.warning(cocos.MultiLanguage.get_string('NEW_WARNING_FILE_NOT_FOUND_FMT')
% os.path.join(dst_project_dir, dst))
def project_replace_mac_bundleid(self, v):
""" will modify the content of the file
"""
if self.mac_bundleid is None:
return
dst_project_dir = self.project_dir
dst_project_name = self.project_name
src_bundleid = v['src_bundle_id']
dst_bundleid = self.mac_bundleid
cocos.Logging.info(cocos.MultiLanguage.get_string('NEW_INFO_STEP_MAC_BUNDLEID_FMT')
% (src_bundleid, dst_bundleid))
files = v['files']
for f in files:
dst = f.replace("PROJECT_NAME", dst_project_name)
if os.path.exists(os.path.join(dst_project_dir, dst)):
replace_string(
os.path.join(dst_project_dir, dst), src_bundleid, dst_bundleid)
else:
cocos.Logging.warning(cocos.MultiLanguage.get_string('NEW_WARNING_FILE_NOT_FOUND_FMT')
% os.path.join(dst_project_dir, dst))
def project_replace_ios_bundleid(self, v):
""" will modify the content of the file
"""
if self.ios_bundleid is None:
return
dst_project_dir = self.project_dir
dst_project_name = self.project_name
src_bundleid = v['src_bundle_id']
dst_bundleid = self.ios_bundleid
cocos.Logging.info(cocos.MultiLanguage.get_string('NEW_INFO_STEP_IOS_BUNDLEID_FMT')
% (src_bundleid, dst_bundleid))
files = v['files']
for f in files:
dst = f.replace("PROJECT_NAME", dst_project_name)
if os.path.exists(os.path.join(dst_project_dir, dst)):
replace_string(
os.path.join(dst_project_dir, dst), src_bundleid, dst_bundleid)
else:
cocos.Logging.warning(cocos.MultiLanguage.get_string('NEW_WARNING_FILE_NOT_FOUND_FMT')
% os.path.join(dst_project_dir, dst))
def modify_files(self, v):
""" will modify the content of the file
format of v is :
[
{
"file_path": The path related with project directory,
"pattern": Find pattern,
"replace_string": Replaced string
},
...
]
"""
cocos.Logging.info(cocos.MultiLanguage.get_string('NEW_INFO_STEP_MODIFY_FILE'))
for modify_info in v:
modify_file = modify_info["file_path"]
if not os.path.isabs(modify_file):
modify_file = os.path.abspath(os.path.join(self.project_dir, modify_file))
if not os.path.isfile(modify_file):
cocos.Logging.warning(cocos.MultiLanguage.get_string('NEW_WARNING_NOT_A_FILE_FMT') % modify_file)
continue
pattern = modify_info["pattern"]
replace_str = modify_info["replace_string"]
f = open(modify_file)
lines = f.readlines()
f.close()
new_lines = []
for line in lines:
new_line = re.sub(pattern, replace_str, line)
new_lines.append(new_line)
f = open(modify_file, "w")
f.writelines(new_lines)
f.close()
|
mit
|
steventimberman/masterDebater
|
venv/lib/python2.7/site-packages/chardet/utf8prober.py
|
290
|
2766
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .enums import ProbingState, MachineState
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8_SM_MODEL
class UTF8Prober(CharSetProber):
ONE_CHAR_PROB = 0.5
def __init__(self):
super(UTF8Prober, self).__init__()
self.coding_sm = CodingStateMachine(UTF8_SM_MODEL)
self._num_mb_chars = None
self.reset()
def reset(self):
super(UTF8Prober, self).reset()
self.coding_sm.reset()
self._num_mb_chars = 0
@property
def charset_name(self):
return "utf-8"
@property
def language(self):
return ""
def feed(self, byte_str):
for c in byte_str:
coding_state = self.coding_sm.next_state(c)
if coding_state == MachineState.ERROR:
self._state = ProbingState.NOT_ME
break
elif coding_state == MachineState.ITS_ME:
self._state = ProbingState.FOUND_IT
break
elif coding_state == MachineState.START:
if self.coding_sm.get_current_charlen() >= 2:
self._num_mb_chars += 1
if self.state == ProbingState.DETECTING:
if self.get_confidence() > self.SHORTCUT_THRESHOLD:
self._state = ProbingState.FOUND_IT
return self.state
def get_confidence(self):
unlike = 0.99
if self._num_mb_chars < 6:
unlike *= self.ONE_CHAR_PROB ** self._num_mb_chars
return 1.0 - unlike
else:
return unlike
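# Editor's sketch (not part of chardet): the confidence above climbs
# geometrically toward 0.99 as multibyte sequences accumulate --
# 0 -> 0.01, 1 -> 0.505, 3 -> 0.87625 -- and is clamped at 0.99 from the
# sixth sequence onward.
def _demo_confidence(num_mb_chars, one_char_prob=0.5):
    unlike = 0.99
    if num_mb_chars < 6:
        return 1.0 - unlike * one_char_prob ** num_mb_chars
    return unlike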
|
mit
|
naresh21/synergetics-edx-platform
|
openedx/core/djangoapps/theming/tests/test_commands.py
|
47
|
2114
|
"""
Tests for Management commands of comprehensive theming.
"""
from django.test import TestCase
from django.core.management import call_command, CommandError
from openedx.core.djangoapps.theming.helpers import get_themes
from openedx.core.djangoapps.theming.management.commands.compile_sass import Command
class TestUpdateAssets(TestCase):
"""
Test comprehensive theming helper functions.
"""
def setUp(self):
super(TestUpdateAssets, self).setUp()
self.themes = get_themes()
def test_errors_for_invalid_arguments(self):
"""
Test update_asset command.
"""
# make sure error is raised for invalid theme list
with self.assertRaises(CommandError):
call_command("compile_sass", themes=["all", "test-theme"])
# make sure error is raised for invalid theme list
with self.assertRaises(CommandError):
call_command("compile_sass", themes=["no", "test-theme"])
# make sure error is raised for invalid theme list
with self.assertRaises(CommandError):
call_command("compile_sass", themes=["all", "no"])
# make sure error is raised for invalid theme list
with self.assertRaises(CommandError):
call_command("compile_sass", themes=["test-theme", "non-existing-theme"])
def test_parse_arguments(self):
"""
Test parse arguments method for update_asset command.
"""
# make sure compile_sass picks all themes when called with 'themes=all' option
parsed_args = Command.parse_arguments(themes=["all"])
self.assertItemsEqual(parsed_args[2], get_themes())
# make sure compile_sass picks no themes when called with 'themes=no' option
parsed_args = Command.parse_arguments(themes=["no"])
self.assertItemsEqual(parsed_args[2], [])
# make sure compile_sass picks only specified themes
parsed_args = Command.parse_arguments(themes=["test-theme"])
self.assertItemsEqual(parsed_args[2], [theme for theme in get_themes() if theme.theme_dir_name == "test-theme"])
|
agpl-3.0
|
HyperBaton/ansible
|
lib/ansible/modules/cloud/azure/azure_rm_devtestlabarmtemplate_info.py
|
20
|
7086
|
#!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_devtestlabarmtemplate_info
version_added: "2.9"
short_description: Get Azure DevTest Lab ARM Template facts
description:
- Get facts of Azure DevTest Lab ARM Template.
options:
resource_group:
description:
- The name of the resource group.
required: True
type: str
lab_name:
description:
- The name of the lab.
required: True
type: str
artifact_source_name:
description:
- The name of the artifact source.
required: True
type: str
name:
description:
- The name of the ARM template.
type: str
extends_documentation_fragment:
- azure
author:
- Zim Kalinowski (@zikalino)
'''
EXAMPLES = '''
- name: Get information on DevTest Lab ARM Template
azure_rm_devtestlabarmtemplate_info:
resource_group: myResourceGroup
lab_name: myLab
artifact_source_name: public environment repo
name: WebApp
'''
RETURN = '''
arm_templates:
description:
- A list of dictionaries containing facts for DevTest Lab ARM Template.
returned: always
type: complex
contains:
id:
description:
- The identifier of the resource.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DevTestLab/labs/myLab/art
ifactSources/public environment repo/armTemplates/WebApp"
resource_group:
description:
- Resource group name.
returned: always
sample: myResourceGroup
lab_name:
description:
- DevTest Lab name.
returned: always
sample: myLab
artifact_source_name:
description:
- Artifact source name.
returned: always
sample: public environment repo
name:
description:
- ARM Template name.
returned: always
sample: WebApp
display_name:
description:
                - The display name of the ARM template.
returned: always
sample: Web App
description:
description:
                - The description of the ARM template.
returned: always
sample: This template creates an Azure Web App without a data store.
publisher:
description:
                - The publisher of the ARM template.
returned: always
sample: Microsoft
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.devtestlabs import DevTestLabsClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMDtlArmTemplateInfo(AzureRMModuleBase):
def __init__(self):
# define user inputs into argument
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
lab_name=dict(
type='str',
required=True
),
artifact_source_name=dict(
type='str',
required=True
),
name=dict(
type='str'
)
)
# store the results of the module operation
self.results = dict(
changed=False
)
self.mgmt_client = None
self.resource_group = None
self.lab_name = None
self.artifact_source_name = None
self.name = None
super(AzureRMDtlArmTemplateInfo, self).__init__(self.module_arg_spec, supports_tags=False)
def exec_module(self, **kwargs):
is_old_facts = self.module._name == 'azure_rm_devtestlabarmtemplate_facts'
if is_old_facts:
self.module.deprecate("The 'azure_rm_devtestlabarmtemplate_facts' module has been renamed to 'azure_rm_devtestlabarmtemplate_info'", version='2.13')
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
base_url=self._cloud_environment.endpoints.resource_manager)
if self.name:
self.results['armtemplates'] = self.get()
else:
self.results['armtemplates'] = self.list()
return self.results
def list(self):
response = None
results = []
try:
response = self.mgmt_client.arm_templates.list(resource_group_name=self.resource_group,
lab_name=self.lab_name,
artifact_source_name=self.artifact_source_name)
self.log("Response : {0}".format(response))
except CloudError as e:
            self.fail('Could not get facts for DTL ARM Template: {0}'.format(str(e)))
if response is not None:
for item in response:
results.append(self.format_response(item))
return results
def get(self):
response = None
results = []
try:
response = self.mgmt_client.arm_templates.get(resource_group_name=self.resource_group,
lab_name=self.lab_name,
artifact_source_name=self.artifact_source_name,
name=self.name)
self.log("Response : {0}".format(response))
except CloudError as e:
            self.fail('Could not get facts for DTL ARM Template: {0}'.format(str(e)))
if response:
results.append(self.format_response(response))
return results
def format_response(self, item):
d = item.as_dict()
d = {
'resource_group': self.parse_resource_to_dict(d.get('id')).get('resource_group'),
'lab_name': self.parse_resource_to_dict(d.get('id')).get('name'),
'artifact_source_name': self.parse_resource_to_dict(d.get('id')).get('child_name_1'),
'id': d.get('id', None),
'name': d.get('name'),
'display_name': d.get('display_name'),
'description': d.get('description'),
'publisher': d.get('publisher')
}
return d
def main():
AzureRMDtlArmTemplateInfo()
if __name__ == '__main__':
main()
|
gpl-3.0
|
pychess/pychess
|
lib/pychess/Players/PyChessCECP.py
|
1
|
19627
|
import re
import signal
import sys
from threading import Thread
import pychess
from pychess.Players.PyChess import PyChess
from pychess.System import conf, fident
from pychess.Utils.book import getOpenings
from pychess.Utils.const import NORMALCHESS, FEN_START, BLACK, FISCHERRANDOMCHESS, \
CRAZYHOUSECHESS, WILDCASTLESHUFFLECHESS, LOSERSCHESS, SUICIDECHESS, ATOMICCHESS, \
THREECHECKCHESS, KINGOFTHEHILLCHESS, ASEANCHESS, MAKRUKCHESS, CAMBODIANCHESS, \
SITTUYINCHESS, GIVEAWAYCHESS, HORDECHESS, RACINGKINGSCHESS, PLACEMENTCHESS, \
SCHESS, LIGHTBRIGADECHESS, WHITE
from pychess.Utils.lutils.Benchmark import benchmark
from pychess.Utils.lutils.perft import perft
from pychess.Utils.lutils.LBoard import LBoard
from pychess.Utils.lutils.ldata import MAXPLY
from pychess.Utils.lutils import lsearch, leval
from pychess.Utils.lutils.lmove import parseSAN, parseAny, toSAN, ParsingError
from pychess.Utils.lutils.lmovegen import genAllMoves, genCaptures, genCheckEvasions
from pychess.Utils.lutils.validator import validateMove
from pychess.System.Log import log
from pychess.Variants.horde import HORDESTART
from pychess.Variants.placement import PLACEMENTSTART
from pychess.Variants.threecheck import THREECHECKSTART
from pychess.Variants.asean import ASEANSTART, MAKRUKSTART, KAMBODIANSTART, SITTUYINSTART
from pychess.Variants.seirawan import SCHESSSTART
from pychess.Variants.lightbrigade import LIGHTBRIGADESTART
if sys.platform != "win32":
import readline
readline.clear_history()
ASCII = sys.platform == "win32"
def get_input():
return input()
class PyChessCECP(PyChess):
def __init__(self):
PyChess.__init__(self)
self.board = LBoard(NORMALCHESS)
self.board.applyFen(FEN_START)
self.forced = False
self.analyzing = False
self.thread = None
self.features = {
"ping": 1,
"setboard": 1,
"playother": 1,
"san": 1,
"usermove": 1,
"time": 1,
"draw": 1,
"sigint": 0,
"sigterm": 0,
"reuse": 1,
"analyze": 1,
"myname": "PyChess %s" % pychess.VERSION,
"variants": "normal,wildcastle,nocastle,fischerandom,crazyhouse,light-brigade," +
"losers,suicide,giveaway,horde,atomic,racingkings,seirawan," +
"kingofthehill,3check,placement,asean,cambodian,makruk,sittuyin",
"colors": 0,
"ics": 0,
"name": 0,
"pause": 0, # Unimplemented
"nps": 0, # Unimplemented
"debug": 1,
"memory": 0, # Unimplemented
"smp": 0, # Unimplemented
"egt": "gaviota",
"option": "skipPruneChance -slider 0 0 100"
}
python = sys.executable.split("/")[-1]
python_version = "%s.%s.%s" % sys.version_info[0:3]
self.print("# %s [%s %s]" %
(self.features["myname"], python, python_version))
def handle_sigterm(self, *args):
self.__stopSearching()
sys.exit(0)
def makeReady(self):
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, self.handle_sigterm)
def run(self):
while True:
try:
line = get_input()
except EOFError:
line = "quit"
lines = line.split()
try:
if not lines:
continue
log.debug(line, extra={"task": "xboard"})
# CECP commands
# See http://home.hccnet.nl/h.g.muller/engine-intf.html
if lines[0] == "xboard":
pass
elif lines[0] == "protover":
stringPairs = ["=".join([k, '"%s"' % v if isinstance(
v, str) else str(v)]) for k, v in self.features.items()]
self.print("feature %s" % " ".join(stringPairs))
self.print("feature done=1")
elif lines[0] in ("accepted", "rejected"):
# We only really care about one case:
if tuple(lines) == ("rejected", "debug"):
self.debug = False
elif lines[0] == "new":
self.__stopSearching()
self.board = LBoard(NORMALCHESS)
self.board.applyFen(FEN_START)
self.outOfBook = False
self.forced = False
self.playingAs = BLACK
self.clock[:] = self.basetime, self.basetime
self.searchtime = 0
self.sd = MAXPLY
if self.analyzing:
self.__analyze()
elif lines[0] == "variant":
if len(lines) > 1:
if lines[1] == "fischerandom":
self.board.variant = FISCHERRANDOMCHESS
elif lines[1] == "crazyhouse":
self.board.variant = CRAZYHOUSECHESS
self.board.iniHouse()
elif lines[1] == "wildcastle":
self.board.variant = WILDCASTLESHUFFLECHESS
elif lines[1] == "losers":
self.board.variant = LOSERSCHESS
elif lines[1] == "suicide":
self.board.variant = SUICIDECHESS
elif lines[1] == "giveaway":
self.board.variant = GIVEAWAYCHESS
elif lines[1] == "atomic":
self.board.variant = ATOMICCHESS
self.board.iniAtomic()
elif lines[1] == "3check":
self.board = LBoard(THREECHECKCHESS)
self.board.applyFen(THREECHECKSTART)
elif lines[1] == "racingkings":
self.board.variant = RACINGKINGSCHESS
elif lines[1] == "kingofthehill":
self.board.variant = KINGOFTHEHILLCHESS
elif lines[1] == "horde":
self.board = LBoard(HORDECHESS)
self.board.applyFen(HORDESTART)
elif lines[1] == "placement":
self.board = LBoard(PLACEMENTCHESS)
self.board.applyFen(PLACEMENTSTART)
elif lines[1] == "asean":
self.board = LBoard(ASEANCHESS)
self.board.applyFen(ASEANSTART)
elif lines[1] == "makruk":
self.board = LBoard(MAKRUKCHESS)
self.board.applyFen(MAKRUKSTART)
elif lines[1] == "cambodian":
self.board = LBoard(CAMBODIANCHESS)
self.board.applyFen(KAMBODIANSTART)
elif lines[1] == "sittuyin":
self.board = LBoard(SITTUYINCHESS)
self.board.applyFen(SITTUYINSTART)
elif lines[1] == "seirawan":
self.board = LBoard(SCHESS)
self.board.applyFen(SCHESSSTART)
elif lines[1] == "light-brigade":
self.board = LBoard(LIGHTBRIGADECHESS)
self.board.applyFen(LIGHTBRIGADESTART)
elif lines[0] == "quit":
self.forced = True
self.__stopSearching()
sys.exit(0)
elif lines[0] == "random":
leval.random = True
elif lines[0] == "force":
if not self.forced and not self.analyzing:
self.forced = True
self.__stopSearching()
elif lines[0] == "go":
self.playingAs = self.board.color
self.forced = False
self.__go()
elif lines[0] == "playother":
self.playingAs = 1 - self.board.color
self.forced = False
# TODO: start pondering, if possible
elif lines[0] in ("black", "white"):
newColor = lines[0] == "black" and BLACK or WHITE
self.__stopSearching()
self.playingAs = 1 - newColor
if self.board.color != newColor:
self.board.setColor(newColor)
self.board.setEnpassant(None)
if self.analyzing:
self.__analyze()
elif lines[0] == "level":
self.movestogo = int(lines[1])
inc = int(lines[3])
minutes = lines[2].split(":")
# Per protocol spec, strip off any non-numeric suffixes.
for i in range(len(minutes)):
minutes[i] = re.match(r'\d*', minutes[i]).group()
self.basetime = int(minutes[0]) * 60
if len(minutes) > 1 and minutes[1]:
self.basetime += int(minutes[1])
self.clock[:] = self.basetime, self.basetime
self.increment = inc
self.searchtime = 0
elif lines[0] == "st":
self.searchtime = float(lines[1])
elif lines[0] == "sd":
self.sd = int(lines[1])
# Unimplemented: nps
elif lines[0] == "time":
self.clock[self.playingAs] = float(lines[1]) / 100.
elif lines[0] == "otim":
self.clock[1 - self.playingAs] = float(lines[1]) / 100.
elif lines[0] == "usermove":
self.__stopSearching()
try:
move = parseAny(self.board, lines[1])
except ParsingError:
self.print("Error (unknown command): %s" % lines[1])
self.print(self.board.prepr(ascii=ASCII))
continue
if not validateMove(self.board, move):
self.print("Illegal move: %s" % lines[1])
self.print(self.board.prepr(ascii=ASCII))
continue
self.board.applyMove(move)
self.playingAs = self.board.color
if not self.forced and not self.analyzing:
self.__go()
if self.analyzing:
self.__analyze()
elif lines[0] == "?":
if not self.forced and not self.analyzing:
self.__stopSearching()
elif lines[0] == "ping":
self.print("pong %s" % lines[1])
elif lines[0] == "draw":
if self.__willingToDraw():
self.print("offer draw")
elif lines[0] == "result":
# We don't really care what the result is at the moment.
pass
elif lines[0] == "setboard":
self.__stopSearching()
try:
self.board = LBoard(self.board.variant)
fen = " ".join(lines[1:])
self.board.applyFen(fen.replace("[", "/").replace("]",
""))
except SyntaxError as err:
self.print("tellusererror Illegal position: %s" %
str(err))
# "edit" is unimplemented. See docs. Exiting edit mode returns to analyze mode.
elif lines[0] == "hint":
pass # TODO: Respond "Hint: MOVE" if we have an expected reply
elif lines[0] == "bk":
entries = getOpenings(self.board)
if entries:
totalWeight = sum(entry[1] for entry in entries)
for entry in entries:
self.print("\t%s\t%02.2f%%" %
(toSAN(self.board, entry[0]), entry[1] *
100.0 / totalWeight))
elif lines[0] == "undo":
self.__stopSearching()
self.board.popMove()
if self.analyzing:
self.__analyze()
elif lines[0] == "remove":
self.__stopSearching()
self.board.popMove()
self.board.popMove()
if self.analyzing:
self.__analyze()
elif lines[0] in ("hard", "easy"):
self.ponder = (lines[0] == "hard")
elif lines[0] in ("post", "nopost"):
self.post = (lines[0] == "post")
elif lines[0] == "analyze":
self.analyzing = True
self.__analyze()
elif lines[0] in ("name", "rating", "ics", "computer"):
pass # We don't care.
# Unimplemented: pause, resume
elif lines[0] == "memory":
# FIXME: this is supposed to control the *total* memory use.
if lsearch.searching:
self.print("Error (already searching):", line)
else:
limit = int(lines[1])
if limit < 1:
self.print("Error (limit too low):", line)
else:
pass
# TODO implement
# lsearch.setHashSize(limit)
elif lines[0] == "cores":
pass # We aren't SMP-capable.
elif lines[0] == "egtpath":
if len(lines) >= 3 and lines[1] == "gaviota":
if lines[2]:
conf.set("egtb_path", lines[2])
else:
conf.set("egtb_path", conf.get("egtb_path"))
from pychess.Utils.lutils.lsearch import enableEGTB
enableEGTB()
elif lines[0] == "option" and len(lines) > 1:
name, eq, value = lines[1].partition("=")
if value:
value = int(
value
) # CECP spec says option values are *always* numeric
if name == "skipPruneChance":
if 0 <= value <= 100:
self.skipPruneChance = value / 100.0
else:
self.print(
"Error (argument must be an integer 0..100): %s"
% line)
# CECP analyze mode commands
# See http://www.gnu.org/software/xboard/engine-intf.html#11
elif lines[0] == "exit":
if self.analyzing:
self.__stopSearching()
self.analyzing = False
# Periodic updates (".") are not implemented.
# Custom commands
elif lines[0] == "moves":
self.print(self.board.prepr(ascii=ASCII))
self.print([toSAN(self.board, move)
for move in genAllMoves(self.board)])
elif lines[0] == "captures":
self.print(self.board.prepr(ascii=ASCII))
self.print([toSAN(self.board, move)
for move in genCaptures(self.board)])
elif lines[0] == "evasions":
self.print(self.board.prepr(ascii=ASCII))
self.print([toSAN(self.board, move)
for move in genCheckEvasions(self.board)])
elif lines[0] == "benchmark":
if len(lines) > 1:
benchmark(int(lines[1]))
else:
benchmark()
elif lines[0] == "profile":
if len(lines) > 1:
import cProfile
cProfile.runctx("benchmark()", locals(), globals(),
lines[1])
else:
self.print("Usage: profile outputfilename")
elif lines[0] == "perft":
root = "0" if len(lines) < 3 else lines[2]
depth = "1" if len(lines) == 1 else lines[1]
if root.isdigit() and depth.isdigit():
perft(self.board, int(depth), int(root))
else:
self.print("Error (arguments must be integer")
elif lines[0] == "stop_unittest":
break
elif len(lines) == 1:
# A GUI without usermove support might try to send a move.
try:
move = parseAny(self.board, line)
except ParsingError:
self.print("Error (unknown command): %s" % line)
continue
if not validateMove(self.board, move):
self.print("Illegal move: %s" % lines[0])
self.print(self.board.prepr(ascii=ASCII))
continue
self.__stopSearching()
self.board.applyMove(move)
self.playingAs = self.board.color
if not self.forced and not self.analyzing:
self.__go()
if self.analyzing:
self.__analyze()
else:
self.print("Error (unknown command): %s" % line)
except IndexError:
self.print("Error (missing argument): %s" % line)
def __stopSearching(self):
lsearch.searching = False
if self.thread:
self.thread.join()
def __go(self):
def ondone(result):
if not self.forced:
self.board.applyMove(parseSAN(self.board, result))
self.print("move %s" % result)
# TODO: start pondering, if enabled
self.thread = Thread(target=PyChess._PyChess__go,
name=fident(PyChess._PyChess__go),
args=(self, ondone))
self.thread.daemon = True
self.thread.start()
def __analyze(self):
self.thread = Thread(target=PyChess._PyChess__analyze,
name=fident(PyChess._PyChess__analyze),
args=(self, ))
self.thread.daemon = True
self.thread.start()
def __willingToDraw(self):
return self.scr <= 0 # FIXME: this misbehaves in all but the simplest use cases
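# Illustrative CECP session (an assumed GUI-side dialogue; command names are
# from the xboard protocol, the moves themselves are hypothetical):
#   GUI -> engine: level 40 5 0
#   GUI -> engine: usermove e2e4
#   engine -> GUI: move e7e5
#   GUI -> engine: quit
# The command loop above consumes one such line at a time until "quit".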
|
gpl-3.0
|
jordanemedlock/psychtruths
|
temboo/core/Library/Amazon/SQS/SendMessage.py
|
4
|
5370
|
# -*- coding: utf-8 -*-
###############################################################################
#
# SendMessage
# Sends a message to the specified queue.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class SendMessage(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the SendMessage Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(SendMessage, self).__init__(temboo_session, '/Library/Amazon/SQS/SendMessage')
def new_input_set(self):
return SendMessageInputSet()
def _make_result_set(self, result, path):
return SendMessageResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return SendMessageChoreographyExecution(session, exec_id, path)
class SendMessageInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the SendMessage
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AWSAccessKeyId(self, value):
"""
Set the value of the AWSAccessKeyId input for this Choreo. ((required, string) The Access Key ID provided by Amazon Web Services.)
"""
super(SendMessageInputSet, self)._set_input('AWSAccessKeyId', value)
def set_AWSAccountId(self, value):
"""
Set the value of the AWSAccountId input for this Choreo. ((required, integer) The id for the AWS account associated with the queue you're sending a message to (remove all dashes in the account number).)
"""
super(SendMessageInputSet, self)._set_input('AWSAccountId', value)
def set_AWSSecretKeyId(self, value):
"""
Set the value of the AWSSecretKeyId input for this Choreo. ((required, string) The Secret Key ID provided by Amazon Web Services.)
"""
super(SendMessageInputSet, self)._set_input('AWSSecretKeyId', value)
def set_DelaySeconds(self, value):
"""
Set the value of the DelaySeconds input for this Choreo. ((optional, string) The number of seconds (0 to 900 - 15 minutes) to delay a specific message.)
"""
super(SendMessageInputSet, self)._set_input('DelaySeconds', value)
def set_MessageAttributes(self, value):
"""
Set the value of the MessageAttributes input for this Choreo. ((optional, json) A JSON array of message attributes. See Choreo notes for formatting details.)
"""
super(SendMessageInputSet, self)._set_input('MessageAttributes', value)
def set_MessageBody(self, value):
"""
Set the value of the MessageBody input for this Choreo. ((required, string) The message to send. Maximum size is 64 KB.)
"""
super(SendMessageInputSet, self)._set_input('MessageBody', value)
def set_QueueName(self, value):
"""
        Set the value of the QueueName input for this Choreo. ((required, string) The name of the queue you want to send a message to.)
"""
super(SendMessageInputSet, self)._set_input('QueueName', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are "xml" (the default) and "json".)
"""
super(SendMessageInputSet, self)._set_input('ResponseFormat', value)
def set_UserRegion(self, value):
"""
Set the value of the UserRegion input for this Choreo. ((optional, string) The AWS region that corresponds to the SQS endpoint you wish to access. The default region is "us-east-1". See description below for valid values.)
"""
super(SendMessageInputSet, self)._set_input('UserRegion', value)
class SendMessageResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the SendMessage Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Amazon.)
"""
return self._output.get('Response', None)
class SendMessageChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return SendMessageResultSet(response, path)
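# A minimal usage sketch, kept as comments. It assumes valid Temboo and AWS
# credentials; every placeholder value below is hypothetical:
#
#     from temboo.core.session import TembooSession
#
#     session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#     choreo = SendMessage(session)
#     inputs = choreo.new_input_set()
#     inputs.set_AWSAccessKeyId('AWS_ACCESS_KEY_ID')
#     inputs.set_AWSSecretKeyId('AWS_SECRET_KEY')
#     inputs.set_AWSAccountId('123456789012')
#     inputs.set_QueueName('my-queue')
#     inputs.set_MessageBody('hello from Temboo')
#     results = choreo.execute_with_results(inputs)
#     print(results.get_Response())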
|
apache-2.0
|
G33KS44n/mysql-5.6
|
xtrabackup/test/python/testtools/tests/test_distutilscmd.py
|
42
|
2635
|
# Copyright (c) 2010-2011 Testtools authors. See LICENSE for details.
"""Tests for the distutils test command logic."""
from distutils.dist import Distribution
from testtools.helpers import try_import, try_imports
fixtures = try_import('fixtures')
StringIO = try_imports(['StringIO.StringIO', 'io.StringIO'])
import testtools
from testtools import TestCase
from testtools.distutilscmd import TestCommand
if fixtures:
class SampleTestFixture(fixtures.Fixture):
"""Creates testtools.runexample temporarily."""
def __init__(self):
self.package = fixtures.PythonPackage(
'runexample', [('__init__.py', """
from testtools import TestCase
class TestFoo(TestCase):
def test_bar(self):
pass
def test_quux(self):
pass
def test_suite():
from unittest import TestLoader
return TestLoader().loadTestsFromName(__name__)
""")])
def setUp(self):
super(SampleTestFixture, self).setUp()
self.useFixture(self.package)
testtools.__path__.append(self.package.base)
self.addCleanup(testtools.__path__.remove, self.package.base)
class TestCommandTest(TestCase):
def setUp(self):
super(TestCommandTest, self).setUp()
if fixtures is None:
self.skipTest("Need fixtures")
def test_test_module(self):
self.useFixture(SampleTestFixture())
stream = StringIO()
dist = Distribution()
dist.script_name = 'setup.py'
dist.script_args = ['test']
dist.cmdclass = {'test': TestCommand}
dist.command_options = {
'test': {'test_module': ('command line', 'testtools.runexample')}}
cmd = dist.reinitialize_command('test')
cmd.runner.stdout = stream
dist.run_command('test')
self.assertEqual("""Tests running...
Ran 2 tests in 0.000s
OK
""", stream.getvalue())
def test_test_suite(self):
self.useFixture(SampleTestFixture())
stream = StringIO()
dist = Distribution()
dist.script_name = 'setup.py'
dist.script_args = ['test']
dist.cmdclass = {'test': TestCommand}
dist.command_options = {
'test': {
'test_suite': (
'command line', 'testtools.runexample.test_suite')}}
cmd = dist.reinitialize_command('test')
cmd.runner.stdout = stream
dist.run_command('test')
self.assertEqual("""Tests running...
Ran 2 tests in 0.000s
OK
""", stream.getvalue())
def test_suite():
from unittest import TestLoader
return TestLoader().loadTestsFromName(__name__)
|
gpl-2.0
|
hsuantien/scikit-learn
|
sklearn/feature_selection/univariate_selection.py
|
95
|
23709
|
"""Univariate features selection."""
# Authors: V. Michel, B. Thirion, G. Varoquaux, A. Gramfort, E. Duchesnay.
# L. Buitinck, A. Joly
# License: BSD 3 clause
import numpy as np
import warnings
from scipy import special, stats
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..preprocessing import LabelBinarizer
from ..utils import (as_float_array, check_array, check_X_y, safe_sqr,
safe_mask)
from ..utils.extmath import norm, safe_sparse_dot
from ..utils.validation import check_is_fitted
from .base import SelectorMixin
def _clean_nans(scores):
"""
Fixes Issue #1240: NaNs can't be properly compared, so change them to the
    smallest value of scores' dtype. -inf seems to be unreliable.
"""
# XXX where should this function be called? fit? scoring functions
# themselves?
scores = as_float_array(scores, copy=True)
scores[np.isnan(scores)] = np.finfo(scores.dtype).min
return scores
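# Illustrative behaviour: _clean_nans(np.array([0.5, np.nan])) returns
# array([0.5, np.finfo(np.float64).min]), so NaN scores rank below every real
# score and are never picked by the top-k / percentile filters below.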
######################################################################
# Scoring functions
# The following function is a rewriting of scipy.stats.f_oneway
# Contrary to the scipy.stats.f_oneway implementation it does not
# copy the data while keeping the inputs unchanged.
def f_oneway(*args):
"""Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that 2 or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
sample1, sample2, ... : array_like, sparse matrices
The sample measurements should be given as arguments.
Returns
-------
F-value : float
The computed F-value of the test.
p-value : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent
2. Each sample is from a normally distributed population
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
    See ``scipy.stats.f_oneway``, which should give the same results while
    being less efficient.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
"""
n_classes = len(args)
args = [as_float_array(a) for a in args]
n_samples_per_class = np.array([a.shape[0] for a in args])
n_samples = np.sum(n_samples_per_class)
ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args)
sums_args = [np.asarray(a.sum(axis=0)) for a in args]
square_of_sums_alldata = sum(sums_args) ** 2
square_of_sums_args = [s ** 2 for s in sums_args]
sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
ssbn = 0.
for k, _ in enumerate(args):
ssbn += square_of_sums_args[k] / n_samples_per_class[k]
ssbn -= square_of_sums_alldata / float(n_samples)
sswn = sstot - ssbn
dfbn = n_classes - 1
dfwn = n_samples - n_classes
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
constant_features_idx = np.where(msw == 0.)[0]
if (np.nonzero(msb)[0].size != msb.size and constant_features_idx.size):
warnings.warn("Features %s are constant." % constant_features_idx,
UserWarning)
f = msb / msw
# flatten matrix to vector in sparse case
f = np.asarray(f).ravel()
prob = special.fdtrc(dfbn, dfwn, f)
return f, prob
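# Illustrative call: f_oneway(X[y == 0], X[y == 1]) returns one (F, p) pair
# per feature column; f_classif below is exactly this call, with the group
# split derived from the class labels in y.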
def f_classif(X, y):
"""Compute the ANOVA F-value for the provided sample.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} shape = [n_samples, n_features]
        The set of regressors that will be tested sequentially.
    y : array of shape(n_samples)
        The target vector (class labels).
Returns
-------
F : array, shape = [n_features,]
The set of F values.
pval : array, shape = [n_features,]
The set of p-values.
See also
--------
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])
args = [X[safe_mask(X, y == k)] for k in np.unique(y)]
return f_oneway(*args)
def _chisquare(f_obs, f_exp):
"""Fast replacement for scipy.stats.chisquare.
Version from https://github.com/scipy/scipy/pull/2525 with additional
optimizations.
"""
f_obs = np.asarray(f_obs, dtype=np.float64)
k = len(f_obs)
# Reuse f_obs for chi-squared statistics
chisq = f_obs
chisq -= f_exp
chisq **= 2
chisq /= f_exp
chisq = chisq.sum(axis=0)
return chisq, special.chdtrc(k - 1, chisq)
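# Worked example (illustrative): _chisquare(np.array([[10.], [20.]]),
# np.array([[15.], [15.]])) computes (10-15)**2/15 + (20-15)**2/15 ~= 3.33
# with k - 1 = 1 degree of freedom.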
def chi2(X, y):
"""Compute chi-squared stats between each non-negative feature and class.
This score can be used to select the n_features features with the
highest values for the test chi-squared statistic from X, which must
contain only non-negative features such as booleans or frequencies
(e.g., term counts in document classification), relative to the classes.
Recall that the chi-square test measures dependence between stochastic
variables, so using this function "weeds out" the features that are the
most likely to be independent of class and therefore irrelevant for
classification.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features_in)
Sample vectors.
y : array-like, shape = (n_samples,)
Target vector (class labels).
Returns
-------
chi2 : array, shape = (n_features,)
chi2 statistics of each feature.
pval : array, shape = (n_features,)
p-values of each feature.
Notes
-----
Complexity of this algorithm is O(n_classes * n_features).
See also
--------
    f_classif: ANOVA F-value between label/feature for classification tasks.
f_regression: F-value between label/feature for regression tasks.
"""
# XXX: we might want to do some of the following in logspace instead for
# numerical stability.
X = check_array(X, accept_sparse='csr')
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative.")
Y = LabelBinarizer().fit_transform(y)
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
observed = safe_sparse_dot(Y.T, X) # n_classes * n_features
feature_count = check_array(X.sum(axis=0))
class_prob = check_array(Y.mean(axis=0))
expected = np.dot(class_prob.T, feature_count)
return _chisquare(observed, expected)
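# Illustrative reading of the arrays above: with one-hot Y, observed[c, f] is
# the total count of feature f within class c and expected[c, f] is
# P(class c) * total count of f, i.e. a classes-by-features contingency test
# evaluated independently per feature.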
def f_regression(X, y, center=True):
"""Univariate linear regression tests.
Quick linear model for testing the effect of a single regressor,
sequentially for many regressors.
This is done in 3 steps:
1. The regressor of interest and the data are orthogonalized
wrt constant regressors.
2. The cross correlation between data and regressors is computed.
3. It is converted to an F score then to a p-value.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} shape = (n_samples, n_features)
        The set of regressors that will be tested sequentially.
    y : array of shape(n_samples).
        The target vector.
    center : bool, default=True
        If true, X and y will be centered.
Returns
-------
F : array, shape=(n_features,)
F values of features.
pval : array, shape=(n_features,)
p-values of F-scores.
See also
--------
    f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
"""
if issparse(X) and center:
raise ValueError("center=True only allowed for dense data")
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float)
if center:
y = y - np.mean(y)
X = X.copy('F') # faster in fortran
X -= X.mean(axis=0)
# compute the correlation
corr = safe_sparse_dot(y, X)
# XXX could use corr /= row_norms(X.T) here, but the test doesn't pass
corr /= np.asarray(np.sqrt(safe_sqr(X).sum(axis=0))).ravel()
corr /= norm(y)
# convert to p-value
degrees_of_freedom = y.size - (2 if center else 1)
F = corr ** 2 / (1 - corr ** 2) * degrees_of_freedom
pv = stats.f.sf(F, 1, degrees_of_freedom)
return F, pv
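# Illustrative check: for a single centered column x and centered y,
# corr = <x, y> / (||x|| * ||y||) and F = corr**2 / (1 - corr**2) * dof,
# which is the squared t-statistic of the slope in a one-variable linear fit.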
######################################################################
# Base classes
class _BaseFilter(BaseEstimator, SelectorMixin):
"""Initialize the univariate feature selection.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
"""
def __init__(self, score_func):
self.score_func = score_func
def fit(self, X, y):
"""Run score function on (X, y) and get the appropriate features.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])
if not callable(self.score_func):
raise TypeError("The score function should be a callable, %s (%s) "
"was passed."
% (self.score_func, type(self.score_func)))
self._check_params(X, y)
self.scores_, self.pvalues_ = self.score_func(X, y)
self.scores_ = np.asarray(self.scores_)
self.pvalues_ = np.asarray(self.pvalues_)
return self
def _check_params(self, X, y):
pass
######################################################################
# Specific filters
######################################################################
class SelectPercentile(_BaseFilter):
"""Select features according to a percentile of the highest scores.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
percentile : int, optional, default=10
Percent of features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
See also
--------
    f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, percentile=10):
super(SelectPercentile, self).__init__(score_func)
self.percentile = percentile
def _check_params(self, X, y):
if not 0 <= self.percentile <= 100:
raise ValueError("percentile should be >=0, <=100; got %r"
% self.percentile)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
# Cater for NaNs
if self.percentile == 100:
return np.ones(len(self.scores_), dtype=np.bool)
elif self.percentile == 0:
return np.zeros(len(self.scores_), dtype=np.bool)
scores = _clean_nans(self.scores_)
        threshold = stats.scoreatpercentile(scores,
                                            100 - self.percentile)
        mask = scores > threshold
        ties = np.where(scores == threshold)[0]
if len(ties):
max_feats = len(scores) * self.percentile // 100
kept_ties = ties[:max_feats - mask.sum()]
mask[kept_ties] = True
return mask
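    # Worked example (illustrative): scores=[1, 3, 3, 5] with percentile=50
    # keeps max_feats = 4*50//100 = 2 features; the 50th-percentile threshold
    # is 3, the mask starts as [F, F, F, T], and one of the scores tied at 3
    # is then added so that exactly 2 features are selected.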
class SelectKBest(_BaseFilter):
"""Select features according to the k highest scores.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
k : int or "all", optional, default=10
Number of top features to select.
The "all" option bypasses selection, for use in a parameter search.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
See also
--------
    f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
SelectPercentile: Select features based on percentile of the highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, k=10):
super(SelectKBest, self).__init__(score_func)
self.k = k
def _check_params(self, X, y):
if not (self.k == "all" or 0 <= self.k <= X.shape[1]):
raise ValueError("k should be >=0, <= n_features; got %r."
"Use k='all' to return all features."
% self.k)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
if self.k == 'all':
return np.ones(self.scores_.shape, dtype=bool)
elif self.k == 0:
return np.zeros(self.scores_.shape, dtype=bool)
else:
scores = _clean_nans(self.scores_)
mask = np.zeros(scores.shape, dtype=bool)
# Request a stable sort. Mergesort takes more memory (~40MB per
# megafeature on x86-64).
mask[np.argsort(scores, kind="mergesort")[-self.k:]] = 1
return mask
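    # Illustrative: scores=[0.1, nan, 0.9, 0.5] with k=2 -> _clean_nans maps
    # the NaN to the dtype minimum, the stable argsort ranks it lowest, and
    # the mask keeps indices {2, 3} (the 0.9 and 0.5 scores).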
class SelectFpr(_BaseFilter):
"""Filter: Select the pvalues below alpha based on a FPR test.
FPR test stands for False Positive Rate test. It controls the total
amount of false detections.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
alpha : float, optional
The highest p-value for features to be kept.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
See also
--------
    f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFpr, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
return self.pvalues_ < self.alpha
class SelectFdr(_BaseFilter):
"""Filter: Select the p-values for an estimated false discovery rate
This uses the Benjamini-Hochberg procedure. ``alpha`` is an upper bound
on the expected false discovery rate.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
alpha : float, optional
The highest uncorrected p-value for features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
References
----------
http://en.wikipedia.org/wiki/False_discovery_rate
See also
--------
    f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFdr, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
n_features = len(self.pvalues_)
sv = np.sort(self.pvalues_)
        # Benjamini-Hochberg: compare the i-th smallest p-value (1-indexed)
        # against alpha * i / n_features.
        selected = sv[sv <= float(self.alpha) / n_features
                      * np.arange(1, n_features + 1)]
if selected.size == 0:
return np.zeros_like(self.pvalues_, dtype=bool)
return self.pvalues_ <= selected.max()
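    # Worked Benjamini-Hochberg example (illustrative): pvalues =
    # [0.01, 0.02, 0.30, 0.04] with alpha=0.05 and n_features=4 gives sorted
    # p = [0.01, 0.02, 0.04, 0.30] against thresholds
    # [0.0125, 0.025, 0.0375, 0.05]; the largest p-value under its threshold
    # is 0.02, so exactly the features with p <= 0.02 are kept.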
class SelectFwe(_BaseFilter):
"""Filter: Select the p-values corresponding to Family-wise error rate
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
alpha : float, optional
The highest uncorrected p-value for features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
See also
--------
    f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFwe, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
return (self.pvalues_ < self.alpha / len(self.pvalues_))
######################################################################
# Generic filter
######################################################################
# TODO this class should fit on either p-values or scores,
# depending on the mode.
class GenericUnivariateSelect(_BaseFilter):
"""Univariate feature selector with configurable strategy.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
mode : {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'}
Feature selection mode.
param : float or int depending on the feature selection mode
Parameter of the corresponding mode.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
See also
--------
    f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
"""
_selection_modes = {'percentile': SelectPercentile,
'k_best': SelectKBest,
'fpr': SelectFpr,
'fdr': SelectFdr,
'fwe': SelectFwe}
def __init__(self, score_func=f_classif, mode='percentile', param=1e-5):
super(GenericUnivariateSelect, self).__init__(score_func)
self.mode = mode
self.param = param
def _make_selector(self):
selector = self._selection_modes[self.mode](score_func=self.score_func)
# Now perform some acrobatics to set the right named parameter in
# the selector
possible_params = selector._get_param_names()
possible_params.remove('score_func')
selector.set_params(**{possible_params[0]: self.param})
return selector
def _check_params(self, X, y):
if self.mode not in self._selection_modes:
raise ValueError("The mode passed should be one of %s, %r,"
" (type %s) was passed."
% (self._selection_modes.keys(), self.mode,
type(self.mode)))
self._make_selector()._check_params(X, y)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
selector = self._make_selector()
selector.pvalues_ = self.pvalues_
selector.scores_ = self.scores_
return selector._get_support_mask()
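# A minimal, hedged usage sketch of the selectors above. The toy data is
# hypothetical, and because of the relative imports this module has to run as
# part of the package (e.g. via
# "python -m sklearn.feature_selection.univariate_selection"):
if __name__ == "__main__":
    X_demo = np.array([[1, 2, 0],
                       [2, 4, 1],
                       [3, 6, 0],
                       [4, 8, 1]], dtype=np.float64)
    y_demo = np.array([0, 0, 1, 1])
    kbest = SelectKBest(score_func=f_classif, k=2).fit(X_demo, y_demo)
    print("F scores:", kbest.scores_)  # informative columns score highest
    print("kept columns:", kbest.get_support(indices=True))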
|
bsd-3-clause
|
mpvillafranca/hearcloud
|
applications/box/forms.py
|
2
|
2475
|
from django import forms
from django.core.exceptions import ValidationError
from .models import Song, Playlist
class CreatePlaylistForm(forms.ModelForm):
"""
Form class to create playlists
"""
class Meta:
model = Playlist
fields = [
'name',
]
widgets = {
'name': forms.TextInput(attrs={'class': 'form-control'})
}
class UpdateSongForm(forms.ModelForm):
"""
Form class to update already created songs on the db
"""
class Meta:
model = Song
fields = [
'artwork', 'title', 'artist', 'year', 'album',
'release_date', 'album_artist', 'track_number', 'track_total', 'bpm',
'original_artist', 'key', 'composer', 'lyricist', 'comments',
'remixer', 'label', 'genre', 'file', 'lyrics',
]
widgets = {
'title': forms.TextInput(attrs={'class': 'form-control'}),
'artist': forms.TextInput(attrs={'class': 'form-control'}),
'year': forms.NumberInput(attrs={'class': 'form-control'}),
'album': forms.TextInput(attrs={'class': 'form-control'}),
'release_date': forms.DateInput(attrs={'class': 'form-control'}),
'album_artist': forms.TextInput(attrs={'class': 'form-control'}),
'track_number': forms.NumberInput(attrs={'class': 'form-control'}),
'track_total': forms.NumberInput(attrs={'class': 'form-control'}),
'bpm': forms.NumberInput(attrs={'class': 'form-control'}),
'original_artist': forms.TextInput(attrs={'class': 'form-control'}),
'key': forms.TextInput(attrs={'class': 'form-control'}),
'composer': forms.TextInput(attrs={'class': 'form-control'}),
'lyricist': forms.TextInput(attrs={'class': 'form-control'}),
'comments': forms.TextInput(attrs={'class': 'form-control'}),
'remixer': forms.TextInput(attrs={'class': 'form-control'}),
'label': forms.TextInput(attrs={'class': 'form-control'}),
'genre': forms.TextInput(attrs={'class': 'form-control'}),
'file': forms.TextInput(attrs={'class': 'form-control'}),
'lyrics': forms.Textarea(attrs={'class': 'form-control'})
}
def save(self, commit=True):
instance = super(UpdateSongForm, self).save(commit=False)
if commit:
# save
instance.save()
return instance
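# Illustrative view-side usage (the request/instance names are hypothetical):
#     form = UpdateSongForm(request.POST, request.FILES, instance=song)
#     if form.is_valid():
#         song = form.save()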
|
agpl-3.0
|
eMerzh/Diamond-1
|
src/collectors/sidekiqweb/test/testsidekiqweb.py
|
35
|
2061
|
#!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from sidekiqweb import SidekiqWebCollector
################################################################################
class TestSidekiqWebCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('SidekiqWebCollector', {
'interval': 10
})
self.collector = SidekiqWebCollector(config, None)
def test_import(self):
self.assertTrue(SidekiqWebCollector)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
patch_urlopen = patch('urllib2.urlopen', Mock(
return_value=self.getFixture('stats')))
patch_urlopen.start()
self.collector.collect()
patch_urlopen.stop()
metrics = {
'redis.connected_clients': 22,
'redis.uptime_in_days': 62,
'redis.used_memory_human_byte': 1426063.36,
'redis.used_memory_peak_human_byte': 8598323.2,
'sidekiq.busy': 0,
'sidekiq.default_latency': 0,
'sidekiq.enqueued': 0,
'sidekiq.failed': 22,
'sidekiq.processed': 4622701,
'sidekiq.retries': 0,
'sidekiq.scheduled': 30,
}
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_should_fail_gracefully(self, publish_mock):
patch_urlopen = patch('urllib2.urlopen', Mock(
return_value=self.getFixture('stats_blank')))
patch_urlopen.start()
self.collector.collect()
patch_urlopen.stop()
self.assertPublishedMany(publish_mock, {})
################################################################################
if __name__ == "__main__":
unittest.main()
|
mit
|
wilkerwma/codeschool
|
src/codeschool/models/wagtail.py
|
2
|
11441
|
from markdown import markdown
from django.db import models, transaction
from django.utils.translation import ugettext_lazy as _
from django.utils.text import slugify
from wagtail.wagtailcore.models import Page
from wagtail.wagtailcore.fields import RichTextField
from wagtail.wagtailadmin.edit_handlers import FieldPanel
from viewpack.types import DetailObject
from .mixins import MigrateMixin
from .serialize import SerializationMixin
class PageSerializationMixin(SerializationMixin):
"""
A serializable page.
"""
_serialization_exclude_fields = {
'id', 'path', 'depth', 'numchild', 'slug', 'has_published_changes',
'url_path', 'content_type_id', 'page_ptr_id', 'owner_id',
'latest_revision_created_at', 'first_published_at',
}
class CodeschoolPageMixin(PageSerializationMixin):
"""
Extend wagtail's page with some extra functionality.
"""
# cache parent link for creating objects in a consistent
# tree state.
__parent = None
__db = None
# a list of children scheduled to be saved when the page
# gain a pk
__children = ()
#: Alias for page.title
name = property(lambda x: x.title)
#: Default content color
content_color = "#10A2A4"
@name.setter
def name(self, value):
self.title = value
def __init__(self, *args, **kwargs):
# Try to obtain the value for the parent page element.
parent = kwargs.pop('parent_page', None)
if parent is not None:
if not isinstance(parent, Page):
name = parent.__class__.__name__
raise TypeError(
'The parent page must be a Page instance. got %s.' % name
)
self.__parent = parent
super().__init__(*args, **kwargs)
def __save_to_parent(self, *args, **kwargs):
"""
        Saves the model using the __parent reference to insert it at the
        correct position in the tree.
"""
        # If no parent is set, we use the default save method from super()
if self.__parent is None:
kwargs.setdefault('using', self.__db)
return super().save(*args, **kwargs)
# Parent must be saved into the database.
if self.__parent.id is None:
raise ValueError('parent must be saved into the database!')
# Now we have to construct all path, depth, etc info from the parent.
        # It seems that there is a bug in the add_child() method that prevents
        # it from calculating the correct path when the parent has no children.
parent, self.__parent = self.__parent, None
self.depth = parent.depth + 1
self.url_path = '%s/%s/' % (parent.url_path.rstrip('/'), self.slug)
self.numchild = 0
if parent.numchild == 0:
self.path = parent.path + '0001'
else:
last_sibling = parent.get_last_child()
if last_sibling is None:
# The tree is possibly in an inconsistent state: the parent
# claims to have a child, but has no last child.
                raise RuntimeError(
                    'invalid tree: %s (numchild=%s, children=%s)' % (
                        parent.get_parent().find_problems(),
                        parent.numchild, parent.get_children()))
else:
self.path = last_sibling._inc_path()
# Save self and parent
with transaction.atomic():
super().save(*args, **kwargs)
parent.numchild += 1
parent.save(update_fields=['numchild'])
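    # Illustrative path arithmetic (hypothetical values): with
    # parent.path == '0001' and numchild == 0, the first child gets path
    # '00010001'; later siblings come from last_sibling._inc_path()
    # (e.g. '00010001' -> '00010002').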
def get_parent(self, *args):
"""
Returns the parent node.
"""
if self.__parent is None:
return super().get_parent(*args)
return self.__parent
def set_parent(self, parent):
"""
Sets the parent node link.
"""
if self.id is None:
self.__parent = parent
self.path = None
self.url_path = None
self.depth = self.__parent.depth + 1
else:
self.move(parent)
def get_default_parent(self):
"""
Return the default parent instance if no parent is set.
"""
from cs_core.models import rogue_root
return rogue_root()
def save(self, *args, **kwargs):
# Prepare fields
if self.id is None:
self.prepare_create()
else:
self.prepare_save()
# We check if __parent is set. This should only happen if pk is None.
# If parent is set, we *do not* call the super save method. Instead,
# we add_child() the parent node and remove the __parent reference.
if self.__parent is not None:
assert self.id is None
self.__save_to_parent(*args, **kwargs)
        # No explicit parent was set, but one is still required to save the
        # model. We ask for the default parent page and proceed.
elif self.pk is None and not self.path:
self.__parent = self.get_default_parent()
self.__save_to_parent(*args, **kwargs)
else:
kwargs.setdefault('using', self.__db)
super().save(*args, **kwargs)
# Save any children nodes, if they exist.
if self.__children:
with transaction.atomic():
for add_child in self.__children:
self.add_child(**add_child)
# Clean temporary fields
if self.__db:
del self.__db
if self.__parent:
del self.__parent
if self.__children:
del self.__children
def prepare_create(self):
"""
Called just before saving an element without a pk.
        This method fills in any required fields that were not set up during
initialization.
"""
self.slug = self.slug or slugify(self.title)
def prepare_save(self):
"""
Called just before saving an element with a pk.
        This method fills in any required fields that were not set up during
initialization.
"""
self.slug = self.slug or slugify(self.title)
if self.depth is None and self.path:
self.depth = len(self.path) // 4
elif self.depth is None:
self.depth = 0
def add_child(self, **kwargs):
"""
        Add a new child element to the page.
This method accepts the insertion of child nodes even before the page
        gains a pk attribute. This is done by deferring the operation to a
temporary list. Everything is then saved to the database when the
save() method is called.
"""
if self.pk is None:
if 'instance' not in kwargs:
raise ValueError('must specify an instance!')
self.__children += kwargs,
else:
super().add_child(**kwargs)
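    # Illustrative deferred insertion (the page classes are hypothetical):
    #     page = CoursePage(title='Intro', parent_page=root)
    #     page.add_child(instance=LessonPage(title='Lesson 1'))
    #     page.save()  # saves the page, then flushes the queued child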
def get_template(self, request, *args, **kwargs):
template = super().get_template(request, *args, **kwargs)
if template.endswith('.html'):
return template[:-5] + '.jinja2'
return template
def get_context(self, request, *args, **kwargs):
context = super().get_context(request, *args, **kwargs)
        # Create an alias to the "page" context variable for all Page superclasses
for model in type(self).mro():
if model not in BASE_CLASSES_BLACKLIST:
if issubclass(model, Page):
context.setdefault(model.__name__.lower(), self)
context.setdefault('object', self)
# Sets the content color variable
context.setdefault('content_color', self.content_color)
# Sets model name and plural
meta = self._meta
context.setdefault('verbose_name', meta.verbose_name)
context.setdefault('verbose_name_plural', meta.verbose_name_plural)
        # TODO: this is an ugly hack that should go away!
obj = DetailObject(None)
obj.object = self
context.setdefault('detail_object', obj)
description = getattr(self, 'long_description_html', None)
if description:
context['description'] = description
return context
def get_absolute_url(self):
return self.url
class CodeschoolProxyPage(CodeschoolPageMixin, MigrateMixin, Page):
"""
A base class for all codeschool page types that are proxies from wagtail's
Page model.
"""
class Meta:
proxy = True
app_label = 'cs_core'
parent_page_types = []
class CodeschoolPage(CodeschoolPageMixin, MigrateMixin, Page):
"""
Base class for all codeschool page types.
This abstract class makes a few tweaks to Wagtail's default behavior.
"""
class Meta:
abstract = True
page_ptr = models.OneToOneField(
Page,
parent_link=True,
related_name='%(class)s_instance',
)
content_color = models.CharField(
_('color'),
max_length=20,
default="#10A2A4",
help_text=_('Personalize the main color for page content.'),
)
class ShortDescribablePage(CodeschoolPage):
"""
A describable page model that only adds the short_description field,
leaving the long_description/body definition to the user.
"""
class Meta:
abstract = True
short_description = models.CharField(
_('short description'),
max_length=140,
blank=True,
help_text=_('A very brief one-phrase description used in listings.'),
)
short_description_html = property(lambda x: markdown(x.short_description))
def save(self, *args, **kwargs):
self.seo_title = self.seo_title or self.short_description
return super().save(*args, **kwargs)
# Wagtail admin configurations
content_panels = CodeschoolPage.content_panels + [
FieldPanel('short_description'),
]
class DescribablePage(ShortDescribablePage):
"""
A describable model that inherits from a wagtail's Page model and uses a
RichTextField for its long_description field.
"""
class Meta:
abstract = True
body = RichTextField(
_('long description'),
blank=True,
help_text=_('A detailed explanation.')
)
# Html expansion of descriptions
#long_description = property(lambda x: html_to_markdown(x.body))
long_description = property(lambda x: x.body)
@long_description.setter
def long_description(self, value):
self.body = markdown(value)
long_description_html = property(lambda x: x.body)
# Wagtail admin configurations
content_panels = ShortDescribablePage.content_panels + [
FieldPanel('body', classname="full"),
]
class RootList(CodeschoolPageMixin, Page):
"""
Base class for all pages used as a root page in listings.
"""
class Meta:
proxy = True
app_label = 'cs_core'
short_description = ''
short_description_html = ''
long_description = ''
long_description_html = ''
def get_context(self, request, *args, **kwargs):
        context = super().get_context(request, *args, **kwargs)
context['object_list'] = (x.specific for x in self.get_children())
return context
BASE_CLASSES_BLACKLIST = {
RootList, DescribablePage, ShortDescribablePage, CodeschoolPage,
CodeschoolPageMixin, PageSerializationMixin, CodeschoolProxyPage
}
|
gpl-3.0
|
bureau14/qdb-benchmark
|
thirdparty/boost/libs/python/test/test_pointer_adoption.py
|
46
|
1708
|
# Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
"""
>>> from test_pointer_adoption_ext import *
>>> num_a_instances()
0
>>> a = create('dynamically allocated')
>>> num_a_instances()
1
>>> a.content()
'dynamically allocated'
>>> innards = a.get_inner()
>>> innards.change('with an exposed reference')
>>> a.content()
'with an exposed reference'
# The a instance should be kept alive...
>>> a = None
>>> num_a_instances()
1
# ...until we're done with its innards
>>> innards = None
>>> num_a_instances()
0
>>> b = B()
>>> a = create('another')
>>> b.a_content()
'empty'
>>> innards = b.adopt(a);
>>> b.a_content()
'another'
>>> num_a_instances()
1
>>> del a # innards and b are both holding a reference
>>> num_a_instances()
1
>>> innards.change('yet another')
>>> b.a_content()
'yet another'
>>> del innards
>>> num_a_instances() # b still owns a reference to a
1
>>> del b
>>> num_a_instances()
0
Test call policies for constructors here
>>> a = create('second a')
>>> num_a_instances()
1
>>> b = B(a)
>>> num_a_instances()
1
>>> a.content()
'second a'
>>> del a
>>> num_a_instances()
1
>>> b.a_content()
'second a'
>>> del b
>>> num_a_instances()
0
>>> assert as_A(create('dynalloc')) is not None
>>> base = Base()
>>> assert as_A(base) is None
"""
def run(args = None):
import sys
import doctest
if args is not None:
sys.argv = args
return doctest.testmod(sys.modules.get(__name__))
if __name__ == '__main__':
print "running..."
import sys
status = run()[0]
    if status == 0: print("Done.")
sys.exit(status)
|
bsd-2-clause
|
adamtiger/tensorflow
|
tensorflow/contrib/distributions/python/ops/bijectors/weibull.py
|
18
|
1141
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Weibull bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.distributions.python.ops.bijectors.weibull_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ["Weibull"]
remove_undocumented(__name__, _allowed_symbols)
|
apache-2.0
|
tlatzko/spmcluster
|
.tox/2.7-cover/lib/python2.7/site-packages/pip/_vendor/html5lib/ihatexml.py
|
1727
|
16581
|
from __future__ import absolute_import, division, unicode_literals
import re
import warnings
from .constants import DataLossWarning
baseChar = """
[#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] |
[#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] |
[#x014A-#x017E] | [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] |
[#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 |
[#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] |
[#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] |
[#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] |
[#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] |
[#x04EE-#x04F5] | [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 |
[#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] |
[#x0641-#x064A] | [#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] |
[#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D |
[#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] |
[#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] |
[#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] |
[#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] |
[#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] |
[#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] |
[#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 |
[#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] |
[#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] |
[#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] |
[#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] |
[#x0BA8-#x0BAA] | [#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] |
[#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] |
[#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] |
[#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] |
[#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] |
[#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | [#x0E32-#x0E33] |
[#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A |
#x0E8D | [#x0E94-#x0E97] | [#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 |
#x0EA7 | [#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] |
#x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] |
[#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] |
[#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C |
#x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 |
[#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | [#x116D-#x116E] |
[#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] |
[#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 |
[#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] |
[#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B |
#x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE |
[#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] |
[#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 |
[#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] |
[#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]"""
ideographic = """[#x4E00-#x9FA5] | #x3007 | [#x3021-#x3029]"""
combiningCharacter = """
[#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] |
[#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 |
[#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] |
[#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] |
#x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] |
[#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] |
[#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 |
#x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] |
[#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC |
[#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] |
#x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] |
[#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] |
[#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] |
[#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | [#x0C82-#x0C83] |
[#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] |
[#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] |
#x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 |
[#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] |
#x0F35 | #x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] |
[#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] |
[#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] |
#x3099 | #x309A"""
digit = """
[#x0030-#x0039] | [#x0660-#x0669] | [#x06F0-#x06F9] | [#x0966-#x096F] |
[#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] |
[#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] |
[#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]"""
extender = """
#x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 |
[#x3031-#x3035] | [#x309D-#x309E] | [#x30FC-#x30FE]"""
letter = " | ".join([baseChar, ideographic])
# NameChar characters; the stricter first-character rule is nameFirst below.
name = " | ".join([letter, digit, ".", "-", "_", combiningCharacter,
extender])
nameFirst = " | ".join([letter, "_"])
reChar = re.compile(r"#x([\d|A-F]{4,4})")
reCharRange = re.compile(r"\[#x([\d|A-F]{4,4})-#x([\d|A-F]{4,4})\]")
def charStringToList(chars):
charRanges = [item.strip() for item in chars.split(" | ")]
rv = []
for item in charRanges:
foundMatch = False
for regexp in (reChar, reCharRange):
match = regexp.match(item)
if match is not None:
rv.append([hexToInt(item) for item in match.groups()])
if len(rv[-1]) == 1:
rv[-1] = rv[-1] * 2
foundMatch = True
break
if not foundMatch:
assert len(item) == 1
rv.append([ord(item)] * 2)
rv = normaliseCharList(rv)
return rv
def normaliseCharList(charList):
charList = sorted(charList)
for item in charList:
assert item[1] >= item[0]
rv = []
i = 0
while i < len(charList):
j = 1
rv.append(charList[i])
while i + j < len(charList) and charList[i + j][0] <= rv[-1][1] + 1:
rv[-1][1] = charList[i + j][1]
j += 1
i += j
return rv
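# Worked example (illustrative):
#     normaliseCharList([[10, 12], [11, 14], [20, 20]]) -> [[10, 14], [20, 20]]
# i.e. overlapping or adjacent ranges are merged left to right.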
# We don't really support characters above the BMP :(
max_unicode = int("FFFF", 16)
def missingRanges(charList):
rv = []
if charList[0] != 0:
rv.append([0, charList[0][0] - 1])
for i, item in enumerate(charList[:-1]):
rv.append([item[1] + 1, charList[i + 1][0] - 1])
if charList[-1][1] != max_unicode:
rv.append([charList[-1][1] + 1, max_unicode])
return rv
def listToRegexpStr(charList):
rv = []
for item in charList:
if item[0] == item[1]:
rv.append(escapeRegexp(chr(item[0])))
else:
rv.append(escapeRegexp(chr(item[0])) + "-" +
escapeRegexp(chr(item[1])))
return "[%s]" % "".join(rv)
def hexToInt(hex_str):
return int(hex_str, 16)
def escapeRegexp(string):
specialCharacters = (".", "^", "$", "*", "+", "?", "{", "}",
"[", "]", "|", "(", ")", "-")
for char in specialCharacters:
string = string.replace(char, "\\" + char)
return string
# output from the above
nonXmlNameBMPRegexp = re.compile('[\x00-,/:-@\\[-\\^`\\{-\xb6\xb8-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u02cf\u02d2-\u02ff\u0346-\u035f\u0362-\u0385\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482\u0487-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u0590\u05a2\u05ba\u05be\u05c0\u05c3\u05c5-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u063f\u0653-\u065f\u066a-\u066f\u06b8-\u06b9\u06bf\u06cf\u06d4\u06e9\u06ee-\u06ef\u06fa-\u0900\u0904\u093a-\u093b\u094e-\u0950\u0955-\u0957\u0964-\u0965\u0970-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09f2-\u0a01\u0a03-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a58\u0a5d\u0a5f-\u0a65\u0a75-\u0a80\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0adf\u0ae1-\u0ae5\u0af0-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3b\u0b44-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b62-\u0b65\u0b70-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0be6\u0bf0-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c5f\u0c62-\u0c65\u0c70-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce2-\u0ce5\u0cf0-\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d3d\u0d44-\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d5f\u0d62-\u0d65\u0d70-\u0e00\u0e2f\u0e3b-\u0e3f\u0e4f\u0e5a-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f34\u0f36\u0f38\u0f3a-\u0f3d\u0f48\u0f6a-\u0f70\u0f85\u0f8c-\u0f8f\u0f96\u0f98\u0fae-\u0fb0\u0fb8\u0fba-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u20cf\u20dd-\u20e0\u20e2-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3004\u3006\u3008-\u3020\u3030\u3036-\u3040\u3095-\u3098\u309b-\u309c\u309f-\u30a0\u30fb\u30ff-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
nonXmlNameFirstBMPRegexp = re.compile('[\x00-@\\[-\\^`\\{-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u0385\u0387\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u0640\u064b-\u0670\u06b8-\u06b9\u06bf\u06cf\u06d4\u06d6-\u06e4\u06e7-\u0904\u093a-\u093c\u093e-\u0957\u0962-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09db\u09de\u09e2-\u09ef\u09f2-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0adf\u0ae1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c5f\u0c62-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cdd\u0cdf\u0ce2-\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d5f\u0d62-\u0e00\u0e2f\u0e31\u0e34-\u0e3f\u0e46-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eb1\u0eb4-\u0ebc\u0ebe-\u0ebf\u0ec5-\u0f3f\u0f48\u0f6a-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3006\u3008-\u3020\u302a-\u3040\u3095-\u30a0\u30fb-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
# Simpler things
nonPubidCharRegexp = re.compile("[^\x20\x0D\x0Aa-zA-Z0-9\-\'()+,./:=?;!*#@$_%]")
class InfosetFilter(object):
replacementRegexp = re.compile(r"U[\dA-F]{5,5}")
def __init__(self, replaceChars=None,
dropXmlnsLocalName=False,
dropXmlnsAttrNs=False,
preventDoubleDashComments=False,
preventDashAtCommentEnd=False,
replaceFormFeedCharacters=True,
preventSingleQuotePubid=False):
self.dropXmlnsLocalName = dropXmlnsLocalName
self.dropXmlnsAttrNs = dropXmlnsAttrNs
self.preventDoubleDashComments = preventDoubleDashComments
self.preventDashAtCommentEnd = preventDashAtCommentEnd
self.replaceFormFeedCharacters = replaceFormFeedCharacters
self.preventSingleQuotePubid = preventSingleQuotePubid
self.replaceCache = {}
def coerceAttribute(self, name, namespace=None):
if self.dropXmlnsLocalName and name.startswith("xmlns:"):
warnings.warn("Attributes cannot begin with xmlns", DataLossWarning)
return None
elif (self.dropXmlnsAttrNs and
namespace == "http://www.w3.org/2000/xmlns/"):
warnings.warn("Attributes cannot be in the xml namespace", DataLossWarning)
return None
else:
return self.toXmlName(name)
def coerceElement(self, name, namespace=None):
return self.toXmlName(name)
def coerceComment(self, data):
if self.preventDoubleDashComments:
while "--" in data:
warnings.warn("Comments cannot contain adjacent dashes", DataLossWarning)
data = data.replace("--", "- -")
return data
def coerceCharacters(self, data):
if self.replaceFormFeedCharacters:
for i in range(data.count("\x0C")):
warnings.warn("Text cannot contain U+000C", DataLossWarning)
data = data.replace("\x0C", " ")
# Other non-xml characters
return data
def coercePubid(self, data):
dataOutput = data
for char in nonPubidCharRegexp.findall(data):
warnings.warn("Coercing non-XML pubid", DataLossWarning)
replacement = self.getReplacementCharacter(char)
dataOutput = dataOutput.replace(char, replacement)
if self.preventSingleQuotePubid and dataOutput.find("'") >= 0:
warnings.warn("Pubid cannot contain single quote", DataLossWarning)
dataOutput = dataOutput.replace("'", self.getReplacementCharacter("'"))
return dataOutput
def toXmlName(self, name):
nameFirst = name[0]
nameRest = name[1:]
m = nonXmlNameFirstBMPRegexp.match(nameFirst)
if m:
warnings.warn("Coercing non-XML name", DataLossWarning)
nameFirstOutput = self.getReplacementCharacter(nameFirst)
else:
nameFirstOutput = nameFirst
nameRestOutput = nameRest
replaceChars = set(nonXmlNameBMPRegexp.findall(nameRest))
for char in replaceChars:
warnings.warn("Coercing non-XML name", DataLossWarning)
replacement = self.getReplacementCharacter(char)
nameRestOutput = nameRestOutput.replace(char, replacement)
return nameFirstOutput + nameRestOutput
def getReplacementCharacter(self, char):
if char in self.replaceCache:
replacement = self.replaceCache[char]
else:
replacement = self.escapeChar(char)
return replacement
def fromXmlName(self, name):
for item in set(self.replacementRegexp.findall(name)):
name = name.replace(item, self.unescapeChar(item))
return name
def escapeChar(self, char):
replacement = "U%05X" % ord(char)
self.replaceCache[char] = replacement
return replacement
def unescapeChar(self, charcode):
return chr(int(charcode[1:], 16))
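# A minimal round-trip sketch with an illustrative input name; it assumes the
# module-level warnings import and the DataLossWarning class used above.
if __name__ == "__main__":
    _filter = InfosetFilter()
    # A leading digit is not a valid XML name-start character, so it gets
    # escaped to a U-prefixed codepoint that fromXmlName() can restore.
    _coerced = _filter.toXmlName("1bad-name")  # -> "U00031bad-name"
    assert _filter.fromXmlName(_coerced) == "1bad-name"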
|
bsd-2-clause
|
tuxfux-hlp-notes/python-batches
|
archieves/batch-64/09-modules/myenv/lib/python2.7/site-packages/django/apps/config.py
|
121
|
8077
|
import os
from importlib import import_module
from django.core.exceptions import AppRegistryNotReady, ImproperlyConfigured
from django.utils._os import upath
from django.utils.module_loading import module_has_submodule
MODELS_MODULE_NAME = 'models'
class AppConfig(object):
"""
Class representing a Django application and its configuration.
"""
def __init__(self, app_name, app_module):
# Full Python path to the application eg. 'django.contrib.admin'.
self.name = app_name
# Root module for the application eg. <module 'django.contrib.admin'
# from 'django/contrib/admin/__init__.pyc'>.
self.module = app_module
# The following attributes could be defined at the class level in a
# subclass, hence the test-and-set pattern.
# Last component of the Python path to the application eg. 'admin'.
# This value must be unique across a Django project.
if not hasattr(self, 'label'):
self.label = app_name.rpartition(".")[2]
# Human-readable name for the application eg. "Admin".
if not hasattr(self, 'verbose_name'):
self.verbose_name = self.label.title()
# Filesystem path to the application directory eg.
# u'/usr/lib/python2.7/dist-packages/django/contrib/admin'. Unicode on
# Python 2 and a str on Python 3.
if not hasattr(self, 'path'):
self.path = self._path_from_module(app_module)
# Module containing models eg. <module 'django.contrib.admin.models'
# from 'django/contrib/admin/models.pyc'>. Set by import_models().
# None if the application doesn't have a models module.
self.models_module = None
# Mapping of lower case model names to model classes. Initially set to
# None to prevent accidental access before import_models() runs.
self.models = None
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.label)
def _path_from_module(self, module):
"""Attempt to determine app's filesystem path from its module."""
# See #21874 for extended discussion of the behavior of this method in
# various cases.
# Convert paths to list because Python 3.3 _NamespacePath does not
# support indexing.
paths = list(getattr(module, '__path__', []))
if len(paths) != 1:
filename = getattr(module, '__file__', None)
if filename is not None:
paths = [os.path.dirname(filename)]
if len(paths) > 1:
raise ImproperlyConfigured(
"The app module %r has multiple filesystem locations (%r); "
"you must configure this app with an AppConfig subclass "
"with a 'path' class attribute." % (module, paths))
elif not paths:
raise ImproperlyConfigured(
"The app module %r has no filesystem location, "
"you must configure this app with an AppConfig subclass "
"with a 'path' class attribute." % (module,))
return upath(paths[0])
@classmethod
def create(cls, entry):
"""
Factory that creates an app config from an entry in INSTALLED_APPS.
"""
try:
# If import_module succeeds, entry is a path to an app module,
# which may specify an app config class with default_app_config.
# Otherwise, entry is a path to an app config class or an error.
module = import_module(entry)
except ImportError:
# Track that importing as an app module failed. If importing as an
# app config class fails too, we'll trigger the ImportError again.
module = None
mod_path, _, cls_name = entry.rpartition('.')
# Raise the original exception when entry cannot be a path to an
# app config class.
if not mod_path:
raise
else:
try:
# If this works, the app module specifies an app config class.
entry = module.default_app_config
except AttributeError:
# Otherwise, it simply uses the default app config class.
return cls(entry, module)
else:
mod_path, _, cls_name = entry.rpartition('.')
# If we're reaching this point, we must attempt to load the app config
# class located at <mod_path>.<cls_name>
mod = import_module(mod_path)
try:
cls = getattr(mod, cls_name)
except AttributeError:
if module is None:
# If importing as an app module failed, that error probably
# contains the most informative traceback. Trigger it again.
import_module(entry)
else:
raise
# Check for obvious errors. (This check prevents duck typing, but
# it could be removed if it became a problem in practice.)
if not issubclass(cls, AppConfig):
raise ImproperlyConfigured(
"'%s' isn't a subclass of AppConfig." % entry)
# Obtain app name here rather than in AppClass.__init__ to keep
# all error checking for entries in INSTALLED_APPS in one place.
try:
app_name = cls.name
except AttributeError:
raise ImproperlyConfigured(
"'%s' must supply a name attribute." % entry)
# Ensure app_name points to a valid module.
app_module = import_module(app_name)
# Entry is a path to an app config class.
return cls(app_name, app_module)
def check_models_ready(self):
"""
Raises an exception if models haven't been imported yet.
"""
if self.models is None:
raise AppRegistryNotReady(
"Models for app '%s' haven't been imported yet." % self.label)
def get_model(self, model_name):
"""
Returns the model with the given case-insensitive model_name.
Raises LookupError if no model exists with this name.
"""
self.check_models_ready()
try:
return self.models[model_name.lower()]
except KeyError:
raise LookupError(
"App '%s' doesn't have a '%s' model." % (self.label, model_name))
def get_models(self, include_auto_created=False,
include_deferred=False, include_swapped=False):
"""
Returns an iterable of models.
By default, the following models aren't included:
- auto-created models for many-to-many relations without
an explicit intermediate table,
- models created to satisfy deferred attribute queries,
- models that have been swapped out.
Set the corresponding keyword argument to True to include such models.
Keyword arguments aren't documented; they're a private API.
"""
self.check_models_ready()
for model in self.models.values():
if model._deferred and not include_deferred:
continue
if model._meta.auto_created and not include_auto_created:
continue
if model._meta.swapped and not include_swapped:
continue
yield model
def import_models(self, all_models):
# Dictionary of models for this app, primarily maintained in the
# 'all_models' attribute of the Apps this AppConfig is attached to.
# Injected as a parameter because it gets populated when models are
# imported, which might happen before populate() imports models.
self.models = all_models
if module_has_submodule(self.module, MODELS_MODULE_NAME):
models_module_name = '%s.%s' % (self.name, MODELS_MODULE_NAME)
self.models_module = import_module(models_module_name)
def ready(self):
"""
Override this method in subclasses to run code when Django starts.
"""
|
gpl-3.0
|
FICTURE7/youtube-dl
|
youtube_dl/extractor/tvigle.py
|
117
|
3921
|
# encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
float_or_none,
int_or_none,
parse_age_limit,
)
class TvigleIE(InfoExtractor):
IE_NAME = 'tvigle'
IE_DESC = 'Интернет-телевидение Tvigle.ru'
_VALID_URL = r'https?://(?:www\.)?(?:tvigle\.ru/(?:[^/]+/)+(?P<display_id>[^/]+)/$|cloud\.tvigle\.ru/video/(?P<id>\d+))'
_TESTS = [
{
'url': 'http://www.tvigle.ru/video/sokrat/',
'md5': '36514aed3657d4f70b4b2cef8eb520cd',
'info_dict': {
'id': '1848932',
'display_id': 'sokrat',
'ext': 'flv',
'title': 'Сократ',
'description': 'md5:d6b92ffb7217b4b8ebad2e7665253c17',
'duration': 6586,
'age_limit': 12,
},
'skip': 'georestricted',
},
{
'url': 'http://www.tvigle.ru/video/vladimir-vysotskii/vedushchii-teleprogrammy-60-minut-ssha-o-vladimire-vysotskom/',
'md5': 'e7efe5350dd5011d0de6550b53c3ba7b',
'info_dict': {
'id': '5142516',
'ext': 'flv',
'title': 'Ведущий телепрограммы «60 минут» (США) о Владимире Высоцком',
'description': 'md5:027f7dc872948f14c96d19b4178428a4',
'duration': 186.080,
'age_limit': 0,
},
'skip': 'georestricted',
}, {
'url': 'https://cloud.tvigle.ru/video/5267604/',
'only_matching': True,
}
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
if not video_id:
webpage = self._download_webpage(url, display_id)
video_id = self._html_search_regex(
r'class="video-preview current_playing" id="(\d+)">',
webpage, 'video id')
video_data = self._download_json(
'http://cloud.tvigle.ru/api/play/video/%s/' % video_id, display_id)
item = video_data['playlist']['items'][0]
videos = item.get('videos')
error_message = item.get('errorMessage')
if not videos and error_message:
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, error_message), expected=True)
title = item['title']
description = item.get('description')
thumbnail = item.get('thumbnail')
duration = float_or_none(item.get('durationMilliseconds'), 1000)
age_limit = parse_age_limit(item.get('ageRestrictions'))
formats = []
for vcodec, fmts in item['videos'].items():
for format_id, video_url in fmts.items():
if format_id == 'm3u8':
formats.extend(self._extract_m3u8_formats(
video_url, video_id, 'mp4', m3u8_id=vcodec))
continue
height = self._search_regex(
r'^(\d+)[pP]$', format_id, 'height', default=None)
formats.append({
'url': video_url,
'format_id': '%s-%s' % (vcodec, format_id),
'vcodec': vcodec,
'height': int_or_none(height),
'filesize': int_or_none(item.get('video_files_size', {}).get(vcodec, {}).get(format_id)),
})
self._sort_formats(formats)
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'age_limit': age_limit,
'formats': formats,
}
|
unlicense
|
bsmurphy/PyKrige
|
pykrige/core.py
|
1
|
30314
|
# coding: utf-8
"""
PyKrige
=======
Code by Benjamin S. Murphy and the PyKrige Developers
bscott.murphy@gmail.com
Summary
-------
Methods used by multiple classes.
References
----------
[1] P.K. Kitanidis, Introduction to Geostatistcs: Applications in Hydrogeology,
(Cambridge University Press, 1997) 272 p.
[2] T. Vincenty, Direct and Inverse Solutions of Geodesics on the Ellipsoid
with Application of Nested Equations, Survey Review 23 (176),
(Directorate of Overseas Survey, Kingston Road, Tolworth, Surrey 1975)
Copyright (c) 2015-2020, PyKrige Developers
"""
import numpy as np
from scipy.spatial.distance import pdist, squareform, cdist
from scipy.optimize import least_squares
import scipy.linalg as spl
eps = 1.0e-10 # Cutoff for comparison to zero
P_INV = {"pinv": spl.pinv, "pinv2": spl.pinv2, "pinvh": spl.pinvh}
def great_circle_distance(lon1, lat1, lon2, lat2):
"""Calculate the great circle distance between one or multiple pairs of
points given in spherical coordinates. Spherical coordinates are expected
in degrees. Angle definition follows standard longitude/latitude definition.
This uses the arctan version of the great-circle distance function
(en.wikipedia.org/wiki/Great-circle_distance) for increased
numerical stability.
Parameters
----------
lon1: float scalar or numpy array
Longitude coordinate(s) of the first element(s) of the point
pair(s), given in degrees.
lat1: float scalar or numpy array
Latitude coordinate(s) of the first element(s) of the point
pair(s), given in degrees.
lon2: float scalar or numpy array
Longitude coordinate(s) of the second element(s) of the point
pair(s), given in degrees.
lat2: float scalar or numpy array
Latitude coordinate(s) of the second element(s) of the point
pair(s), given in degrees.
Calculation of distances follows numpy elementwise semantics, so if
an array of length N is passed, all input parameters need to be
arrays of length N or scalars.
Returns
-------
distance: float scalar or numpy array
The great circle distance(s) (in degrees) between the
given pair(s) of points.
"""
# Convert to radians:
lat1 = np.array(lat1) * np.pi / 180.0
lat2 = np.array(lat2) * np.pi / 180.0
dlon = (lon1 - lon2) * np.pi / 180.0
# Evaluate trigonometric functions that need to be evaluated more
# than once:
c1 = np.cos(lat1)
s1 = np.sin(lat1)
c2 = np.cos(lat2)
s2 = np.sin(lat2)
cd = np.cos(dlon)
# This uses the arctan version of the great-circle distance function
# from en.wikipedia.org/wiki/Great-circle_distance for increased
# numerical stability.
# Formula can be obtained from [2] combining eqns. (14)-(16)
# for spherical geometry (f=0).
return (
180.0
/ np.pi
* np.arctan2(
np.sqrt((c2 * np.sin(dlon)) ** 2 + (c1 * s2 - s1 * c2 * cd) ** 2),
s1 * s2 + c1 * c2 * cd,
)
)
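# Quick sanity checks with illustrative coordinates: antipodal points on the
# equator are 180 degrees apart, and a quarter turn along the equator is 90.
if __name__ == "__main__":
    assert abs(great_circle_distance(0.0, 0.0, 180.0, 0.0) - 180.0) < 1e-8
    assert abs(great_circle_distance(0.0, 0.0, 90.0, 0.0) - 90.0) < 1e-8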
def euclid3_to_great_circle(euclid3_distance):
"""Convert euclidean distance between points on a unit sphere to
the corresponding great circle distance.
Parameters
----------
euclid3_distance: float scalar or numpy array
The euclidean three-space distance(s) between points on a
unit sphere, thus between [0,2].
Returns
-------
great_circle_dist: float scalar or numpy array
The corresponding great circle distance(s) between the points.
"""
# Eliminate some possible numerical errors:
euclid3_distance[euclid3_distance > 2.0] = 2.0
return 180.0 - 360.0 / np.pi * np.arccos(0.5 * euclid3_distance)
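# Illustrative check: a chord of length 2 joins antipodal points (180 deg)
# and a chord of sqrt(2) spans a quarter turn (90 deg). Note the function
# clips values > 2 in place, so it expects an array rather than a scalar.
if __name__ == "__main__":
    _chords = euclid3_to_great_circle(np.array([2.0, np.sqrt(2.0)]))
    assert np.allclose(_chords, [180.0, 90.0])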
def _adjust_for_anisotropy(X, center, scaling, angle):
"""Adjusts data coordinates to take into account anisotropy.
Can also be used to take into account data scaling. Angles are CCW about
specified axes. Scaling is applied in rotated coordinate system.
Parameters
----------
X : ndarray
float array [n_samples, n_dim], the input array of coordinates
center : ndarray
float array [n_dim], the coordinate of centers
scaling : ndarray
float array [n_dim - 1], the scaling of last two dimensions
angle : ndarray
float array [2*n_dim - 3], the anisotropy angle (degrees)
Returns
-------
X_adj : ndarray
float array [n_samples, n_dim], the X array adjusted for anisotropy.
"""
center = np.asarray(center)[None, :]
angle = np.asarray(angle) * np.pi / 180
X -= center
Ndim = X.shape[1]
if Ndim == 1:
        raise NotImplementedError("Not implemented yet.")
elif Ndim == 2:
stretch = np.array([[1, 0], [0, scaling[0]]])
rot_tot = np.array(
[
[np.cos(-angle[0]), -np.sin(-angle[0])],
[np.sin(-angle[0]), np.cos(-angle[0])],
]
)
elif Ndim == 3:
stretch = np.array(
[[1.0, 0.0, 0.0], [0.0, scaling[0], 0.0], [0.0, 0.0, scaling[1]]]
)
rotate_x = np.array(
[
[1.0, 0.0, 0.0],
[0.0, np.cos(-angle[0]), -np.sin(-angle[0])],
[0.0, np.sin(-angle[0]), np.cos(-angle[0])],
]
)
rotate_y = np.array(
[
[np.cos(-angle[1]), 0.0, np.sin(-angle[1])],
[0.0, 1.0, 0.0],
[-np.sin(-angle[1]), 0.0, np.cos(-angle[1])],
]
)
rotate_z = np.array(
[
[np.cos(-angle[2]), -np.sin(-angle[2]), 0.0],
[np.sin(-angle[2]), np.cos(-angle[2]), 0.0],
[0.0, 0.0, 1.0],
]
)
rot_tot = np.dot(rotate_z, np.dot(rotate_y, rotate_x))
else:
raise ValueError(
"Adjust for anisotropy function doesn't support ND spaces where N>3"
)
X_adj = np.dot(stretch, np.dot(rot_tot, X.T)).T
X_adj += center
return X_adj
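# Illustrative 2D check: with unit scaling, the -angle convention above
# rotates the data clockwise for a positive (CCW) input angle, so a 90
# degree angle maps the point (1, 0) to (0, -1).
if __name__ == "__main__":
    _out = _adjust_for_anisotropy(np.array([[1.0, 0.0]]), [0.0, 0.0],
                                  [1.0], [90.0])
    assert np.allclose(_out, [[0.0, -1.0]])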
def _make_variogram_parameter_list(variogram_model, variogram_model_parameters):
"""Converts the user input for the variogram model parameters into the
format expected in the rest of the code.
Makes a list of variogram model parameters in the expected order if the
user has provided the model parameters. If not, returns None, which
will ensure that the automatic variogram estimation routine is
triggered.
Parameters
----------
variogram_model : str
specifies the variogram model type
variogram_model_parameters : list, dict, or None
parameters provided by the user, can also be None if the user
did not specify the variogram model parameters; if None,
this function returns None, that way the automatic variogram
estimation routine will kick in down the road...
Returns
-------
parameter_list : list
variogram model parameters stored in a list in the expected order;
if variogram_model is 'custom', model parameters should already
be encapsulated in a list, so the list is returned unaltered;
if variogram_model_parameters was not specified by the user,
None is returned; order for internal variogram models is as follows...
linear - [slope, nugget]
power - [scale, exponent, nugget]
gaussian - [psill, range, nugget]
spherical - [psill, range, nugget]
exponential - [psill, range, nugget]
hole-effect - [psill, range, nugget]
"""
if variogram_model_parameters is None:
parameter_list = None
elif type(variogram_model_parameters) is dict:
if variogram_model in ["linear"]:
if (
"slope" not in variogram_model_parameters.keys()
or "nugget" not in variogram_model_parameters.keys()
):
raise KeyError(
"'linear' variogram model requires 'slope' "
"and 'nugget' specified in variogram model "
"parameter dictionary."
)
else:
parameter_list = [
variogram_model_parameters["slope"],
variogram_model_parameters["nugget"],
]
elif variogram_model in ["power"]:
if (
"scale" not in variogram_model_parameters.keys()
or "exponent" not in variogram_model_parameters.keys()
or "nugget" not in variogram_model_parameters.keys()
):
raise KeyError(
"'power' variogram model requires 'scale', "
"'exponent', and 'nugget' specified in "
"variogram model parameter dictionary."
)
else:
parameter_list = [
variogram_model_parameters["scale"],
variogram_model_parameters["exponent"],
variogram_model_parameters["nugget"],
]
elif variogram_model in ["gaussian", "spherical", "exponential", "hole-effect"]:
if (
"range" not in variogram_model_parameters.keys()
or "nugget" not in variogram_model_parameters.keys()
):
raise KeyError(
"'%s' variogram model requires 'range', "
"'nugget', and either 'sill' or 'psill' "
"specified in variogram model parameter "
"dictionary." % variogram_model
)
else:
if "sill" in variogram_model_parameters.keys():
parameter_list = [
variogram_model_parameters["sill"]
- variogram_model_parameters["nugget"],
variogram_model_parameters["range"],
variogram_model_parameters["nugget"],
]
elif "psill" in variogram_model_parameters.keys():
parameter_list = [
variogram_model_parameters["psill"],
variogram_model_parameters["range"],
variogram_model_parameters["nugget"],
]
else:
raise KeyError(
"'%s' variogram model requires either "
"'sill' or 'psill' specified in "
"variogram model parameter "
"dictionary." % variogram_model
)
elif variogram_model in ["custom"]:
raise TypeError(
"For user-specified custom variogram model, "
"parameters must be specified in a list, "
"not a dict."
)
else:
raise ValueError(
"Specified variogram model must be one of the "
"following: 'linear', 'power', 'gaussian', "
"'spherical', 'exponential', 'hole-effect', "
"'custom'."
)
elif type(variogram_model_parameters) is list:
if variogram_model in ["linear"]:
if len(variogram_model_parameters) != 2:
raise ValueError(
"Variogram model parameter list must have "
"exactly two entries when variogram model "
"set to 'linear'."
)
parameter_list = variogram_model_parameters
elif variogram_model in ["power"]:
if len(variogram_model_parameters) != 3:
raise ValueError(
"Variogram model parameter list must have "
"exactly three entries when variogram model "
"set to 'power'."
)
parameter_list = variogram_model_parameters
elif variogram_model in ["gaussian", "spherical", "exponential", "hole-effect"]:
if len(variogram_model_parameters) != 3:
raise ValueError(
"Variogram model parameter list must have "
"exactly three entries when variogram model "
"set to '%s'." % variogram_model
)
parameter_list = [
variogram_model_parameters[0] - variogram_model_parameters[2],
variogram_model_parameters[1],
variogram_model_parameters[2],
]
elif variogram_model in ["custom"]:
parameter_list = variogram_model_parameters
else:
raise ValueError(
"Specified variogram model must be one of the "
"following: 'linear', 'power', 'gaussian', "
"'spherical', 'exponential', 'hole-effect', "
"'custom'."
)
else:
raise TypeError(
"Variogram model parameters must be provided in either "
"a list or a dict when they are explicitly specified."
)
return parameter_list
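# Illustrative dict input for a spherical model; the 'sill' entry is
# converted to the partial sill (sill - nugget) as documented above.
if __name__ == "__main__":
    _params = _make_variogram_parameter_list(
        "spherical", {"sill": 1.5, "range": 10.0, "nugget": 0.5})
    assert _params == [1.0, 10.0, 0.5]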
def _initialize_variogram_model(
X,
y,
variogram_model,
variogram_model_parameters,
variogram_function,
nlags,
weight,
coordinates_type,
):
"""Initializes the variogram model for kriging. If user does not specify
parameters, calls automatic variogram estimation routine.
Returns lags, semivariance, and variogram model parameters.
Parameters
----------
X: ndarray
float array [n_samples, n_dim], the input array of coordinates
y: ndarray
float array [n_samples], the input array of values to be kriged
variogram_model: str
user-specified variogram model to use
variogram_model_parameters: list
user-specified parameters for variogram model
variogram_function: callable
function that will be called to evaluate variogram model
(only used if user does not specify variogram model parameters)
nlags: int
integer scalar, number of bins into which to group inter-point distances
weight: bool
boolean flag that indicates whether the semivariances at smaller lags
should be weighted more heavily in the automatic variogram estimation
coordinates_type: str
type of coordinates in X array, can be 'euclidean' for standard
rectangular coordinates or 'geographic' if the coordinates are lat/lon
Returns
-------
lags: ndarray
float array [nlags], distance values for bins into which the
semivariances were grouped
semivariance: ndarray
float array [nlags], averaged semivariance for each bin
variogram_model_parameters: list
parameters for the variogram model, either returned unaffected if the
user specified them or returned from the automatic variogram
estimation routine
"""
# distance calculation for rectangular coords now leverages
# scipy.spatial.distance's pdist function, which gives pairwise distances
# in a condensed distance vector (distance matrix flattened to a vector)
# to calculate semivariances...
if coordinates_type == "euclidean":
d = pdist(X, metric="euclidean")
g = 0.5 * pdist(y[:, None], metric="sqeuclidean")
# geographic coordinates only accepted if the problem is 2D
# assume X[:, 0] ('x') => lon, X[:, 1] ('y') => lat
# old method of distance calculation is retained here...
# could be improved in the future
elif coordinates_type == "geographic":
if X.shape[1] != 2:
raise ValueError(
"Geographic coordinate type only supported for 2D datasets."
)
x1, x2 = np.meshgrid(X[:, 0], X[:, 0], sparse=True)
y1, y2 = np.meshgrid(X[:, 1], X[:, 1], sparse=True)
z1, z2 = np.meshgrid(y, y, sparse=True)
d = great_circle_distance(x1, y1, x2, y2)
g = 0.5 * (z1 - z2) ** 2.0
indices = np.indices(d.shape)
d = d[(indices[0, :, :] > indices[1, :, :])]
g = g[(indices[0, :, :] > indices[1, :, :])]
else:
raise ValueError(
"Specified coordinate type '%s' is not supported." % coordinates_type
)
# Equal-sized bins are now implemented. The upper limit on the bins
# is appended to the list (instead of calculated as part of the
# list comprehension) to avoid any numerical oddities
# (specifically, say, ending up as 0.99999999999999 instead of 1.0).
# Appending dmax + 0.001 ensures that the largest distance value
# is included in the semivariogram calculation.
dmax = np.amax(d)
dmin = np.amin(d)
dd = (dmax - dmin) / nlags
bins = [dmin + n * dd for n in range(nlags)]
dmax += 0.001
bins.append(dmax)
# This old binning method was experimental and doesn't seem
# to work too well. Bins were computed such that there are more
# at shorter lags. This effectively weights smaller distances more
# highly in determining the variogram. As Kitanidis points out,
# the variogram fit to the data at smaller lag distances is more
# important. However, the value at the largest lag probably ends up
# being biased too high for the larger values and thereby throws off
# automatic variogram calculation and confuses comparison of the
# semivariogram with the variogram model.
#
# dmax = np.amax(d)
# dmin = np.amin(d)
# dd = dmax - dmin
# bins = [dd*(0.5**n) + dmin for n in range(nlags, 1, -1)]
# bins.insert(0, dmin)
# bins.append(dmax)
lags = np.zeros(nlags)
semivariance = np.zeros(nlags)
for n in range(nlags):
# This 'if... else...' statement ensures that there are data
# in the bin so that numpy can actually find the mean. If we
# don't test this first, then Python kicks out an annoying warning
# message when there is an empty bin and we try to calculate the mean.
if d[(d >= bins[n]) & (d < bins[n + 1])].size > 0:
lags[n] = np.mean(d[(d >= bins[n]) & (d < bins[n + 1])])
semivariance[n] = np.mean(g[(d >= bins[n]) & (d < bins[n + 1])])
else:
lags[n] = np.nan
semivariance[n] = np.nan
lags = lags[~np.isnan(semivariance)]
semivariance = semivariance[~np.isnan(semivariance)]
    # a few tests to make sure that, if the variogram_model_parameters
# are supplied, they have been supplied as expected...
# if variogram_model_parameters was not defined, then estimate the variogram
if variogram_model_parameters is not None:
if variogram_model == "linear" and len(variogram_model_parameters) != 2:
raise ValueError(
"Exactly two parameters required for linear variogram model."
)
elif (
variogram_model
in ["power", "spherical", "exponential", "gaussian", "hole-effect"]
and len(variogram_model_parameters) != 3
):
raise ValueError(
"Exactly three parameters required for "
"%s variogram model" % variogram_model
)
else:
if variogram_model == "custom":
raise ValueError(
"Variogram parameters must be specified when "
"implementing custom variogram model."
)
else:
variogram_model_parameters = _calculate_variogram_model(
lags, semivariance, variogram_model, variogram_function, weight
)
return lags, semivariance, variogram_model_parameters
def _variogram_residuals(params, x, y, variogram_function, weight):
"""Function used in variogram model estimation. Returns residuals between
calculated variogram and actual data (lags/semivariance).
Called by _calculate_variogram_model.
Parameters
----------
params: list or 1D array
parameters for calculating the model variogram
x: ndarray
lags (distances) at which to evaluate the model variogram
y: ndarray
experimental semivariances at the specified lags
variogram_function: callable
        the actual function that evaluates the model variogram
weight: bool
flag for implementing the crude weighting routine, used in order to
fit smaller lags better
Returns
-------
resid: 1d array
residuals, dimension same as y
"""
# this crude weighting routine can be used to better fit the model
# variogram to the experimental variogram at smaller lags...
# the weights are calculated from a logistic function, so weights at small
# lags are ~1 and weights at the longest lags are ~0;
# the center of the logistic weighting is hard-coded to be at 70% of the
# distance from the shortest lag to the largest lag
if weight:
drange = np.amax(x) - np.amin(x)
k = 2.1972 / (0.1 * drange)
x0 = 0.7 * drange + np.amin(x)
weights = 1.0 / (1.0 + np.exp(-k * (x0 - x)))
weights /= np.sum(weights)
resid = (variogram_function(params, x) - y) * weights
else:
resid = variogram_function(params, x) - y
return resid
def _calculate_variogram_model(
lags, semivariance, variogram_model, variogram_function, weight
):
"""Function that fits a variogram model when parameters are not specified.
Returns variogram model parameters that minimize the RMSE between the
specified variogram function and the actual calculated variogram points.
Parameters
----------
lags: 1d array
binned lags/distances to use for variogram model parameter estimation
semivariance: 1d array
binned/averaged experimental semivariances to use for variogram model
parameter estimation
variogram_model: str/unicode
specified variogram model to use for parameter estimation
variogram_function: callable
        the actual function that evaluates the model variogram
    weight: bool
        flag for implementing the crude weighting routine, used in order to fit
        smaller lags better; this is passed on to the residual calculation
        function, where weighting is actually applied...
Returns
-------
res: list
list of estimated variogram model parameters
NOTE that the estimation routine works in terms of the partial sill
(psill = sill - nugget) -- setting bounds such that psill > 0 ensures that
the sill will always be greater than the nugget...
"""
if variogram_model == "linear":
x0 = [
(np.amax(semivariance) - np.amin(semivariance))
/ (np.amax(lags) - np.amin(lags)),
np.amin(semivariance),
]
bnds = ([0.0, 0.0], [np.inf, np.amax(semivariance)])
elif variogram_model == "power":
x0 = [
(np.amax(semivariance) - np.amin(semivariance))
/ (np.amax(lags) - np.amin(lags)),
1.1,
np.amin(semivariance),
]
bnds = ([0.0, 0.001, 0.0], [np.inf, 1.999, np.amax(semivariance)])
else:
x0 = [
np.amax(semivariance) - np.amin(semivariance),
0.25 * np.amax(lags),
np.amin(semivariance),
]
bnds = (
[0.0, 0.0, 0.0],
[10.0 * np.amax(semivariance), np.amax(lags), np.amax(semivariance)],
)
# use 'soft' L1-norm minimization in order to buffer against
# potential outliers (weird/skewed points)
res = least_squares(
_variogram_residuals,
x0,
bounds=bnds,
loss="soft_l1",
args=(lags, semivariance, variogram_function, weight),
)
return res.x
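# Illustrative fit on a noiseless linear variogram; the lambda mirrors the
# [slope, nugget] parameter order used for the 'linear' model above, so the
# estimate should land near [2.0, 0.5].
if __name__ == "__main__":
    _lin = lambda p, d: p[0] * d + p[1]
    _lags = np.linspace(1.0, 10.0, 10)
    _semi = 2.0 * _lags + 0.5
    print(_calculate_variogram_model(_lags, _semi, "linear", _lin, False))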
def _krige(
X,
y,
coords,
variogram_function,
variogram_model_parameters,
coordinates_type,
pseudo_inv=False,
):
"""Sets up and solves the ordinary kriging system for the given
coordinate pair. This function is only used for the statistics calculations.
Parameters
----------
X: ndarray
float array [n_samples, n_dim], the input array of coordinates
y: ndarray
float array [n_samples], the input array of measurement values
coords: ndarray
float array [1, n_dim], point at which to evaluate the kriging system
variogram_function: callable
function that will be called to evaluate variogram model
variogram_model_parameters: list
user-specified parameters for variogram model
coordinates_type: str
type of coordinates in X array, can be 'euclidean' for standard
rectangular coordinates or 'geographic' if the coordinates are lat/lon
pseudo_inv : :class:`bool`, optional
Whether the kriging system is solved with the pseudo inverted
kriging matrix. If `True`, this leads to more numerical stability
and redundant points are averaged. But it can take more time.
Default: False
Returns
-------
zinterp: float
kriging estimate at the specified point
sigmasq: float
mean square error of the kriging estimate
"""
zero_index = None
zero_value = False
# calculate distance between points... need a square distance matrix
# of inter-measurement-point distances and a vector of distances between
# measurement points (X) and the kriging point (coords)
if coordinates_type == "euclidean":
d = squareform(pdist(X, metric="euclidean"))
bd = np.squeeze(cdist(X, coords[None, :], metric="euclidean"))
# geographic coordinate distances still calculated in the old way...
# assume X[:, 0] ('x') => lon, X[:, 1] ('y') => lat
# also assume problem is 2D; check done earlier in initializing variogram
elif coordinates_type == "geographic":
x1, x2 = np.meshgrid(X[:, 0], X[:, 0], sparse=True)
y1, y2 = np.meshgrid(X[:, 1], X[:, 1], sparse=True)
d = great_circle_distance(x1, y1, x2, y2)
bd = great_circle_distance(
X[:, 0],
X[:, 1],
coords[0] * np.ones(X.shape[0]),
coords[1] * np.ones(X.shape[0]),
)
# this check is done when initializing variogram, but kept here anyways...
else:
raise ValueError(
"Specified coordinate type '%s' is not supported." % coordinates_type
)
# check if kriging point overlaps with measurement point
if np.any(np.absolute(bd) <= 1e-10):
zero_value = True
zero_index = np.where(bd <= 1e-10)[0][0]
# set up kriging matrix
n = X.shape[0]
a = np.zeros((n + 1, n + 1))
a[:n, :n] = -variogram_function(variogram_model_parameters, d)
np.fill_diagonal(a, 0.0)
a[n, :] = 1.0
a[:, n] = 1.0
a[n, n] = 0.0
# set up RHS
b = np.zeros((n + 1, 1))
b[:n, 0] = -variogram_function(variogram_model_parameters, bd)
if zero_value:
b[zero_index, 0] = 0.0
b[n, 0] = 1.0
# solve
if pseudo_inv:
res = np.linalg.lstsq(a, b, rcond=None)[0]
else:
res = np.linalg.solve(a, b)
zinterp = np.sum(res[:n, 0] * y)
sigmasq = np.sum(res[:, 0] * -b[:, 0])
return zinterp, sigmasq
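# Illustrative ordinary-kriging solve with three points and a linear
# variogram ([slope, nugget] = [1.0, 0.0]); evaluating at a data point
# reproduces that point's value with zero estimation variance.
if __name__ == "__main__":
    _lin = lambda p, d: p[0] * d + p[1]
    _X = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
    _y = np.array([1.0, 2.0, 3.0])
    _z, _ss = _krige(_X, _y, np.array([0.0, 0.0]), _lin, [1.0, 0.0],
                     "euclidean")
    assert abs(_z - 1.0) < 1e-8 and abs(_ss) < 1e-8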
def _find_statistics(
X,
y,
variogram_function,
variogram_model_parameters,
coordinates_type,
pseudo_inv=False,
):
"""Calculates variogram fit statistics.
Returns the delta, sigma, and epsilon values for the variogram fit.
These arrays are used for statistics calculations.
Parameters
----------
X: ndarray
float array [n_samples, n_dim], the input array of coordinates
y: ndarray
float array [n_samples], the input array of measurement values
variogram_function: callable
function that will be called to evaluate variogram model
variogram_model_parameters: list
user-specified parameters for variogram model
coordinates_type: str
type of coordinates in X array, can be 'euclidean' for standard
rectangular coordinates or 'geographic' if the coordinates are lat/lon
pseudo_inv : :class:`bool`, optional
Whether the kriging system is solved with the pseudo inverted
kriging matrix. If `True`, this leads to more numerical stability
and redundant points are averaged. But it can take more time.
Default: False
Returns
-------
delta: ndarray
residuals between observed values and kriged estimates for those values
sigma: ndarray
mean error in kriging estimates
epsilon: ndarray
residuals normalized by their mean error
"""
delta = np.zeros(y.shape)
sigma = np.zeros(y.shape)
for i in range(y.shape[0]):
# skip the first value in the kriging problem
if i == 0:
continue
else:
k, ss = _krige(
X[:i, :],
y[:i],
X[i, :],
variogram_function,
variogram_model_parameters,
coordinates_type,
pseudo_inv,
)
# if the estimation error is zero, it's probably because
# the evaluation point X[i, :] is really close to one of the
# kriging system points in X[:i, :]...
# in the case of zero estimation error, the results are not stored
if np.absolute(ss) < eps:
continue
delta[i] = y[i] - k
sigma[i] = np.sqrt(ss)
# only use non-zero entries in these arrays... sigma is used to pull out
# non-zero entries in both cases because it is guaranteed to be positive,
# whereas delta can be either positive or negative
delta = delta[sigma > eps]
sigma = sigma[sigma > eps]
epsilon = delta / sigma
return delta, sigma, epsilon
def calcQ1(epsilon):
"""Returns the Q1 statistic for the variogram fit (see [1])."""
return abs(np.sum(epsilon) / (epsilon.shape[0] - 1))
def calcQ2(epsilon):
"""Returns the Q2 statistic for the variogram fit (see [1])."""
return np.sum(epsilon ** 2) / (epsilon.shape[0] - 1)
def calc_cR(Q2, sigma):
"""Returns the cR statistic for the variogram fit (see [1])."""
return Q2 * np.exp(np.sum(np.log(sigma ** 2)) / sigma.shape[0])
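# Illustrative end-to-end use of the fit statistics on a small synthetic
# dataset; per [1], Q1 near 0 and Q2 near 1 indicate a reasonable fit.
if __name__ == "__main__":
    _lin = lambda p, d: p[0] * d + p[1]
    _X = np.random.RandomState(0).rand(20, 2)
    _y = _X[:, 0] + _X[:, 1]
    _delta, _sigma, _epsilon = _find_statistics(_X, _y, _lin, [1.0, 0.0],
                                                "euclidean")
    print(calcQ1(_epsilon), calcQ2(_epsilon), calc_cR(calcQ2(_epsilon), _sigma))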
|
bsd-3-clause
|
getredash/redash
|
redash/utils/__init__.py
|
1
|
7622
|
import codecs
import io
import csv
import datetime
import decimal
import hashlib
import os
import random
import re
import uuid
import binascii
import pystache
import pytz
import simplejson
import sqlparse
from flask import current_app
from funcy import select_values
from redash import settings
from sqlalchemy.orm.query import Query
from .human_time import parse_human_time
COMMENTS_REGEX = re.compile("/\*.*?\*/")
WRITER_ENCODING = os.environ.get("REDASH_CSV_WRITER_ENCODING", "utf-8")
WRITER_ERRORS = os.environ.get("REDASH_CSV_WRITER_ERRORS", "strict")
def utcnow():
"""Return datetime.now value with timezone specified.
    Without the timezone data, when the timestamp is stored to the database it gets the current timezone of the server,
which leads to errors in calculations.
"""
return datetime.datetime.now(pytz.utc)
def dt_from_timestamp(timestamp, tz_aware=True):
timestamp = datetime.datetime.utcfromtimestamp(float(timestamp))
if tz_aware:
timestamp = timestamp.replace(tzinfo=pytz.utc)
return timestamp
def slugify(s):
return re.sub("[^a-z0-9_\-]+", "-", s.lower())
def gen_query_hash(sql):
"""Return hash of the given query after stripping all comments, line breaks
and multiple spaces, and lower casing all text.
TODO: possible issue - the following queries will get the same id:
1. SELECT 1 FROM table WHERE column='Value';
2. SELECT 1 FROM table where column='value';
"""
sql = COMMENTS_REGEX.sub("", sql)
sql = "".join(sql.split()).lower()
return hashlib.md5(sql.encode("utf-8")).hexdigest()
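# The two queries from the TODO above indeed collide after normalization;
# a small self-check with illustrative SQL:
if __name__ == "__main__":
    assert gen_query_hash("SELECT 1 FROM table WHERE column='Value';") == \
        gen_query_hash("SELECT 1 FROM table where column='value';")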
def generate_token(length):
chars = "abcdefghijklmnopqrstuvwxyz" "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "0123456789"
rand = random.SystemRandom()
return "".join(rand.choice(chars) for x in range(length))
class JSONEncoder(simplejson.JSONEncoder):
"""Adapter for `simplejson.dumps`."""
def default(self, o):
# Some SQLAlchemy collections are lazy.
if isinstance(o, Query):
result = list(o)
elif isinstance(o, decimal.Decimal):
result = float(o)
elif isinstance(o, (datetime.timedelta, uuid.UUID)):
result = str(o)
# See "Date Time String Format" in the ECMA-262 specification.
elif isinstance(o, datetime.datetime):
result = o.isoformat()
if o.microsecond:
result = result[:23] + result[26:]
if result.endswith("+00:00"):
result = result[:-6] + "Z"
elif isinstance(o, datetime.date):
result = o.isoformat()
elif isinstance(o, datetime.time):
if o.utcoffset() is not None:
raise ValueError("JSON can't represent timezone-aware times.")
result = o.isoformat()
if o.microsecond:
result = result[:12]
elif isinstance(o, memoryview):
result = binascii.hexlify(o).decode()
elif isinstance(o, bytes):
result = binascii.hexlify(o).decode()
else:
result = super(JSONEncoder, self).default(o)
return result
def json_loads(data, *args, **kwargs):
"""A custom JSON loading function which passes all parameters to the
simplejson.loads function."""
return simplejson.loads(data, *args, **kwargs)
def json_dumps(data, *args, **kwargs):
"""A custom JSON dumping function which passes all parameters to the
simplejson.dumps function."""
kwargs.setdefault("cls", JSONEncoder)
kwargs.setdefault("encoding", None)
    # Float values nan or inf in Python should be rendered as None or null in JSON.
    # Using ignore_nan=False would make Python render nan as NaN, leading to a parse error in the front-end.
kwargs.setdefault('ignore_nan', True)
return simplejson.dumps(data, *args, **kwargs)
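# Illustrative check of the NaN handling: with ignore_nan=True, NaN is
# serialized as null instead of the invalid-JSON literal NaN.
if __name__ == "__main__":
    assert json_dumps({"x": float("nan")}) == '{"x": null}'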
def mustache_render(template, context=None, **kwargs):
renderer = pystache.Renderer(escape=lambda u: u)
return renderer.render(template, context, **kwargs)
def build_url(request, host, path):
parts = request.host.split(":")
if len(parts) > 1:
port = parts[1]
if (port, request.scheme) not in (("80", "http"), ("443", "https")):
host = "{}:{}".format(host, port)
return "{}://{}{}".format(request.scheme, host, path)
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding=WRITER_ENCODING, **kwds):
# Redirect output to a queue
self.queue = io.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def _encode_utf8(self, val):
if isinstance(val, str):
return val.encode(WRITER_ENCODING, WRITER_ERRORS)
return val
    def writerow(self, row):
        # csv.writer handles text natively on Python 3; pre-encoding cells to
        # bytes would make it emit their repr() instead, so rows go through
        # unchanged.
        self.writer.writerow(row)
        # Fetch the accumulated text from the queue ...
        data = self.queue.getvalue()
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        self.stream.write(data)
        # empty the queue (truncate alone does not rewind the position)
        self.queue.truncate(0)
        self.queue.seek(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
def collect_parameters_from_request(args):
parameters = {}
for k, v in args.items():
if k.startswith("p_"):
parameters[k[2:]] = v
return parameters
def base_url(org):
if settings.MULTI_ORG:
return "https://{}/{}".format(settings.HOST, org.slug)
return settings.HOST
def filter_none(d):
return select_values(lambda v: v is not None, d)
def to_filename(s):
s = re.sub('[<>:"\\\/|?*]+', " ", s, flags=re.UNICODE)
s = re.sub("\s+", "_", s, flags=re.UNICODE)
return s.strip("_")
def deprecated():
def wrapper(K):
setattr(K, "deprecated", True)
return K
return wrapper
def render_template(path, context):
""" Render a template with context, without loading the entire app context.
Using Flask's `render_template` function requires the entire app context to load, which in turn triggers any
function decorated with the `context_processor` decorator, which is not explicitly required for rendering purposes.
"""
return current_app.jinja_env.get_template(path).render(**context)
def query_is_select_no_limit(query):
parsed_query = sqlparse.parse(query)[0]
last_keyword_idx = find_last_keyword_idx(parsed_query)
# Either invalid query or query that is not select
if last_keyword_idx == -1 or parsed_query.tokens[0].value.upper() != "SELECT":
return False
no_limit = parsed_query.tokens[last_keyword_idx].value.upper() != "LIMIT" \
and parsed_query.tokens[last_keyword_idx].value.upper() != "OFFSET"
return no_limit
def find_last_keyword_idx(parsed_query):
for i in reversed(range(len(parsed_query.tokens))):
if parsed_query.tokens[i].ttype in sqlparse.tokens.Keyword:
return i
return -1
def add_limit_to_query(query):
parsed_query = sqlparse.parse(query)[0]
limit_tokens = sqlparse.parse(" LIMIT 1000")[0].tokens
length = len(parsed_query.tokens)
if parsed_query.tokens[length - 1].ttype == sqlparse.tokens.Punctuation:
parsed_query.tokens[length - 1:length - 1] = limit_tokens
else:
parsed_query.tokens += limit_tokens
return str(parsed_query)
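# Illustrative self-checks for the two sqlparse helpers above: a bare SELECT
# has no LIMIT/OFFSET and gets LIMIT 1000 appended, while a trailing
# semicolon stays at the end of the rewritten statement.
if __name__ == "__main__":
    assert query_is_select_no_limit("SELECT * FROM events")
    assert not query_is_select_no_limit("SELECT * FROM events LIMIT 10")
    assert add_limit_to_query("SELECT * FROM events") == \
        "SELECT * FROM events LIMIT 1000"
    assert add_limit_to_query("SELECT * FROM events;") == \
        "SELECT * FROM events LIMIT 1000;"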
|
bsd-2-clause
|
jianglu/mojo
|
gpu/gles2_conform_support/generate_gles2_conform_tests.py
|
139
|
1430
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""code generator for OpenGL ES 2.0 conformance tests."""
import os
import re
import sys
def ReadFileAsLines(filename):
"""Reads a file, removing blank lines and lines that start with #"""
file = open(filename, "r")
raw_lines = file.readlines()
file.close()
lines = []
for line in raw_lines:
line = line.strip()
if len(line) > 0 and not line.startswith("#"):
lines.append(line)
return lines
def GenerateTests(file):
"""Generates gles2_conform_test_autogen.cc"""
tests = ReadFileAsLines(
"../../third_party/gles2_conform/GTF_ES/glsl/GTF/mustpass_es20.run")
file.write("""
#include "gpu/gles2_conform_support/gles2_conform_test.h"
#include "testing/gtest/include/gtest/gtest.h"
""")
for test in tests:
file.write("""
TEST(GLES2ConformTest, %(name)s) {
EXPECT_TRUE(RunGLES2ConformTest("%(path)s"));
}
""" % {
"name": re.sub(r'[^A-Za-z0-9]', '_', test),
"path": test,
})
def main(argv):
"""This is the main function."""
if len(argv) >= 1:
dir = argv[0]
else:
dir = '.'
file = open(os.path.join(dir, 'gles2_conform_test_autogen.cc'), 'wb')
GenerateTests(file)
file.close()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
bsd-3-clause
|
thepaul/uftrace
|
tests/t128_arg_module2.py
|
1
|
1586
|
#!/usr/bin/env python
from runtest import TestBase
class TestCase(TestBase):
def __init__(self):
TestBase.__init__(self, 'allocfree', """
# DURATION TID FUNCTION
3.937 us [ 447] | __monstartup();
1.909 us [ 447] | __cxa_atexit();
[ 447] | main() {
[ 447] | alloc1() {
[ 447] | alloc2() {
[ 447] | alloc3() {
[ 447] | alloc4() {
[ 447] | alloc5() {
8.408 us [ 447] | malloc(1);
10.642 us [ 447] | } /* alloc5 */
11.502 us [ 447] | } /* alloc4 */
12.057 us [ 447] | } /* alloc3 */
12.780 us [ 447] | } /* alloc2 */
13.400 us [ 447] | } /* alloc1 */
[ 447] | free1() {
[ 447] | free2() {
[ 447] | free3() {
[ 447] | free4() {
[ 447] | free5() {
2.072 us [ 447] | free();
3.951 us [ 447] | } /* free5 */
4.561 us [ 447] | } /* free4 */
5.151 us [ 447] | } /* free3 */
5.713 us [ 447] | } /* free2 */
6.341 us [ 447] | } /* free1 */
21.174 us [ 447] | } /* main */
""")
def build(self, name, cflags='', ldflags=''):
# cygprof doesn't support arguments now
if cflags.find('-finstrument-functions') >= 0:
return TestBase.TEST_SKIP
return TestBase.build(self, name, cflags, ldflags)
def runcmd(self):
return '%s -A "alloc*@PLT,arg1" %s' % (TestBase.uftrace_cmd, 't-allocfree')
|
gpl-2.0
|
pyconca/2013-web
|
symposion/sponsorship/templatetags/sponsorship_tags.py
|
7
|
2239
|
from django import template
from symposion.conference.models import current_conference
from symposion.sponsorship.models import Sponsor, SponsorLevel
register = template.Library()
class SponsorsNode(template.Node):
@classmethod
def handle_token(cls, parser, token):
bits = token.split_contents()
if len(bits) == 3 and bits[1] == "as":
return cls(bits[2])
elif len(bits) == 4 and bits[2] == "as":
return cls(bits[3], bits[1])
else:
raise template.TemplateSyntaxError("%r takes 'as var' or 'level as var'" % bits[0])
def __init__(self, context_var, level=None):
if level:
self.level = template.Variable(level)
else:
self.level = None
self.context_var = context_var
def render(self, context):
conference = current_conference()
if self.level:
level = self.level.resolve(context)
queryset = Sponsor.objects.filter(level__conference = conference, level__name__iexact = level, active = True).order_by("added")
else:
queryset = Sponsor.objects.filter(level__conference = conference, active = True).order_by("level__order", "added")
context[self.context_var] = queryset
return u""
class SponsorLevelNode(template.Node):
@classmethod
def handle_token(cls, parser, token):
bits = token.split_contents()
if len(bits) == 3 and bits[1] == "as":
return cls(bits[2])
else:
raise template.TemplateSyntaxError("%r takes 'as var'" % bits[0])
def __init__(self, context_var):
self.context_var = context_var
def render(self, context):
conference = current_conference()
context[self.context_var] = SponsorLevel.objects.filter(conference=conference)
return u""
@register.tag
def sponsors(parser, token):
"""
{% sponsors as all_sponsors %}
or
{% sponsors "gold" as gold_sponsors %}
"""
return SponsorsNode.handle_token(parser, token)
@register.tag
def sponsor_levels(parser, token):
"""
{% sponsor_levels as levels %}
"""
return SponsorLevelNode.handle_token(parser, token)
|
bsd-3-clause
|
rjschwei/azure-sdk-for-python
|
azure-batch/azure/batch/models/job_terminate_options.py
|
3
|
3082
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class JobTerminateOptions(Model):
"""Additional parameters for the Job_terminate operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
def __init__(self, timeout=30, client_request_id=None, return_client_request_id=False, ocp_date=None, if_match=None, if_none_match=None, if_modified_since=None, if_unmodified_since=None):
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
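# A hedged construction sketch; the values below are illustrative only:
#
#     options = JobTerminateOptions(
#         timeout=60,
#         client_request_id='9C4D50EE-2D56-4CD3-8152-34347DC9F2B0',
#         return_client_request_id=True)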
|
mit
|
sshnaidm/ru
|
plugin.video.serialu.net.plus/resources/lib/Auth.py
|
2
|
3779
|
import xbmc, xbmcgui, xbmcaddon, xbmcplugin
import urllib, urllib2, json, sys, os, cookielib
import urlparse
import gzip, StringIO, zlib
from BeautifulSoup import BeautifulSoup
from PlayerSelect import PlayerDialog
from Main import MainScreen
from Data import Data
class Auth(object):
def __init__(self, *args, **kwargs):
self.Addon = kwargs.get('Addon')
#--- paths ------------------------------
self.Addon_path = self.Addon.getAddonInfo('path').decode(sys.getfilesystemencoding())
self.Data_path = xbmc.translatePath(os.path.join(self.Addon_path, r'resources', r'data'))
#---
self.fcookies = xbmc.translatePath(os.path.join(self.Addon_path, r'cookies.txt'))
self.HTML_retry = 0
self.player = xbmc.Player()
#---
kwargs={'Auth': self}
self.Data = Data(**kwargs)
#-------
# get cookies from last session
self.cj = cookielib.MozillaCookieJar(self.fcookies)
try:
self.cj.load(self.fcookies, True, True)
except:
pass
hr = urllib2.HTTPCookieProcessor(self.cj)
opener = urllib2.build_opener(hr)
urllib2.install_opener(opener)
    def __del__(self):
        self.cj.save(self.fcookies, True, True)
        del self.player
#-----------------------------------------------------------------------------
def Authorize(self):
return True
#---------------------- HTML request -----------------------------------------
def get_HTML(self, url, post = None, ref = None, get_url = False):
request = urllib2.Request(url, post)
host = urlparse.urlsplit(url).hostname
if ref==None:
ref='http://'+host
request.add_header('User-Agent', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C)')
request.add_header('Host', host)
request.add_header('Accept', '*/*')
request.add_header('Accept-Language', 'ru-RU')
request.add_header('Accept-Encoding', 'gzip')
request.add_header('Referer', ref)
is_OK = False
try:
f = urllib2.urlopen(request, timeout=240)
is_OK = True
except IOError, e:
is_OK = False
if hasattr(e, 'reason'):
print e.reason #'ERROR: '+e.reason
xbmc.executebuiltin('Notification(SEASONVAR.ru,%s,5000,%s)'%(e.reason.capitalize(), os.path.join(self.Addon.getAddonInfo('path'), 'warning.jpg')))
#---
if self.HTML_retry < 3:
xbmc.sleep(2000)
self.HTML_retry = self.HTML_retry+1
return self.get_HTML(url, post, ref, get_url)
elif hasattr(e, 'code'):
print 'The server couldn\'t fulfill the request.'
if is_OK == True:
if get_url == True:
html = f.geturl()
else:
html = f.read()
#--
if f.headers.get('content-encoding', '') == 'gzip':
html = StringIO.StringIO(html)
gzipper = gzip.GzipFile(fileobj=html)
html = gzipper.read()
elif f.headers.getheader("Content-Encoding") == 'deflate':
html = zlib.decompress(html)
self.HTML_retry = 0
return html
#---------------------------------------------------------------------------
def Player(self, **kwargs):
kwargs['Auth'] = self
aw = PlayerDialog('tvp_playerDialog.xml', self.Addon.getAddonInfo('path'), **kwargs)
aw.doModal()
del aw
#---------------------------------------------------------------------------
def showMain(self):
kwargs={'Auth': self}
aw = MainScreen('tvp_main.xml', self.Addon.getAddonInfo('path'), **kwargs)
aw.doModal()
del aw
|
gpl-2.0
|
parapente/beets
|
beetsplug/lyrics.py
|
1
|
21671
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Fetches, embeds, and displays lyrics.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import re
import requests
import json
import unicodedata
import urllib
import difflib
import itertools
import warnings
from HTMLParser import HTMLParseError
from beets import plugins
from beets import ui
DIV_RE = re.compile(r'<(/?)div>?', re.I)
COMMENT_RE = re.compile(r'<!--.*-->', re.S)
TAG_RE = re.compile(r'<[^>]*>')
BREAK_RE = re.compile(r'\n?\s*<br([\s|/][^>]*)*>\s*\n?', re.I)
URL_CHARACTERS = {
u'\u2018': u"'",
u'\u2019': u"'",
u'\u201c': u'"',
u'\u201d': u'"',
u'\u2010': u'-',
u'\u2011': u'-',
u'\u2012': u'-',
u'\u2013': u'-',
u'\u2014': u'-',
u'\u2015': u'-',
u'\u2016': u'-',
u'\u2026': u'...',
}
# Utilities.
def unescape(text):
"""Resolves &#xxx; HTML entities (and some others)."""
if isinstance(text, bytes):
text = text.decode('utf8', 'ignore')
    out = text.replace(u'&nbsp;', u' ')
def replchar(m):
num = m.group(1)
return unichr(int(num))
out = re.sub(u"&#(\d+);", replchar, out)
return out
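# Illustrative sketch (added comment):
#   unescape(u'Caf&#233; del Mar') returns u'Café del Mar'
# (numeric entities are decoded; &nbsp; becomes a plain space).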
def extract_text_between(html, start_marker, end_marker):
try:
_, html = html.split(start_marker, 1)
html, _ = html.split(end_marker, 1)
except ValueError:
return u''
return html
def extract_text_in(html, starttag):
"""Extract the text from a <DIV> tag in the HTML starting with
``starttag``. Returns None if parsing fails.
"""
# Strip off the leading text before opening tag.
try:
_, html = html.split(starttag, 1)
except ValueError:
return
# Walk through balanced DIV tags.
level = 0
parts = []
pos = 0
for match in DIV_RE.finditer(html):
if match.group(1): # Closing tag.
level -= 1
if level == 0:
pos = match.end()
else: # Opening tag.
if level == 0:
parts.append(html[pos:match.start()])
level += 1
if level == -1:
parts.append(html[pos:match.start()])
break
else:
print('no closing tag found!')
return
return u''.join(parts)
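# Illustrative worked example (added comment; the input is hypothetical):
#   extract_text_in(u"<div class='a'>X<div>Y</div>Z</div>rest", u"<div class='a'>")
# returns u'XZ' -- text at the top nesting level is kept, while the contents
# of nested <div> blocks are skipped.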
def search_pairs(item):
"""Yield a pairs of artists and titles to search for.
The first item in the pair is the name of the artist, the second
item is a list of song names.
In addition to the artist and title obtained from the `item` the
    method tries to strip extra information like parenthesized suffixes
and featured artists from the strings and add them as candidates.
The method also tries to split multiple titles separated with `/`.
"""
title, artist = item.title, item.artist
titles = [title]
artists = [artist]
# Remove any featuring artists from the artists name
pattern = r"(.*?) {0}".format(plugins.feat_tokens())
match = re.search(pattern, artist, re.IGNORECASE)
if match:
artists.append(match.group(1))
# Remove a parenthesized suffix from a title string. Common
# examples include (live), (remix), and (acoustic).
pattern = r"(.+?)\s+[(].*[)]$"
match = re.search(pattern, title, re.IGNORECASE)
if match:
titles.append(match.group(1))
# Remove any featuring artists from the title
pattern = r"(.*?) {0}".format(plugins.feat_tokens(for_artist=False))
for title in titles[:]:
match = re.search(pattern, title, re.IGNORECASE)
if match:
titles.append(match.group(1))
    # Check for a dual song (e.g. Pink Floyd - Speak to Me / Breathe)
    # and add each of them as candidates.
multi_titles = []
for title in titles:
multi_titles.append([title])
if '/' in title:
multi_titles.append([x.strip() for x in title.split('/')])
return itertools.product(artists, multi_titles)
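# Illustrative sketch (added comment; the item values are hypothetical).
# For artist u'Pink Floyd' and title u'Speak to Me / Breathe (Live)' the
# generator yields pairs such as:
#   (u'Pink Floyd', [u'Speak to Me / Breathe (Live)'])
#   (u'Pink Floyd', [u'Speak to Me', u'Breathe (Live)'])
#   (u'Pink Floyd', [u'Speak to Me / Breathe'])
# so each candidate combination can be tried against the backends in turn.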
class Backend(object):
def __init__(self, config, log):
self._log = log
@staticmethod
def _encode(s):
"""Encode the string for inclusion in a URL"""
if isinstance(s, unicode):
for char, repl in URL_CHARACTERS.items():
s = s.replace(char, repl)
s = s.encode('utf8', 'ignore')
return urllib.quote(s)
def build_url(self, artist, title):
return self.URL_PATTERN % (self._encode(artist.title()),
self._encode(title.title()))
def fetch_url(self, url):
"""Retrieve the content at a given URL, or return None if the source
is unreachable.
"""
try:
# Disable the InsecureRequestWarning that comes from using
# `verify=false`.
# https://github.com/kennethreitz/requests/issues/2214
# We're not overly worried about the NSA MITMing our lyrics scraper
with warnings.catch_warnings():
warnings.simplefilter('ignore')
r = requests.get(url, verify=False)
except requests.RequestException as exc:
self._log.debug(u'lyrics request failed: {0}', exc)
return
if r.status_code == requests.codes.ok:
return r.text
else:
self._log.debug(u'failed to fetch: {0} ({1})', url, r.status_code)
def fetch(self, artist, title):
raise NotImplementedError()
class SymbolsReplaced(Backend):
@classmethod
def _encode(cls, s):
s = re.sub(r'\s+', '_', s)
s = s.replace("<", "Less_Than")
s = s.replace(">", "Greater_Than")
s = s.replace("#", "Number_")
s = re.sub(r'[\[\{]', '(', s)
s = re.sub(r'[\]\}]', ')', s)
return super(SymbolsReplaced, cls)._encode(s)
class MusiXmatch(SymbolsReplaced):
URL_PATTERN = 'https://www.musixmatch.com/lyrics/%s/%s'
def fetch(self, artist, title):
url = self.build_url(artist, title)
html = self.fetch_url(url)
if not html:
return
lyrics = extract_text_between(html,
'"lyrics_body":', '"lyrics_language":')
return lyrics.strip(',"').replace('\\n', '\n')
class Genius(Backend):
"""Fetch lyrics from Genius via genius-api."""
def __init__(self, config, log):
super(Genius, self).__init__(config, log)
self.api_key = config['genius_api_key'].get(unicode)
self.headers = {'Authorization': "Bearer %s" % self.api_key}
def search_genius(self, artist, title):
query = u"%s %s" % (artist, title)
url = u'https://api.genius.com/search?q=%s' \
% (urllib.quote(query.encode('utf8')))
self._log.debug('genius: requesting search {}', url)
try:
req = requests.get(
url,
headers=self.headers,
allow_redirects=True
)
req.raise_for_status()
except requests.RequestException as exc:
self._log.debug('genius: request error: {}', exc)
return None
try:
return req.json()
except ValueError:
self._log.debug('genius: invalid response: {}', req.text)
return None
def get_lyrics(self, link):
url = u'http://genius-api.com/api/lyricsInfo'
self._log.debug('genius: requesting lyrics for link {}', link)
try:
req = requests.post(
url,
data={'link': link},
headers=self.headers,
allow_redirects=True
)
req.raise_for_status()
except requests.RequestException as exc:
self._log.debug('genius: request error: {}', exc)
return None
try:
return req.json()
except ValueError:
self._log.debug('genius: invalid response: {}', req.text)
return None
def build_lyric_string(self, lyrics):
if 'lyrics' not in lyrics:
return
sections = lyrics['lyrics']['sections']
lyrics_list = []
for section in sections:
lyrics_list.append(section['name'])
lyrics_list.append('\n')
for verse in section['verses']:
if 'content' in verse:
lyrics_list.append(verse['content'])
return ''.join(lyrics_list)
def fetch(self, artist, title):
search_data = self.search_genius(artist, title)
if not search_data:
return
        if search_data['meta']['status'] != 200:
            return
        records = search_data['response']['hits']
        if not records:
            return
        record_url = records[0]['result']['url']
        lyric_data = self.get_lyrics(record_url)
        if not lyric_data:
            return
        lyrics = self.build_lyric_string(lyric_data)
        return lyrics
class LyricsWiki(SymbolsReplaced):
"""Fetch lyrics from LyricsWiki."""
URL_PATTERN = 'http://lyrics.wikia.com/%s:%s'
def fetch(self, artist, title):
url = self.build_url(artist, title)
html = self.fetch_url(url)
if not html:
return
lyrics = extract_text_in(html, u"<div class='lyricbox'>")
if lyrics and 'Unfortunately, we are not licensed' not in lyrics:
return lyrics
class LyricsCom(Backend):
"""Fetch lyrics from Lyrics.com."""
URL_PATTERN = 'http://www.lyrics.com/%s-lyrics-%s.html'
NOT_FOUND = (
'Sorry, we do not have the lyric',
'Submit Lyrics',
)
@classmethod
def _encode(cls, s):
s = re.sub(r'[^\w\s-]', '', s)
s = re.sub(r'\s+', '-', s)
return super(LyricsCom, cls)._encode(s).lower()
def fetch(self, artist, title):
url = self.build_url(artist, title)
html = self.fetch_url(url)
if not html:
return
lyrics = extract_text_between(html, '<div id="lyrics" class="SCREENO'
'NLY" itemprop="description">', '</div>')
if not lyrics:
return
for not_found_str in self.NOT_FOUND:
if not_found_str in lyrics:
return
parts = lyrics.split('\n---\nLyrics powered by', 1)
if parts:
return parts[0]
def remove_credits(text):
"""Remove first/last line of text if it contains the word 'lyrics'
eg 'Lyrics by songsdatabase.com'
"""
textlines = text.split('\n')
credits = None
for i in (0, -1):
if textlines and 'lyrics' in textlines[i].lower():
credits = textlines.pop(i)
if credits:
text = '\n'.join(textlines)
return text
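# Illustrative sketch (added comment):
#   remove_credits(u'Lyrics by songsdatabase.com\nHello\nWorld')
# returns u'Hello\nWorld' -- the credit line is dropped.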
def _scrape_strip_cruft(html, plain_text_out=False):
"""Clean up HTML
"""
html = unescape(html)
html = html.replace('\r', '\n') # Normalize EOL.
html = re.sub(r' +', ' ', html) # Whitespaces collapse.
html = BREAK_RE.sub('\n', html) # <br> eats up surrounding '\n'.
html = re.sub(r'<(script).*?</\1>(?s)', '', html) # Strip script tags.
if plain_text_out: # Strip remaining HTML tags
html = COMMENT_RE.sub('', html)
html = TAG_RE.sub('', html)
html = '\n'.join([x.strip() for x in html.strip().split('\n')])
html = re.sub(r'\n{3,}', r'\n\n', html)
return html
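# Illustrative sketch (added comment):
#   _scrape_strip_cruft(u'<p>Hello<br/>world</p>', plain_text_out=True)
# returns u'Hello\nworld' -- <br> becomes a newline and the remaining
# tags are stripped.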
def _scrape_merge_paragraphs(html):
html = re.sub(r'</p>\s*<p(\s*[^>]*)>', '\n', html)
return re.sub(r'<div .*>\s*</div>', '\n', html)
def scrape_lyrics_from_html(html):
"""Scrape lyrics from a URL. If no lyrics can be found, return None
instead.
"""
from bs4 import SoupStrainer, BeautifulSoup
if not html:
return None
def is_text_notcode(text):
length = len(text)
return (length > 20 and
text.count(' ') > length / 25 and
(text.find('{') == -1 or text.find(';') == -1))
html = _scrape_strip_cruft(html)
html = _scrape_merge_paragraphs(html)
# extract all long text blocks that are not code
try:
soup = BeautifulSoup(html, "html.parser",
parse_only=SoupStrainer(text=is_text_notcode))
except HTMLParseError:
return None
soup = sorted(soup.stripped_strings, key=len)[-1]
return soup
class Google(Backend):
"""Fetch lyrics from Google search results."""
def __init__(self, config, log):
super(Google, self).__init__(config, log)
self.api_key = config['google_API_key'].get(unicode)
self.engine_id = config['google_engine_ID'].get(unicode)
def is_lyrics(self, text, artist=None):
"""Determine whether the text seems to be valid lyrics.
"""
if not text:
return False
badTriggersOcc = []
nbLines = text.count('\n')
if nbLines <= 1:
self._log.debug(u"Ignoring too short lyrics '{0}'", text)
return False
elif nbLines < 5:
badTriggersOcc.append('too_short')
else:
# Lyrics look legit, remove credits to avoid being penalized
# further down
text = remove_credits(text)
badTriggers = ['lyrics', 'copyright', 'property', 'links']
if artist:
badTriggersOcc += [artist]
for item in badTriggers:
badTriggersOcc += [item] * len(re.findall(r'\W%s\W' % item,
text, re.I))
if badTriggersOcc:
self._log.debug(u'Bad triggers detected: {0}', badTriggersOcc)
return len(badTriggersOcc) < 2
def slugify(self, text):
"""Normalize a string and remove non-alphanumeric characters.
"""
text = re.sub(r"[-'_\s]", '_', text)
text = re.sub(r"_+", '_', text).strip('_')
pat = "([^,\(]*)\((.*?)\)" # Remove content within parentheses
text = re.sub(pat, '\g<1>', text).strip()
try:
text = unicodedata.normalize('NFKD', text).encode('ascii',
'ignore')
text = unicode(re.sub('[-\s]+', ' ', text))
except UnicodeDecodeError:
self._log.exception(u"Failing to normalize '{0}'", text)
return text
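    # Illustrative sketch (added comment):
    #   self.slugify(u"Hello World")   -> u'Hello_World'
    #   self.slugify(u"Don't Stop Me") -> u'Don_t_Stop_Me'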
BY_TRANS = ['by', 'par', 'de', 'von']
LYRICS_TRANS = ['lyrics', 'paroles', 'letras', 'liedtexte']
def is_page_candidate(self, url_link, url_title, title, artist):
"""Return True if the URL title makes it a good candidate to be a
page that contains lyrics of title by artist.
"""
title = self.slugify(title.lower())
artist = self.slugify(artist.lower())
sitename = re.search(u"//([^/]+)/.*",
self.slugify(url_link.lower())).group(1)
url_title = self.slugify(url_title.lower())
# Check if URL title contains song title (exact match)
if url_title.find(title) != -1:
return True
# or try extracting song title from URL title and check if
# they are close enough
tokens = [by + '_' + artist for by in self.BY_TRANS] + \
[artist, sitename, sitename.replace('www.', '')] + \
self.LYRICS_TRANS
tokens = [re.escape(t) for t in tokens]
song_title = re.sub(u'(%s)' % u'|'.join(tokens), u'', url_title)
song_title = song_title.strip('_|')
typo_ratio = .9
ratio = difflib.SequenceMatcher(None, song_title, title).ratio()
return ratio >= typo_ratio
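    # Illustrative sketch (added comment; the site name is hypothetical): a
    # search result titled "Pink Floyd - Breathe Lyrics | example.com" for the
    # song "Breathe" passes the exact-match test, since the slugified URL
    # title contains the slugified song title.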
def fetch(self, artist, title):
query = u"%s %s" % (artist, title)
url = u'https://www.googleapis.com/customsearch/v1?key=%s&cx=%s&q=%s' \
% (self.api_key, self.engine_id,
urllib.quote(query.encode('utf8')))
data = urllib.urlopen(url)
data = json.load(data)
if 'error' in data:
reason = data['error']['errors'][0]['reason']
self._log.debug(u'google lyrics backend error: {0}', reason)
return
        if 'items' in data:
for item in data['items']:
url_link = item['link']
url_title = item.get('title', u'')
if not self.is_page_candidate(url_link, url_title,
title, artist):
continue
html = self.fetch_url(url_link)
lyrics = scrape_lyrics_from_html(html)
if not lyrics:
continue
if self.is_lyrics(lyrics, artist):
self._log.debug(u'got lyrics from {0}',
item['displayLink'])
return lyrics
class LyricsPlugin(plugins.BeetsPlugin):
SOURCES = ['google', 'lyricwiki', 'lyrics.com', 'musixmatch', 'genius']
SOURCE_BACKENDS = {
'google': Google,
'lyricwiki': LyricsWiki,
'lyrics.com': LyricsCom,
'musixmatch': MusiXmatch,
'genius': Genius,
}
def __init__(self):
super(LyricsPlugin, self).__init__()
self.import_stages = [self.imported]
self.config.add({
'auto': True,
'google_API_key': None,
'google_engine_ID': u'009217259823014548361:lndtuqkycfu',
'genius_api_key':
"Ryq93pUGm8bM6eUWwD_M3NOFFDAtp2yEE7W"
"76V-uFL5jks5dNvcGCdarqFjDhP9c",
'fallback': None,
'force': False,
'sources': self.SOURCES,
})
self.config['google_API_key'].redact = True
self.config['google_engine_ID'].redact = True
self.config['genius_api_key'].redact = True
available_sources = list(self.SOURCES)
if not self.config['google_API_key'].get() and \
'google' in self.SOURCES:
available_sources.remove('google')
self.config['sources'] = plugins.sanitize_choices(
self.config['sources'].as_str_seq(), available_sources)
self.backends = [self.SOURCE_BACKENDS[key](self.config, self._log)
for key in self.config['sources'].as_str_seq()]
def commands(self):
cmd = ui.Subcommand('lyrics', help='fetch song lyrics')
cmd.parser.add_option('-p', '--print', dest='printlyr',
action='store_true', default=False,
help='print lyrics to console')
cmd.parser.add_option('-f', '--force', dest='force_refetch',
action='store_true', default=False,
help='always re-download lyrics')
def func(lib, opts, args):
# The "write to files" option corresponds to the
# import_write config value.
write = ui.should_write()
for item in lib.items(ui.decargs(args)):
self.fetch_item_lyrics(
lib, item, write,
opts.force_refetch or self.config['force'],
)
if opts.printlyr and item.lyrics:
ui.print_(item.lyrics)
cmd.func = func
return [cmd]
def imported(self, session, task):
"""Import hook for fetching lyrics automatically.
"""
if self.config['auto']:
for item in task.imported_items():
self.fetch_item_lyrics(session.lib, item,
False, self.config['force'])
def fetch_item_lyrics(self, lib, item, write, force):
"""Fetch and store lyrics for a single item. If ``write``, then the
lyrics will also be written to the file itself."""
# Skip if the item already has lyrics.
if not force and item.lyrics:
self._log.info(u'lyrics already present: {0}', item)
return
lyrics = None
for artist, titles in search_pairs(item):
lyrics = [self.get_lyrics(artist, title) for title in titles]
if any(lyrics):
break
lyrics = u"\n\n---\n\n".join([l for l in lyrics if l])
if lyrics:
self._log.info(u'fetched lyrics: {0}', item)
else:
self._log.info(u'lyrics not found: {0}', item)
fallback = self.config['fallback'].get()
if fallback:
lyrics = fallback
else:
return
item.lyrics = lyrics
if write:
item.try_write()
item.store()
def get_lyrics(self, artist, title):
"""Fetch lyrics, trying each source in turn. Return a string or
None if no lyrics were found.
"""
for backend in self.backends:
lyrics = backend.fetch(artist, title)
if lyrics:
self._log.debug(u'got lyrics from backend: {0}',
backend.__class__.__name__)
return _scrape_strip_cruft(lyrics, True)
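# Illustrative configuration sketch (added comment; the key values are
# placeholders) showing how this plugin might be configured in beets'
# config.yaml, based on the options registered in __init__ above:
#   lyrics:
#       auto: yes
#       sources: [google, musixmatch]
#       google_API_key: YOUR_API_KEY
#       fallback: ''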
|
mit
|
meganbkratz/acq4
|
acq4/util/imaging/record_thread.py
|
3
|
7494
|
import time
from acq4.util.Mutex import Mutex
from acq4.util.Thread import Thread
from PyQt4 import QtGui, QtCore
import acq4.util.debug as debug
from acq4.util.metaarray import MetaArray
import numpy as np
import acq4.util.ptime as ptime
import acq4.Manager
from acq4.util.DataManager import FileHandle, DirHandle
try:
from acq4.filetypes.ImageFile import *
HAVE_IMAGEFILE = True
except ImportError:
HAVE_IMAGEFILE = False
class RecordThread(Thread):
"""Class for offloading image recording to a worker thread.
"""
# sigShowMessage = QtCore.Signal(object)
sigRecordingFailed = QtCore.Signal()
sigRecordingFinished = QtCore.Signal(object, object) # file handle, num frames
sigSavedFrame = QtCore.Signal(object)
def __init__(self, ui):
Thread.__init__(self)
self.m = acq4.Manager.getManager()
self._stackSize = 0 # size of currently recorded stack
self._recording = False
self.currentFrame = None
self.frameLimit = None
# Interaction with worker thread:
self.lock = Mutex(QtCore.QMutex.Recursive)
        self.newFrames = [] # list of frames and the files they should be stored / appended to.
# Attributes private to worker thread:
self.currentStack = None # file handle of currently recorded stack
self.startFrameTime = None
self.lastFrameTime = None
self.currentFrameNum = 0
def startRecording(self, frameLimit=None):
"""Ask the recording thread to begin recording a new image stack.
*frameLimit* may specify the maximum number of frames in the stack before
the recording will stop.
"""
if self.recording:
raise Exception("Already recording; cannot start a new stack.")
self.frameLimit = frameLimit
self._stackSize = 0
self._recording = True
def stopRecording(self):
"""Ask the recording thread to stop recording new images to the image
stack.
"""
self.frameLimit = None
self._stackSize = 0
self._recording = False
with self.lock:
self.newFrames.append(False)
@property
def recording(self):
"""Bool indicating whether the thread is currently recording new frames
to an image stack.
"""
return self._recording
def saveFrame(self):
"""Ask the recording thread to save the most recently acquired frame.
"""
with self.lock:
self.newFrames.append({'frame': self.currentFrame, 'dir': self.m.getCurrentDir(), 'stack': False})
def newFrame(self, frame=None):
"""Inform the recording thread that a new frame has arrived.
Returns the number of frames currently waiting to be written.
"""
if frame is None:
return
self.currentFrame = frame
with self.lock:
if self.recording:
self.newFrames.append({'frame': self.currentFrame, 'dir': self.m.getCurrentDir(), 'stack': True})
self._stackSize += 1
framesLeft = len(self.newFrames)
if self.recording:
if self.frameLimit is not None and self._stackSize >= self.frameLimit:
self.frameLimit = None
self.stopRecording()
return framesLeft
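    # Illustrative usage sketch (added comment; `ui` and `frame` are supplied
    # by the caller):
    #   rec = RecordThread(ui)
    #   rec.start()                         # launch the worker thread
    #   rec.startRecording(frameLimit=100)
    #   rec.newFrame(frame)                 # call once per acquired frame
    #   rec.stopRecording()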
@property
def stackSize(self):
"""The total number of frames requested for storage in the current
image stack.
"""
return self._stackSize
def quit(self):
"""Stop the recording thread.
No new frames will be written after the thread exits.
"""
with self.lock:
self.stopThread = True
self.newFrames = []
self.currentFrame = None
def run(self):
# run is invoked in the worker thread automatically after calling start()
self.stopThread = False
while True:
with self.lock:
if self.stopThread:
break
newFrames = self.newFrames[:]
self.newFrames = []
try:
self.handleFrames(newFrames)
except:
debug.printExc('Error in image recording thread:')
self.sigRecordingFailed.emit()
time.sleep(100e-3)
def handleFrames(self, frames):
# Write as many frames into the stack as possible.
# If False appears in the list of frames, it indicates the end of a stack
# and any further frames are written to a new stack.
recFrames = []
for frame in frames:
if frame is False:
# stop current recording
if len(recFrames) > 0:
## write prior frames now
self.writeFrames(recFrames, dh)
recFrames = []
if self.currentStack is not None:
dur = self.lastFrameTime - self.startFrameTime
if dur > 0:
fps = (self.currentFrameNum+1) / dur
else:
fps = 0
self.currentStack.setInfo({'frames': self.currentFrameNum, 'duration': dur, 'averageFPS': fps})
# self.showMessage('Finished recording %s - %d frames, %02f sec' % (self.currentStack.name(), self.currentFrameNum, dur))
self.sigRecordingFinished.emit(self.currentStack, self.currentFrameNum)
self.currentStack = None
self.currentFrameNum = 0
continue
data = frame['frame'].getImage()
info = frame['frame'].info()
dh = frame['dir']
stack = frame['stack']
if stack is False:
# Store single frame to new file
try:
if HAVE_IMAGEFILE:
fileName = 'image.tif'
fh = dh.writeFile(data, fileName, info, fileType="ImageFile", autoIncrement=True)
else:
fileName = 'image.ma'
fh = dh.writeFile(data, fileName, info, fileType="MetaArray", autoIncrement=True)
self.sigSavedFrame.emit(fh.name())
except:
self.sigSavedFrame.emit(False)
raise
continue
# Store frame to current (or new) stack
recFrames.append((data, info))
self.lastFrameTime = info['time']
if len(recFrames) > 0:
self.writeFrames(recFrames, dh)
self.currentFrameNum += len(recFrames)
def writeFrames(self, frames, dh):
newRec = self.currentStack is None
if newRec:
self.startFrameTime = frames[0][1]['time']
times = [f[1]['time'] for f in frames]
arrayInfo = [
            {'name': 'Time', 'values': np.array(times) - self.startFrameTime, 'units': 's'},
{'name': 'X'},
{'name': 'Y'}
]
imgs = [f[0][np.newaxis,...] for f in frames]
data = MetaArray(np.concatenate(imgs, axis=0), info=arrayInfo)
if newRec:
self.currentStack = dh.writeFile(data, 'video', autoIncrement=True, info=frames[0][1], appendAxis='Time')
else:
data.write(self.currentStack.name(), appendAxis='Time')
|
mit
|