text (string, 6–947k chars) | repo_name (string, 5–100 chars) | path (string, 4–231 chars) | language (1 class) | license (15 classes) | size (int64, 6–947k) | score (float64, 0–0.34)
---|---|---|---|---|---|---|
#!/usr/bin/python
import sys, os
import tornado.ioloop
import tornado.web
import logging
import logging.handlers
import re
from urllib import unquote
import config
from vehiclenet import *
reload(sys)
sys.setdefaultencoding('utf8')
def deamon(chdir = False):
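# Fork once and let the parent exit so the child keeps running detached from the calling shell.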
try:
if os.fork() > 0:
os._exit(0)
except OSError, e:
print 'fork #1 failed: %d (%s)' % (e.errno, e.strerror)
os._exit(1)
def init():
WeatherHandler.cache()
class DefaultHandler(tornado.web.RequestHandler):
def get(self):
self.write('VehicleNet Say Hello!')
class LogHandler(tornado.web.RequestHandler):
def get(self):
log_filename = 'logs/logging'
if not os.path.exists(log_filename):
self.write('The log file does not exist.')
return
log_file = None
log_file_lines = None
try:
log_file = open(log_filename, 'r')
if log_file is None:
raise Exception('log_file is None')
log_file_lines = log_file.readlines()
if log_file_lines is None:
raise Exception('log_file_lines is None')
except Exception, e:
logger = logging.getLogger('web')
logger.error('Failed to read the log file (logs/logging), error: %s' % e)
finally:
if log_file is not None:
log_file.close()
if log_file_lines is None:
self.write('Failed to read the log file.')
return
line_limit = 500
for _ in log_file_lines[::-1]:
line_limit -= 1
if line_limit > 0:
self.write(unquote(_) + '<BR/>')
settings = {
"static_path": os.path.join(os.path.dirname(__file__), "static"),
}
routes = [
(r"/", DefaultHandler),
(r"/carlink/weather/findWeather.htm", WeatherHandler),
(r"/carlink/music/findMusic.htm", MusicSearchHandler),
(r"/carlink//music/findMusic.htm", MusicSearchHandler),
(r"/carlink/music/findMusicTop.htm", MusicTopHandler),
(r"/carlink/music/findMusicLrc.htm", LrcSearchHandler),
(r"/carlink/news/findNews.htm", NewsHandler),
]
if config.Mode == 'DEBUG':
routes.append((r"/log", LogHandler))
application = tornado.web.Application(routes, **settings)
if __name__ == "__main__":
if '-d' in sys.argv:
deamon()
logdir = 'logs'
if not os.path.exists(logdir):
os.makedirs(logdir)
fmt = '%(asctime)s - %(filename)s:%(lineno)s - %(name)s - %(message)s'
formatter = logging.Formatter(fmt)
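# Rotate the log file every 20 minutes ('M' = minutes, interval 20) and keep up to 360 old files.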
handler = logging.handlers.TimedRotatingFileHandler(
'%s/logging' % logdir, 'M', 20, 360)
handler.suffix = '%Y%m%d%H%M%S.log'
handler.extMatch = re.compile(r'^\d{4}\d{2}\d{2}\d{2}\d{2}\d{2}')
handler.setFormatter(formatter)
logger = logging.getLogger('web')
logger.addHandler(handler)
if config.Mode == 'DEBUG':
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.ERROR)
init()
application.listen(80)
print 'Server is running, listening on port 80....'
tornado.ioloop.IOLoop.instance().start()
| codemeow5/vehiclenet-python | web.py | Python | gpl-2.0 | 2,732 | 0.027086 |
"""映射 集合 ... 高级数据结构类型"""
from string import Template
val_dict = {1: 'a', 2: 'b', 3: 'c'}
print(val_dict)
print(val_dict.keys())
print(val_dict.items())
print(val_dict.values())
factory_dict = dict((['x', 1], ['y', 2]))
print(factory_dict)
ddcit = {}.fromkeys(('x', 'y', 'z'), -24)
ddcit.update(val_dict)  # new values overwrite old ones
print(ddcit)
print(ddcit.get("m", "no such key "))
print(ddcit.setdefault('x', "new value "))
print(type(ddcit.keys()))
for key in ddcit.keys():
s = Template("key is ${key} and value is ${value}")
# why does substitute() fail when key and value are not passed in?
print(s.substitute(key=key, value=ddcit[key]))
# the has_key method was removed; see the Python 3 docs: https://docs.python.org/3.1/whatsnew/3.0.html#builtins
var_tuple = (1, 'acs')
var_list = [1, 2, 3]
strange_dict = {var_tuple: 11, 1: 'abcd'}
# key membership test
print(1 in strange_dict)
# strange_dict = {var_tuple: 11, 1: 'abcd', var_list: 'acv'}
# syntactically fine, but it raises "unhashable type: 'list'" and every dict operation on it then fails,
# because dict keys are checked for hashability
print(strange_dict[var_tuple])
# print(strange_dict[var_list])
# strange_dict.pop(var_list)
strange_dict.pop(var_tuple)
strange_dict.clear()
del strange_dict
val_dict1 = {1: 'a', '2': "v"}
val_dict2 = {1: 'v'}
# print(val_dict > val_dict2)  # ordering comparisons between dicts are no longer supported in Python 3
print(dict([['x', 1], ['z', 2]]))
# fixed: the zip() function (or an equivalent map() with a lambda) can build such pairs
print(type(hash((1, 2, 3))))
print(hash((1, 2, 'a')))
# print(hash(([1, 23, 34], 'a')))
# a set guarantees that its elements are unique: a set in the true mathematical sense (no duplicates),
# not just a "collection" in the programming sense
print("------set-----")
var_set = set('aasn223wuerhe')
print(type(var_set))
print(var_set)
print("frozensetr ")
var_frozen_set = frozenset('aaddk2u9m3pq40aiwoe27na')
print(var_frozen_set)
print('a' in var_set)
print('2' in var_frozen_set) # True: the digits were stored as characters
print(2 in var_frozen_set) # False
# CRUD operations on a mutable set
var_set.update("anddipwq")
print(var_set)
var_set.discard("n")
print(var_set)
var_set.remove("a")
print(var_set)
var_set.pop()
print(var_set)
var_set.clear()
print(var_set)
var_set.add("$")
print(var_set)
var_set1 = set('rtyufghvb')
print(var_set1)
var_set2 = set('qwertyuiop')
print(var_set2)
var_set3 = set('qwertyuiop')
print(var_set3)
var_set4 = var_set1
print(var_set4)
var_set5 = set('qwert')
print(var_set5)
# set operations in the mathematical sense
print(var_set1 == var_set2)
print(var_set1 != var_set2)
print(var_set5 < var_set3)
print(var_set5.issubset(var_set3))
print(var_set1 <= var_set4)
print(var_set1.issuperset(var_set4))
print(var_set1 ^ var_set2) # symmetric difference A△B: elements of A or B outside their intersection
print(var_set1.symmetric_difference(var_set2))
print(var_set1.union(var_set5))
print(var_set1 | var_set5)
print(var_set5 & var_set3)
print(var_set5.intersection(var_set3))
print(var_set3 - var_set5)
print(var_set3.difference(var_set5))
# mixed set-type operations: the left operand determines whether the result is mutable (set) or immutable (frozenset)
immutable_set = frozenset("ansaskwke")
mutable_set = set("24m9sjwe")
immutable_set_1 = immutable_set | mutable_set
print(type(immutable_set_1))
# print(1 | 2)  # surprisingly, Python 3 supports this as well (it is just bitwise OR on integers)
| yanjinbin/learnPython | chapter_7/chapter7.py | Python | gpl-3.0 | 3,243 | 0.000699 |
# test seasonal.adjust_seasons() options handling
#
# adjust_seasons() handles a variety of optional arguments.
# verify that adjust_trend() is correctly called for different option combinations.
#
# No noise in this test set.
#
from __future__ import division
import numpy as np
from seasonal import fit_trend, adjust_seasons # pylint:disable=import-error
from seasonal.sequences import sine # pylint:disable=import-error
PERIOD = 25
CYCLES = 4
AMP = 1.0
TREND = AMP / PERIOD
LEVEL = 1000.0
SEASONS = sine(AMP, PERIOD, 1)
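# Synthetic series with no noise: constant level plus a linear trend plus a repeating sine season.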
DATA = LEVEL + np.arange(PERIOD * CYCLES) * TREND + np.tile(SEASONS, CYCLES)
ZEROS = np.zeros(PERIOD * CYCLES)
def iszero(a):
return np.all(np.isclose(a, ZEROS))
def isseasons(a):
return np.all(np.isclose(a, SEASONS))
def test_auto():
adjusted = adjust_seasons(DATA)
assert adjusted.std() < DATA.std()
def test_trend_line():
adjusted = adjust_seasons(DATA, trend="line")
assert adjusted.std() < DATA.std()
def test_explicit_trend():
trend = fit_trend(DATA, kind="line")
adjusted = adjust_seasons(DATA, trend=trend)
assert adjusted.std() < DATA.std()
def test_trend_period():
adjusted = adjust_seasons(DATA, trend="line", period=PERIOD)
assert adjusted.std() < DATA.std()
def test_trend_seasons():
adjusted = adjust_seasons(DATA, trend="line", seasons=SEASONS)
assert adjusted.std() < DATA.std()
def test_trend_spline():
adjusted = adjust_seasons(DATA, trend="spline")
assert adjusted.std() < DATA.std()
def test_period():
adjusted = adjust_seasons(DATA, period=PERIOD)
assert adjusted.std() < DATA.std()
adjusted = adjust_seasons(DATA, period=PERIOD // 2) # no seasonality
assert adjusted is None
def test_seasons():
adjusted = adjust_seasons(DATA, seasons=SEASONS)
assert adjusted.std() < DATA.std()
| welch/seasonal | tests/adjust_seasons_test.py | Python | mit | 1,815 | 0.007163 |
#!/usr/bin/env python
import subprocess
import praw
import datetime
import pyperclip
from hashlib import sha1
from flask import Flask
from flask import Response
from flask import request
from cStringIO import StringIO
from base64 import b64encode
from base64 import b64decode
from ConfigParser import ConfigParser
import OAuth2Util
import os
import markdown
import bleach
# encoding=utf8
import sys
from participantCollection import ParticipantCollection
reload(sys)
sys.setdefaultencoding('utf8')
# Edit Me!
challengePageSubmissionId = 'hjbcvx'
flaskport = 8936
thisMonthName = "July"
nextMonthName = "August"
readAllCommentsWhichCanBeSlower = False
sorryTooLateToSignUpReplyText = "Sorry, but the late signup grace period for " + thisMonthName + " is over, so you can't officially join this challenge. But feel free to follow along anyway, and comment all you want. And be sure to join us for the " + nextMonthName + " challenge. Signup posts for " + nextMonthName + " will begin during the last week of " + thisMonthName + "."
reinstatedReplyText = "OK, I've reinstated you. You should start showing up on the list again starting tomorrow."
app = Flask(__name__)
app.debug = True
commentHashesAndComments = {}
submission = None
def loginAndReturnRedditSession():
config = ConfigParser()
config.read("../reddit-password-credentials.cfg")
user = config.get("Reddit", "user")
password = config.get("Reddit", "password")
# TODO: password auth is going away, and we will soon need to do oauth.
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
redditSession.login(user, password, disable_warning=True)
# submissions = redditSession.get_subreddit('pornfree').get_hot(limit=5)
# print [str(x) for x in submissions]
return redditSession
def loginOAuthAndReturnRedditSession():
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
# New version of praw does not require explicit use of the OAuth2Util object. Presumably because reddit now REQUIRES oauth.
# o = OAuth2Util.OAuth2Util(redditSession, print_log=True, configfile="../reddit-oauth-credentials.cfg")
# TODO: Testing comment of refresh. We authenticate fresh every time, so presumably no need to do o.refresh().
# o.refresh(force=True)
return redditSession
def getSubmissionForRedditSession(redditSession):
# submission = redditSession.get_submission(submission_id=challengePageSubmissionId)
submission = redditSession.submission(id=challengePageSubmissionId)
if readAllCommentsWhichCanBeSlower:
submission.comments.replace_more(limit=None)
# submission.replace_more_comments(limit=None, threshold=0)
return submission
def getCommentsForSubmission(submission):
# return [comment for comment in praw.helpers.flatten_tree(submission.comments) if comment.__class__ == praw.models.Comment]
commentForest = submission.comments
# commentForest.replace_more(limit=None, threshold=0)
return [comment for comment in commentForest.list() if comment.__class__ == praw.models.Comment]
def retireCommentHash(commentHash):
with open("retiredcommenthashes.txt", "a") as commentHashFile:
commentHashFile.write(commentHash + '\n')
def retiredCommentHashes():
with open("retiredcommenthashes.txt", "r") as commentHashFile:
# return commentHashFile.readlines()
return commentHashFile.read().splitlines()
@app.route('/moderatechallenge.html')
def moderatechallenge():
currentDayOfMonthIndex = datetime.date.today().day
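# The late check-in grace period runs through the 3rd day of the month.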
lateCheckinGracePeriodIsInEffect = currentDayOfMonthIndex <= 3
global commentHashesAndComments
global submission
commentHashesAndComments = {}
stringio = StringIO()
stringio.write('<html>\n<head>\n</head>\n\n')
# redditSession = loginAndReturnRedditSession()
redditSession = loginOAuthAndReturnRedditSession()
submission = getSubmissionForRedditSession(redditSession)
flat_comments = getCommentsForSubmission(submission)
retiredHashes = retiredCommentHashes()
i = 1
stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
stringio.write("<h3>")
stringio.write(os.getcwd())
stringio.write("<br>\n")
stringio.write(submission.title)
stringio.write("</h3>\n\n")
stringio.write('<form action="copydisplaytoclipboard.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" name="actiontotake" value="Copy display.py stdout to clipboard">')
stringio.write('<input type="submit" name="actiontotake" value="Automatically post display.py stdout">')
stringio.write('</form>')
stringio.write('<form action="updategooglechart.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" value="update-google-chart.py">')
stringio.write('</form>')
for comment in flat_comments:
# print comment.is_root
# print comment.score
i += 1
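# Identify each comment by the SHA-1 of its fullname plus body; hashes already handled are listed in retiredcommenthashes.txt and skipped.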
commentHash = sha1()
commentHash.update(comment.fullname)
commentHash.update(comment.body.encode('utf-8'))
commentHash = commentHash.hexdigest()
if commentHash not in retiredHashes:
commentHashesAndComments[commentHash] = comment
authorName = str(comment.author) # can be None if author was deleted. So check for that and skip if it's None.
participant = ParticipantCollection().participantNamed(authorName)
stringio.write("<hr>\n")
stringio.write('<font color="blue"><b>')
stringio.write(authorName)
stringio.write('</b></font><br>')
if ParticipantCollection().hasParticipantNamed(authorName):
stringio.write(' <small><font color="green">(member)</font></small>')
if participant.isStillIn:
stringio.write(' <small><font color="green">(still in)</font></small>')
else:
stringio.write(' <small><font color="red">(out)</font></small>')
if participant.hasCheckedIn:
stringio.write(' <small><font color="green">(checked in)</font></small>')
else:
stringio.write(' <small><font color="orange">(not checked in)</font></small>')
if participant.hasRelapsed:
stringio.write(' <small><font color="red">(relapsed)</font></small>')
else:
stringio.write(' <small><font color="green">(not relapsed)</font></small>')
else:
stringio.write(' <small><font color="red">(not a member)</font></small>')
stringio.write('<form action="takeaction.html" method="post" target="invisibleiframe">')
if lateCheckinGracePeriodIsInEffect:
stringio.write('<input type="submit" name="actiontotake" value="Checkin">')
stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin" style="color:white;background-color:green">')
else:
stringio.write('<input type="submit" name="actiontotake" value="Checkin" style="color:white;background-color:green">')
stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
stringio.write('<input type="submit" name="actiontotake" value="Relapse" style="color:white;background-color:red">')
stringio.write('<input type="submit" name="actiontotake" value="Reinstate with automatic comment">')
stringio.write('<input type="submit" name="actiontotake" value="Reply with sorry-too-late comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">')
stringio.write('<input type="hidden" name="username" value="' + b64encode(authorName) + '">')
stringio.write('<input type="hidden" name="bodyencodedformlcorpus" value="' + b64encode(comment.body.encode('utf-8')) + '">')
stringio.write('<input type="hidden" name="commenthash" value="' + commentHash + '">')
# stringio.write('<input type="hidden" name="commentpermalink" value="' + comment.permalink + '">')
stringio.write('</form>')
stringio.write(bleach.clean(markdown.markdown(comment.body.encode('utf-8')), tags=['p']))
stringio.write("\n<br><br>\n\n")
stringio.write('</html>')
pageString = stringio.getvalue()
stringio.close()
return Response(pageString, mimetype='text/html')
@app.route('/takeaction.html', methods=["POST"])
def takeaction():
username = b64decode(request.form["username"])
commentHash = str(request.form["commenthash"])
bodyEncodedForMLCorpus = str(request.form["bodyencodedformlcorpus"])
# commentPermalink = request.form["commentpermalink"]
actionToTake = request.form["actiontotake"]
# print commentHashesAndComments
comment = commentHashesAndComments[commentHash]
# print "comment: " + str(comment)
if actionToTake == 'Checkin':
print "checkin - " + username
subprocess.call(['./checkin.py', username])
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusCheckin(bodyEncodedForMLCorpus)
if actionToTake == 'Signup and checkin':
print "signup and checkin - " + username
subprocess.call(['./signup-and-checkin.sh', username])
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusSignupAndCheckin(bodyEncodedForMLCorpus)
elif actionToTake == 'Relapse':
print "relapse - " + username
subprocess.call(['./relapse.py', username])
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusRelapse(bodyEncodedForMLCorpus)
elif actionToTake == 'Reinstate with automatic comment':
print "reinstate - " + username
subprocess.call(['./reinstate.py', username])
comment.reply(reinstatedReplyText)
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusReinstate(bodyEncodedForMLCorpus)
elif actionToTake == 'Reply with sorry-too-late comment':
print "reply with sorry-too-late comment - " + username
comment.reply(sorryTooLateToSignUpReplyText)
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusTooLate(bodyEncodedForMLCorpus)
elif actionToTake == 'Skip comment':
print "Skip comment - " + username
comment.upvote()
retireCommentHash(commentHash)
recordMLCorpusSkip(bodyEncodedForMLCorpus)
elif actionToTake == "Skip comment and don't upvote":
print "Skip comment and don't upvote - " + username
retireCommentHash(commentHash)
recordMLCorpusSkip(bodyEncodedForMLCorpus)
return Response("hello", mimetype='text/html')
@app.route('/copydisplaytoclipboard.html', methods=["POST"])
def copydisplaytoclipboard():
actionToTake = request.form["actiontotake"]
if actionToTake == 'Copy display.py stdout to clipboard':
subprocess.call(['./display.py'])
if actionToTake == 'Automatically post display.py stdout':
subprocess.call(['./display.py'])
submissionText = pyperclip.paste()
submission.edit(submissionText)
return Response("hello", mimetype='text/html')
@app.route('/updategooglechart.html', methods=["POST"])
def updategooglechart():
print "TODO: Copy display to clipboard"
subprocess.call(['./update-google-chart.py'])
return Response("hello", mimetype='text/html')
def recordMLCorpusCheckin(aString):
with open("../new-ml-corpus-monthly-checkin.txt", "a") as f:
f.write(aString)
f.write("\n")
def recordMLCorpusSignupAndCheckin(aString):
with open("../new-ml-corpus-monthly-signup-and-checkin.txt", "a") as f:
f.write(aString)
f.write("\n")
def recordMLCorpusRelapse(aString):
with open("../new-ml-corpus-monthly-relapse.txt", "a") as f:
f.write(aString)
f.write("\n")
def recordMLCorpusReinstate(aString):
with open("../new-ml-corpus-monthly-reinstate.txt", "a") as f:
f.write(aString)
f.write("\n")
def recordMLCorpusTooLate(aString):
with open("../new-ml-corpus-monthly-too-late.txt", "a") as f:
f.write(aString)
f.write("\n")
def recordMLCorpusSkip(aString):
with open("../new-ml-corpus-monthly-skip.txt", "a") as f:
f.write(aString)
f.write("\n")
if __name__ == '__main__':
app.run(host='127.0.0.1', port=flaskport)
| foobarbazblarg/stayclean | stayclean-2020-july/serve-challenge-with-flask.py | Python | mit | 12,690 | 0.003546 |
class Solution:
# @param n, an integer
# @return an integer
def reverseBits(self, n):
reverse = 0
r = n
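# Bit i of the input (taken from the low end of r) becomes bit 31 - i of the result.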
for i in range(32):
bit = r % 2
reverse += bit << (32-i-1)
r = r // 2  # floor division keeps r an integer under both Python 2 and 3
return reverse
s = Solution()
r = s.reverseBits(43261596)
print(r)
| lutianming/leetcode | reverse_bits.py | Python | mit | 321 | 0.003115 |
import unittest
from rsync_usb.ChunkLocation import ChunkLocation
class ChunkLocationTests(unittest.TestCase):
'''Tests for the ChunkLocation class'''
def testProperties(self):
pos = ChunkLocation('dummy', 100, 10)
self.assertEqual(pos.path, 'dummy')
self.assertEqual(pos.start_pos, 100)
self.assertEqual(pos.data_len, 10)
def testEndPos(self):
pos = ChunkLocation('dummy', 100, 10)
self.assertEqual(pos.start_pos + pos.data_len - 1, pos.end_pos)
self.assertEqual(pos.end_pos, 109)
def testEqual(self):
pos_a = ChunkLocation('dummy', 100, 10)
pos_b = ChunkLocation('dummy', 100, 10)
self.assertEqual(pos_a, pos_b)
# -- Overlaping chunk tests -----------------------------------------------
def assertOverlaping(self, pos_a, pos_b):
msg = "%s should overlap %s but did not"
self.assertTrue(pos_a.overlaps(pos_b), msg % (str(pos_a), str(pos_b)))
self.assertTrue(pos_b.overlaps(pos_a), msg % (str(pos_b), str(pos_a)))
def assertNotOverlaping(self, pos_a, pos_b):
msg = "%s should not overlap %s but does"
self.assertFalse(pos_a.overlaps(pos_b), msg % (str(pos_a), str(pos_b)))
self.assertFalse(pos_b.overlaps(pos_a), msg % (str(pos_b), str(pos_a)))
def testNoOverlapBefore(self):
pos_a = ChunkLocation('dummy', 10, 10)
pos_b = ChunkLocation('dummy', 100, 10)
self.assertNotOverlaping(pos_a, pos_b)
def testNoOverlapAfter(self):
pos_a = ChunkLocation('dummy', 1000, 10)
pos_b = ChunkLocation('dummy', 100, 10)
self.assertNotOverlaping(pos_a, pos_b)
def testNoOverlapDifferentPaths(self):
pos_a = ChunkLocation('dummy_a', 100, 10)
pos_b = ChunkLocation('dummy_b', 100, 10)
self.assertNotOverlaping(pos_a, pos_b)
def testOverlapEqual(self):
# 0000000000111111111112
# 0123456789001234567890
# A: ------|=======|-------
# B: ------|=======|-------
pos_a = ChunkLocation('dummy', 6, 9)
pos_b = ChunkLocation('dummy', 6, 9)
self.assertOverlaping(pos_a, pos_b)
def testOverlapStartsBefore(self):
# 0000000000111111111112
# 0123456789001234567890
# A: ----|=======|---------
# B: ------|=======|-------
pos_a = ChunkLocation('dummy', 4, 9)
pos_b = ChunkLocation('dummy', 6, 9)
self.assertOverlaping(pos_a, pos_b)
def testOverlapStartsBeforeAndEqual(self):
# 0000000000111111111112
# 0123456789001234567890
# A: ----|=========|-------
# B: ------|=======|-------
pos_a = ChunkLocation('dummy', 4, 11)
pos_b = ChunkLocation('dummy', 6, 9)
self.assertOverlaping(pos_a, pos_b)
def testOverlapInside(self):
# 0000000000111111111112
# 0123456789001234567890
# A: ------|=======|-------
# B: -----|=========|------
pos_a = ChunkLocation('dummy', 6, 9)
pos_b = ChunkLocation('dummy', 5, 11)
self.assertOverlaping(pos_a, pos_b)
def testOverlapInsideSameStart(self):
# 0000000000111111111112
# 0123456789001234567890
# A: ------|=======|-------
# B: ------|========|------
pos_a = ChunkLocation('dummy', 6, 9)
pos_b = ChunkLocation('dummy', 6, 10)
self.assertOverlaping(pos_a, pos_b)
def testOverlapInsideSameEnd(self):
# 0000000000111111111112
# 0123456789001234567890
# A: ------|=======|-------
# B: -----|========|-------
pos_a = ChunkLocation('dummy', 6, 9)
pos_b = ChunkLocation('dummy', 5, 10)
self.assertOverlaping(pos_a, pos_b)
def testOverlapEndsAfter(self):
# 0000000000111111111112
# 0123456789001234567890
# A: -------|=======|------
# B: ------|=======|-------
pos_a = ChunkLocation('dummy', 7, 9)
pos_b = ChunkLocation('dummy', 6, 9)
self.assertOverlaping(pos_a, pos_b)
def testOverlapEndsAfterAndEqual(self):
# 0000000000111111111112
# 0123456789001234567890
# A: ------|=========|-----
# B: ------|=======|-------
pos_a = ChunkLocation('dummy', 6, 11)
pos_b = ChunkLocation('dummy', 6, 9)
self.assertOverlaping(pos_a, pos_b)
| shearern/rsync-usb | src/rsync_usb_tests/ChunkLocationTests.py | Python | gpl-2.0 | 4,596 | 0.001958 |
# (c) 2013, Serge van Ginderachter <serge@vanginderachter.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.six import string_types
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.boolean import boolean
FLAGS = ('skip_missing',)
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
def _raise_terms_error(msg=""):
raise AnsibleError(
"subelements lookup expects a list of two or three items, "
+ msg)
terms[0] = listify_lookup_plugin_terms(terms[0], templar=self._templar, loader=self._loader)
# check lookup terms - check number of terms
if not isinstance(terms, list) or not 2 <= len(terms) <= 3:
_raise_terms_error()
# first term should be a list (or dict), second a string holding the subkey
if not isinstance(terms[0], (list, dict)) or not isinstance(terms[1], string_types):
_raise_terms_error("first a dict or a list, second a string pointing to the subkey")
subelements = terms[1].split(".")
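# The subkey may be a dotted path (e.g. "a.b.c"); it is walked one component at a time below.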
if isinstance(terms[0], dict): # convert to list:
if terms[0].get('skipped', False) is not False:
# the registered result was completely skipped
return []
elementlist = []
for key in terms[0].iterkeys():
elementlist.append(terms[0][key])
else:
elementlist = terms[0]
# check for optional flags in third term
flags = {}
if len(terms) == 3:
flags = terms[2]
if not isinstance(flags, dict) and not all([isinstance(key, string_types) and key in FLAGS for key in flags]):
_raise_terms_error("the optional third item must be a dict with flags %s" % FLAGS)
# build_items
ret = []
for item0 in elementlist:
if not isinstance(item0, dict):
raise AnsibleError("subelements lookup expects a dictionary, got '%s'" % item0)
if item0.get('skipped', False) is not False:
# this particular item is to be skipped
continue
skip_missing = boolean(flags.get('skip_missing', False))
subvalue = item0
lastsubkey = False
sublist = []
for subkey in subelements:
if subkey == subelements[-1]:
lastsubkey = True
if not subkey in subvalue:
if skip_missing:
continue
else:
raise AnsibleError("could not find '%s' key in iterated item '%s'" % (subkey, subvalue))
if not lastsubkey:
if not isinstance(subvalue[subkey], dict):
if skip_missing:
continue
else:
raise AnsibleError("the key %s should point to a dictionary, got '%s'" % (subkey, subvalue[subkey]))
else:
subvalue = subvalue[subkey]
else: # lastsubkey
if not isinstance(subvalue[subkey], list):
raise AnsibleError("the key %s should point to a list, got '%s'" % (subkey, subvalue[subkey]))
else:
sublist = subvalue.pop(subkey, [])
for item1 in sublist:
ret.append((item0, item1))
return ret
| wenottingham/ansible | lib/ansible/plugins/lookup/subelements.py | Python | gpl-3.0 | 4,311 | 0.002784 |
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Auth Middleware that handles auth for a service
This module can be installed as a filter in front of your service to validate
that requests are coming from a trusted component that has handled
authenticating the call. If a call comes from an untrusted source, it will
redirect it back to be properly authenticated. This is done by sending out a
305 proxy redirect response with the URL for the auth service.
The auth service settings are specified in the INI file (keystone.ini). The ini
file is passed in as the WSGI config file when starting the service. For this
proof of concept, the ini file is in echo/echo/echo.ini.
In the current implementation, a basic auth password is used to verify that the
request is coming from a valid auth component or service.
Refer to: http://wiki.openstack.org/openstack-authn
HEADERS
-------
HTTP_ is a standard http header
HTTP_X is an extended http header
> Coming in from initial call
HTTP_X_AUTH_TOKEN : the client token being passed in
HTTP_X_STORAGE_TOKEN: the client token being passed in (legacy Rackspace use)
to support cloud files
> Used for communication between components
www-authenticate : only used if this component is being used remotely
HTTP_AUTHORIZATION : basic auth password used to validate the connection
> What we add to the request for use by the OpenStack service
HTTP_X_AUTHORIZATION: the client identity being passed in
"""
from webob.exc import HTTPUseProxy, HTTPUnauthorized
class RemoteAuth(object):
# app is the downstream WSGI component, usually the OpenStack service
#
# if app is not provided, the assumption is this filter is being run
# from a separate server.
def __init__(self, app, conf):
# app is the next app in WSGI chain - eventually the OpenStack service
self.app = app
self.conf = conf
# where to redirect untrusted requests to
self.proxy_location = conf.get('proxy_location')
# secret that will tell us a request is coming from a trusted auth
# component
self.remote_auth_pass = conf.get('remote_auth_pass')
print 'Starting Remote Auth middleware'
def __call__(self, env, start_response):
# Validate the request is trusted
# Authenticate the Auth component itself.
headers = [('www-authenticate', 'Basic realm="API Auth"')]
if 'HTTP_AUTHORIZATION' not in env:
# Redirect to proxy (auth component) and show that basic auth is
# required
return HTTPUseProxy(location=self.proxy_location,
headers=headers)(env, start_response)
else:
auth_type, encoded_creds = env['HTTP_AUTHORIZATION'].split(None, 1)
if encoded_creds != self.remote_auth_pass:
return HTTPUnauthorized(headers=headers)(env, start_response)
# Make sure that the user has been authenticated by the Auth Service
if 'HTTP_X_AUTHORIZATION' not in env:
return HTTPUnauthorized()(env, start_response)
return self.app(env, start_response)
def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
def auth_filter(app):
return RemoteAuth(app, conf)
return auth_filter
| ntt-pf-lab/backup_keystone | keystone/middleware/remoteauth.py | Python | apache-2.0 | 4,006 | 0.00025 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Client module for connecting to and interacting with SmartyStreets API
"""
import json
import numbers
import requests
from .data import Address, AddressCollection
from .exceptions import SmartyStreetsError, ERROR_CODES
def validate_args(f):
"""
Ensures that *args consist of a consistent type
:param f: any client method with *args parameter
:return: function f
"""
def wrapper(self, args):
arg_types = set([type(arg) for arg in args])
if len(arg_types) > 1:
raise TypeError("Mixed input types are not allowed")
elif list(arg_types)[0] not in (dict, str):
raise TypeError("Only dict and str types accepted")
return f(self, args)
return wrapper
def truncate_args(f):
"""
Ensures that *args do not exceed a set limit or are truncated to meet that limit
:param f: any Client method with *args parameter
:return: function f
"""
def wrapper(self, args):
if len(args) > 100:
if self.truncate_addresses:
args = args[:100]
else:
raise ValueError("This exceeds the SmartyStreets limit of 100 addresses at a time")
return f(self, args)
return wrapper
def stringify(data):
"""
Ensure all values in the dictionary are strings, except for the value for `candidate` which
should just be an integer.
:param data: a list of addresses in dictionary format
:return: the same list with all values except for `candidate` count as a string
"""
def serialize(k, v):
if k == "candidates":
return int(v)
if isinstance(v, numbers.Number):
if k == "zipcode":
# If values are presented as integers then leading digits may be cut off,
# and these are significant for the zipcode. Add them back.
return str(v).zfill(5)
return str(v)
return v
return [
{
k: serialize(k, v) for k, v in json_dict.items()
}
for json_dict in data
]
class Client(object):
"""
Client class for interacting with the SmartyStreets API
"""
BASE_URL = "https://api.smartystreets.com/"
def __init__(self, auth_id, auth_token, standardize=False, invalid=False, logging=True,
accept_keypair=False, truncate_addresses=False, timeout=None):
"""
Constructs the client
:param auth_id: authentication ID from SmartyStreets
:param auth_token: authentication token
:param standardize: boolean include addresses that match zip+4 in addition to DPV confirmed
addresses
:param invalid: boolean to include address candidates that may not be deliverable
:param logging: boolean to allow SmartyStreets to log requests
:param accept_keypair: boolean to toggle default keypair behavior
:param truncate_addresses: boolean to silently truncate address lists in excess of the
SmartyStreets maximum rather than raise an error.
:param timeout: optional timeout value in seconds for requests.
:return: the configured client object
"""
self.auth_id = auth_id
self.auth_token = auth_token
self.standardize = standardize
self.invalid = invalid
self.logging = logging
self.accept_keypair = accept_keypair
self.truncate_addresses = truncate_addresses
self.timeout = timeout
self.session = requests.Session()
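# Retry requests to the API host up to 5 times on connection failures.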
self.session.mount(self.BASE_URL, requests.adapters.HTTPAdapter(max_retries=5))
def post(self, endpoint, data):
"""
Executes the HTTP POST request
:param endpoint: string indicating the URL component to call
:param data: the data to submit
:return: the dumped JSON response content
"""
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'x-standardize-only': 'true' if self.standardize else 'false',
'x-include-invalid': 'true' if self.invalid else 'false',
'x-accept-keypair': 'true' if self.accept_keypair else 'false',
}
if not self.logging:
headers['x-suppress-logging'] = 'true'
params = {'auth-id': self.auth_id, 'auth-token': self.auth_token}
url = self.BASE_URL + endpoint
response = self.session.post(url, json.dumps(stringify(data)),
params=params, headers=headers, timeout=self.timeout)
if response.status_code == 200:
return response.json()
raise ERROR_CODES.get(response.status_code, SmartyStreetsError)
@truncate_args
@validate_args
def street_addresses(self, addresses):
"""
API method for verifying street address and geolocating
Returns an AddressCollection always for consistency. In common usage it'd be simple and
sane to return an Address when only one address was searched, however this makes
populating search addresses from lists of unknown length problematic. If that list
returns only one address now the code has to check the type of return value to ensure
that it isn't applying behavior for an expected list type rather than a single dictionary.
>>> client.street_addresses(["100 Main St, Anywhere, USA"], ["6 S Blvd, Richmond, VA"])
>>> client.street_addresses([{"street": "100 Main St, anywhere USA"}, ... ])
:param addresses: 1 or more addresses in string or dict format
:return: an AddressCollection
"""
# While it's okay in theory to accept freeform addresses they do need to be submitted in
# a dictionary format.
if type(addresses[0]) != dict:
addresses = [{'street': arg} for arg in addresses]
return AddressCollection(self.post('street-address', data=addresses))
def street_address(self, address):
"""
Geocode one and only address, get a single Address object back
>>> client.street_address("100 Main St, Anywhere, USA")
>>> client.street_address({"street": "100 Main St, anywhere USA"})
:param address: string or dictionary with street address information
:return: an Address object or None for no match
"""
address = self.street_addresses([address])
if not len(address):
return None
return Address(address[0])
def zipcode(self, *args):
raise NotImplementedError("You cannot lookup zipcodes yet")
| audantic/smartystreets.py | smartystreets/client.py | Python | bsd-3-clause | 6,663 | 0.002852 |
from i3pystatus.playerctl import Playerctl
class Spotify(Playerctl):
"""
Get Spotify info using playerctl. Based on `Playerctl`_ module.
"""
player_name = "spotify"
| m45t3r/i3pystatus | i3pystatus/spotify.py | Python | mit | 183 | 0 |
from datetime import timedelta
from contentstore.utils import get_modulestore
from xmodule.modulestore.django import loc_mapper
from xblock.fields import Scope
class CourseGradingModel(object):
"""
Basically a DAO and Model combo for CRUD operations pertaining to grading policy.
"""
# Within this class, allow access to protected members of client classes.
# This comes up when accessing kvs data and caches during kvs saves and modulestore writes.
def __init__(self, course_descriptor):
self.graders = [
CourseGradingModel.jsonize_grader(i, grader) for i, grader in enumerate(course_descriptor.raw_grader)
] # weights transformed to ints [0..100]
self.grade_cutoffs = course_descriptor.grade_cutoffs
self.grace_period = CourseGradingModel.convert_set_grace_period(course_descriptor)
@classmethod
def fetch(cls, course_locator):
"""
Fetch the course grading policy for the given course from persistence and return a CourseGradingModel.
"""
course_old_location = loc_mapper().translate_locator_to_location(course_locator)
descriptor = get_modulestore(course_old_location).get_item(course_old_location)
model = cls(descriptor)
return model
@staticmethod
def fetch_grader(course_location, index):
"""
Fetch the course's nth grader
Returns an empty dict if there's no such grader.
"""
course_old_location = loc_mapper().translate_locator_to_location(course_location)
descriptor = get_modulestore(course_old_location).get_item(course_old_location)
index = int(index)
if len(descriptor.raw_grader) > index:
return CourseGradingModel.jsonize_grader(index, descriptor.raw_grader[index])
# return empty model
else:
return {"id": index,
"type": "",
"min_count": 0,
"drop_count": 0,
"short_label": None,
"weight": 0
}
@staticmethod
def update_from_json(course_locator, jsondict, user):
"""
Decode the json into CourseGradingModel and save any changes. Returns the modified model.
Probably not the usual path for updates as it's too coarse grained.
"""
course_old_location = loc_mapper().translate_locator_to_location(course_locator)
descriptor = get_modulestore(course_old_location).get_item(course_old_location)
graders_parsed = [CourseGradingModel.parse_grader(jsonele) for jsonele in jsondict['graders']]
descriptor.raw_grader = graders_parsed
descriptor.grade_cutoffs = jsondict['grade_cutoffs']
get_modulestore(course_old_location).update_item(descriptor, user.id)
CourseGradingModel.update_grace_period_from_json(course_locator, jsondict['grace_period'], user)
return CourseGradingModel.fetch(course_locator)
@staticmethod
def update_grader_from_json(course_location, grader, user):
"""
Create or update the grader of the given type (string key) for the given course. Returns the modified
grader which is a full model on the client but not on the server (just a dict)
"""
course_old_location = loc_mapper().translate_locator_to_location(course_location)
descriptor = get_modulestore(course_old_location).get_item(course_old_location)
# parse removes the id; so, grab it before parse
index = int(grader.get('id', len(descriptor.raw_grader)))
grader = CourseGradingModel.parse_grader(grader)
if index < len(descriptor.raw_grader):
descriptor.raw_grader[index] = grader
else:
descriptor.raw_grader.append(grader)
get_modulestore(course_old_location).update_item(descriptor, user.id)
return CourseGradingModel.jsonize_grader(index, descriptor.raw_grader[index])
@staticmethod
def update_cutoffs_from_json(course_location, cutoffs, user):
"""
Create or update the grade cutoffs for the given course. Returns the cutoffs that were sent in (i.e., no extra
db fetch).
"""
course_old_location = loc_mapper().translate_locator_to_location(course_location)
descriptor = get_modulestore(course_old_location).get_item(course_old_location)
descriptor.grade_cutoffs = cutoffs
get_modulestore(course_old_location).update_item(descriptor, user.id)
return cutoffs
@staticmethod
def update_grace_period_from_json(course_location, graceperiodjson, user):
"""
Update the course's default grace period. Incoming dict is {hours: h, minutes: m} possibly as a
grace_period entry in an enclosing dict. It is also safe to call this method with a value of
None for graceperiodjson.
"""
course_old_location = loc_mapper().translate_locator_to_location(course_location)
descriptor = get_modulestore(course_old_location).get_item(course_old_location)
# Before a graceperiod has ever been created, it will be None (once it has been
# created, it cannot be set back to None).
if graceperiodjson is not None:
if 'grace_period' in graceperiodjson:
graceperiodjson = graceperiodjson['grace_period']
grace_timedelta = timedelta(**graceperiodjson)
descriptor.graceperiod = grace_timedelta
get_modulestore(course_old_location).update_item(descriptor, user.id)
@staticmethod
def delete_grader(course_location, index, user):
"""
Delete the grader of the given type from the given course.
"""
course_old_location = loc_mapper().translate_locator_to_location(course_location)
descriptor = get_modulestore(course_old_location).get_item(course_old_location)
index = int(index)
if index < len(descriptor.raw_grader):
del descriptor.raw_grader[index]
# force propagation to definition
descriptor.raw_grader = descriptor.raw_grader
get_modulestore(course_old_location).update_item(descriptor, user.id)
@staticmethod
def delete_grace_period(course_location, user):
"""
Delete the course's grace period.
"""
course_old_location = loc_mapper().translate_locator_to_location(course_location)
descriptor = get_modulestore(course_old_location).get_item(course_old_location)
del descriptor.graceperiod
get_modulestore(course_old_location).update_item(descriptor, user.id)
@staticmethod
def get_section_grader_type(location):
old_location = loc_mapper().translate_locator_to_location(location)
descriptor = get_modulestore(old_location).get_item(old_location)
return {
"graderType": descriptor.format if descriptor.format is not None else 'notgraded',
"location": unicode(location),
}
@staticmethod
def update_section_grader_type(descriptor, grader_type, user):
if grader_type is not None and grader_type != u'notgraded':
descriptor.format = grader_type
descriptor.graded = True
else:
del descriptor.format
del descriptor.graded
get_modulestore(descriptor.location).update_item(descriptor, user.id)
return {'graderType': grader_type}
@staticmethod
def convert_set_grace_period(descriptor):
# 5 hours 59 minutes 59 seconds => converted to iso format
rawgrace = descriptor.graceperiod
if rawgrace:
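# timedelta exposes only days and seconds, so fold the days into hours and split the remaining seconds into hours, minutes and seconds.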
hours_from_days = rawgrace.days * 24
seconds = rawgrace.seconds
hours_from_seconds = int(seconds / 3600)
hours = hours_from_days + hours_from_seconds
seconds -= hours_from_seconds * 3600
minutes = int(seconds / 60)
seconds -= minutes * 60
graceperiod = {'hours': 0, 'minutes': 0, 'seconds': 0}
if hours > 0:
graceperiod['hours'] = hours
if minutes > 0:
graceperiod['minutes'] = minutes
if seconds > 0:
graceperiod['seconds'] = seconds
return graceperiod
else:
return None
@staticmethod
def parse_grader(json_grader):
# manual to clear out kruft
result = {"type": json_grader["type"],
"min_count": int(json_grader.get('min_count', 0)),
"drop_count": int(json_grader.get('drop_count', 0)),
"short_label": json_grader.get('short_label', None),
"weight": float(json_grader.get('weight', 0)) / 100.0
}
return result
@staticmethod
def jsonize_grader(i, grader):
grader['id'] = i
if grader['weight']:
grader['weight'] *= 100
if not 'short_label' in grader:
grader['short_label'] = ""
return grader
| liuqr/edx-xiaodun | cms/djangoapps/models/settings/course_grading.py | Python | agpl-3.0 | 9,046 | 0.003869 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf_export tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from tensorflow.python.platform import test
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_export
def _test_function(unused_arg=0):
pass
def _test_function2(unused_arg=0):
pass
class TestClassA(object):
pass
class TestClassB(TestClassA):
pass
class ValidateExportTest(test.TestCase):
"""Tests for tf_export class."""
class MockModule(object):
def __init__(self, name):
self.__name__ = name
def setUp(self):
self._modules = []
def tearDown(self):
for name in self._modules:
del sys.modules[name]
self._modules = []
for symbol in [_test_function, _test_function, TestClassA, TestClassB]:
if hasattr(symbol, '_tf_api_names'):
del symbol._tf_api_names
if hasattr(symbol, '_tf_api_names_v1'):
del symbol._tf_api_names_v1
def _CreateMockModule(self, name):
mock_module = self.MockModule(name)
sys.modules[name] = mock_module
self._modules.append(name)
return mock_module
def testExportSingleFunction(self):
export_decorator = tf_export.tf_export('nameA', 'nameB')
decorated_function = export_decorator(_test_function)
self.assertEquals(decorated_function, _test_function)
self.assertEquals(('nameA', 'nameB'), decorated_function._tf_api_names)
def testExportMultipleFunctions(self):
export_decorator1 = tf_export.tf_export('nameA', 'nameB')
export_decorator2 = tf_export.tf_export('nameC', 'nameD')
decorated_function1 = export_decorator1(_test_function)
decorated_function2 = export_decorator2(_test_function2)
self.assertEquals(decorated_function1, _test_function)
self.assertEquals(decorated_function2, _test_function2)
self.assertEquals(('nameA', 'nameB'), decorated_function1._tf_api_names)
self.assertEquals(('nameC', 'nameD'), decorated_function2._tf_api_names)
def testExportClasses(self):
export_decorator_a = tf_export.tf_export('TestClassA1')
export_decorator_a(TestClassA)
self.assertEquals(('TestClassA1',), TestClassA._tf_api_names)
self.assertTrue('_tf_api_names' not in TestClassB.__dict__)
export_decorator_b = tf_export.tf_export('TestClassB1')
export_decorator_b(TestClassB)
self.assertEquals(('TestClassA1',), TestClassA._tf_api_names)
self.assertEquals(('TestClassB1',), TestClassB._tf_api_names)
def testExportSingleConstant(self):
module1 = self._CreateMockModule('module1')
export_decorator = tf_export.tf_export('NAME_A', 'NAME_B')
export_decorator.export_constant('module1', 'test_constant')
self.assertEquals([(('NAME_A', 'NAME_B'), 'test_constant')],
module1._tf_api_constants)
def testExportMultipleConstants(self):
module1 = self._CreateMockModule('module1')
module2 = self._CreateMockModule('module2')
test_constant1 = 123
test_constant2 = 'abc'
test_constant3 = 0.5
export_decorator1 = tf_export.tf_export('NAME_A', 'NAME_B')
export_decorator2 = tf_export.tf_export('NAME_C', 'NAME_D')
export_decorator3 = tf_export.tf_export('NAME_E', 'NAME_F')
export_decorator1.export_constant('module1', test_constant1)
export_decorator2.export_constant('module2', test_constant2)
export_decorator3.export_constant('module2', test_constant3)
self.assertEquals([(('NAME_A', 'NAME_B'), 123)],
module1._tf_api_constants)
self.assertEquals([(('NAME_C', 'NAME_D'), 'abc'),
(('NAME_E', 'NAME_F'), 0.5)],
module2._tf_api_constants)
def testRaisesExceptionIfAlreadyHasAPINames(self):
_test_function._tf_api_names = ['abc']
export_decorator = tf_export.tf_export('nameA', 'nameB')
with self.assertRaises(tf_export.SymbolAlreadyExposedError):
export_decorator(_test_function)
def testRaisesExceptionIfInvalidSymbolName(self):
# TensorFlow code is not allowed to export symbols under package
# tf.estimator
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.tf_export('estimator.invalid')
# All symbols exported by Estimator must be under tf.estimator package.
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.estimator_export('invalid')
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.estimator_export('Estimator.invalid')
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.estimator_export('invalid.estimator')
def testRaisesExceptionIfInvalidV1SymbolName(self):
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.tf_export('valid', v1=['estimator.invalid'])
with self.assertRaises(tf_export.InvalidSymbolNameError):
tf_export.estimator_export('estimator.valid', v1=['invalid'])
def testOverridesFunction(self):
_test_function2._tf_api_names = ['abc']
export_decorator = tf_export.tf_export(
'nameA', 'nameB', overrides=[_test_function2])
export_decorator(_test_function)
# _test_function overrides _test_function2. So, _tf_api_names
# should be removed from _test_function2.
self.assertFalse(hasattr(_test_function2, '_tf_api_names'))
def testMultipleDecorators(self):
def get_wrapper(func):
def wrapper(*unused_args, **unused_kwargs):
pass
return tf_decorator.make_decorator(func, wrapper)
decorated_function = get_wrapper(_test_function)
export_decorator = tf_export.tf_export('nameA', 'nameB')
exported_function = export_decorator(decorated_function)
self.assertEquals(decorated_function, exported_function)
self.assertEquals(('nameA', 'nameB'), _test_function._tf_api_names)
if __name__ == '__main__':
test.main()
| hehongliang/tensorflow | tensorflow/python/util/tf_export_test.py | Python | apache-2.0 | 6,573 | 0.005021 |
#!/usr/bin/env python3
# sudo apt-get install python3-tk
# This file is part of PFunc. PFunc provides a set of simple tools for users
# to analyze preference functions and other function-valued traits.
#
# Copyright 2016-2022 Joseph Kilmer
#
# PFunc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PFunc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Import statements
from tkinter import *
from tkinter import filedialog
from tkinter import messagebox
import tkinter.font as tkFont
from sys import argv
from sys import platform
from os import getcwd
from os import environ
from os import listdir
from os import path
from math import log10
from math import ceil as ceiling
import shelve
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt # must come after matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg as FigureCanvas
from matplotlib.figure import Figure
from datetime import datetime
# If using matplotlib 2+, make it look like matplotlib 1.5.x
if int(matplotlib.__version__.split('.')[0]) >= 2:
matplotlib.style.use('classic')
# For opening the PDF help file:
if platform == 'win32':
from os import startfile
else:
import subprocess
# For finding R on the system:
try:
import rpy2.robjects as robjects # must come after matplotlib or numpy
environ['R_HOME']
except:
custom_path = '0'
if 'PFuncPath.txt' in listdir():
with open('PFuncPath.txt') as pathfile:
lines = pathfile.readlines()
for l in lines:
if l[0:11] == 'custom_path':
custom_path = str(l[12:-1])
break
if custom_path == '0':
if platform == 'win32' and 'R' in listdir('C:\\Program Files'):
r_versions = []
for d in listdir('C:\\Program Files\\R'):
if d[0:2] == 'R-':
r_versions.append(d)
custom_path = 'C:\\Program Files\\R\\' + r_versions[-1]
elif platform == 'darwin':
custom_path = '/Library/Frameworks/R.framework/Resources'
elif platform == 'linux':
custom_path = '/usr/bin'
environ['R_HOME'] = custom_path
environ['R_USER'] = path.dirname(path.realpath(argv[0]))
import rpy2.robjects as robjects
r = robjects.r
class PrefFunc():
'''This is the base-level data structure for the program. Each PrefFunc
object corresponds to an individual in the dataset. This is called when
opening a new file and when creating group-level splines.
As input, it takes a dataframe that originated in R, and the names of
a bunch of different variables that act as settings for generating splines.
'''
def __init__(self, r_data_frame, id_number, smoothing_value, current_sp,
sp_lim, sp_min, sp_max,
loc_peak, peak_min, peak_max,
tol_type, tol_drop, tol_absolute, tol_mode,
tol_floor, strength_mode, spline_type='individual'):
self.smoothing_value = smoothing_value
self.current_sp = current_sp
self.sp_lim = sp_lim
self.sp_min = sp_min
self.sp_max = sp_max
self.loc_peak = loc_peak
self.peak_min = peak_min
self.peak_max = peak_max
self.tol_type = tol_type
self.tol_drop = tol_drop
self.tol_absolute = tol_absolute
self.tol_mode = tol_mode
self.tol_floor = tol_floor
self.strength_mode = strength_mode
self.r_data_frame = r_data_frame
self.id_number = id_number
self.type = spline_type
self.sp_status = 'magenta' # magenta = default, cyan = adjusted
self.update()
self.name = r('names(%s)[2]' % self.r_data_frame.r_repr())[0]
self.data_x = r('curr.func$data.x')
self.data_y = r('curr.func$data.y')
self.page = ((self.id_number - 1) // 9) + 1
self.slot = ((self.id_number - 1) % 9) + 1
self.background = 'white'
if self.type == 'group':
self.constituents = r('mydf')
self.background = '#ffff99'
self.name = r('names(%s)[3]' % self.r_data_frame.r_repr())[0]
def update(self):
self.generate_spline()
self.populate_stats()
def generate_spline(self):
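# Translate the GUI tolerance settings into the drop/floor arguments expected by the R-side PFunc call:
# relative mode passes the user's drop and floor, absolute mode fixes drop at 1 and uses the absolute value as the floor.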
if self.tol_type.get() == 'relative':
instance_drop = self.tol_drop.get()
instance_floor = self.tol_floor.get()
elif self.tol_type.get() == 'absolute':
instance_drop = 1
instance_floor = self.tol_absolute.get()
if self.loc_peak.get() == 0:
instance_peak = '1'
elif self.loc_peak.get() == 1:
instance_peak = 'c(%s, %s)' % (self.peak_min.get(),
self.peak_max.get())
if self.sp_status == 'magenta':
self.reset_sp()
if self.type == 'group':
r("ind.data <- %s[2:3]" % self.r_data_frame.r_repr())
else:
r("ind.data <- %s" % self.r_data_frame.r_repr())
r("""curr.func <- PFunc(ind.data, 2, %s, peak.within = %s,
drop = %s, tol.mode = '%s',
sp.binding = %d, min.sp = %s, max.sp = %s,
graph.se = TRUE,
forgui = TRUE, tol.floor = %s
)""" % (self.smoothing_value.get(),
instance_peak, instance_drop, self.tol_mode.get(),
self.sp_lim.get(), self.sp_min.get(), self.sp_max.get(),
instance_floor))
r("master.gam.list[[%s]] <- curr.func$gam.object" % self.id_number)
def populate_stats(self):
self.spline_x = r('curr.func$stimulus')
self.spline_y = r('curr.func$response')
self.se = r('curr.func$se')
self.peak_pref = ('%s' % r('curr.func$peak.preference')).split()[1]
self.peak_resp = ('%s' % r('curr.func$peak.response')).split()[1]
self.broad_tolerance = ('%s' % r('curr.func$broad.tol')).split()[1]
self.strict_tolerance = ('%s' % r('curr.func$strict.tol')).split()[1]
self.broad_tolerance_points = r('curr.func$broad.tol.points')
self.strict_tolerance_points = r('curr.func$strict.tol.points')
self.tolerance_height = ('%s' % r('curr.func$tol.height')).split()[1]
self.hd_strength = ('%s' % r('curr.func$hd.strength')).split()[1]
self.hi_strength = ('%s' % r('curr.func$hi.strength')).split()[1]
self.responsiveness = ('%s' % r('curr.func$responsiveness')).split()[1]
self.axes_ranges = r('range.bundle') # min.x, max.x, min.y, max.y
self.smoothing_value.set((
'%s' % r('curr.func$smoothing.parameter')).split()[1])
self.is_flat = r('curr.func$is.flat')
def stiffen(self):
'''Increase the smoothing parameter'''
self.smoothing_value.set(self.increment_sp(by=0.1))
self.sp_status = 'cyan'
self.update()
self.current_sp.set(self.smoothing_value.get())
def loosen(self):
'''Decrease the smoothing parameter'''
self.smoothing_value.set(self.increment_sp(by=-0.1))
self.sp_status = 'cyan'
self.update()
self.current_sp.set(self.smoothing_value.get())
def reset_sp(self):
'''Reset the smoothing parameter to the default value'''
self.smoothing_value.set('-1')
self.sp_status = 'none' # Protection against infinite loops in update
self.update()
self.sp_status = 'magenta'
def increment_sp(self, by):
'''Adjust the smoothing parameter by one step up or down.
Steps are logarithmic.
'''
current_sp = float(self.current_sp.get())
log_sp_val = log10(current_sp)
round_log_sp_val = round(log_sp_val, 1)
new_sp_val = round(10 ** (round_log_sp_val + by), 6)
return str(new_sp_val)
def update_peak(self):
'''Update just the peak of the preference function, without running the
whole PFunc function in R again.
'''
previous_peak = self.peak_pref
if self.loc_peak.get() == 0:
instance_peak = '1'
elif self.loc_peak.get() == 1:
instance_peak = 'c(%s, %s)' % (self.peak_min.get(),
self.peak_max.get())
peak_bundle = r('''Peak(input.stimuli = %s,
preference.function = master.gam.list[[%s]],
peak.within = %s,
is.flat = %s)
''' % (self.data_x.r_repr(),
self.id_number,
instance_peak,
self.is_flat.r_repr()))
self.peak_pref = ('%s' % r('%s$peak.preference'
% peak_bundle.r_repr())).split()[1]
self.peak_resp = ('%s' % r('%s$peak.response'
% peak_bundle.r_repr())).split()[1]
if self.tol_mode.get() == 'strict' and previous_peak != self.peak_pref:
self.update_tolerance()
def update_tolerance(self):
'''Update just the tolerance of the preference function, without
running the whole PFunc function in R again.
'''
if self.tol_type.get() == 'relative':
instance_drop = self.tol_drop.get()
instance_floor = self.tol_floor.get()
elif self.tol_type.get() == 'absolute':
instance_drop = 1
instance_floor = self.tol_absolute.get()
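        # Rebuild a lightweight peak bundle in R from the cached spline
        # points, then recompute tolerance from it without refitting.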
r('''temp.stim.values <- data.frame(stimulus = %s)
temp.peak.bundle <- list(peak.preference = %s,
peak.response = %s,
predicting.stimuli = temp.stim.values,
predicted.response = as.vector(%s),
max.stim = max(temp.stim.values),
min.stim = min(temp.stim.values))
''' % (self.spline_x.r_repr(),
self.peak_pref,
self.peak_resp,
self.spline_y.r_repr()
)
)
tolerance_bundle = r('''Tolerance(drop = %s,
peak.bundle = temp.peak.bundle,
is.flat = %s,
preference.function =
master.gam.list[[%s]],
tol.floor = %s)
''' % (instance_drop,
self.is_flat.r_repr(),
self.id_number,
instance_floor))
self.broad_tolerance = ('%s' % r('%s$broad.tolerance'
% tolerance_bundle.r_repr())).split()[1]
self.strict_tolerance = ('%s' % r('%s$strict.tolerance'
% tolerance_bundle.r_repr())).split()[1]
self.broad_tolerance_points = r('%s$cross.points'
% tolerance_bundle.r_repr())
self.strict_tolerance_points = r('%s$strict.points'
% tolerance_bundle.r_repr())
self.tolerance_height = ('%s' % r('%s$tolerance.height'
% tolerance_bundle.r_repr())).split()[1]
class GraphArea(Frame):
'''Contains everything in the main viewing window of PFunc, including
the welcome screen and the graphs.
Input is particular pieces of display data as well as the names of
variables controlled by View settings.
'''
def __init__(self, individual_dict, current_col, current_page,
view_names, view_pts, view_pandtol, view_spline, view_se,
tol_mode, input_font, parent=None, **kw):
Frame.__init__(self, parent, relief=SUNKEN, bd=1)
self.current_col = current_col
self.recent_col = IntVar()
self.current_page = current_page
self.view_names = view_names
self.view_pts = view_pts
self.view_pandtol = view_pandtol
self.view_spline = view_spline
self.view_se = view_se
self.tol_mode = tol_mode
self.input_font = input_font
self.parent = parent
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
self.individual_dict = individual_dict
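        # Lookup tables used by the 3x3 grid view:
        #   page_dict: page number -> individual numbers shown on that page
        #   slot_dict: slot number (1-9) -> matplotlib subplot
        #   individual_slot_dict: slot number -> individual number
        #   tcid_tols: str(subplot axes) -> slot number (for click handling)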
self.page_dict = {}
self.individual_slot_dict = {}
self.slot_dict = {}
self.tcid_tols = {}
self.wrapper = Frame(self)
self.wrapper.grid(row=0, column=0, sticky=NSEW)
self.wrapper.columnconfigure(0, weight=1)
self.wrapper.rowconfigure(0, weight=1)
self.create_welcome()
self.build_page_controls()
self.fig = ''
self.fig_canvas = ''
self.cid = ''
self.current_slot = ''
self.recent_slot = ''
self.num_pages = 0
def create_welcome(self):
self.welcome_canvas = Canvas(self.wrapper, height=550, width=550,
bg='white')
self.Welcome_text = self.welcome_canvas.create_text(
275, 225, text='Welcome to PFunc', font=('Helvetica', 36))
self.instruction_text = self.welcome_canvas.create_text(
275, 275, text='Open a data file to begin.',
font=('Helvetica', 12))
self.copyright_text1 = 'Copyright (C) 2016, 2017 Joseph Kilmer'
self.copyright_text2 = ('PFunc is distributed under the GNU General '
'Public License v3. See About in the Help '
'menu for a summary of GPLv3.\nTo view the '
'full license, see the accompanying file '
'called COPYING.txt or visit '
'http://www.gnu.org/licenses/.')
if platform == 'darwin':
self.copyright1 = self.welcome_canvas.create_text(
275, 500, text=self.copyright_text1,
font=('Helvetica', 10), justify=CENTER)
self.copyright2 = self.welcome_canvas.create_text(
275, 530, text=self.copyright_text2,
font=('Helvetica', 9), justify=CENTER)
else:
self.copyright1 = self.welcome_canvas.create_text(
275, 500, text=self.copyright_text1,
font=('Helvetica', 8), justify=CENTER)
self.copyright2 = self.welcome_canvas.create_text(
275, 530, text=self.copyright_text2,
font=('Helvetica', 7), justify=CENTER)
self.welcome_canvas.grid(row=0, column=0, sticky=NSEW)
self.view = 'welcome'
def loading_screen(self):
if self.view == 'welcome':
self.welcome_canvas.destroy()
else:
self.wrapper.destroy()
self.wrapper = Frame(self)
self.wrapper.grid(row=0, column=0, sticky=NSEW)
self.wrapper.columnconfigure(0, weight=1)
self.wrapper.rowconfigure(0, weight=1)
self.loading_canvas = Canvas(self.wrapper, height=550, width=550,
bg='gray75')
self.loading_text = self.loading_canvas.create_text(
275, 225, text='Loading...', font=('Helvetica', 24))
self.loading_text2 = self.loading_canvas.create_text(
275, 260, text='This may take several seconds.', font=('Helvetica', 12))
self.loading_canvas.lift(self.loading_text)
self.loading_canvas.grid(row=0, column=0, sticky=NSEW)
self.loading_canvas.update_idletasks()
self.view = 'loading'
def mini_graphs(self, page, and_deselect=True):
'''Display 3x3 grid of preference function graphs for a given page.'''
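        # Some platforms do not support the 'wait' cursor name, so fall
        # back to 'watch'.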
try:
self.parent.config(cursor='wait')
except:
self.parent.config(cursor='watch')
self.parent.update()
self.view = 'mini'
self.slot_dict.clear()
self.tcid_tols.clear()
self.individual_slot_dict.clear()
self.wrapper.destroy()
self.wrapper = Frame(self)
self.wrapper.grid(row=0, column=0, sticky=NSEW)
self.wrapper.columnconfigure(0, weight=1)
self.wrapper.rowconfigure(0, weight=1)
if self.first_page_butt.cget('state') == DISABLED:
self.first_page_butt.configure(state=NORMAL)
self.back_page_butt.configure(state=NORMAL)
self.page_num_ent.configure(state=NORMAL)
self.page_total.configure(state=NORMAL)
self.next_page_butt.configure(state=NORMAL)
self.last_page_butt.configure(state=NORMAL)
self.fig = Figure(figsize=(7, 7))
self.fig.subplots_adjust(top=0.95, right=0.95, bottom=0.12, hspace=0.4,
wspace=0.3)
self.fig_canvas = FigureCanvas(self.fig, master=self)
self.cid = self.fig_canvas.mpl_connect('button_press_event',
self.mini_graph_click)
self.fig_canvas.get_tk_widget().grid(row=0, column=0, sticky=NSEW)
# This is what creates the individual graphs:
counter = 1
for i in self.page_dict[page]:
individual = self.individual_dict[i]
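            # matplotlib 2.x uses 'facecolor' in place of the older 'axisbg'
            # keyword, so branch on the major version.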
if int(matplotlib.__version__.split('.')[0]) >= 2:
self.slot_dict[counter] = self.fig.add_subplot(
'33%d' % counter, facecolor=individual.background)
else:
self.slot_dict[counter] = self.fig.add_subplot(
'33%d' % counter, axisbg=individual.background)
slot = self.slot_dict[counter]
slot.tick_params(labelsize=10, top=False, right=False)
slot.spines['top'].set_visible(False)
slot.spines['right'].set_visible(False)
self.tcid_tols[str(slot.axes)] = counter
self.individual_slot_dict[counter] = i
self.draw_graph(slot, individual)
counter += 1
self.fig.text(0.05, 0.45, 'Preference', ha='center', va='bottom',
rotation='vertical', fontsize=20)
self.fig.text(0.53, 0.02, 'Stimulus', ha='center', va='bottom',
fontsize=20)
if self.current_slot != '':
self.select_mini_graph(self.current_slot, and_deselect)
self.fig.canvas.draw()
self.parent.config(cursor='')
def mega_graph(self, column):
'''Draw one big graph for a particular individual.'''
try:
self.parent.config(cursor='wait')
except:
self.parent.config(cursor='watch')
self.parent.update()
self.view = 'mega'
self.wrapper.destroy()
self.wrapper = Frame(self)
self.wrapper.grid(row=0, column=0, sticky=NSEW)
self.fig = Figure(figsize=(7, 7), dpi=80)
self.fig.subplots_adjust(top=0.95, right=0.95, bottom=0.15, left=0.15,
hspace=0.3, wspace=0.3)
self.fig_canvas = FigureCanvas(self.fig, master=self)
self.cid = self.fig_canvas.mpl_connect('button_press_event',
self.mega_graph_click)
self.fig_canvas.get_tk_widget().grid(row=0, column=0, sticky=NSEW)
individual = self.individual_dict[column]
if int(matplotlib.__version__.split('.')[0]) >= 2:
slot = self.fig.add_subplot('111', facecolor=individual.background)
else:
slot = self.fig.add_subplot('111', axisbg=individual.background)
slot.tick_params(labelsize=20, top=False, right=False, pad=8)
slot.spines['top'].set_visible(False)
slot.spines['right'].set_visible(False)
self.current_page.set(individual.page)
self.draw_graph(slot, individual)
self.fig.text(0.05, 0.45, 'Preference', ha='center', va='bottom',
rotation='vertical', fontsize=20)
self.fig.text(0.53, 0.02, 'Stimulus', ha='center', va='bottom',
fontsize=20)
self.fig.canvas.draw()
self.parent.config(cursor='')
def mini_graph_click(self, event):
'''Defines what happens when a mini graph is clicked.
A single click either selects or deselects the graph.
A double-click expands the mini graph into a mega graph.
'''
        if event.inaxes is None:
self.deselect_mini_graph()
self.current_slot = ''
elif event.button == 1:
new_slot = self.tcid_tols[str(event.inaxes)]
if event.dblclick:
if self.current_slot == '':
self.current_col.set(self.recent_col.get())
self.current_slot = self.recent_slot
self.select_mini_graph(self.current_slot, and_deselect=False)
self.mega_graph(self.current_col.get())
self.page_total.configure(text='/ %s' %
len(self.individual_dict))
self.page_num_ent.configure(textvariable=self.current_col)
elif self.current_slot != new_slot:
self.select_mini_graph(new_slot)
self.current_slot = new_slot
self.recent_col.set(self.current_col.get())
self.recent_slot = new_slot
else:
self.recent_slot = self.current_slot
self.recent_col.set(self.current_col.get())
self.deselect_mini_graph()
self.current_slot = ''
self.fig.canvas.draw()
def mega_graph_click(self, event):
'''When a mega graph is double-clicked, the view returns to the 3x3
grid of mini graphs.
'''
if event.button == 1 and event.dblclick:
self.mini_graphs(self.current_page.get(), and_deselect=False)
self.fig.canvas.draw()
self.page_total.configure(text='/ %s' % self.num_pages)
self.page_num_ent.configure(textvariable=self.current_page)
def select_mini_graph(self, new_slot, and_deselect=True):
'''Draws a box around a mini graph and displays its stats when the
mini graph is selected.
'''
if and_deselect:
self.deselect_mini_graph()
if new_slot != '':
self.slot_dict[new_slot].spines['bottom'].set_linewidth(2.0)
self.slot_dict[new_slot].spines['left'].set_linewidth(2.0)
self.slot_dict[new_slot].spines['top'].set_linewidth(2.0)
self.slot_dict[new_slot].spines['right'].set_linewidth(2.0)
self.slot_dict[new_slot].spines['top'].set_visible(True)
self.slot_dict[new_slot].spines['right'].set_visible(True)
self.current_col.set(self.individual_slot_dict[new_slot])
self.event_generate('<<update_sp>>')
self.event_generate('<<update_summary>>')
def deselect_mini_graph(self):
'''Removes the box around the graph and clears the stat display when
a mini graph is deselected.
'''
if self.current_slot != '':
self.slot_dict[
self.current_slot].spines['bottom'].set_linewidth(1.0)
self.slot_dict[self.current_slot].spines['left'].set_linewidth(1.0)
self.slot_dict[self.current_slot].spines['top'].set_visible(False)
self.slot_dict[
self.current_slot].spines['right'].set_visible(False)
self.current_slot = ''
self.current_col.set(0)
self.event_generate('<<clear_display>>')
def update_graph(self):
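        # Redraw the display after a change, dispatching to the mini-grid or
        # mega-graph updater depending on the current view.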
try:
self.parent.config(cursor='wait')
except:
self.parent.config(cursor='watch')
self.parent.update()
if self.view == 'mini':
self.update_mini_graph()
elif self.view == 'mega':
self.update_mega_graph()
self.parent.config(cursor='')
def update_mini_graph(self):
'''Draws a new graph in response to changes in settings or smoothing
parameter.
'''
slot = self.current_slot
if slot != '':
slot_item = self.slot_dict[slot]
slot_item.clear()
slot_item.tick_params(labelsize=10, top=False, right=False)
individual = self.individual_dict[self.current_col.get()]
self.tcid_tols[str(slot_item.axes)] = slot
self.individual_slot_dict[slot] = self.current_col.get()
self.draw_graph(slot_item, individual)
self.fig.canvas.draw()
def update_mega_graph(self):
'''Draws a new graph in response to changes in settings or smoothing
parameter.
'''
self.fig.clf()
individual = self.individual_dict[self.current_col.get()]
if int(matplotlib.__version__.split('.')[0]) >= 2:
slot = self.fig.add_subplot('111', facecolor=individual.background)
else:
slot = self.fig.add_subplot('111', axisbg=individual.background)
slot.tick_params(labelsize=20, top=False, right=False, pad=8)
slot.spines['top'].set_visible(False)
slot.spines['right'].set_visible(False)
self.draw_graph(slot, individual)
self.fig.text(0.05, 0.45, 'Preference', ha='center', va='bottom',
rotation='vertical', fontsize=20)
self.fig.text(0.53, 0.02, 'Stimulus', ha='center', va='bottom',
fontsize=20)
self.fig.canvas.draw()
def draw_graph(self, slot, individual):
'''Draw a single graph, either in the mini view or the mega view.'''
slot.axis(list(individual.axes_ranges))
if self.view == 'mini':
pt_size = 5
plt.setp(slot.xaxis.get_majorticklabels(), rotation=60)
elif self.view == 'mega':
pt_size = 10
if self.view_pts.get() == 1 and individual.type == 'individual':
slot.plot(individual.data_x, individual.data_y, 'k.',
markersize=pt_size)
elif self.view_pts.get() == 1 and individual.type == 'group':
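            # For group-level splines, also draw each constituent spline
            # (stored in the group's data frame) in light purple.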
n_constit = int(r("""
tempdf <- %s
length(levels(as.factor(tempdf$names)))
""" % individual.r_data_frame.r_repr())[0])
for i in range(0, n_constit):
r("current.subset.name <- levels(as.factor(tempdf$names))[%d]"
% (i + 1))
r("""current.subset.rows <- which(tempdf$names
== current.subset.name)""")
constx = r("tempdf[current.subset.rows, 2]").r_repr()[2: -1]
constx = eval('[' + constx + ']')
consty = r("tempdf[current.subset.rows, 3]").r_repr()[2: -1]
consty = eval('[' + consty + ']')
slot.plot(constx, consty, color='#cc99ff', linestyle='solid')
if self.view_pandtol.get() == 1:
if individual.peak_pref != 'NA':
slot.plot([individual.peak_pref, individual.peak_pref],
[individual.axes_ranges[2], individual.peak_resp],
'r-')
if self.tol_mode.get() == 'broad':
current_tolerance_points = individual.broad_tolerance_points
elif self.tol_mode.get() == 'strict':
current_tolerance_points = individual.strict_tolerance_points
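                # Tolerance points come in pairs marking the start and end of
                # each horizontal segment drawn at the tolerance height.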
for t in range(0, len(current_tolerance_points), 2):
slot.plot([current_tolerance_points[t],
current_tolerance_points[t+1]],
[individual.tolerance_height,
individual.tolerance_height], 'b-')
# for t in range(0, len(individual.tolerance_points), 2):
# slot.plot([individual.tolerance_points[t],
# individual.tolerance_points[t+1]],
# [individual.tolerance_height,
# individual.tolerance_height], 'b-')
if self.view_spline.get() == 1:
slot.plot(individual.spline_x, individual.spline_y, 'k-')
if self.view_se.get() == 1:
upper_se = []
lower_se = []
for i in range(len(individual.se)):
upper_se.append(individual.spline_y[i] + individual.se[i])
lower_se.append(individual.spline_y[i] - individual.se[i])
slot.plot(individual.spline_x, upper_se, color='#666666',
linestyle='dashed')
slot.plot(individual.spline_x, lower_se, color='#666666',
linestyle='dashed')
if self.view_names.get() == 1 and self.view == 'mini':
slot.set_title(individual.name, size='small')
elif self.view_names.get() == 1 and self.view == 'mega':
slot.set_title(individual.name, size='large')
minx = individual.axes_ranges[0]
maxx = individual.axes_ranges[1]
miny = individual.axes_ranges[2]
maxy = individual.axes_ranges[3]
dotx = minx - ((maxx - minx) / 10)
doty = miny - ((maxy - miny) / 10)
dottype = individual.sp_status
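        # Draw a small status dot just outside the lower-left corner of the
        # axes: magenta = default smoothing parameter, cyan = user-adjusted.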
slot.plot(dotx, doty, color=dottype, marker='.',
markersize=(pt_size*2), clip_on=False)
slot.plot(dotx, doty, color='black', marker='o', fillstyle='none',
markersize=(pt_size), clip_on=False)
def build_page_controls(self):
'''Initialize the nav buttons at the bottom of the display area.'''
self.page_controls = Frame(self)
self.page_controls.grid(row=1, column=0, sticky=EW+S)
self.page_controls.columnconfigure(0, weight=1)
self.page_controls.columnconfigure(7, weight=1)
if platform == 'darwin':
pd = [8, 8, 0] # padx first&last, padx back&next, pady for all
else:
pd = [1, 4, 0]
self.first_page_butt = Button(self.page_controls,
text='|<<', padx=pd[0],
pady=pd[2], state=DISABLED,
command=self.first_page)
self.first_page_butt.grid(row=0, column=1)
self.back_page_butt = Button(self.page_controls, text='<', padx=pd[1],
pady=pd[2], state=DISABLED,
command=self.back_page)
self.back_page_butt.grid(row=0, column=2)
self.page_num_ent = Entry(self.page_controls, width=3, justify=RIGHT,
textvariable=self.current_page,
font=self.input_font)
self.page_num_ent.grid(row=0, column=3)
self.page_num_ent.bind('<Return>', self.enter_page_number)
self.page_num_ent.configure(state=DISABLED)
self.page_total = Label(self.page_controls, text='/ 0', state=DISABLED)
self.page_total.grid(row=0, column=4)
self.next_page_butt = Button(self.page_controls, text='>', padx=pd[1],
pady=pd[2], state=DISABLED,
command=self.next_page)
self.next_page_butt.grid(row=0, column=5)
self.last_page_butt = Button(self.page_controls, text='>>|',
padx=pd[0], pady=pd[2], state=DISABLED,
command=self.last_page)
self.last_page_butt.grid(row=0, column=6)
def first_page(self):
'''Jump to the first page'''
if self.view == 'mini' and self.current_page.get() > 1:
self.current_page.set(1)
self.current_slot = ''
self.mini_graphs(self.current_page.get())
self.event_generate('<<clear_display>>')
elif self.view == 'mega' and self.current_col.get() > 1:
self.current_col.set(1)
self.recent_col.set(1)
self.mega_graph(self.current_col.get())
individual = self.individual_dict[self.current_col.get()]
self.current_page.set(individual.page)
self.current_slot = individual.slot
self.recent_slot = individual.slot
self.event_generate('<<update_summary>>')
self.event_generate('<<update_sp>>')
def back_page(self):
'''Go back one page'''
if self.view == 'mini' and self.current_page.get() > 1:
self.current_page.set(self.current_page.get() - 1)
self.current_slot = ''
self.mini_graphs(self.current_page.get())
self.event_generate('<<clear_display>>')
elif self.view == 'mega' and self.current_col.get() > 1:
self.current_col.set(self.current_col.get() - 1)
self.recent_col.set(self.current_col.get() - 1)
self.mega_graph(self.current_col.get())
individual = self.individual_dict[self.current_col.get()]
self.current_page.set(individual.page)
self.current_slot = individual.slot
self.recent_slot = individual.slot
self.event_generate('<<update_summary>>')
self.event_generate('<<update_sp>>')
def next_page(self):
'''Go forward one page'''
num_pages = len(self.page_dict)
num_ind = len(self.individual_dict)
if self.view == 'mini' and self.current_page.get() < num_pages:
self.current_page.set(self.current_page.get() + 1)
self.current_slot = ''
self.mini_graphs(self.current_page.get())
self.event_generate('<<clear_display>>')
elif self.view == 'mega' and self.current_col.get() < num_ind:
self.current_col.set(self.current_col.get() + 1)
self.recent_col.set(self.current_col.get() + 1)
self.mega_graph(self.current_col.get())
individual = self.individual_dict[self.current_col.get()]
self.current_page.set(individual.page)
self.current_slot = individual.slot
self.recent_slot = individual.slot
self.event_generate('<<update_summary>>')
self.event_generate('<<update_sp>>')
def last_page(self):
'''Jump ahead to the last page'''
num_pages = len(self.page_dict)
num_ind = len(self.individual_dict)
if self.view == 'mini' and self.current_page.get() < num_pages:
self.current_page.set(num_pages)
self.current_slot = ''
self.mini_graphs(self.current_page.get())
self.event_generate('<<clear_display>>')
elif self.view == 'mega' and self.current_col.get() < num_ind:
self.current_col.set(num_ind)
self.recent_col.set(num_ind)
self.mega_graph(self.current_col.get())
individual = self.individual_dict[self.current_col.get()]
self.current_page.set(individual.page)
self.current_slot = individual.slot
self.recent_slot = individual.slot
self.event_generate('<<update_summary>>')
self.event_generate('<<update_sp>>')
def enter_page_number(self, event):
'''Executed when the widget is active and the Return key is pressed.
The graph view updates to the new page specified in the text box.
'''
if self.view == 'mini':
if self.current_page.get() < 1:
self.current_page.set(1)
if self.current_page.get() > self.num_pages:
self.current_page.set(self.num_pages)
self.deselect_mini_graph()
self.mini_graphs(self.current_page.get())
elif self.view == 'mega':
if self.current_col.get() < 1:
self.current_col.set(1)
if self.current_col.get() > len(self.individual_dict):
self.current_col.set(len(self.individual_dict))
self.mega_graph(self.current_col.get())
self.event_generate('<<update_summary>>')
self.event_generate('<<update_sp>>')
class SmoothingBox(LabelFrame):
'''The frame for displaying and controlling the smoothing parameter.'''
def __init__(self, parent=None, text='Smoothing', padx=2, pady=2,
heading_font='TkDefaultFont', input_font='TkDefaultFont',
row=0, column=0, current_sp='', platform=platform,
**kw):
LabelFrame.__init__(self, parent, text=text,
padx=padx, pady=pady,
font=heading_font)
self.grid(row=row, column=column, sticky=EW)
self.columnconfigure(3, weight=1)
self.sp_ent = Entry(self, width=10, textvariable=current_sp,
state=DISABLED, font=input_font)
self.sp_ent.grid(row=0, column=0, sticky=W)
if platform == 'win32':
self.sp_dn = Button(self, text='-', width=2,
command=self.loosen_event,
pady=0, state=DISABLED)
self.sp_up = Button(self, text='+',
command=self.stiffen_event,
width=2, pady=0, state=DISABLED)
self.reset_butt = Button(self, text='reset', pady=0,
command=self.reset_sp_event,
state=DISABLED, width=8)
elif platform == 'darwin':
self.sp_dn = Button(self, text='-', command=self.loosen_event,
padx=8, pady=0, state=DISABLED)
self.sp_up = Button(self, text='+', command=self.stiffen_event,
padx=8, pady=0, state=DISABLED)
self.reset_butt = Button(self, text='reset', pady=0,
command=self.reset_sp_event,
state=DISABLED)
else:
self.sp_dn = Button(self, text='-', command=self.loosen_event,
padx=4, pady=0, state=DISABLED)
self.sp_up = Button(self, text='+', command=self.stiffen_event,
padx=2, pady=0, state=DISABLED)
self.reset_butt = Button(self, text='reset', pady=0,
command=self.reset_sp_event,
state=DISABLED, padx=4)
self.sp_dn.grid(row=0, column=1, sticky=E)
self.sp_up.grid(row=0, column=2)
self.reset_butt.grid(row=0, column=3, sticky=NSEW)
self.sp_ent.bind('<Return>', self.enter_sp)
def loosen_event(self):
self.event_generate('<<loosen>>')
def stiffen_event(self):
self.event_generate('<<stiffen>>')
def reset_sp_event(self):
self.event_generate('<<reset_sp>>')
def activate(self):
self.sp_ent.configure(state=NORMAL)
self.sp_up.configure(state=NORMAL)
self.sp_dn.configure(state=NORMAL)
self.reset_butt.configure(state=NORMAL)
def enter_sp(self, event):
self.event_generate('<<enter_sp>>')
class SummaryBox(LabelFrame):
'''The frame that displays the spline summary for the currently
selected individual.
'''
def __init__(self, parent=None, text='Summary', padx=2, pady=2,
heading_font='TkDefaultFont', summary_font='TkDefaultFont',
row=0, column=0, **kw):
LabelFrame.__init__(self, parent, text=text,
padx=padx, pady=pady,
font=heading_font)
self.grid(row=row, column=column, sticky=EW)
self.columnconfigure(0, weight=1)
self.summary_text = ('Peak Preference: \n'
'Peak Height: \n'
'Tolerance: \n'
'Strength: \n'
'Responsiveness: \n'
'Smoothing: ')
self.summary_window = Text(self, height=6, width=25,
font=summary_font)
self.summary_window.grid(row=0, column=0, sticky=EW)
self.summary_window.insert(END, self.summary_text)
self.summary_window.configure(state=DISABLED)
def update_summary(self, individual=None,
strength_mode=None, tol_mode=None):
self.summary_window.configure(state=NORMAL)
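        # Tk text indices are 'line.column'; each delete below clears the
        # value portion of one summary line (e.g. '1.17' is the position
        # just after 'Peak Preference: ').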
        self.summary_window.delete('1.17', '1.end')
        self.summary_window.delete('2.13', '2.end')
        self.summary_window.delete('3.11', '3.end')
        self.summary_window.delete('4.10', '4.end')
        self.summary_window.delete('5.16', '5.end')
        self.summary_window.delete('6.11', '6.end')
if individual is not None:
            self.summary_window.insert('1.17', individual.peak_pref)
            self.summary_window.insert('2.13', individual.peak_resp)
# self.summary_window.insert(3.11, individual.tolerance)
if tol_mode.get() == 'broad':
self.summary_window.insert('3.11', individual.broad_tolerance)
elif tol_mode.get() == 'strict':
self.summary_window.insert('3.11', individual.strict_tolerance)
if strength_mode.get() == 'Height-Dependent':
self.summary_window.insert('4.10', individual.hd_strength)
elif strength_mode.get() == 'Height-Independent':
self.summary_window.insert('4.10', individual.hi_strength)
            self.summary_window.insert('5.16', individual.responsiveness)
            self.summary_window.insert('6.11', individual.smoothing_value.get())
self.summary_window.configure(state=DISABLED)
class ViewBoxItem(Frame):
'''A single entry in the View Settings frame.'''
def __init__(self, parent=None, text='', variable=None,
pady=0, row=0, column=0, **kw):
Frame.__init__(self, parent, padx=0, pady=0)
self.grid(row=row, column=column, sticky=EW)
self.v_box = Checkbutton(
self, variable=variable, pady=pady,
command=lambda: self.event_generate('<<update_all_graphs>>'))
self.v_box.grid(row=0, column=0)
self.v_label = Label(self, text=text, pady=pady)
self.v_label.grid(row=0, column=1, sticky=W)
class ViewBox(LabelFrame):
'''The frame that contains settings for toggling particular graphical
elements in the graphs.
'''
def __init__(self, parent=None, text='View', padx=2, pady=0,
heading_font='TkDefaultFont',
view_names_var=None, view_pts_var=None,
view_pandtol_var=None, view_spline_var=None,
view_se_var=None, row=0, column=0, **kw):
LabelFrame.__init__(self, parent, text=text,
padx=padx, pady=pady,
font=heading_font)
self.grid(row=row, column=column, sticky=EW)
self.v_names = ViewBoxItem(self, text='Names', pady=pady,
row=0, variable=view_names_var)
self.v_datapts = ViewBoxItem(self, text='Data Points', pady=pady,
row=1, variable=view_pts_var)
self.v_pktol = ViewBoxItem(self, text='Peak & Tolerance', pady=pady,
row=2, variable=view_pandtol_var)
self.v_splines = ViewBoxItem(self, text='Splines', pady=pady,
row=3, variable=view_spline_var)
self.v_se = ViewBoxItem(self, text='Standard Error', pady=pady,
row=4, variable=view_se_var)
class SmoothingLimitsBox(LabelFrame):
'''The frame that allows users to control the minimum and maximum values for
smoothing parameters.
'''
def __init__(self, parent=None, text='Smoothing Limits', padx=2, pady=2,
heading_font='TkDefaultFont', input_font='TkDefaultFont',
row=0, column=0, sp_lim_state=None, sp_min=None,
sp_max=None, **kw):
self.sp_lim_state = sp_lim_state
self.sp_lim_title_frame = Frame()
LabelFrame.__init__(self, parent, labelwidget=self.sp_lim_title_frame,
padx=padx, pady=pady, font=heading_font)
self.grid(row=row, column=column, sticky=EW)
self.sp_lim_name = Label(self.sp_lim_title_frame,
text=text, font=heading_font)
self.sp_lim_name.grid(row=0, column=0)
self.sp_lim_box = Checkbutton(self.sp_lim_title_frame,
variable=sp_lim_state,
command=self.sp_lim_toggle)
self.sp_lim_box.grid(row=0, column=1)
self.sp_lim_min_lab = Label(self, text=' Min')
self.sp_lim_min_lab.grid(row=0, column=0, sticky=W)
self.sp_lim_min_ent = Entry(self, width=4,
textvariable=sp_min, font=input_font)
self.sp_lim_min_ent.grid(row=0, column=1, sticky=W)
self.sp_lim_max_lab = Label(self, text=' Max')
self.sp_lim_max_lab.grid(row=0, column=2, sticky=W)
self.sp_lim_max_ent = Entry(self, width=4,
textvariable=sp_max, font=input_font)
self.sp_lim_max_ent.grid(row=0, column=3, sticky=W)
self.sp_lim_min_ent.bind('<Return>', self.enter_sp_lim)
self.sp_lim_max_ent.bind('<Return>', self.enter_sp_lim)
    def sp_lim_toggle(self, andupdate=True):
if self.sp_lim_state.get() == 1:
self.sp_lim_min_ent.configure(state=NORMAL)
self.sp_lim_min_lab.configure(state=NORMAL)
self.sp_lim_max_ent.configure(state=NORMAL)
self.sp_lim_max_lab.configure(state=NORMAL)
elif self.sp_lim_state.get() == 0:
self.sp_lim_min_ent.configure(state=DISABLED)
self.sp_lim_min_lab.configure(state=DISABLED)
self.sp_lim_max_ent.configure(state=DISABLED)
self.sp_lim_max_lab.configure(state=DISABLED)
if andupdate:
self.event_generate('<<update_magenta_graphs>>')
def enter_sp_lim(self, event):
self.event_generate('<<update_magenta_graphs>>')
class LocalPeakBox(LabelFrame):
'''The frame that allows users to specify the stimulus range to search
within for local peaks in splines.
'''
def __init__(self, loc_peak_state, peak_min, peak_max,
parent=None, text='Find Local Peak', padx=2, pady=2,
heading_font='TkDefaultFont', input_font='TkDefaultFont',
row=0, column=0, **kw):
self.loc_peak_state = loc_peak_state
self.peak_title_frame = Frame()
LabelFrame.__init__(self, parent, labelwidget=self.peak_title_frame,
padx=padx, pady=pady)
self.grid(row=row, column=column, sticky=EW)
self.peak_name = Label(self.peak_title_frame, text=text,
font=heading_font)
self.peak_name.grid(row=0, column=0)
self.peak_box = Checkbutton(self.peak_title_frame,
variable=loc_peak_state,
command=self.loc_peak_toggle)
self.peak_box.grid(row=0, column=1)
self.peak_btwn_lab1 = Label(self, text=' Between')
self.peak_btwn_lab1.grid(row=0, column=0)
self.peak_btwn_ent1 = Entry(self, width=4,
textvariable=peak_min,
font=input_font, state=DISABLED)
self.peak_btwn_ent1.grid(row=0, column=1, sticky=W)
self.peak_btwn_lab2 = Label(self, text='and')
self.peak_btwn_lab2.grid(row=0, column=2)
self.peak_btwn_ent2 = Entry(self, width=4,
textvariable=peak_max,
font=input_font, state=DISABLED)
self.peak_btwn_ent2.grid(row=0, column=3, sticky=W)
self.peak_btwn_ent1.bind('<Return>', self.enter_peak_btwn)
self.peak_btwn_ent2.bind('<Return>', self.enter_peak_btwn)
    def loc_peak_toggle(self, andupdate=True):
if self.loc_peak_state.get() == 1:
self.peak_btwn_ent1.configure(state=NORMAL)
self.peak_btwn_ent2.configure(state=NORMAL)
elif self.loc_peak_state.get() == 0:
self.peak_btwn_ent1.configure(state=DISABLED)
self.peak_btwn_ent2.configure(state=DISABLED)
if andupdate:
#self.event_generate('<<update_all_graphs>>')
self.event_generate('<<update_all_peaks>>')
self.event_generate('<<update_summary>>')
def enter_peak_btwn(self, event):
#self.event_generate('<<update_all_graphs>>')
self.event_generate('<<update_all_peaks>>')
self.event_generate('<<update_summary>>')
class ToleranceBox(LabelFrame):
'''The frame containing controls for Tolerance.'''
def __init__(self, tol_type, tol_drop, tol_floor, tol_absolute, tol_mode,
parent=None, text='Tolerance', padx=2, pady=0,
heading_font='TkDefaultFont', input_font='TkDefaultFont',
row=0, column=0, **kw):
LabelFrame.__init__(self, parent, text=text, padx=padx, pady=pady,
font=heading_font)
self.grid(row=row, column=column, sticky=EW)
self.tol_type = tol_type
self.tol_drop = tol_drop
self.tol_floor = tol_floor
self.tol_absolute = tol_absolute
self.tol_mode = tol_mode
self.tol_rel_sel = Radiobutton(self,
variable=self.tol_type,
value='relative',
command=self.change_tol_type)
self.tol_rel_sel.grid(row=0, column=0, sticky=E)
self.tol_rel_lab = Label(self, text='Drop from peak')
self.tol_rel_lab.grid(row=0, column=1, sticky=W)
self.tol_rel_ent = Entry(self, width=5,
textvariable=self.tol_drop, font=input_font)
self.tol_rel_ent.grid(row=0, column=2, sticky=W)
self.tol_floor_lab = Label(self, text='Floor')
self.tol_floor_lab.grid(row=1, column=1, sticky=E)
self.tol_floor_ent = Entry(self, width=5,
textvariable=self.tol_floor,
font=input_font)
self.tol_floor_ent.grid(row=1, column=2, sticky=W)
self.tol_abs_sel = Radiobutton(self,
variable=self.tol_type,
value='absolute',
command=self.change_tol_type)
self.tol_abs_sel.grid(row=2, column=0, sticky=E)
self.tol_abs_zone = Frame(self)
self.tol_abs_zone.grid(row=2, column=1, columnspan=2, sticky=W)
self.tol_abs_lab = Label(self.tol_abs_zone, text='At set value')
self.tol_abs_lab.grid(row=0, column=0, sticky=W)
self.tol_abs_ent = Entry(self.tol_abs_zone, width=5,
textvariable=self.tol_absolute,
font=input_font)
self.tol_abs_ent.grid(row=0, column=1, sticky=W)
self.tol_mode_zone = Frame(self)
self.tol_mode_zone.grid(row=3, column=0, sticky=W, columnspan=3)
self.tol_mode_lab = Label(self.tol_mode_zone, text='Mode')
self.tol_mode_lab.grid(row=0, column=0, sticky=E)
self.tol_mode_broad = Radiobutton(
self.tol_mode_zone, text='Broad', variable=self.tol_mode,
value='broad',
command=self.change_tol_mode)
# command=lambda: self.event_generate('<<update_all_graphs>>'))
self.tol_mode_broad.grid(row=0, column=1, sticky=W)
self.tol_mode_stct = Radiobutton(
self.tol_mode_zone, text='Strict', variable=tol_mode,
value='strict',
command=self.change_tol_mode)
self.tol_mode_stct.grid(row=0, column=2, sticky=W)
self.tol_rel_ent.bind('<Return>', self.enter_tol_setting)
self.tol_floor_ent.bind('<Return>', self.enter_tol_setting)
self.tol_abs_ent.bind('<Return>', self.enter_tol_setting)
self.change_tol_type(andupdate=False)
def change_tol_type(self, andupdate=True):
if self.tol_type.get() == 'relative':
self.tol_rel_lab.configure(state=NORMAL)
self.tol_rel_ent.configure(state=NORMAL)
self.tol_floor_lab.configure(state=NORMAL)
self.tol_floor_ent.configure(state=NORMAL)
self.tol_abs_lab.configure(state=DISABLED)
self.tol_abs_ent.configure(state=DISABLED)
elif self.tol_type.get() == 'absolute':
self.tol_rel_lab.configure(state=DISABLED)
self.tol_rel_ent.configure(state=DISABLED)
self.tol_floor_lab.configure(state=DISABLED)
self.tol_floor_ent.configure(state=DISABLED)
self.tol_abs_lab.configure(state=NORMAL)
self.tol_abs_ent.configure(state=NORMAL)
if andupdate:
#self.event_generate('<<update_all_graphs>>')
self.event_generate('<<update_all_tolerances>>')
self.event_generate('<<update_summary>>')
def enter_tol_setting(self, event):
#self.event_generate('<<update_all_graphs>>')
self.event_generate('<<update_all_tolerances>>')
self.event_generate('<<update_summary>>')
def change_tol_mode(self):
self.event_generate('<<update_all_graphs>>')
self.event_generate('<<update_summary>>')
class StrengthBox(LabelFrame):
'''The frame that allows users to change between Strength types.'''
def __init__(self, strength_mode,
parent=None, text='Strength', padx=2, pady=2,
heading_font='TkDefaultFont', row=0, column=0, **kw):
LabelFrame.__init__(self, parent, text=text, padx=padx, pady=pady,
font=heading_font)
self.grid(row=row, column=column, sticky=EW)
self.columnconfigure(0, weight=1)
self.strength_options = ('Height-Dependent', 'Height-Independent')
self.strength_selector = OptionMenu(
self, strength_mode, *self.strength_options,
command=self.update_summary_event)
self.strength_selector.grid(row=0, column=0, sticky=EW)
def update_summary_event(self, strength_option):
self.event_generate('<<update_summary>>')
class ControlPanel(Frame):
'''Control Panel contains all the stat readouts and the adjustable
settings, including the smoothing parameter box, the summary box, the view
settings, etc.
Inputs include variable names for all of the settings.
'''
def __init__(self, heading_font, input_font, summary_font, current_sp,
view_names, view_pts, view_pandtol, view_spline, view_se,
sp_lim, sp_min, sp_max,
loc_peak, peak_min, peak_max,
tol_type, tol_drop, tol_floor, tol_absolute, tol_mode,
strength_mode, parent=None, platform=platform, **kw):
Frame.__init__(self, parent, relief=SUNKEN, bd=1, padx=7, pady=7)
self.smoothing_box = SmoothingBox(self, heading_font=heading_font,
input_font=input_font,
current_sp=current_sp, row=0)
self.summary_box = SummaryBox(self, row=1, heading_font=heading_font,
summary_font=summary_font)
spacer_text = '--Settings--'
if platform == 'linux':
spacer_text = '\n' + spacer_text
self.set_lab = Label(self, text=spacer_text, pady=0, font=heading_font)
self.set_lab.grid(row=2, column=0)
self.view_box = ViewBox(self, row=3,
view_names_var=view_names,
view_pts_var=view_pts,
view_pandtol_var=view_pandtol,
view_spline_var=view_spline,
view_se_var=view_se,
heading_font=heading_font)
self.smoothing_limits_box = SmoothingLimitsBox(
self, row=4, heading_font=heading_font, input_font=input_font,
sp_lim_state=sp_lim, sp_min=sp_min, sp_max=sp_max)
self.peak_box = LocalPeakBox(parent=self, loc_peak_state=loc_peak,
peak_min=peak_min, peak_max=peak_max,
heading_font=heading_font,
input_font=input_font, row=5)
self.tolerance_box = ToleranceBox(parent=self, tol_type=tol_type,
tol_drop=tol_drop,
tol_floor=tol_floor,
tol_absolute=tol_absolute,
tol_mode=tol_mode,
heading_font=heading_font,
input_font=input_font,
row=6)
self.strength_box = StrengthBox(parent=self,
strength_mode=strength_mode,
heading_font=heading_font, row=7)
def update_summary(self, individual=None,
strength_mode=None, tol_mode=None):
self.summary_box.update_summary(individual, strength_mode, tol_mode)
def activate(self):
self.smoothing_box.activate()
self.active_mode = 'activated'
class FileMenu(Menubutton):
'''Defines the File menu at the top of the screen (and accompanying
functions).
'''
def __init__(self, file_opt, parent=None, row=0, column=0):
Menubutton.__init__(self, parent, text='File')
self.grid(row=row, column=column, sticky=W)
self.file_opt = file_opt
self.parent = parent
self.primary_menu = Menu(self, tearoff=0)
self.open_menu = Menu(self, tearoff=0)
self.open_menu.add_command(label='Horizontal...',
command=self.open_horizontal_file)
self.open_menu.add_command(label='Vertical...',
command=self.open_vertical_file)
self.primary_menu.add_cascade(label='Open Data File',
menu=self.open_menu)
self.primary_menu.add_separator()
self.primary_menu.add_command(label='Load Smoothing Values...',
command=self.open_sp,
state=DISABLED)
self.primary_menu.add_command(label='Save Smoothing Values...',
command=self.save_sp,
state=DISABLED)
self.primary_menu.add_command(label='Clear Smoothing Values',
command=self.clear_sps,
state=DISABLED)
self.primary_menu.add_separator()
self.primary_menu.add_command(label='Load Previous Settings',
command=self.open_sett)
self.primary_menu.add_command(label='Save Current Settings',
command=self.save_sett)
self.primary_menu.add_command(label='Restore Default Settings',
command=self.reset_sett)
self.primary_menu.add_separator()
self.primary_menu.add_command(label='Output Spline Figures...',
command=self.output_graphs,
state=DISABLED)
self.primary_menu.add_command(label='Output Spline Summaries...',
command=self.output_summaries,
state=DISABLED)
self.primary_menu.add_command(label='Output Spline Points...',
command=self.output_points,
state=DISABLED)
self.primary_menu.add_command(label='Output Tolerance Points...',
command=self.output_tol,
state=DISABLED)
self.primary_menu.add_separator()
self.primary_menu.add_command(label='Quit', command=self.quit)
self['menu'] = self.primary_menu
def activate_menu_options(self):
self.primary_menu.entryconfigure(2, state=NORMAL)
self.primary_menu.entryconfigure(3, state=NORMAL)
self.primary_menu.entryconfigure(4, state=NORMAL)
self.primary_menu.entryconfigure(10, state=NORMAL)
self.primary_menu.entryconfigure(11, state=NORMAL)
self.primary_menu.entryconfigure(12, state=NORMAL)
self.primary_menu.entryconfigure(13, state=NORMAL)
def _check_missing_stim(self, is_vertical=0):
'''Used when opening a new file. Checks whether any x-axis values
are missing.
'''
r = robjects.r
if not is_vertical:
stim_column = '1'
else:
stim_column = 'stim.column'
missing_stim = int(r('as.numeric(InCheck(NA, mydata[, %s]))'
% stim_column)[0])
if missing_stim:
error_text = ("Could not open the data file because there "
"seems to be one or more missing stimulus "
"values.")
messagebox.showerror('Error', error_text)
self.event_generate('<<add_message>>', x=106)
return False
else:
return True
def _check_num_datapoints(self, is_vertical):
'''Used when opening a new file. Checks whether there are enough data
points.
'''
r = robjects.r
r("minimum_datapoints <- 10")
if not is_vertical:
min_pts = int(r("""for (n in 1:length(name.vect)) {
response <- mydata[, (n + 1)]
num_datapoints = sum(!is.na(response))
minimum_datapoints <- min(minimum_datapoints,
num_datapoints)
}
return(minimum_datapoints)
""")[0])
else:
min_pts = int(r("""for (n in 1:length(name.vect)) {
response <- mydata[, resp.column][which(
mydata[, id.column] == name.vect[n])]
num_datapoints = sum(!is.na(response))
minimum_datapoints <- min(minimum_datapoints,
num_datapoints)
}
return(minimum_datapoints)
""")[0])
if min_pts < 3:
self.event_generate('<<add_message>>', x=103)
error_text = ("Not enough data to work with. PFunc needs a "
"minimum of three data points to make a single "
"spline. Make sure that each individual has at "
"least three responses.\n\n"
"If you are trying to make a preference function "
"by combining responses from different individuals, "
"then you should group those individuals together "
"in your data file. See README file "
"(or Help) for more."
)
messagebox.showerror('Error', error_text)
return False
return True
def check_data_formatting(self, datafile=None):
'''Used when opening a new data file. Checks whether file is .csv'''
if datafile is None:
return False
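        # Read the file with read.csv; if everything lands in a single
        # column, retry as tab-delimited with read.delim. Still having one
        # column is treated as a formatting problem.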
data_formatted_correctly = int(r("""mydata <- read.csv("%s")
if (ncol(mydata) == 1){
mydata <- read.delim("%s")
}
if (ncol(mydata) == 1){
return("0")
} else {
return("1")
}
""" % (datafile.name,
datafile.name))[0])
if not data_formatted_correctly:
error_text = ("The data file you selected is not formatted "
"correctly.\n\nMake sure it is saved as a "
".csv file.")
messagebox.showerror('Error', error_text)
self.event_generate('<<add_message>>', x=104)
return False
else:
return True
def open_horizontal_file(self):
datafile = filedialog.askopenfile(mode='r', **self.file_opt)
if self.check_data_formatting(datafile):
r = robjects.r
r("name.vect = names(mydata)[2: ncol(mydata)]")
is_vertical = 0
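            # Note: '&' (not 'and') means both checks always run, so each
            # can post its own error message if something is wrong.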
if (self._check_missing_stim(is_vertical) &
self._check_num_datapoints(is_vertical)):
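                # Compute shared axis limits for all graphs by padding the
                # stimulus and response ranges slightly (the top of the
                # y-axis gets extra headroom).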
r("""
max.resp <- max(mydata[ , 2:ncol(mydata)], na.rm = TRUE)
min.resp <- min(mydata[ , 2:ncol(mydata)], na.rm = TRUE)
resp.range <- max.resp - min.resp
max.y <- max.resp + (0.0375 * resp.range * 2)
min.y <- min.resp - (0.0375 * resp.range * 1)
max.stim <- max(mydata[ , 1], na.rm = TRUE)
min.stim <- min(mydata[ , 1], na.rm = TRUE)
stim.range <- max.stim - min.stim
max.x <- max.stim + (0.0375 * stim.range * 1)
min.x <- min.stim - (0.0375 * stim.range * 1)
range.bundle <- c(min.x, max.x, min.y, max.y)
""")
self.event_generate('<<open_data_file>>', x=is_vertical)
def open_vertical_file(self):
datafile = filedialog.askopenfile(mode='r', **self.file_opt)
if self.check_data_formatting(datafile):
self.id_column = StringVar()
self.stim_column = StringVar()
self.resp_column = StringVar()
popup = DataDefiner(datafile, self.id_column, self.stim_column,
self.resp_column, return_to=self,
parent=self.parent.parent)
def open_vertical_file2(self):
r = robjects.r
r("id.column <- which(names(mydata) == '%s')" % self.id_column.get())
r("stim.column <- which(names(mydata) == '%s')"
% self.stim_column.get())
r("resp.column <- which(names(mydata) == '%s')"
% self.resp_column.get())
r("""name.vect <- vector()
for (r in 1:nrow(mydata)){
ind.id <- as.character(mydata[, id.column][r])
if (!InCheck(ind.id, name.vect)){
name.vect = append(name.vect, ind.id)
}
}
""")
is_vertical = 1
if (self._check_missing_stim(is_vertical) &
self._check_num_datapoints(is_vertical)):
r("""
max.resp <- max(mydata[, resp.column])
min.resp <- min(mydata[, resp.column])
resp.range <- max.resp - min.resp
max.y <- max.resp + 0.0375 * resp.range
min.y <- min.resp - 0.0375 * resp.range
max.stim <- max(mydata[, stim.column])
min.stim <- min(mydata[, stim.column])
stim.range <- max.stim - min.stim
max.x <- max.stim + 0.0375 * stim.range
min.x <- min.stim - 0.0375 * stim.range
range.bundle <- c(min.x, max.x, min.y, max.y)
""")
self.event_generate('<<open_data_file>>', x=is_vertical)
def open_sp(self):
self.event_generate('<<open_smoothing_file>>')
def save_sp(self):
self.event_generate('<<save_smoothing_values>>')
def clear_sps(self):
self.event_generate('<<clear_smoothing_values>>')
def open_sett(self):
self.event_generate('<<load_settings>>')
def save_sett(self):
self.event_generate('<<save_settings>>')
def reset_sett(self):
self.event_generate('<<reset_settings>>')
def output_graphs(self):
self.event_generate('<<output_graphs>>')
def output_summaries(self):
self.event_generate('<<output_summaries>>')
def output_points(self):
self.event_generate('<<output_points>>')
def output_tol(self):
self.event_generate('<<output_tol>>')
def quit(self):
self.event_generate('<<quit>>')
class AdvancedMenu(Menubutton):
'''Defines the Advanced menu at the top of the screen (and accompanying
functions).
'''
def __init__(self, parent=None, row=0, column=0):
Menubutton.__init__(self, parent, text='Advanced')
self.grid(row=row, column=column, sticky=W)
self.primary_menu = Menu(self, tearoff=0)
self.primary_menu.add_command(label='Show Message Log',
command=self.message_log)
self.primary_menu.add_command(label='Construct Group-Level Spline...',
command=self.construct_group_spline,
state=DISABLED)
self['menu'] = self.primary_menu
def activate_menu_options(self):
self.primary_menu.entryconfigure(1, state=NORMAL)
def message_log(self):
self.event_generate('<<open_message_log>>')
def construct_group_spline(self):
self.event_generate('<<open_group_spline_window>>')
class HelpMenu(Menubutton):
'''Defines the Help menu at the top of the screen (and accompanying
functions).
'''
def __init__(self, parent=None, row=0, column=0):
Menubutton.__init__(self, parent, text='Help')
self.grid(row=row, column=column, sticky=W)
self.primary_menu = Menu(self, tearoff=0)
self.primary_menu.add_command(label='Help', command=self.open_help)
self.primary_menu.add_command(label='About', command=self.about_window)
self['menu'] = self.primary_menu
def about_window(self):
self.event_generate('<<create_about_window>>')
def open_help(self):
if 'README.pdf' in listdir():
if platform == 'win32':
startfile('README.pdf')
elif platform == 'darwin':
                subprocess.call(['open', 'README.pdf'])
else: # linux
subprocess.call(['xdg-open', 'README.pdf'])
else:
warning_text = ("PFunc failed to locate and open README.pdf. "
"You can download a copy of this help file "
"from github.com/joccalor/pfunc")
self.warning = messagebox.showwarning('Warning', warning_text)
class MenuBar(Frame):
'''Defines the entire menu bar at the top of the screen.'''
def __init__(self, file_opt, parent=None, row=0, column=0):
Frame.__init__(self, parent)
self.parent = parent
self.grid(row=row, column=column, sticky=EW, columnspan=2)
self.columnconfigure(3, weight=1)
self.file_menu = FileMenu(parent=self, file_opt=file_opt)
self.advc_menu = AdvancedMenu(self, column=1)
self.help_menu = HelpMenu(self, column=2)
def activate(self):
self.file_menu.activate_menu_options()
self.advc_menu.activate_menu_options()
class PFuncToplevel(Toplevel):
'''A generic popup window for PFunc (a superclass)'''
def __init__(self, parent=None, **kw):
Toplevel.__init__(self, parent, takefocus=True, **kw)
try:
img = PhotoImage(file='PFuncIcon.gif')
self.tk.call('wm', 'iconphoto', self._w, img)
except:
            pass  # The window icon is optional; continue without it.
class DataDefiner(PFuncToplevel):
'''Used in opening a vertical file. Asks users to specify which columns
of the data contain certain data types.
'''
def __init__(self, datafile, id_column, stim_column, resp_column,
return_to, parent=None, **kw):
PFuncToplevel.__init__(self, parent)
self.return_to = return_to
self.datafile = datafile
self.transient(parent)
rootWd = int(parent.winfo_width()) / 4
rootHt = int(parent.winfo_height()) / 3
self.XPos = int(parent.winfo_geometry().split('+')[1]) + rootWd
self.YPos = int(parent.winfo_geometry().split('+')[2]) + rootHt
self.geometry('+%d+%d' % (self.XPos, self.YPos))
self.name_label = Label(self, text='Individual IDs: ')
self.name_label.grid(row=0, column=0)
self.xdata_label = Label(self, text='Stimuli (x-axis): ')
self.xdata_label.grid(row=1, column=0)
self.ydata_label = Label(self, text='Responses (y-axis): ')
self.ydata_label.grid(row=2, column=0)
self.column_names = list(r("names(mydata)"))
self.id_column = id_column
self.stim_column = stim_column
self.resp_column = resp_column
self.id_column.set('Select')
self.stim_column.set('Select')
self.resp_column.set('Select')
self.column_menu1 = OptionMenu(self, self.id_column,
*self.column_names)
self.column_menu1.grid(row=0, column=1)
self.column_menu2 = OptionMenu(self, self.stim_column,
*self.column_names)
self.column_menu2.grid(row=1, column=1)
self.column_menu3 = OptionMenu(self, self.resp_column,
*self.column_names)
self.column_menu3.grid(row=2, column=1)
self.spacer = Frame(self)
self.spacer.grid(row=3, column=0, columnspan=2)
self.okay_butt = Button(self, text='Okay', command=self.okay)
self.okay_butt.grid(row=4, column=0)
self.cancel_butt = Button(self, text='Cancel', command=self.cancel)
self.cancel_butt.grid(row=4, column=1)
self.column_defs = {'name': self.id_column,
'stim': self.stim_column,
'resp': self.resp_column}
def cancel(self):
self.destroy()
def okay(self):
if self.id_column.get() != 'Select' and\
self.id_column.get() != self.stim_column.get() and\
self.id_column.get() != self.resp_column.get() and\
self.stim_column.get() != 'Select' and\
self.stim_column.get() != self.resp_column.get() and\
self.resp_column.get() != 'Select':
self.destroy()
self.return_to.open_vertical_file2()
else:
warning_text = ("You must select columns in your data that "
"correspond to each of these three categories.")
self.warning = messagebox.showwarning('Warning', warning_text)
class GroupSplineWindow(PFuncToplevel):
'''Used for combining multiple splines into one group-level spline. Users
tell PFunc which individuals should be part of the group.
'''
def __init__(self, parent, individual_dict, combomode, input_font, **kw):
self.parent = parent
PFuncToplevel.__init__(self, self.parent)
self.transient(self.parent)
self.individual_dict = individual_dict
self.combomode = combomode
self.columnconfigure(0, weight=1)
self.rowconfigure(2, weight=1)
rootWd = int(parent.winfo_width()) / 2
rootHt = int(parent.winfo_height()) / 2
reqWd = int(self.winfo_reqwidth())
reqHt = int(self.winfo_reqheight())
XPos = int(parent.winfo_geometry().split('+')[1]) + rootWd - reqWd
YPos = int(parent.winfo_geometry().split('+')[2]) + rootHt - reqHt
self.geometry('+%d+%d' % (XPos, YPos))
self.newname = StringVar()
self.newname.set('spline%s' % str(len(individual_dict) + 1))
self.instructions = ("Select the individuals to be used\n"
"in this group-level spline.\n\n"
"Click and drag to select multiple\n"
"individuals at once. Hold down\n"
"ctrl to add or subtract individuals\n"
"from your selection.")
self.instruction_box = Label(self, text=self.instructions,
justify=LEFT, padx=5, pady=5)
self.instruction_box.grid(row=0, column=0, sticky=W)
self.namebox = Frame(self, pady=10, padx=20)
self.namebox.grid(row=1, column=0, sticky=EW)
self.namebox.columnconfigure(1, weight=1)
self.newname_lab = Label(self.namebox, text='Name')
self.newname_lab.grid(row=1, column=0, sticky=EW)
self.newname_ent = Entry(self.namebox, textvariable=self.newname,
width=15, font=input_font)
self.newname_ent.grid(row=1, column=1, sticky=W)
self.listframe = Frame(self, padx=20)
self.listframe.grid(row=2, column=0, sticky=NSEW)
self.listframe.columnconfigure(0, weight=1)
self.listframe.rowconfigure(0, weight=1)
self.namestring = ''
for i in self.individual_dict:
self.namestring = (self.namestring +
self.individual_dict[i].name + ' ')
self.namestring = self.namestring[:-1]
self.names = StringVar()
self.names.set(self.namestring)
self.listscroll = Scrollbar(self.listframe, orient=VERTICAL)
self.listscroll.grid(row=0, column=1, sticky=NS+W)
self.listbox = Listbox(self.listframe, listvariable=self.names,
height=15, selectmode=EXTENDED,
yscrollcommand=self.listscroll.set,
font=input_font)
self.listbox.grid(row=0, column=0, sticky=NSEW)
self.listscroll['command'] = self.listbox.yview
self.okayframe = Frame(self, padx=20, pady=5)
self.okayframe.grid(row=3, column=0)
self.okay_butt = Button(self.okayframe, text='Okay', command=self.okay)
self.okay_butt.grid(row=0, column=0, sticky=E)
self.cancel_butt = Button(self.okayframe, text='Cancel',
command=self.cancel)
self.cancel_butt.grid(row=0, column=1, sticky=W)
self.event_generate('<<open_message_log>>')
def cleanup_name(self):
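        # Sanitize the user-supplied group name so it can be used as an R
        # data.frame column name: it must start with a letter, and special
        # characters are replaced with underscores.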
name = self.newname.get()
new_name = ""
alphabet = "abcdefghijklmnopqrstuvwxyz"
        replace_with_underscore = """ <>()[]{}#"'=+-!@#$%^&*`~,\\|/?"""
if name[0].lower() not in alphabet:
name = 'x' + name[:]
for c in name:
if c in replace_with_underscore:
new_name += '_'
else:
new_name += c
self.newname.set(new_name)
def cancel(self):
self.destroy()
def okay(self):
self.cleanup_name()
self.output_dict = {'name': self.newname.get(), 'individual_nums': [],
'method': self.combomode.get(), }
for i in self.listbox.curselection():
self.output_dict['individual_nums'].append(i+1)
numsExist = len(self.output_dict['individual_nums']) > 0
namesExist = len(self.output_dict['name']) > 0
if numsExist and namesExist:
self.combine_spline()
self.destroy()
def combine_spline(self):
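        # Build an R data frame from the selected splines. With method
        # 'none', the individual spline points are stacked in long format;
        # otherwise the responses at each stimulus value are combined with
        # the R function named by combomode.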
r('mylist <- list()')
for i in self.listbox.curselection():
tempind = self.individual_dict[i + 1]
tempx = str(tempind.spline_x.r_repr())
tempy = str(tempind.spline_y.r_repr())
r("""mylist$%s <- list('xvals' = %s,
'yvals' = %s)""" % (tempind.name, tempx, tempy))
if self.combomode.get() == 'none':
r("""
xvalues <- vector()
yvalues <- vector()
names <- vector()
for (i in 1:length(mylist)) {
xvalues <- c(xvalues, mylist[[i]]$xvals)
yvalues <- c(yvalues, mylist[[i]]$yvals)
names <- c(names, rep(names(mylist)[i],
length(mylist[[i]]$xvals)))
}
mydf <- data.frame(names=names, xvalues=xvalues,
%s=yvalues, stringsAsFactors=FALSE)
""" % self.newname.get())
else:
r("""
mydf <- data.frame(xvals=NA)
for(i in mylist){
for(j in i$xvals){
if(InCheck(j, mydf$xvals)){
} else {
mydf <- rbind(mydf, j)
}
}
}
for(i in 1:length(mylist)){
mydf <- cbind(mydf, NA)
names(mydf)[i+1] <- paste('col', i, sep='')
for(j in 1:length(mylist[[i]]$xvals)){
row <- which(mydf$xvals == mylist[[i]]$xvals[j])
mydf[row, i+1] <- mylist[[i]]$yvals[j]
}
}
mydf <- mydf[2:nrow(mydf), ]
mydf <- cbind(mydf, NA, NA)
names(mydf)[length(names(mydf))-1] <- "n"
names(mydf)[length(names(mydf))] <- "%s"
for(i in 1:nrow(mydf)){
jvec <- vector()
for(j in 2:(ncol(mydf)-2)){
if(!is.na(mydf[i, j])){
jvec[length(jvec) + 1] <- mydf[i, j]
}
mydf$n[i] <- length(jvec)
mydf$%s[i] <- %s(jvec)
}
}
""" % (self.newname.get(), self.newname.get(),
self.combomode.get()))
self.parent.event_generate('<<add_group_spline>>')
class PFuncMessages(PFuncToplevel):
'''Defines the popup window of messages that users can access from the
Advanced menu.
'''
def __init__(self, parent, messages, *kw):
self.parent = parent
PFuncToplevel.__init__(self, self.parent)
self.messages = messages
self.title('PFunc Message Log')
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.logArea = Text(self, height=8, width=32, wrap=WORD)
self.logArea.insert(END, self.messages.get())
self.logArea.grid(row=0, column=0, sticky=NSEW)
self.logArea.tag_add('message_tag', '@0,0', END)
self.logArea.tag_config('message_tag', lmargin2='32p')
self.logArea.see(END)
self.logScroll = Scrollbar(self, orient=VERTICAL,
command=self.logArea.yview)
self.logScroll.grid(row=0, column=1, sticky=NS+E)
self.logArea['yscrollcommand'] = self.logScroll.set
self.logArea.configure(state=DISABLED)
self._establish_placement()
def _establish_placement(self):
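        # Position the log window next to the main PFunc window, on
        # whichever side of the screen has more free space.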
screenWd = int(self.parent.winfo_screenwidth())
reqWd = int(self.winfo_reqwidth())
reqHt = int(self.winfo_reqheight())
rootWd = int(self.parent.winfo_width())
rootHt = int(self.parent.winfo_height()) / 2
root_leftbound = int(self.parent.winfo_geometry().split('+')[1])
root_rightbound = int(root_leftbound + rootWd)
left_space = root_leftbound
right_space = screenWd - root_rightbound
if right_space > left_space:
xOption1 = root_rightbound + 20
xOption2 = screenWd - reqWd - 20
xPos = min(xOption1, xOption2)
else:
xOption1 = 20
xOption2 = root_leftbound - reqWd - 50
xPos = max(xOption1, xOption2)
yPos = int(self.parent.winfo_geometry().split('+')[2]) + rootHt - reqHt
self.geometry('+%d+%d' % (xPos, yPos))
def add_message(self, message_string):
self.logArea.configure(state=NORMAL)
self.logArea.insert(END, message_string)
self.logArea.tag_add('message_tag', '@0,0', END)
self.logArea.tag_config('message_tag', lmargin2='32p')
self.logArea.see(END)
self.logArea.configure(state=DISABLED)
class AboutWindow(PFuncToplevel):
def __init__(self, parent, title_font, *kw):
PFuncToplevel.__init__(self, padx=5, pady=5)
self.parent = parent
self.title_font = title_font
self.transient()
self.title('About PFunc')
self.initiate_text()
self.place_elements()
self.set_geometry()
def initiate_text(self):
self.title_text = 'PFunc'
self.subtitle_text = ('A tool for analyzing preference functions and '
'other function-valued traits.\n')
self.version_text = 'version 1.0.0 \n (2017-05-18)\n'
self.copyright_text = ('Copyright (C) 2016, 2017 Joseph Kilmer \n\n'
'PFunc is distributed under the GNU General '
'Public License v3. A full copy of\n'
'the license is available in the accompanying '
'file called COPYING.txt.\n\n'
'PFunc is free software: you can redistribute '
'it and/or modify it under the\n'
'terms of the GNU General Public License as '
'published by the Free Software\n'
'Foundation, either version 3 of the License, '
'or (at your option) any\n'
'later version.\n\n'
'PFunc is distributed in the hope that it will '
'be useful, but WITHOUT ANY\n'
'WARRANTY; without even the implied warranty '
'of MERCHANTABILITY or FITNESS FOR\n'
'A PARTICULAR PURPOSE. See the GNU General '
'Public License for more details.\n\n'
'You should have received a copy of the GNU '
'General Public License along with\n'
'this program. If not, see '
'http://www.gnu.org/licenses/.\n')
def place_elements(self):
try:
img = PhotoImage(file='PFuncIcon.gif')
self.pfunc_logo = Label(self, image=img)
self.pfunc_logo.image = img
self.pfunc_logo.grid(row=0, column=0)
except:
            pass
self.title = Label(self, text=self.title_text, font=self.title_font)
self.title.grid(row=1, column=0)
self.subtitle = Label(self, text=self.subtitle_text)
self.subtitle.grid(row=2, column=0)
self.version = Label(self, text=self.version_text)
self.version.grid(row=3, column=0)
self.copyright = Label(self, text=self.copyright_text)
self.copyright.grid(row=4, column=0)
self.closebutton = Button(self, text='Close', command=self.destroy)
self.closebutton.grid(row=5, column=0)
def set_geometry(self):
rootWd = int(self.parent.root.winfo_width()) / 2
rootHt = int(self.parent.root.winfo_height()) / 2
reqWd = int(self.winfo_reqwidth())
reqHt = int(self.winfo_reqheight())
XPos = (int(self.parent.root.winfo_geometry().split('+')[1]) +
rootWd - reqWd)
YPos = (int(self.parent.root.winfo_geometry().split('+')[2]) +
rootHt - reqHt)
self.geometry('+%d+%d' % (XPos, YPos))
class MainApp():
'''This is the wrapper for the whole program. It contains and governs
all the individual pieces.
'''
def __init__(self):
self.root = Tk()
try:
self.root.config(cursor='wait')
except:
self.root.config(cursor='watch')
self._setup_fonts()
self._setup_dicts()
self._setup_variables()
self._setup_message_lookup()
self._setup_R()
self._setup_file_opt()
self._setup_event_bindings()
self._setup_window_geometry()
self.settings_to_default()
self.menu_bar = MenuBar(file_opt=self.file_opt, parent=self.root)
self.graph_zone = GraphArea(self.individual_dict, self.current_col,
self.current_page, self.view_names,
self.view_pts, self.view_pandtol,
self.view_spline, self.view_se,
self.tol_mode,
self.input_font, parent=self.root)
self.graph_zone.grid(row=1, column=0, sticky=NSEW)
self.control_panel = ControlPanel(heading_font=self.heading_font,
input_font=self.input_font,
summary_font=self.summary_font,
current_sp=self.current_sp,
view_names=self.view_names,
view_pts=self.view_pts,
view_pandtol=self.view_pandtol,
view_spline=self.view_spline,
view_se=self.view_se,
sp_lim=self.sp_lim,
sp_min=self.sp_min,
sp_max=self.sp_max,
loc_peak=self.loc_peak,
peak_min=self.peak_min,
peak_max=self.peak_max,
tol_type=self.tol_type,
tol_drop=self.tol_drop,
tol_floor=self.tol_floor,
tol_absolute=self.tol_absolute,
tol_mode=self.tol_mode,
strength_mode=self.strength_mode,
parent=self.root)
self.control_panel.grid(row=1, column=1, sticky=NSEW)
self.root.title('PFunc')
try:
img = PhotoImage(file='PFuncIcon.gif')
self.root.tk.call('wm', 'iconphoto', self.root._w, img)
except:
            pass
self.root.event_generate('<<add_message>>', x=100)
self.root.config(cursor='')
def _setup_fonts(self):
self.default_font = tkFont.nametofont('TkDefaultFont')
self.small_font = self.default_font.copy()
self.heading_font = self.default_font.copy()
self.summary_font = tkFont.nametofont('TkFixedFont')
self.summary_font.configure(size=9)
self.input_font = self.default_font.copy()
self.about_font1 = self.default_font.copy()
self.small_font.configure(size=8)
self.heading_font.configure(size=9)
self.about_font1.configure(size=20)
if platform == 'win32':
self.default_font.configure(size=8)
self.small_font.configure(size=7)
self.heading_font.configure(size=8)
self.summary_font = self.default_font.copy()
elif platform == 'darwin':
self.default_font.configure(size=10)
self.small_font.configure(size=9)
self.heading_font.configure(size=11)
self.input_font.configure(size=10)
self.summary_font = self.input_font
# self.summary_font = tkFont.nametofont('TkTextFont')
# self.summary_font.configure(size=10)
else: # including platform == 'linux'
self.input_font = tkFont.nametofont('TkTextFont')
def _setup_dicts(self):
self.sp_dict = {} # A dictionary of smoothing parameters
self.individual_dict = {} # A dictionary of PrefFunc objects
def _setup_variables(self):
self.view_pts = IntVar()
self.view_pandtol = IntVar()
self.view_spline = IntVar()
self.view_names = IntVar()
self.view_se = IntVar()
self.sp_lim = IntVar()
self.sp_min = StringVar()
self.sp_max = StringVar()
self.loc_peak = IntVar()
self.peak_min = StringVar()
self.peak_max = StringVar()
self.tol_type = StringVar()
self.tol_drop = StringVar()
self.tol_absolute = StringVar()
self.tol_mode = StringVar()
self.tol_floor = StringVar()
self.strength_mode = StringVar()
self.combomode = StringVar()
self.messages = StringVar()
self.current_sp = StringVar()
self.current_page = IntVar()
self.current_col = IntVar()
self.file_type = StringVar()
self.current_page.set(0)
self.vertColResp = StringVar()
def _setup_message_lookup(self):
self.message_lookup = {}
self.message_lookup[100] = ("Welcome to PFunc. Open a data file to "
"begin. See the Help menu or the README "
"file for help.")
self.message_lookup[101] = "Cleared previous smoothing values."
self.message_lookup[102] = "Opened a new file."
self.message_lookup[103] = ("Refused to open file because "
"one or more individuals had fewer than "
"three data points.")
self.message_lookup[104] = ("Failed to open file because it was not "
"properly formatted.")
self.message_lookup[105] = ("One or more individuals in this "
"dataset have fewer than 10 data points. "
"Consider lowering the minimum smoothing "
"value limit.")
self.message_lookup[106] = ("Failed to open file because there are "
"fewer stimuli than responses.")
def _setup_R(self):
        current_directory = StringVar()  # For some reason it must be StringVar
current_directory.set(getcwd())
if platform == 'win32':
current_directory.set(path.dirname(path.realpath(argv[0])))
current_directory.set(current_directory.get().replace("\\", "/"))
r("setwd('%s')" % current_directory.get())
r("source('PFunc_RCode.R')")
def _setup_file_opt(self):
self.file_opt = {}
self.file_opt['defaultextension'] = '.csv'
self.file_opt['filetypes'] = [('all files', '.*'),
('csv files', '.csv'),
('text files', '.txt')]
self.file_opt['parent'] = self.root
self.file_opt['title'] = 'Select a file...'
def _setup_event_bindings(self):
self.root.bind('<<open_data_file>>', self.open_data_file)
self.root.bind('<<update_summary>>', self.update_summary)
self.root.bind('<<clear_display>>', self.clear_display)
self.root.bind('<<update_sp>>', self.update_sp)
self.root.bind('<<loosen>>', self.loosen)
self.root.bind('<<stiffen>>', self.stiffen)
self.root.bind('<<reset_sp>>', self.reset_sp)
self.root.bind('<<enter_sp>>', self.enter_sp)
self.root.bind('<<update_all_graphs>>', self.update_all_graphs)
self.root.bind('<<update_all_peaks>>', self.update_all_peaks)
self.root.bind('<<update_all_tolerances>>', self.update_all_tolerances)
self.root.bind('<<update_magenta_graphs>>', self.update_magenta_graphs)
self.root.bind('<<open_message_log>>', self.open_message_log)
self.root.bind('<<add_message>>', self.add_message)
self.root.bind('<<open_group_spline_window>>',
self.open_group_spline_window)
self.root.bind('<<add_group_spline>>', self.add_group_spline)
self.root.bind('<<open_smoothing_file>>', self.open_smoothing_file)
self.root.bind('<<save_smoothing_values>>', self.save_smoothing_values)
self.root.bind('<<clear_smoothing_values>>',
self.clear_smoothing_values)
self.root.bind('<<load_settings>>', self.load_settings)
self.root.bind('<<save_settings>>', self.save_settings)
self.root.bind('<<reset_settings>>', self.reset_settings)
self.root.bind('<<output_graphs>>', self.output_graphs)
self.root.bind('<<output_summaries>>', self.output_summaries)
self.root.bind('<<output_points>>', self.output_points)
self.root.bind('<<output_tol>>', self.output_tol)
self.root.bind('<<quit>>', self.quit)
self.root.bind('<<create_about_window>>', self.create_about_window)
def _setup_window_geometry(self):
if platform == 'darwin':
scwd = (self.root.winfo_screenwidth() - 717) / 2
if self.root.winfo_screenheight() > 612:
scht = (self.root.winfo_screenheight() - 612) / 2
else:
scht = self.root.winfo_screenheight()
elif platform == 'win32':
scwd = (self.root.winfo_screenwidth() - 746) / 2
if self.root.winfo_screenheight() > 660:
scht = (self.root.winfo_screenheight() - 600 - 60) / 2
else:
scht = self.root.winfo_screenheight()
else:
scwd = (self.root.winfo_screenwidth() - 767) / 2
if self.root.winfo_screenheight() > 607:
scht = (self.root.winfo_screenheight() - 607) / 2
else:
scht = self.root.winfo_screenheight()
self.root.geometry('+%d+%d' % (scwd, scht))
self.root.columnconfigure(0, weight=1)
self.root.rowconfigure(1, weight=1)
def settings_to_default(self, event=None):
self.view_names.set(0)
self.view_pts.set(1)
self.view_pandtol.set(1)
self.view_spline.set(1)
self.view_se.set(0)
self.tol_type.set('relative')
self.tol_drop.set('1/3')
self.tol_absolute.set('1')
self.tol_mode.set('broad')
self.tol_floor.set('0')
self.loc_peak.set(0)
self.peak_min.set('min')
self.peak_max.set('max')
if r("InCheck('min.stim', objects())")[0]:
self.peak_min.set(r("min.stim")[0])
self.peak_max.set(r("max.stim")[0])
self.strength_mode.set('Height-Dependent')
self.sp_lim.set(1)
self.sp_min.set('0.05')
self.sp_max.set('5')
self.combomode.set('none')
def _check_num_datapoints(self):
r = robjects.r
minimum_datapoints = 10
for i in self.individual_dict.values():
r('checkdata <- %s' % i.r_data_frame.r_repr())
num_datapoints = int(r('sum(!is.na(checkdata[, 2]))')[0])
minimum_datapoints = min(minimum_datapoints, num_datapoints)
if minimum_datapoints < 10:
self.root.event_generate('<<add_message>>', x=105)
def open_data_file(self, event=None):
r = robjects.r
try:
self.root.config(cursor='wait')
except:
self.root.config(cursor='watch')
# self.root.update()
self.graph_zone.current_slot = ''
self.graph_zone.page_dict.clear()
self.graph_zone.individual_dict.clear()
self.sp_dict.clear()
r("master.gam.list <- list()")
if self.graph_zone.view == 'mini' or self.graph_zone.view == 'mega':
self.root.event_generate('<<add_message>>', x=101)
self.graph_zone.loading_screen()
if event.x == 0:
self.file_type.set('horizontal')
num_ind = r("ncol(mydata)")[0] - 1
elif event.x == 1:
self.file_type.set('vertical')
num_ind = r("length(name.vect)")[0]
self.peak_min.set(r("min.stim")[0])
self.peak_max.set(r("max.stim")[0])
for i in range(1, num_ind + 1):
self.sp_dict[i] = StringVar()
self.sp_dict[i].set('-1')
if self.file_type.get() == 'horizontal':
r("""individual_df <- data.frame(stimulus = mydata[, 1],
response = mydata[, (%s + 1)])
""" % i)
elif self.file_type.get() == 'vertical':
r("""individual_df <- data.frame(
stimulus = mydata[, stim.column][which(mydata[, id.column]
== name.vect[%d])],
response = mydata[, resp.column][which(mydata[, id.column]
== name.vect[%d])])
""" % (i, i))
individual_df = r("""
# individual_df <- data.frame(stimulus = mydata[, 1],
# response = mydata[, (%s + 1)])
#names(individual_df)[2] <- name.vect[#s]
individual_name <- name.vect[%s]
individual_name_char1 <- strsplit(individual_name, "")[[1]][1]
allowed_characters <-
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ."
allowed_characters_split <- strsplit(allowed_characters,
"")[[1]]
if (!InCheck(individual_name_char1, allowed_characters_split)){
individual_name <- paste("X", individual_name, sep="")
}
names(individual_df)[2] <- individual_name
rejector <- vector()
for (r in 1:nrow(individual_df)) {
if (is.na(individual_df[r, 2])){
rejector <- c(rejector, r)
}
}
if (length(rejector) > 0) {
individual_df <- individual_df[-rejector, ]
}
individual_df
""" % (i, i))
self.individual_dict[i] = PrefFunc(
individual_df, i, self.sp_dict[i], self.current_sp,
self.sp_lim, self.sp_min, self.sp_max,
self.loc_peak, self.peak_min, self.peak_max,
self.tol_type, self.tol_drop, self.tol_absolute, self.tol_mode,
self.tol_floor, self.strength_mode)
self.clear_display()
self.num_pages = num_ind//9
if num_ind//9 != num_ind/9:
self.num_pages += 1
self.graph_zone.num_pages = self.num_pages
for p in range(1, (self.num_pages + 1)):
if p < self.num_pages:
self.graph_zone.page_dict[p] = list(range(1+9*(p-1),
10+9*(p-1)))
else:
remaining_ind = num_ind - (p-1)*9
ind_list = []
for r in range(1, (remaining_ind + 1)):
ind_list.append(r+9*(p-1))
self.graph_zone.page_dict[p] = ind_list
self.graph_zone.mini_graphs(1)
self.graph_zone.page_total.configure(text='/ %s' % self.num_pages)
self.current_page.set(1)
self.control_panel.activate()
self.menu_bar.activate()
self.root.event_generate('<<add_message>>', x=102)
self._check_num_datapoints()
self.root.config(cursor='')
# self.root.update()
def update_summary(self, event=None):
if self.current_col.get() != 0:
current_individual = self.individual_dict[self.current_col.get()]
self.sp_dict[self.current_col.get()] = \
current_individual.smoothing_value
else:
current_individual = None
self.control_panel.update_summary(individual=current_individual,
strength_mode=self.strength_mode,
tol_mode=self.tol_mode)
def update_sp(self, event=None):
if self.current_col.get() != 0:
self.current_sp.set(self.individual_dict[
self.current_col.get()].smoothing_value.get())
else:
self.current_sp.set('')
def clear_display(self, event=None):
self.current_sp.set('')
self.current_col.set(0)
self.update_summary(event=None)
def loosen(self, event=None):
col = self.current_col.get()
if col != 0:
self.individual_dict[col].loosen()
self.update_summary(event=None)
self.graph_zone.update_graph()
def stiffen(self, event=None):
col = self.current_col.get()
if col != 0:
self.individual_dict[col].stiffen()
self.update_summary(event=None)
self.graph_zone.update_graph()
def reset_sp(self, event=None):
col = self.current_col.get()
if col != 0:
self.individual_dict[col].reset_sp()
self.graph_zone.update_graph()
self.update_summary(event=None)
self.current_sp.set(
self.individual_dict[col].smoothing_value.get())
def enter_sp(self, event=None):
col = self.current_col.get()
if col != 0:
self.sp_dict[col].set(self.current_sp.get())
self.individual_dict[col].sp_status = 'cyan'
self.individual_dict[col].update()
self.graph_zone.update_graph()
self.update_summary(event=None)
def update_all_graphs(self, event=None):
try:
self.root.config(cursor='wait')
except:
self.root.config(cursor='watch')
if self.graph_zone.view == 'mini':
self.graph_zone.mini_graphs(self.current_page.get(),
and_deselect=False)
elif self.graph_zone.view == 'mega':
self.graph_zone.mega_graph(self.current_col.get())
self.root.config(cursor='')
def update_all_peaks(self, event=None):
try:
self.root.config(cursor='wait')
except:
self.root.config(cursor='watch')
for i in self.individual_dict:
self.individual_dict[i].update_peak()
self.update_all_graphs()
self.root.config(cursor='')
def update_all_tolerances(self, event=None):
try:
self.root.config(cursor='wait')
except:
self.root.config(cursor='watch')
for i in self.individual_dict:
self.individual_dict[i].update_tolerance()
self.update_all_graphs()
self.root.config(cursor='')
def update_magenta_graphs(self, event=None):
try:
self.root.config(cursor='wait')
except:
self.root.config(cursor='watch')
if self.graph_zone.num_pages > 0:
for i in self.individual_dict:
if self.individual_dict[i].sp_status == 'magenta':
# sp_lim_on = (self.sp_lim.get() == 1)
# sp_too_small = (
# self.individual_dict[i].smoothing_value.get()
# < self.sp_min.get())
# sp_too_big = (
# self.individual_dict[i].smoothing_value.get()
# > self.sp_max.get())
# if sp_lim_on and (sp_too_small or sp_too_big):
# self.individual_dict[i].reset_sp()
# elif not sp_lim_on:
# self.individual_dict[i].reset_sp()
self.individual_dict[i].reset_sp()
self.update_summary(self.current_col.get())
if self.graph_zone.view == 'mini' and self.current_col.get() != 0:
self.graph_zone.select_mini_graph(self.graph_zone.current_slot,
and_deselect=False)
self.update_all_graphs()
self.root.config(cursor='')
def open_message_log(self, event=None):
self.logWindow = PFuncMessages(self.root, self.messages)
def add_message(self, event=None):
message_code = event.x
message_string = self.message_lookup[message_code]
current_datetime = str(datetime.now())
spc_indx = current_datetime.find(" ")
time_str = current_datetime[spc_indx + 1: spc_indx+6]
if self.messages.get() == '':
message_string = time_str + ' ' + message_string
else:
message_string = '\n' + time_str + ' ' + message_string
self.messages.set(self.messages.get() + message_string)
for child in self.root.winfo_children():
if type(child) == PFuncMessages:
child.add_message(message_string)
def open_group_spline_window(self, event=None):
group_spline_window = GroupSplineWindow(self.root,
self.individual_dict,
self.combomode,
self.input_font)
def add_group_spline(self, event=None):
newsplinedf = r('mydf')
self.sp_dict[(len(self.sp_dict) + 1)] = StringVar()
self.sp_dict[len(self.sp_dict)].set('-1')
self.individual_dict[(len(self.individual_dict) + 1)] = \
PrefFunc(newsplinedf, len(self.individual_dict) + 1,
self.sp_dict[len(self.sp_dict)], self.current_sp,
self.sp_lim, self.sp_min, self.sp_max,
self.loc_peak, self.peak_min, self.peak_max,
self.tol_type, self.tol_drop, self.tol_absolute,
self.tol_mode, self.tol_floor, self.strength_mode,
spline_type='group')
if len(self.graph_zone.page_dict[len(self.graph_zone.page_dict)]) == 9:
self.graph_zone.page_dict[len(self.graph_zone.page_dict) + 1] = []
self.graph_zone.num_pages += 1
self.graph_zone.page_total.configure(text='/ %s'
% self.graph_zone.num_pages)
self.graph_zone.page_dict[len(self.graph_zone.page_dict)].append(
len(self.individual_dict))
self.graph_zone.deselect_mini_graph()
self.graph_zone.current_slot = ''
self.current_page.set(len(self.graph_zone.page_dict))
self.graph_zone.mini_graphs(len(self.graph_zone.page_dict))
def open_smoothing_file(self, event=None):
file_opt = options = {}
options['defaultextension'] = '.csv'
options['filetypes'] = [('all files', '.*'), ('csv files', '.csv'),
('text files', '.txt')]
options['parent'] = self.root
options['title'] = 'Select a file...'
spfile = filedialog.askopenfile(mode='r', **file_opt)
if spfile is not None:
for k in self.sp_dict.keys():
self.sp_dict[k].set('-1')
lines = spfile.readlines()
spfile.close()
for l in lines:
tempind = int(l.split(',')[0])
if tempind in self.individual_dict.keys():
newsp = str(l.split(',')[1][:-1])
self.sp_dict[tempind].set(newsp)
self.individual_dict[tempind].sp_status = 'cyan'
self.individual_dict[tempind].update()
if self.graph_zone.view == 'mini':
self.graph_zone.mini_graphs(self.current_page.get(),
and_deselect=False)
self.graph_zone.fig.canvas.draw()
elif self.graph_zone.view == 'mega':
self.graph_zone.update_mega_graph()
self.update_summary()
self.current_sp.set(self.individual_dict[
self.current_col.get()].smoothing_value)
def save_smoothing_values(self, event=None):
if platform == 'win32':
ext = ''
else:
ext = '.csv'
spfile = filedialog.asksaveasfile(mode='w',
initialfile='smoothing.csv',
defaultextension=ext,
filetypes=[('all files', '.*'),
('csv files', '.csv')],
parent=self.root,
title='Save smoothing values')
if spfile is not None:
for i in self.individual_dict.values():
if i.sp_status == 'cyan':
spfile.write('%d,%s\n'
% (i.id_number, i.smoothing_value.get()))
spfile.close()
def clear_smoothing_values(self, event=None):
for i in self.individual_dict:
if self.individual_dict[i].sp_status == 'cyan':
temp_individual_id = self.individual_dict[i].id_number
self.sp_dict[temp_individual_id].set('-1')
self.individual_dict[i].update()
self.individual_dict[i].sp_status = 'magenta'
self.graph_zone.mini_graphs(self.current_page.get(),
and_deselect=False)
self.graph_zone.fig.canvas.draw()
def load_settings(self, event=None):
usrSett = shelve.open('UserSettings')
if len(usrSett) > 0:
self.view_names.set(usrSett['view_names'])
self.view_pts.set(usrSett['view_pts'])
self.view_pandtol.set(usrSett['view_pandtol'])
self.view_spline.set(usrSett['view_spline'])
self.view_se.set(usrSett['view_se'])
self.tol_type.set(usrSett['tol_type'])
self.tol_drop.set(usrSett['tol_drop'])
self.tol_absolute.set(usrSett['tol_absolute'])
self.tol_mode.set(usrSett['tol_mode'])
self.tol_floor.set(usrSett['tol_floor'])
self.loc_peak.set(usrSett['loc_peak'])
self.peak_min.set(usrSett['peak_min'])
self.peak_max.set(usrSett['peak_max'])
self.strength_mode.set(usrSett['strength_mode'])
self.sp_lim.set(usrSett['sp_lim'])
self.sp_min.set(usrSett['sp_min'])
self.sp_max.set(usrSett['sp_max'])
usrSett.close()
self.control_panel.smoothing_limits_box.sp_lim_toggle(andupdate=False)
self.control_panel.peak_box.loc_peak_toggle(andupdate=False)
self.control_panel.tolerance_box.change_tol_type(andupdate=False)
self.update_magenta_graphs()
def save_settings(self, event=None):
usrSett = shelve.open('UserSettings')
usrSett['view_names'] = self.view_names.get()
usrSett['view_pts'] = self.view_pts.get()
usrSett['view_pandtol'] = self.view_pandtol.get()
usrSett['view_spline'] = self.view_spline.get()
usrSett['view_se'] = self.view_se.get()
usrSett['tol_type'] = self.tol_type.get()
usrSett['tol_drop'] = self.tol_drop.get()
usrSett['tol_absolute'] = self.tol_absolute.get()
usrSett['tol_mode'] = self.tol_mode.get()
usrSett['tol_floor'] = self.tol_floor.get()
usrSett['loc_peak'] = self.loc_peak.get()
usrSett['peak_min'] = self.peak_min.get()
usrSett['peak_max'] = self.peak_max.get()
usrSett['strength_mode'] = self.strength_mode.get()
usrSett['sp_lim'] = self.sp_lim.get()
usrSett['sp_min'] = self.sp_min.get()
usrSett['sp_max'] = self.sp_max.get()
usrSett.close()
def reset_settings(self, event=None):
self.settings_to_default()
self.control_panel.smoothing_limits_box.sp_lim_toggle(andupdate=False)
self.control_panel.peak_box.loc_peak_toggle(andupdate=False)
self.control_panel.tolerance_box.change_tol_type(andupdate=False)
self.update_magenta_graphs()
def output_graphs(self, event=None):
        '''Create a pdf, svg, or eps file (via R) of all the graphs.
        This function pays attention to the current View settings, and so if
data points are toggled off in the PFunc GUI, they will be absent from
this output as well.
'''
try:
self.root.config(cursor='wait')
except:
self.root.config(cursor='watch')
if platform == 'win32':
ext = ''
else:
ext = '.pdf'
graphfile = filedialog.asksaveasfile(mode='w',
initialfile='spline_graphs.pdf',
defaultextension=ext,
filetypes=[('all files', '.*'),
('pdf files', '.pdf'),
('eps files', '.eps'),
('svg files', '.svg')],
parent=self.root,
title='Select a file...')
if graphfile is not None:
if graphfile.name[-4:] == '.svg':
filetype_for_r = 'svg'
elif graphfile.name[-4:] == '.eps':
filetype_for_r = 'eps'
else:
filetype_for_r = 'pdf'
if filetype_for_r == 'pdf':
r('''pdf(file = '%s', onefile = TRUE)
par(mfrow = c(3, 3), mar = c(1.5, 1.1, 2, 1.1),
oma = c(1, 1.5, 0, 0.5))
min.resp <- %s
max.resp <- %s
resp.range <- max.resp - min.resp
max.y <- max.resp + 0.02 * resp.range
''' % (graphfile.name, self.individual_dict[1].axes_ranges[2],
self.individual_dict[1].axes_ranges[3]))
# isn't there a better way to handle min and max resp?
elif filetype_for_r == 'svg':
nrows = ceiling(len(self.individual_dict) / 3)
svg_height = str(nrows * (7/3))
r('''svg(file = '%s', height = %s)
par(mfrow = c(%s, 3), mar = c(1.5, 1.1, 2, 1.1),
oma = c(1, 1.5, 0, 0.5))
min.resp <- %s
max.resp <- %s
resp.range <- max.resp - min.resp
max.y <- max.resp + 0.02 * resp.range
''' % (graphfile.name, svg_height, nrows,
                       self.individual_dict[1].axes_ranges[2],
self.individual_dict[1].axes_ranges[3]))
elif filetype_for_r == 'eps':
nrows = ceiling(len(self.individual_dict) / 3)
eps_height = str(nrows * (7/3))
r('''setEPS()
postscript(file = '%s', height = %s, width = 7,
paper = 'special')
par(mfrow = c(%s, 3), mar = c(1.5, 1.1, 2, 1.1),
oma = c(1, 1.5, 0, 0.5))
min.resp <- %s
max.resp <- %s
resp.range <- max.resp - min.resp
max.y <- max.resp + 0.02 * resp.range
''' % (graphfile.name, eps_height, nrows,
                     self.individual_dict[1].axes_ranges[2],
self.individual_dict[1].axes_ranges[3]))
for i in self.individual_dict:
tempind = self.individual_dict[i]
self.draw_one_graph_in_r(self.individual_dict[i])
r('dev.off()')
graphfile.close()
self.root.config(cursor='')
def draw_one_graph_in_r(self, individual):
individual.update()
isSubmerged = individual.tolerance_height > individual.peak_resp
if self.tol_mode.get() == 'broad':
current_tolerance_points = (
individual.broad_tolerance_points.r_repr())
elif self.tol_mode.get() == 'strict':
current_tolerance_points = (
individual.strict_tolerance_points.r_repr())
r('''individual_data <- %s
peak_bundle <- list(peak.response = %s,
peak.preference = %s,
predicting.stimuli = data.frame(stim=%s),
predicted.response = %s,
predicted.se = %s)
tolerance_bundle <- list(tolerance.height = %s,
cross.points = %s,
submerged = %s)
ghost_bundle <- list()
is.flat <- CheckForFlat(individual_data, 2)
#is.flat <- CheckForFlat(#s, 2)
#if (sd(#s) == 0) {flat <- TRUE}
''' % (individual.r_data_frame.r_repr(),
individual.peak_resp,
individual.peak_pref,
individual.spline_x.r_repr(),
individual.spline_y.r_repr(),
individual.se.r_repr(),
individual.tolerance_height,
current_tolerance_points,
#individual.tolerance_points.r_repr(),
str(isSubmerged).upper(),
#individual.data_y.r_repr()
))
if self.view_names.get() == 1:
name = individual.name
else:
name = ''
r("""plot(NA, NA, main = "%s", xlab = "", ylab = "",
ylim = c(%s, %s), xlim = c(%s, %s), type = "l")
"""
% (name,
individual.axes_ranges[2], individual.axes_ranges[3],
individual.axes_ranges[0], individual.axes_ranges[1]))
groupcheck = 'FALSE'
if individual.type == 'group':
groupcheck = 'TRUE'
r('''GraphSpline(individual_data, peak_bundle, tolerance_bundle,
'%s', 2, %s,
%s, %s, TRUE, '%s', max.y,
FALSE, ghost_bundle, is.flat, %s,
2, forgui = TRUE, group = %s, graph.se = %s,
graph.spline = %s)
''' % (name, self.view_pts.get(),
self.view_pandtol.get(), self.view_pandtol.get(),
self.tol_mode.get(), individual.smoothing_value.get(),
groupcheck, self.view_se.get(), self.view_spline.get()))
def output_summaries(self, event=None):
'''Output a csv file with all of the spline measures listed in the
Summary box (peak preference, peak height, tolerance, etc.) for all
individuals in the dataset.
'''
try:
self.root.config(cursor='wait')
except:
self.root.config(cursor='watch')
if platform == 'win32':
ext = ''
else:
ext = '.csv'
summfile = filedialog.asksaveasfile(mode='w',
initialfile='spline_summaries.csv',
defaultextension=ext,
filetypes=[('all files', '.*'),
('csv files', '.csv')],
parent=self.root,
title='Save spline summaries...')
if summfile is not None:
r('''output <- data.frame(name = rep(NA, %s),
peak_pref=NA, peak_height=NA, tolerance=NA,
strength=NA,
#HD_strength=NA, HI_strength=NA,
responsiveness=NA, smoothing=NA)
''' % len(self.individual_dict))
for i in self.individual_dict:
tempind = self.individual_dict[i]
tempind.update()
if self.tol_mode.get() == 'broad':
temp_tolerance = self.individual_dict[i].broad_tolerance
elif self.tol_mode.get() == 'strict':
temp_tolerance = self.individual_dict[i].strict_tolerance
if self.strength_mode.get() == 'Height-Dependent':
temp_strength = tempind.hd_strength
elif self.strength_mode.get() == 'Height-Independent':
temp_strength = tempind.hi_strength
r('''output[%s, 1] <- '%s'
output[%s, 2:7] <- c(%s, %s, %s, %s, %s, %s)
''' % (i, tempind.name, i, tempind.peak_pref,
tempind.peak_resp,
temp_tolerance, temp_strength,
tempind.responsiveness, tempind.smoothing_value.get()))
r("write.csv(output, '%s', row.names = FALSE)" % summfile.name)
summfile.close()
self.root.config(cursor='')
def output_points(self, event=None):
'''Output a csv file of points that make up the splines in every graph.
The continuous curves of the splines are broken into 200 equally spaced
points. These points can then be used to plot splines in other
programs. x- and y-values are output for each individual, and if the
Standard Error setting is toggled on in the View settings, then
standard error points of the spline are output as well.
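        For example, an individual named A1 (an arbitrary name used here for
        illustration) contributes the columns A1_stimulus and A1_response,
        plus A1_se when standard errors are included.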
'''
try:
self.root.config(cursor='wait')
except:
self.root.config(cursor='watch')
if platform == 'win32':
ext = ''
else:
ext = '.csv'
pointfile = filedialog.asksaveasfile(mode='w',
initialfile='spline_points.csv',
defaultextension=ext,
filetypes=[('all files', '.*'),
('csv files', '.csv')],
parent=self.root,
title='Select a file...')
if pointfile is not None:
r("output <- data.frame(x = rep(NA, 201))")
for i in self.individual_dict:
tempind = self.individual_dict[i]
tempind.update()
r('''output$%s_stimulus <- %s
output$%s_response <- %s''' % (tempind.name,
tempind.spline_x.r_repr(),
tempind.name,
tempind.spline_y.r_repr()))
if self.view_se.get() == 1:
r('output$%s_se <- %s'
% (tempind.name, tempind.se.r_repr()))
r('output <- output[2:ncol(output)]')
r('write.csv(output, "%s", row.names = FALSE)' % pointfile.name)
pointfile.close()
self.root.config(cursor='')
def output_tol(self, event=None):
'''Tolerance is the width of the spline at a certain height. In the
graphs, it is represented by a horizontal blue line. Tolerance points
are the start and stop points of those blue lines. This function
outputs a csv file of these tolerance points for each individual.
Like in the output_points function, this is useful for plotting splines
in another program.
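        Each output row contains the individual's name followed by the
        x-values of its tolerance points, for example A1, 2.1, 5.7 (the name
        and numbers here are purely illustrative).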
'''
try:
self.root.config(cursor='wait')
except:
self.root.config(cursor='watch')
if platform == 'win32':
ext = ''
else:
ext = '.csv'
pointfile = filedialog.asksaveasfile(
mode='w', initialfile='tolerance_points.csv', defaultextension=ext,
filetypes=[('all files', '.*'), ('csv files', '.csv')],
parent=self.root, title='Select a file...')
if pointfile is not None:
output_tol_table = ''
for i in range(1, len(self.individual_dict) + 1):
individual_name = self.individual_dict[i].name
if self.tol_mode.get() == 'broad':
individual_tol_pts = (
self.individual_dict[i].broad_tolerance_points)
elif self.tol_mode.get() == 'strict':
individual_tol_pts = (
self.individual_dict[i].strict_tolerance_points)
tol_pts_str = ''
                for pt in individual_tol_pts:
                    tol_pts_str = tol_pts_str + str(pt) + ', '
                tol_pts_str = tol_pts_str[: -2]
                output_row = individual_name + ', ' + tol_pts_str
                if i != len(self.individual_dict):
                    output_row += '\n'
output_tol_table += output_row
pointfile.write(output_tol_table)
pointfile.close()
self.root.config(cursor='')
def quit(self, event=None):
self.root.quit()
def create_about_window(self, event=None):
self.about_window = AboutWindow(self, self.about_font1)
if __name__ == '__main__':
main_app = MainApp()
main_app.root.mainloop()
|
Joccalor/PFunc
|
PFunc.py
|
Python
|
gpl-3.0
| 130,625 | 0.000322 |
import random
from abc import ABC, abstractmethod
import logging
import numpy
import rlr
from typing import List
from typing_extensions import Protocol
import dedupe.sampling as sampling
import dedupe.core as core
import dedupe.training as training
import dedupe.datamodel as datamodel
from dedupe._typing import TrainingExample
logger = logging.getLogger(__name__)
class ActiveLearner(ABC):
@abstractmethod
def transform(self) -> None:
pass
@abstractmethod
def pop(self) -> TrainingExample:
pass
@abstractmethod
def mark(self) -> None:
pass
@abstractmethod
def __len__(self) -> int:
pass
class HasDataModel(Protocol):
data_model: datamodel.DataModel
class DedupeSampler(object):
def _sample(self: HasDataModel, data, blocked_proportion, sample_size) -> List[TrainingExample]:
blocked_sample_size = int(blocked_proportion * sample_size)
predicates = list(self.data_model.predicates(index_predicates=False))
data = sampling.randomDeque(data)
blocked_sample_keys = sampling.dedupeBlockedSample(blocked_sample_size,
predicates,
data)
random_sample_size = sample_size - len(blocked_sample_keys)
random_sample_keys = set(core.randomPairs(len(data),
random_sample_size))
data = dict(data)
return [(data[k1], data[k2])
for k1, k2
in blocked_sample_keys | random_sample_keys]
class RecordLinkSampler(object):
def _sample(self: HasDataModel, data_1, data_2, blocked_proportion, sample_size) -> List[TrainingExample]:
offset = len(data_1)
blocked_sample_size = int(blocked_proportion * sample_size)
predicates = list(self.data_model.predicates(index_predicates=False))
deque_1 = sampling.randomDeque(data_1)
deque_2 = sampling.randomDeque(data_2)
blocked_sample_keys = sampling.linkBlockedSample(blocked_sample_size,
predicates,
deque_1,
deque_2)
random_sample_size = sample_size - len(blocked_sample_keys)
random_sample_keys = core.randomPairsMatch(len(deque_1),
len(deque_2),
random_sample_size)
unique_random_sample_keys = {(a, b + offset)
for a, b in random_sample_keys}
return [(data_1[k1], data_2[k2])
for k1, k2
in blocked_sample_keys | unique_random_sample_keys]
class RLRLearner(ActiveLearner, rlr.RegularizedLogisticRegression):
def __init__(self, data_model):
super().__init__(alpha=1)
self.data_model = data_model
self._candidates: List[TrainingExample]
@property
def candidates(self) -> List[TrainingExample]:
return self._candidates
@candidates.setter
def candidates(self, new_candidates):
self._candidates = new_candidates
self.distances = self.transform(self._candidates)
random_pair = random.choice(self._candidates)
exact_match = (random_pair[0], random_pair[0])
self.fit_transform([exact_match, random_pair],
[1, 0])
def transform(self, pairs):
return self.data_model.distances(pairs)
def fit(self, X, y):
self.y = numpy.array(y)
self.X = X
super().fit(self.X, self.y, cv=False)
def fit_transform(self, pairs, y):
self.fit(self.transform(pairs), y)
def pop(self) -> TrainingExample:
if not len(self.candidates):
raise IndexError("No more unlabeled examples to label")
target_uncertainty = self._bias()
probabilities = self.candidate_scores()
distance_to_target = numpy.abs(target_uncertainty - probabilities)
uncertain_index = distance_to_target.argmin()
self.distances = numpy.delete(self.distances, uncertain_index, axis=0)
uncertain_pair = self.candidates.pop(uncertain_index)
return uncertain_pair
def _remove(self, index):
self.distances = numpy.delete(self.distances, index, axis=0)
def mark(self, pairs, y):
self.y = numpy.concatenate([self.y, y])
self.X = numpy.vstack([self.X, self.transform(pairs)])
self.fit(self.X, self.y)
def _bias(self):
positive = numpy.sum(self.y == 1)
n_examples = len(self.y)
bias = 1 - (positive / n_examples if positive else 0)
# When we have just a few examples we are okay with getting
# examples where the model strongly believes the example is
# going to be positive or negative. As we get more examples,
# prefer to ask for labels of examples the model is more
# uncertain of.
uncertainty_weight = min(positive, n_examples - positive)
bias_weight = 10
weighted_bias = 0.5 * uncertainty_weight + bias * bias_weight
weighted_bias /= uncertainty_weight + bias_weight
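        # Illustrative arithmetic: with 3 positives out of 20 labels,
        # bias = 1 - 3/20 = 0.85 and uncertainty_weight = min(3, 17) = 3, so
        # weighted_bias = (0.5 * 3 + 0.85 * 10) / (3 + 10) ~ 0.77, i.e. early
        # on the learner still asks about pairs it believes are matches.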
return weighted_bias
def candidate_scores(self):
return self.predict_proba(self.distances)
def __len__(self):
return len(self.candidates)
class DedupeRLRLearner(DedupeSampler, RLRLearner):
def __init__(self, data_model, data, blocked_proportion, sample_size):
super().__init__(data_model)
self.candidates = self._sample(data, blocked_proportion, sample_size)
class RecordLinkRLRLearner(RecordLinkSampler, RLRLearner):
def __init__(self, data_model, data_1, data_2, blocked_proportion, sample_size):
        super().__init__(data_model)
self.candidates = self._sample(data_1, data_2, blocked_proportion, sample_size)
class BlockLearner(object):
def __init__(self, data_model, candidates, *args):
self.data_model = data_model
self.candidates = candidates
self.current_predicates = ()
self._cached_labels = None
self._old_dupes = []
self.block_learner: training.BlockLearner
def fit_transform(self, pairs, y):
dupes = [pair for label, pair in zip(y, pairs) if label]
new_dupes = [pair for pair in dupes if pair not in self._old_dupes]
new_uncovered = (not all(self.predict(new_dupes)))
if new_uncovered:
self.current_predicates = self.block_learner.learn(dupes,
recall=1.0)
self._cached_labels = None
self._old_dupes = dupes
def candidate_scores(self):
if self._cached_labels is None:
labels = self.predict(self.candidates)
self._cached_labels = numpy.array(labels).reshape(-1, 1)
return self._cached_labels
def predict(self, candidates):
labels = []
for record_1, record_2 in candidates:
for predicate in self.current_predicates:
keys = predicate(record_2, target=True)
if keys:
if set(predicate(record_1)) & set(keys):
labels.append(1)
break
else:
labels.append(0)
return labels
def _remove(self, index):
if self._cached_labels is not None:
self._cached_labels = numpy.delete(self._cached_labels,
index,
axis=0)
class DedupeBlockLearner(BlockLearner):
def __init__(self, data_model,
candidates,
data,
index_include):
super().__init__(data_model, candidates)
index_data = Sample(data, 50000)
sampled_records = Sample(index_data, 5000)
preds = self.data_model.predicates()
self.block_learner = training.DedupeBlockLearner(preds,
sampled_records,
index_data)
examples_to_index = candidates.copy()
if index_include:
examples_to_index += index_include
self._index_predicates(examples_to_index)
def _index_predicates(self, candidates):
blocker = self.block_learner.blocker
records = core.unique((record for pair in candidates for record in pair))
for field in blocker.index_fields:
unique_fields = {record[field] for record in records}
blocker.index(unique_fields, field)
for pred in blocker.index_predicates:
pred.freeze(records)
class RecordLinkBlockLearner(BlockLearner):
def __init__(self,
data_model,
candidates,
data_1,
data_2,
index_include):
super().__init__(data_model, candidates)
sampled_records_1 = Sample(data_1, 600)
index_data = Sample(data_2, 50000)
sampled_records_2 = Sample(index_data, 600)
preds = self.data_model.predicates(canopies=False)
self.block_learner = training.RecordLinkBlockLearner(preds,
sampled_records_1,
sampled_records_2,
index_data)
examples_to_index = candidates.copy()
if index_include:
examples_to_index += index_include
self._index_predicates(examples_to_index)
def _index_predicates(self, candidates):
blocker = self.block_learner.blocker
A, B = zip(*candidates)
A = core.unique(A)
B = core.unique(B)
for field in blocker.index_fields:
unique_fields = {record[field] for record in B}
blocker.index(unique_fields, field)
for pred in blocker.index_predicates:
pred.freeze(A, B)
class DisagreementLearner(ActiveLearner):
classifier: RLRLearner
blocker: BlockLearner
candidates: List[TrainingExample]
def _common_init(self):
self.learners = (self.classifier, self.blocker)
self.y = numpy.array([])
self.pairs = []
def pop(self) -> TrainingExample:
if not len(self.candidates):
raise IndexError("No more unlabeled examples to label")
probs_l = []
for learner in self.learners:
probabilities = learner.candidate_scores()
probs_l.append(probabilities)
probs = numpy.concatenate(probs_l, axis=1)
        # where do the classifiers disagree?
disagreement = numpy.std(probs > 0.5, axis=1).astype(bool)
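        # e.g. with two learners, a row of (probs > 0.5) is [True, False] when
        # they disagree (std = 0.5 -> True) and [True, True] or [False, False]
        # when they agree (std = 0 -> False)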
if disagreement.any():
conflicts = disagreement.nonzero()[0]
target = numpy.random.uniform(size=1)
uncertain_index = conflicts[numpy.argmax(probs[conflicts][:, 0] - target)]
else:
uncertain_index = numpy.std(probs, axis=1).argmax()
logger.debug("Classifier: %.2f, Covered: %s",
probs[uncertain_index][0],
bool(probs[uncertain_index][1]))
uncertain_pair = self.candidates.pop(uncertain_index)
for learner in self.learners:
learner._remove(uncertain_index)
return uncertain_pair
def mark(self, pairs, y):
self.y = numpy.concatenate([self.y, y])
self.pairs.extend(pairs)
for learner in self.learners:
learner.fit_transform(self.pairs, self.y)
def __len__(self):
return len(self.candidates)
def transform(self):
pass
def learn_predicates(self, recall, index_predicates):
dupes = [pair for label, pair in zip(self.y, self.pairs) if label]
if not index_predicates:
old_preds = self.blocker.block_learner.blocker.predicates.copy()
no_index_predicates = [pred for pred in old_preds
if not hasattr(pred, 'index')]
self.blocker.block_learner.blocker.predicates = no_index_predicates
learned_preds = self.blocker.block_learner.learn(dupes,
recall=recall,
candidate_types='random forest')
self.blocker.block_learner.blocker.predicates = old_preds
else:
learned_preds = self.blocker.block_learner.learn(dupes,
recall=recall,
candidate_types='random forest')
return learned_preds
class DedupeDisagreementLearner(DedupeSampler, DisagreementLearner):
def __init__(self,
data_model,
data,
blocked_proportion,
sample_size,
index_include):
self.data_model = data_model
data = core.index(data)
self.candidates = self._sample(data, blocked_proportion, sample_size)
random_pair = random.choice(self.candidates)
exact_match = (random_pair[0], random_pair[0])
index_include = index_include.copy()
index_include.append(exact_match)
self.blocker = DedupeBlockLearner(data_model,
self.candidates,
data,
index_include)
self.classifier = RLRLearner(self.data_model)
self.classifier.candidates = self.candidates
self._common_init()
self.mark([exact_match] * 4 + [random_pair],
[1] * 4 + [0])
class RecordLinkDisagreementLearner(RecordLinkSampler, DisagreementLearner):
def __init__(self,
data_model,
data_1,
data_2,
blocked_proportion,
sample_size,
index_include):
self.data_model = data_model
data_1 = core.index(data_1)
offset = len(data_1)
data_2 = core.index(data_2, offset)
self.candidates = self._sample(data_1,
data_2,
blocked_proportion,
sample_size)
random_pair = random.choice(self.candidates)
exact_match = (random_pair[0], random_pair[0])
index_include = index_include.copy()
index_include.append(exact_match)
self.blocker = RecordLinkBlockLearner(data_model,
self.candidates,
data_1,
data_2,
index_include)
self.classifier = RLRLearner(self.data_model)
self.classifier.candidates = self.candidates
self._common_init()
self.mark([exact_match] * 4 + [random_pair],
[1] * 4 + [0])
class Sample(dict):
def __init__(self, d, sample_size):
if len(d) <= sample_size:
super().__init__(d)
else:
sample = random.sample(d.keys(), sample_size)
super().__init__({k: d[k] for k in sample})
|
datamade/dedupe
|
dedupe/labeler.py
|
Python
|
mit
| 15,590 | 0.000513 |
#!/usr/bin/env python2
from gimpfu import *
import time
import re
def preview (image, delay, loops, force_delay, ignore_hidden, restore_hide):
if not image:
raise "No image given."
layers = image.layers
nlayers = len (layers)
visible = []
length = []
i = 0
while i < nlayers:
visible += [pdb.gimp_item_get_visible (layers [i])]
if visible [i]:
pdb.gimp_item_set_visible (layers [i], False)
name = pdb.gimp_item_get_name (layers [i])
l = None
if not force_delay:
l = re.search ("\([0-9]+ms\)", name)
if l:
l = tuple (map (sum, zip (l.span (), tuple ([+1, -3]))))
l = name [slice (*l)]
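            # e.g. a layer named "frame 1 (250ms)" yields l = "250",
            # giving that frame a 250 ms delay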
if not l:
l = delay
length += [float (l) / 1000.0]
i += 1
j = 0
while j < loops:
while i > 0:
i -= 1
if (not ignore_hidden) or visible [i]:
pdb.gimp_item_set_visible (layers [i], True)
pdb.gimp_displays_flush ()
time.sleep (length [i])
j += 1
    # hide all frames again before the next loop (frames accumulate as in optimized GIFs)
if j < loops:
while i < nlayers:
if (not ignore_hidden) or visible [i]:
pdb.gimp_item_set_visible (layers [i], False)
i += 1
else:
i = nlayers
i = nlayers
if restore_hide:
while i > 0:
i -= 1
if visible [i]:
pdb.gimp_item_set_visible (layers [i], True)
register(
"preview",
"preview",
"Preview the animation of a gif",
"Roger Bongers",
"Roger Bongers",
"2016",
"Preview...",
"*",
[
(PF_IMAGE, "image", "The image to modify", None),
(PF_INT32, "delay", "The default length in ms of each frame", 100),
(PF_INT32, "loops", "The number of times to loop the animation", 1),
(PF_BOOL, "force-delay", "Force the default length on every frame", 0),
(PF_BOOL, "ignore-hidden", "Ignore currently hidden items", 0),
(PF_BOOL, "restore-hide", "Restore the hidden status after preview", 0),
],
[],
preview,
menu = "<Image>/Filters/Animation")
main()
|
rbong/gimptools
|
preview.py
|
Python
|
gpl-2.0
| 2,246 | 0.01959 |
# coding: utf-8
"""
Provides functions for finding and testing for locally `(k, l)`-connected
graphs.
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)\nDan Schult (dschult@colgate.edu)"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__all__ = ['kl_connected_subgraph', 'is_kl_connected']
import copy
import networkx as nx
def kl_connected_subgraph(G, k, l, low_memory=False, same_as_graph=False):
"""Returns the maximum locally `(k, l)`-connected subgraph of ``G``.
A graph is locally `(k, l)`-connected if for each edge `(u, v)` in the
graph there are at least `l` edge-disjoint paths of length at most `k`
joining `u` to `v`.
Parameters
----------
G : NetworkX graph
The graph in which to find a maximum locally `(k, l)`-connected
subgraph.
k : integer
The maximum length of paths to consider. A higher number means a looser
connectivity requirement.
l : integer
The number of edge-disjoint paths. A higher number means a stricter
connectivity requirement.
low_memory : bool
If this is ``True``, this function uses an algorithm that uses slightly
more time but less memory.
same_as_graph : bool
If this is ``True`` then return a tuple of the form ``(H, is_same)``,
where ``H`` is the maximum locally `(k, l)`-connected subgraph and
``is_same`` is a Boolean representing whether ``G`` is locally `(k,
l)`-connected (and hence, whether ``H`` is simply a copy of the input
graph ``G``).
Returns
-------
NetworkX graph or two-tuple
If ``same_as_graph`` is ``True``, then this function returns a
two-tuple as described above. Otherwise, it returns only the maximum
locally `(k, l)`-connected subgraph.
See also
--------
is_kl_connected
References
----------
.. [1]: Chung, Fan and Linyuan Lu. "The Small World Phenomenon in Hybrid
Power Law Graphs." *Complex Networks*. Springer Berlin Heidelberg,
2004. 89--104.
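    Examples
    --------
    A small illustrative check (in a complete graph every edge lies on many
    short edge-disjoint paths, so no edges are removed):
    >>> import networkx as nx
    >>> G = nx.complete_graph(5)
    >>> H = kl_connected_subgraph(G, 2, 2)
    >>> H.number_of_edges() == G.number_of_edges()
    True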
"""
H=copy.deepcopy(G) # subgraph we construct by removing from G
graphOK=True
deleted_some=True # hack to start off the while loop
while deleted_some:
deleted_some=False
for edge in H.edges():
(u,v)=edge
### Get copy of graph needed for this search
if low_memory:
verts=set([u,v])
for i in range(k):
[verts.update(G.neighbors(w)) for w in verts.copy()]
G2=G.subgraph(list(verts))
else:
G2=copy.deepcopy(G)
###
path=[u,v]
cnt=0
accept=0
while path:
cnt += 1 # Found a path
if cnt>=l:
accept=1
break
# record edges along this graph
prev=u
for w in path:
if prev!=w:
G2.remove_edge(prev,w)
prev=w
# path=shortest_path(G2,u,v,k) # ??? should "Cutoff" be k+1?
try:
path=nx.shortest_path(G2,u,v) # ??? should "Cutoff" be k+1?
except nx.NetworkXNoPath:
path = False
# No Other Paths
if accept==0:
H.remove_edge(u,v)
deleted_some=True
if graphOK: graphOK=False
# We looked through all edges and removed none of them.
# So, H is the maximal (k,l)-connected subgraph of G
if same_as_graph:
return (H,graphOK)
return H
def is_kl_connected(G, k, l, low_memory=False):
"""Returns ``True`` if and only if ``G`` is locally `(k, l)`-connected.
A graph is locally `(k, l)`-connected if for each edge `(u, v)` in the
graph there are at least `l` edge-disjoint paths of length at most `k`
joining `u` to `v`.
Parameters
----------
G : NetworkX graph
The graph to test for local `(k, l)`-connectedness.
k : integer
The maximum length of paths to consider. A higher number means a looser
connectivity requirement.
l : integer
The number of edge-disjoint paths. A higher number means a stricter
connectivity requirement.
low_memory : bool
If this is ``True``, this function uses an algorithm that uses slightly
more time but less memory.
Returns
-------
bool
Whether the graph is locally `(k, l)`-connected subgraph.
See also
--------
kl_connected_subgraph
References
----------
.. [1]: Chung, Fan and Linyuan Lu. "The Small World Phenomenon in Hybrid
Power Law Graphs." *Complex Networks*. Springer Berlin Heidelberg,
2004. 89--104.
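    Examples
    --------
    Small illustrative calls: a complete graph offers several short
    edge-disjoint alternatives for every edge, while removing an edge of a
    path graph disconnects its endpoints.
    >>> import networkx as nx
    >>> is_kl_connected(nx.complete_graph(5), 2, 2)
    True
    >>> is_kl_connected(nx.path_graph(3), 2, 2)
    False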
"""
graphOK=True
for edge in G.edges():
(u,v)=edge
### Get copy of graph needed for this search
if low_memory:
verts=set([u,v])
for i in range(k):
[verts.update(G.neighbors(w)) for w in verts.copy()]
G2=G.subgraph(verts)
else:
G2=copy.deepcopy(G)
###
path=[u,v]
cnt=0
accept=0
while path:
cnt += 1 # Found a path
if cnt>=l:
accept=1
break
# record edges along this graph
prev=u
for w in path:
if w!=prev:
G2.remove_edge(prev,w)
prev=w
# path=shortest_path(G2,u,v,k) # ??? should "Cutoff" be k+1?
try:
path=nx.shortest_path(G2,u,v) # ??? should "Cutoff" be k+1?
except nx.NetworkXNoPath:
path = False
# No Other Paths
if accept==0:
graphOK=False
break
# return status
return graphOK
|
LumPenPacK/NetworkExtractionFromImages
|
win_build/nefi2_win_amd64_msvc_2015/site-packages/networkx/algorithms/hybrid.py
|
Python
|
bsd-2-clause
| 6,084 | 0.010355 |
__author__ = 'Ostico <ostico@gmail.com>'
import sys
import os
import unittest
from pyorient.exceptions import *
from pyorient import OrientSocket
from pyorient.messages.database import *
from pyorient.messages.commands import *
from pyorient.messages.cluster import *
from pyorient.messages.records import *
from pyorient.messages.connection import *
from pyorient.constants import DB_TYPE_DOCUMENT, QUERY_SYNC, \
STORAGE_TYPE_PLOCAL, DB_TYPE_GRAPH, STORAGE_TYPE_MEMORY
os.environ['DEBUG'] = "0"
os.environ['DEBUG_VERBOSE'] = "0"
if os.path.realpath( '../' ) not in sys.path:
sys.path.insert( 0, os.path.realpath( '../' ) )
if os.path.realpath( '.' ) not in sys.path:
sys.path.insert( 0, os.path.realpath( '.' ) )
class RawMessages_2_TestCase(unittest.TestCase):
""" Command Test Case """
def test_record_object(self):
x = OrientRecord()
assert x._rid is None
assert x._version is None
assert x._class is None
def test_record_load(self):
connection = OrientSocket( "localhost", 2424 )
assert connection.session_id == -1
# ##################
msg = DbOpenMessage( connection )
db_name = "GratefulDeadConcerts"
cluster_info = msg.prepare(
(db_name, "admin", "admin", DB_TYPE_DOCUMENT, "")
).send().fetch_response()
assert len(cluster_info) != 0
def _test_callback(record):
assert record is not []
assert record._rid is not None # assert no exception
req_msg = RecordLoadMessage( connection )
res = req_msg.prepare( [ "#11:0", "*:2", _test_callback ] ) \
.send().fetch_response()
assert res._rid == "#11:0"
assert res._class == 'followed_by'
assert res._in != 0
assert res._out != 0
def test_record_count_with_no_opened_db(self):
connection = OrientSocket( "localhost", 2424 )
assert connection.session_id == -1
# ##################
conn_msg = ConnectMessage( connection )
session_id = conn_msg.prepare( ("root", "root") )\
.send().fetch_response()
assert session_id == connection.session_id
assert session_id != -1
try:
count_msg = DbCountRecordsMessage( connection )
res = count_msg.prepare().send().fetch_response()
assert False # we expect an exception because we need a db opened
except PyOrientDatabaseException:
assert True
def test_record_count(self):
connection = OrientSocket( "localhost", 2424 )
assert connection.session_id == -1
# ##################
msg = DbOpenMessage( connection )
db_name = "GratefulDeadConcerts"
cluster_info = msg.prepare(
(db_name, "admin", "admin", DB_TYPE_DOCUMENT, "")
).send().fetch_response()
assert len(cluster_info) != 0
session_id = connection.session_id
assert session_id != -1
count_msg = DbCountRecordsMessage( connection )
res = count_msg.prepare().send().fetch_response()
        assert res != 0
assert res > 0
def test_record_create_update(self):
connection = OrientSocket( "localhost", 2424 )
conn_msg = ConnectMessage( connection )
assert connection.protocol != -1
session_id = conn_msg.prepare( ("root", "root") ) \
.send().fetch_response()
assert session_id == connection.session_id
assert session_id != -1
# ##################
db_name = "my_little_test"
msg = DbExistsMessage( connection )
exists = msg.prepare( [db_name] ).send().fetch_response()
print("Before %r" % exists)
try:
( DbDropMessage( connection ) ).prepare([db_name]) \
.send().fetch_response()
assert True
except PyOrientCommandException as e:
print(str(e))
finally:
( DbCreateMessage( connection ) ).prepare(
(db_name, DB_TYPE_GRAPH, STORAGE_TYPE_MEMORY)
).send().fetch_response()
msg = DbOpenMessage( connection )
cluster_info = msg.prepare(
(db_name, "admin", "admin", DB_TYPE_GRAPH, "")
).send().fetch_response()
assert len(cluster_info) != 0
try:
create_class = CommandMessage(connection)
cluster = create_class.prepare((QUERY_CMD, "create class my_class "
"extends V"))\
.send().fetch_response()[0]
except PyOrientCommandException:
# class my_class already exists
pass
# classes are not allowed in record create/update/load
rec = { '@my_class': { 'alloggio': 'casa', 'lavoro': 'ufficio', 'vacanza': 'mare' } }
rec_position = ( RecordCreateMessage(connection) )\
.prepare( ( cluster, rec ) )\
.send().fetch_response()
print("New Rec Position: %s" % rec_position._rid)
assert rec_position._rid is not None
rec = { '@my_class': { 'alloggio': 'albergo', 'lavoro': 'ufficio', 'vacanza': 'montagna' } }
update_success = ( RecordUpdateMessage(connection) )\
.prepare( ( cluster, rec_position._rid, rec ) )\
.send().fetch_response()
assert update_success[0] != 0
if connection.protocol <= 21:
            return unittest.skip("Protocol {!r} does not work well".format(
connection.protocol )) # skip test
res = ( CommandMessage( connection ) )\
.prepare( [ QUERY_SYNC, "select from " + rec_position._rid ] )\
.send().fetch_response()
# res = [ ( RecordLoadMessage(connection) ).prepare(
# [ rec_position._rid ]
# ).send().fetch_response() ]
print("%r" % res[0]._rid)
print("%r" % res[0]._class)
print("%r" % res[0]._version)
print("%r" % res[0].alloggio)
print("%r" % res[0].lavoro)
print("%r" % res[0].vacanza)
assert res[0]._rid == '#11:0'
# assert res[0]._class == 'my_class'
assert res[0]._version >= 0
assert res[0].alloggio == 'albergo'
assert res[0].lavoro == 'ufficio'
assert res[0].vacanza == 'montagna'
sid = ( ConnectMessage( connection ) ).prepare( ("root", "root") ) \
.send().fetch_response()
# at the end drop the test database
( DbDropMessage( connection ) ).prepare([db_name]) \
.send().fetch_response()
def test_record_delete(self):
connection = OrientSocket( "localhost", 2424 )
conn_msg = ConnectMessage( connection )
assert connection.protocol != -1
session_id = conn_msg.prepare( ("root", "root") ) \
.send().fetch_response()
print("Sid: %s" % session_id)
assert session_id == connection.session_id
assert session_id != -1
db_name = "my_little_test"
msg = DbExistsMessage( connection )
exists = msg.prepare( [db_name] ).send().fetch_response()
print("Before %r" % exists)
try:
( DbDropMessage( connection ) ).prepare([db_name]) \
.send().fetch_response()
assert True
except PyOrientCommandException as e:
print(str(e))
finally:
( DbCreateMessage( connection ) ).prepare(
(db_name, DB_TYPE_DOCUMENT, STORAGE_TYPE_MEMORY)
).send().fetch_response()
msg = DbOpenMessage( connection )
cluster_info = msg.prepare(
(db_name, "admin", "admin", DB_TYPE_DOCUMENT, "")
).send().fetch_response()
assert len(cluster_info) != 0
rec = { 'alloggio': 'casa', 'lavoro': 'ufficio', 'vacanza': 'mare' }
rec_position = ( RecordCreateMessage(connection) )\
.prepare( ( 1, rec ) )\
.send().fetch_response()
print("New Rec Position: %s" % rec_position._rid)
assert rec_position._rid is not None
######################## Check Success
res = ( CommandMessage( connection ) )\
.prepare( [ QUERY_SYNC, "select from " + str(rec_position._rid) ] )\
.send().fetch_response()
import re
assert re.match( '#1:[0-9]', res[0]._rid )
assert res[0]._class is None
assert res[0]._version >= 0
assert res[0].alloggio == 'casa'
assert res[0].lavoro == 'ufficio'
assert res[0].vacanza == 'mare'
######################## Delete Rid
del_msg = (RecordDeleteMessage(connection))
deletion = del_msg.prepare( ( 1, rec_position._rid ) )\
.send().fetch_response()
assert deletion is True
# now try a failure in deletion for wrong rid
del_msg = (RecordDeleteMessage(connection))
deletion = del_msg.prepare( ( 1, 11111 ) )\
.send().fetch_response()
assert deletion is False
sid = ( ConnectMessage( connection ) ).prepare( ("root", "root") ) \
.send().fetch_response()
# at the end drop the test database
( DbDropMessage( connection ) ).prepare([db_name]) \
.send().fetch_response()
def test_data_cluster_count(self):
connection = OrientSocket( "localhost", 2424 )
assert connection.session_id == -1
# ##################
msg = DbOpenMessage( connection )
db_name = "GratefulDeadConcerts"
cluster_info = msg.prepare(
(db_name, "admin", "admin", DB_TYPE_DOCUMENT, "")
).send().fetch_response()
print(cluster_info)
assert len(cluster_info) != 0
assert connection.session_id != -1
count_msg = DataClusterCountMessage( connection )
res1 = count_msg.set_count_tombstones(1)\
.prepare( [ (0,1,2,3,4,5) ] ).send().fetch_response()
assert res1 is not 0
assert res1 > 0
count_msg = DataClusterCountMessage( connection )
res2 = count_msg.set_count_tombstones(1)\
.prepare( [ (0,1,2,3,4,5), 1 ] ).send().fetch_response()
        assert res2 != 0
assert res2 > 0
count_msg = DataClusterCountMessage( connection )
res3 = count_msg.set_count_tombstones(1).set_cluster_ids( (0,1,2,3,4,5) )\
.prepare().send().fetch_response()
        assert res3 != 0
assert res3 > 0
assert res1 == res2
assert res3 == res2
assert res3 == res1
def test_query_async(self):
connection = OrientSocket( 'localhost', 2424 )
open_msg = DbOpenMessage(connection)
open_msg.set_db_name('GratefulDeadConcerts')\
.set_user('admin').set_pass('admin').prepare()\
.send().fetch_response()
def _test_callback(record):
            assert record != []
assert record._rid is not None # assert no exception
try_select_async = CommandMessage(connection)
try_select_async.set_command_type(QUERY_ASYNC)\
.set_query("select from followed_by")\
.set_limit(50)\
.set_fetch_plan("*:0")\
.set_callback( _test_callback )\
            .prepare()
response = try_select_async.send().fetch_response()
assert response is None
def test_wrong_data_range(self):
connection = OrientSocket( 'localhost', 2424 )
db_name = "GratefulDeadConcerts"
db = DbOpenMessage(connection)
cluster_info = db.prepare(
(db_name, "admin", "admin", DB_TYPE_DOCUMENT, "")
).send().fetch_response()
datarange = DataClusterDataRangeMessage(connection)
try:
value = datarange.prepare(32767).send().fetch_response()
except PyOrientCommandException as e:
print(repr(str(e)))
assert "IndexOutOfBoundsException" in str(e)
def test_data_range(self):
connection = OrientSocket( 'localhost', 2424 )
db_name = "GratefulDeadConcerts"
db = DbOpenMessage(connection)
_, clusters, _ = db.prepare(
(db_name, "admin", "admin", DB_TYPE_DOCUMENT, "")
).send().fetch_response()
clusters.sort(key=lambda cluster: cluster.id)
for cluster in clusters:
# os.environ['DEBUG'] = '0' # silence debug
datarange = DataClusterDataRangeMessage(connection)
value = datarange.prepare(cluster.id).send().fetch_response()
print("Cluster Name: %s, ID: %u: %s "\
% (cluster.name, cluster.id, value))
            assert value != []
assert value is not None
# x = RawMessages_2_TestCase('test_wrong_data_range').run()
|
mogui/pyorient
|
tests/test_raw_messages_2.py
|
Python
|
apache-2.0
| 12,897 | 0.016283 |
# -*- coding: utf-8 -*-
# Copyright 2017, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
class InvalidFormula(Exception):
pass
class InvalidFormulaComponent(InvalidFormula):
pass
|
clarkperkins/stackdio
|
stackdio/api/formulas/exceptions.py
|
Python
|
apache-2.0
| 753 | 0 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Download and build the data if it does not exist.
from parlai.core.build_data import DownloadableFile
import parlai.core.build_data as build_data
import os
RESOURCES = [
DownloadableFile(
'http://parl.ai/downloads/dialog_babi/dialog_babi.tar.gz',
'dialog_babi.tar.gz',
'bb36155ccd41eac91f806446c5728ee90374e5596156a9f7c1b86f8342cfc383',
)
]
def build(opt):
dpath = os.path.join(opt['datapath'], 'dialog-bAbI')
version = None
if not build_data.built(dpath, version_string=version):
print('[building data: ' + dpath + ']')
if build_data.built(dpath):
# An older version exists, so remove these outdated files.
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
for downloadable_file in RESOURCES:
downloadable_file.download_file(dpath)
# Mark the data as built.
build_data.mark_done(dpath, version_string=version)
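# Hedged usage sketch (not part of the original file): ParlAI teachers normally
# invoke build(opt) themselves, but it can be called directly with a minimal
# opt dict; the 'datapath' value below is an assumption for illustration only.
#
#   from parlai.tasks.dialog_babi.build import build
#   build({'datapath': '/tmp/parlai_data'})  # downloads and extracts dialog_babi.tar.gz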
|
facebookresearch/ParlAI
|
parlai/tasks/dialog_babi/build.py
|
Python
|
mit
| 1,182 | 0 |
# -*- coding: UTF-8 -*-
from django.conf import settings as dsettings
from django.contrib.auth import models as authModels
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.http import HttpResponse, Http404
from django.shortcuts import render, render_to_response, get_object_or_404
from django.template import RequestContext
from django.template.defaultfilters import slugify
from microblog import models, settings
from taggit.models import Tag, TaggedItem
from decorator import decorator
try:
import json
except ImportError:
import simplejson as json
def render_json(f):
"""
    Decorator to apply to a view to serialize its result as JSON.
"""
if dsettings.DEBUG:
ct = 'text/plain'
j = lambda d: json.dumps(d, indent=2)
else:
ct = 'application/json'
j = json.dumps
def wrapper(func, *args, **kw):
try:
result = func(*args, **kw)
        except Exception as e:
result = j(str(e))
status = 500
else:
if isinstance(result, HttpResponse):
return result
else:
result = j(result)
status = 200
return HttpResponse(content=result, content_type=ct, status=status)
return decorator(wrapper, f)
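# Hedged usage sketch (not in the original module): render_json wraps a view so
# that its return value is serialized to JSON (pretty-printed in DEBUG), while
# exceptions come back as a 500. The view below is hypothetical.
#
#   @render_json
#   def post_count(request):
#       return {'published': models.Post.objects.published().count()}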
def post_list(request):
return render(request, 'microblog/post_list.html', {})
def category(request, category):
category = get_object_or_404(models.Category, name=category)
return render_to_response(
'microblog/category.html',
{
'category': category,
},
context_instance=RequestContext(request)
)
def post_list_by_year(request, year, month=None):
return render_to_response(
'microblog/list_by_year.html',
{
'year': year,
'month': month,
},
context_instance=RequestContext(request)
)
def tag(request, tag):
tag = get_object_or_404(Tag, name=tag)
return render_to_response(
'microblog/tag.html',
{
'tag': tag,
},
context_instance=RequestContext(request)
)
def author(request, author):
user = [
u for u in authModels.User.objects.all()
if slugify('%s-%s' % (u.first_name, u.last_name)) == author
]
if not user:
raise Http404()
else:
user = user[0]
return render_to_response(
'microblog/author.html',
{
'author': user,
},
context_instance=RequestContext(request)
)
def _paginate_posts(post_list, request):
if settings.MICROBLOG_POST_LIST_PAGINATION:
paginator = Paginator(post_list, settings.MICROBLOG_POST_PER_PAGE)
try:
page = int(request.GET.get("page", "1"))
except ValueError:
page = 1
try:
posts = paginator.page(page)
except (EmptyPage, InvalidPage):
posts = paginator.page(1)
else:
paginator = Paginator(post_list, len(post_list) or 1)
posts = paginator.page(1)
return posts
def _posts_list(request, featured=False):
if settings.MICROBLOG_LANGUAGE_FALLBACK_ON_POST_LIST:
lang = None
else:
lang = request.LANGUAGE_CODE
return models.Post.objects\
.byLanguage(lang)\
.byFeatured(featured)\
.published()
def _post_detail(request, content):
if not settings.MICROBLOG_POST_FILTER([content.post], request.user):
raise Http404()
return render_to_response(
'microblog/post_detail.html',
{
'post': content.post,
'content': content
},
context_instance=RequestContext(request)
)
def _trackback_ping(request, content):
def success():
x = ('<?xml version="1.0" encoding="utf-8"?>\n'
'<response><error>0</error></response>')
return HttpResponse(content=x, content_type='text/xml')
def failure(message=''):
x = ('<?xml version="1.0" encoding="utf-8"?>\n'
'<response><error>1</error><message>%s</message></response>') % message
return HttpResponse(content=x, content_type='text/xml', status=400)
if request.method != 'POST':
return failure('only POST method is supported')
if not request.POST.get('url'):
return failure('url argument is mandatory')
t = {
'url': request.POST['url'],
'blog_name': request.POST.get('blog_name', ''),
'title': request.POST.get('title', ''),
'excerpt': request.POST.get('excerpt', ''),
}
from microblog.moderation import moderate
if not moderate(request, 'trackback', t['title'], url=t['url']):
return failure('moderated')
content.new_trackback(**t)
return success()
@render_json
def _comment_count(request, content):
post = content.post
if settings.MICROBLOG_COMMENT == 'comment':
import django_comments as comments
from django.contrib.contenttypes.models import ContentType
model = comments.get_model()
q = model.objects.filter(
content_type=ContentType.objects.get_for_model(post),
object_pk=post.id,
is_public=True
)
return q.count()
else:
import httplib2
from urllib import quote
h = httplib2.Http()
params = {
'forum_api_key': settings.MICROBLOG_COMMENT_DISQUS_FORUM_KEY,
'url': content.get_url(),
}
args = '&'.join('%s=%s' % (k, quote(v)) for k, v in params.items())
url = settings.MICROBLOG_COMMENT_DISQUS_API_URL + 'get_thread_by_url?%s' % args
resp, page = h.request(url)
if resp.status != 200:
return -1
page = json.loads(page)
if not page['succeeded']:
return -1
elif page['message'] is None:
return 0
else:
return page['message']['num_comments']
def _post404(f):
def wrapper(*args, **kw):
try:
return f(*args, **kw)
except models.PostContent.DoesNotExist:
raise Http404()
return wrapper
if settings.MICROBLOG_URL_STYLE == 'date':
def _get(slug, year, month, day):
return models.PostContent.objects\
.select_related('post')\
.getBySlugAndDate(slug, year, month, day)
@_post404
def post_detail(request, year, month, day, slug):
return _post_detail(
request,
content=_get(slug, year, month, day)
)
@_post404
def trackback_ping(request, year, month, day, slug):
return _trackback_ping(
request,
content=_get(slug, year, month, day)
)
@_post404
def comment_count(request, year, month, day, slug):
return _comment_count(
request,
content = _get(slug, year, month, day)
)
elif settings.MICROBLOG_URL_STYLE == 'category':
def _get(slug, category):
return models.PostContent.objects\
.select_related('post')\
.getBySlugAndCategory(slug, category)
@_post404
def post_detail(request, category, slug):
return _post_detail(
request,
content=_get(slug, category),
)
@_post404
def trackback_ping(request, category, slug):
return _trackback_ping(
request,
content=_get(slug, category),
)
@_post404
def comment_count(request, category, slug):
return _comment_count(
request,
content=_get(slug, category),
)
|
barrachri/epcon
|
microblog/views.py
|
Python
|
bsd-2-clause
| 7,631 | 0.003014 |
#!/usr/bin/env python
import os
import numpy as np
import math
import fnmatch
from my_spectrogram import my_specgram
from collections import OrderedDict
from scipy.io import wavfile
import matplotlib.pylab as plt
from pylab import rcParams
from sklearn.model_selection import train_test_split
rcParams['figure.figsize'] = 6, 3
SCRIPT_DIR = os.getcwd()
INPUT_FOLDER = 'Input_audio_wav_16k/'
OUTPUT_FOLDER = 'Input_spectrogram_16k/'
languages = os.listdir(INPUT_FOLDER)
languages.sort()
audio_dict = OrderedDict()
for l in languages:
audio_dict[l] = sorted(os.listdir(INPUT_FOLDER + l))
def plot_spectrogram(audiopath, plotpath=None, NFFT_window=0.025,
noverlap_window=0.023, freq_min=None, freq_max=None,
axis='off'):
fs, data = wavfile.read(audiopath)
data = data / data.max()
center = data.mean() * 0.2
data = data + np.random.normal(center, abs(center * 0.5), len(data))
NFFT = pow(2, int(math.log(int(fs*NFFT_window), 2) + 0.5)) # 25ms window, nearest power of 2
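    # Worked example (added comment): with fs = 16000 Hz and NFFT_window = 0.025 s,
    # fs * NFFT_window = 400 samples and the nearest power of two is 2**9 = 512,
    # so NFFT = 512; noverlap below is then int(16000 * 0.023) = 368 samples.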
noverlap = int(fs*noverlap_window)
fc = int(np.sqrt(freq_min*freq_max))
# Pxx is the segments x freqs array of instantaneous power, freqs is
# the frequency vector, bins are the centers of the time bins in which
# the power is computed, and im is the matplotlib.image.AxesImage
# instance
Pxx, freqs, bins, im = my_specgram(data, NFFT=NFFT, Fs=fs,
Fc=fc, detrend=None,
window=np.hanning(NFFT),
noverlap=noverlap, cmap='Greys',
xextent=None,
pad_to=None, sides='default',
scale_by_freq=None,
minfreq=freq_min, maxfreq=freq_max)
plt.axis(axis)
im.axes.axis('tight')
im.axes.get_xaxis().set_visible(False)
im.axes.get_yaxis().set_visible(False)
if plotpath:
plt.savefig(plotpath, bbox_inches='tight',
transparent=False, pad_inches=0, dpi=96)
else:
plt.show()
plt.clf()
# same as training but no added noise
def plot_spectrogram_val(audiopath, plotpath=None, NFFT_window=0.025,
noverlap_window=0.023, freq_min=None, freq_max=None,
axis='off'):
fs, data = wavfile.read(audiopath)
data = data / data.max()
NFFT = pow(2, int(math.log(int(fs*NFFT_window), 2) + 0.5)) # 25ms window, nearest power of 2
noverlap = int(fs*noverlap_window)
fc = int(np.sqrt(freq_min*freq_max))
# Pxx is the segments x freqs array of instantaneous power, freqs is
# the frequency vector, bins are the centers of the time bins in which
# the power is computed, and im is the matplotlib.image.AxesImage
# instance
Pxx, freqs, bins, im = my_specgram(data, NFFT=NFFT, Fs=fs,
Fc=fc, detrend=None,
window=np.hanning(NFFT),
noverlap=noverlap, cmap='Greys',
xextent=None,
pad_to=None, sides='default',
scale_by_freq=None,
minfreq=freq_min, maxfreq=freq_max)
plt.axis(axis)
im.axes.axis('tight')
im.axes.get_xaxis().set_visible(False)
im.axes.get_yaxis().set_visible(False)
if plotpath:
plt.savefig(plotpath, bbox_inches='tight',
transparent=False, pad_inches=0, dpi=96)
else:
plt.show()
plt.clf()
# create spectrograms of randomly drawn samples from each language
def find(pattern, path):
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result[0]
random_wav = []
for key in audio_dict:
random_wav.append(sorted(np.random.choice(audio_dict[key], 500, replace=False)))
training_list = []
validation_list = []
for i in range(0, len(random_wav)):
x_train, x_val = train_test_split(random_wav[i],
test_size=0.4,
random_state=42)
training_list.append(x_train)
validation_list.append(x_val)
if not os.path.exists(OUTPUT_FOLDER + 'Training'):
os.makedirs(OUTPUT_FOLDER + 'Training')
print('Successfully created a training folder!')
print('Populating training folder with spectrograms...')
for i in range(0, len(training_list)):
if not os.path.exists(OUTPUT_FOLDER + 'Training/' + str(languages[i])):
os.makedirs(OUTPUT_FOLDER + 'Training/' + str(languages[i]))
print('Successfully created a {} training folder!'.format(languages[i]))
print('Populating {} training folder with spectrograms...'.format(languages[i]))
for j in range(0, len(training_list[i])):
for k in range(0, 3):
plot_spectrogram(find(training_list[i][j], INPUT_FOLDER),
plotpath=OUTPUT_FOLDER + 'Training/' +
str(languages[i]) + '/' +
str(training_list[i][j][:-4]) + '_' +
str(k) + '.jpeg',
NFFT_window=0.025, noverlap_window=0.023,
freq_min=0, freq_max=5500)
print('Done with {}.'.format(training_list[i][j][:-4]))
if not os.path.exists(OUTPUT_FOLDER + 'Validation'):
os.makedirs(OUTPUT_FOLDER + 'Validation')
print('Successfully created a validation folder!')
print('Populating validation folder with spectrograms...')
for i in range(0, len(validation_list)):
if not os.path.exists(OUTPUT_FOLDER + 'Validation/' + str(languages[i])):
os.makedirs(OUTPUT_FOLDER + 'Validation/' + str(languages[i]))
print('Successfully created a {} validation folder!'.format(languages[i]))
print('Populating {} validation folder with spectrograms...'.format(languages[i]))
for j in range(0, len(validation_list[i])):
for k in range(0, 1):
plot_spectrogram_val(find(validation_list[i][j], INPUT_FOLDER),
plotpath=OUTPUT_FOLDER + 'Validation/' +
str(languages[i]) + '/' +
str(validation_list[i][j][:-4]) + '_' +
str(k) + '.jpeg',
NFFT_window=0.025, noverlap_window=0.023,
freq_min=0, freq_max=5500)
print('Done with {}.'.format(validation_list[i][j][:-4]))
|
nick-monto/SpeechRecog_CNN
|
create_spectrograms_16k.py
|
Python
|
mit
| 6,802 | 0.002205 |
#!/usr/bin/env python
import argparse
import bz2
import gzip
import os.path
import sys
from csvkit import CSVKitReader
from csvkit.exceptions import ColumnIdentifierError, RequiredHeaderError
def lazy_opener(fn):
def wrapped(self, *args, **kwargs):
self._lazy_open()
        return fn(self, *args, **kwargs)
return wrapped
class LazyFile(object):
"""
A proxy for a File object that delays opening it until
a read method is called.
Currently this implements only the minimum methods to be useful,
but it could easily be expanded.
"""
def __init__(self, init, *args, **kwargs):
self.init = init
self.f = None
self._is_lazy_opened = False
self._lazy_args = args
self._lazy_kwargs = kwargs
def __getattr__(self, name):
if not self._is_lazy_opened:
self.f = self.init(*self._lazy_args, **self._lazy_kwargs)
self._is_lazy_opened = True
return getattr(self.f, name)
def __iter__(self):
return self
def close(self):
self.f.close()
self.f = None
self._is_lazy_opened = False
def next(self):
if not self._is_lazy_opened:
self.f = self.init(*self._lazy_args, **self._lazy_kwargs)
self._is_lazy_opened = True
return self.f.next()
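# Hedged usage sketch (not in the original module): LazyFile defers the real
# open() until the first read, so argparse can build file arguments without
# touching the filesystem. The path below is hypothetical.
#
#   f = LazyFile(open, '/tmp/example.csv', 'rb')  # nothing opened yet
#   first_line = f.readline()                     # open() happens here
#   f.close()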
class CSVFileType(object):
"""
An argument factory like argparse.FileType with compression support.
"""
def __init__(self, mode='rb'):
"""
Initialize the factory.
"""
self._mode = mode
def __call__(self, path):
"""
Build a file-like object from the specified path.
"""
if path == '-':
if 'r' in self._mode:
return sys.stdin
elif 'w' in self._mode:
return sys.stdout
else:
raise ValueError('Invalid path "-" with mode {0}'.format(self._mode))
else:
(_, extension) = os.path.splitext(path)
if extension == '.gz':
return LazyFile(gzip.open, path, self._mode)
if extension == '.bz2':
return LazyFile(bz2.BZ2File, path, self._mode)
else:
return LazyFile(open, path, self._mode)
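# Hedged usage sketch (not in the original module): CSVFileType plugs into
# argparse like argparse.FileType, but also handles '-', .gz and .bz2 paths.
# The argument name and file name are illustrative only.
#
#   parser = argparse.ArgumentParser()
#   parser.add_argument('infile', type=CSVFileType('rb'))
#   args = parser.parse_args(['data.csv.gz'])  # args.infile is a LazyFile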
class CSVKitUtility(object):
description = ''
epilog = ''
override_flags = ''
def __init__(self, args=None, output_file=None):
"""
Perform argument processing and other setup for a CSVKitUtility.
"""
self._init_common_parser()
self.add_arguments()
self.args = self.argparser.parse_args(args)
self.reader_kwargs = self._extract_csv_reader_kwargs()
self.writer_kwargs = self._extract_csv_writer_kwargs()
self._install_exception_handler()
if output_file is None:
self.output_file = sys.stdout
else:
self.output_file = output_file
# Ensure SIGPIPE doesn't throw an exception
# Prevents [Errno 32] Broken pipe errors, e.g. when piping to 'head'
# To test from the shell:
# python -c "for i in range(5000): print 'a,b,c'" | csvlook | head
# Without this fix you will see at the end:
# [Errno 32] Broken pipe
# With this fix, there should be no error
# For details on Python and SIGPIPE, see http://bugs.python.org/issue1652
try:
import signal
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except (ImportError, AttributeError):
#Do nothing on platforms that don't have signals or don't have SIGPIPE
pass
def add_arguments(self):
"""
Called upon initialization once the parser for common arguments has been constructed.
Should be overriden by individual utilities.
"""
raise NotImplementedError('add_arguments must be provided by each subclass of CSVKitUtility.')
def main(self):
"""
Main loop of the utility.
Should be overriden by individual utilities and explicitly called by the executing script.
"""
        raise NotImplementedError('main must be provided by each subclass of CSVKitUtility.')
def _init_common_parser(self):
"""
Prepare a base argparse argument parser so that flags are consistent across different shell command tools.
        If you want to constrain which common args are present, you can pass a string for 'override_flags'. Any argument
        whose single-letter form is contained in 'override_flags' will be left out of the configured parser. Use 'f' for
file.
"""
self.argparser = argparse.ArgumentParser(description=self.description, epilog=self.epilog)
# Input
if 'f' not in self.override_flags:
self.argparser.add_argument('file', metavar="FILE", nargs='?', type=CSVFileType(), default=sys.stdin,
help='The CSV file to operate on. If omitted, will accept input on STDIN.')
if 'd' not in self.override_flags:
self.argparser.add_argument('-d', '--delimiter', dest='delimiter',
help='Delimiting character of the input CSV file.')
if 't' not in self.override_flags:
self.argparser.add_argument('-t', '--tabs', dest='tabs', action='store_true',
help='Specifies that the input CSV file is delimited with tabs. Overrides "-d".')
if 'q' not in self.override_flags:
self.argparser.add_argument('-q', '--quotechar', dest='quotechar',
help='Character used to quote strings in the input CSV file.')
if 'u' not in self.override_flags:
self.argparser.add_argument('-u', '--quoting', dest='quoting', type=int, choices=[0,1,2,3],
help='Quoting style used in the input CSV file. 0 = Quote Minimal, 1 = Quote All, 2 = Quote Non-numeric, 3 = Quote None.')
if 'b' not in self.override_flags:
self.argparser.add_argument('-b', '--doublequote', dest='doublequote', action='store_true',
help='Whether or not double quotes are doubled in the input CSV file.')
if 'p' not in self.override_flags:
self.argparser.add_argument('-p', '--escapechar', dest='escapechar',
help='Character used to escape the delimiter if --quoting 3 ("Quote None") is specified and to escape the QUOTECHAR if --doublequote is not specified.')
if 'z' not in self.override_flags:
self.argparser.add_argument('-z', '--maxfieldsize', dest='maxfieldsize', type=int,
help='Maximum length of a single field in the input CSV file.')
if 'e' not in self.override_flags:
self.argparser.add_argument('-e', '--encoding', dest='encoding', default='utf-8',
help='Specify the encoding the input CSV file.')
if 'S' not in self.override_flags:
self.argparser.add_argument('-S', '--skipinitialspace', dest='skipinitialspace', default=False, action='store_true',
help='Ignore whitespace immediately following the delimiter.')
if 'H' not in self.override_flags:
self.argparser.add_argument('-H', '--no-header-row', dest='no_header_row', action='store_true',
help='Specifies that the input CSV file has no header row. Will create default headers.')
if 'v' not in self.override_flags:
self.argparser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
help='Print detailed tracebacks when errors occur.')
# Output
if 'l' not in self.override_flags:
self.argparser.add_argument('-l', '--linenumbers', dest='line_numbers', action='store_true',
help='Insert a column of line numbers at the front of the output. Useful when piping to grep or as a simple primary key.')
# Input/Output
if 'zero' not in self.override_flags:
self.argparser.add_argument('--zero', dest='zero_based', action='store_true',
help='When interpreting or displaying column numbers, use zero-based numbering instead of the default 1-based numbering.')
def _extract_csv_reader_kwargs(self):
"""
        Extracts those command-line arguments that should be passed through to the input CSV reader(s).
"""
kwargs = {}
if self.args.encoding:
kwargs['encoding'] = self.args.encoding
if self.args.tabs:
kwargs['delimiter'] = '\t'
elif self.args.delimiter:
kwargs['delimiter'] = self.args.delimiter
if self.args.quotechar:
kwargs['quotechar'] = self.args.quotechar
if self.args.quoting:
kwargs['quoting'] = self.args.quoting
if self.args.doublequote:
kwargs['doublequote'] = self.args.doublequote
if self.args.escapechar:
kwargs['escapechar'] = self.args.escapechar
if self.args.maxfieldsize:
kwargs['maxfieldsize'] = self.args.maxfieldsize
if self.args.skipinitialspace:
kwargs['skipinitialspace'] = self.args.skipinitialspace
return kwargs
def _extract_csv_writer_kwargs(self):
"""
        Extracts those command-line arguments that should be passed through to the output CSV writer.
"""
kwargs = {}
if 'l' not in self.override_flags and self.args.line_numbers:
kwargs['line_numbers'] = True
return kwargs
def _install_exception_handler(self):
"""
Installs a replacement for sys.excepthook, which handles pretty-printing uncaught exceptions.
"""
def handler(t, value, traceback):
if self.args.verbose:
sys.__excepthook__(t, value, traceback)
else:
# Special case handling for Unicode errors, which behave very strangely
# when cast with unicode()
if t == UnicodeDecodeError:
sys.stderr.write('Your file is not "%s" encoded. Please specify the correct encoding with the -e flag. Use the -v flag to see the complete error.\n' % self.args.encoding)
else:
sys.stderr.write('%s\n' % unicode(value).encode('utf-8'))
sys.excepthook = handler
def print_column_names(self):
"""
Pretty-prints the names and indices of all columns to a file-like object (usually sys.stdout).
"""
if self.args.no_header_row:
        raise RequiredHeaderError('You cannot use --no-header-row with the -n or --names options.')
f = self.args.file
output = self.output_file
try:
zero_based=self.args.zero_based
except:
zero_based=False
rows = CSVKitReader(f, **self.reader_kwargs)
column_names = rows.next()
for i, c in enumerate(column_names):
if not zero_based:
i += 1
output.write('%3i: %s\n' % (i, c))
def match_column_identifier(column_names, c, zero_based=False):
"""
Determine what column a single column id (name or index) matches in a series of column names.
Note that integer values are *always* treated as positional identifiers. If you happen to have
column names which are also integers, you must specify them using a positional index.
"""
if isinstance(c, basestring) and not c.isdigit() and c in column_names:
return column_names.index(c)
else:
try:
c = int(c)
if not zero_based:
c -= 1
# Fail out if neither a column name nor an integer
except:
raise ColumnIdentifierError('Column identifier "%s" is neither an integer, nor a existing column\'s name.' % c)
# Fail out if index is 0-based
if c < 0:
raise ColumnIdentifierError('Column 0 is not valid; columns are 1-based.')
# Fail out if index is out of range
if c >= len(column_names):
raise ColumnIdentifierError('Index %i is beyond the last named column, "%s" at index %i.' % (c, column_names[-1], len(column_names) - 1))
return c
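# Added examples (hedged, assuming column_names = ['id', 'name', 'total']):
#   match_column_identifier(column_names, 'name')              -> 1
#   match_column_identifier(column_names, 3)                   -> 2
#   match_column_identifier(column_names, 3, zero_based=True)  -> raises ColumnIdentifierError (out of range)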
def parse_column_identifiers(ids, column_names, zero_based=False, excluded_columns=None):
"""
Parse a comma-separated list of column indices AND/OR names into a list of integer indices.
Ranges of integers can be specified with two integers separated by a '-' or ':' character. Ranges of
non-integers (e.g. column names) are not supported.
Note: Column indices are 1-based.
"""
columns = []
# If not specified, start with all columns
if not ids:
columns = range(len(column_names))
if columns and not excluded_columns:
return columns
if not columns:
for c in ids.split(','):
c = c.strip()
try:
columns.append(match_column_identifier(column_names, c, zero_based))
except ColumnIdentifierError:
if ':' in c:
a,b = c.split(':',1)
elif '-' in c:
a,b = c.split('-',1)
else:
raise
try:
if a:
a = int(a)
else:
a = 1
if b:
b = int(b) + 1
else:
b = len(column_names)
except ValueError:
                    raise ColumnIdentifierError("Invalid range %s. Ranges must be two integers separated by a - or : character." % c)
for x in range(a,b):
columns.append(match_column_identifier(column_names, x, zero_based))
excludes = []
if excluded_columns:
for c in excluded_columns.split(','):
c = c.strip()
try:
excludes.append(match_column_identifier(column_names, c, zero_based))
except ColumnIdentifierError:
if ':' in c:
a,b = c.split(':',1)
elif '-' in c:
a,b = c.split('-',1)
else:
raise
try:
if a:
a = int(a)
else:
a = 1
if b:
b = int(b) + 1
else:
b = len(column_names)
except ValueError:
                    raise ColumnIdentifierError("Invalid range %s. Ranges must be two integers separated by a - or : character." % c)
for x in range(a,b):
excludes.append(match_column_identifier(column_names, x, zero_based))
return [c for c in columns if c not in excludes]
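# Added examples (hedged, assuming column_names = ['a', 'b', 'c', 'd'] and the
# default 1-based indexing):
#   parse_column_identifiers('1,3', column_names)                      -> [0, 2]
#   parse_column_identifiers('2-4', column_names)                      -> [1, 2, 3]
#   parse_column_identifiers(None, column_names, excluded_columns='a') -> [1, 2, 3]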
|
cypreess/csvkit
|
csvkit/cli.py
|
Python
|
mit
| 15,243 | 0.007479 |
#!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Sun OS specific tests. These are implicitly run by test_psutil.py."""
import psutil
from test_psutil import *
class SunOSSpecificTestCase(unittest.TestCase):
def test_swap_memory(self):
out = sh('swap -l -k')
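        # Added note (hedged): `swap -l -k` output resembles the lines below;
        # only the last two columns (total and free, in KiB) are parsed here.
        #   swapfile                 dev    swaplo   blocks     free
        #   /dev/zvol/dsk/rpool/swap 256,1      16 2097136K 2097136K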
lines = out.strip().split('\n')[1:]
if not lines:
raise ValueError('no swap device(s) configured')
total = free = 0
for line in lines:
line = line.split()
t, f = line[-2:]
t = t.replace('K', '')
f = f.replace('K', '')
total += int(int(t) * 1024)
free += int(int(f) * 1024)
used = total - free
psutil_swap = psutil.swap_memory()
self.assertEqual(psutil_swap.total, total)
self.assertEqual(psutil_swap.used, used)
self.assertEqual(psutil_swap.free, free)
def test_main():
test_suite = unittest.TestSuite()
test_suite.addTest(unittest.makeSuite(SunOSSpecificTestCase))
result = unittest.TextTestRunner(verbosity=2).run(test_suite)
return result.wasSuccessful()
if __name__ == '__main__':
if not test_main():
sys.exit(1)
|
szaydel/psutil
|
test/_sunos.py
|
Python
|
bsd-3-clause
| 1,322 | 0.000756 |
#!/usr/bin/env python
# Blink an LED using the RPi.GPIO library.
import RPi.GPIO as GPIO
from time import sleep
# Use GPIO numbering:
GPIO.setmode(GPIO.BCM)
# Set pin GPIO 14 to be output:
GPIO.setup(14, GPIO.OUT)
try:
while True:
GPIO.output(14, GPIO.HIGH)
sleep(.5)
GPIO.output(14, GPIO.LOW)
sleep(.5)
# If we get a Ctrl-C, clean up so we don't get warnings from other programs:
except KeyboardInterrupt:
GPIO.cleanup()
|
akkana/pi-zero-w-book
|
ch2/blink-rpigpio.py
|
Python
|
gpl-2.0
| 468 | 0 |
#!/usr/bin/python3
import os
import sys
import subprocess
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from lutris.util.wineregistry import WineRegistry
PREFIXES_PATH = os.path.expanduser("~/Games/wine/prefixes")
def get_registries():
registries = []
directories = os.listdir(PREFIXES_PATH)
directories.append(os.path.expanduser("~/.wine"))
for prefix in directories:
for path in os.listdir(os.path.join(PREFIXES_PATH, prefix)):
if path.endswith(".reg"):
registries.append(os.path.join(PREFIXES_PATH, prefix, path))
return registries
def check_registry(registry_path):
with open(registry_path, 'r') as registry_file:
original_content = registry_file.read()
try:
registry = WineRegistry(registry_path)
except:
sys.stderr.write("Error parsing {}\n".format(registry_path))
raise
content = registry.render()
if content != original_content:
wrong_path = os.path.join(os.path.dirname(__file__), 'error.reg')
with open(wrong_path, 'w') as wrong_reg:
wrong_reg.write(content)
print("Content of parsed registry doesn't match: {}".format(registry_path))
subprocess.call(["meld", registry_path, wrong_path])
sys.exit(2)
registries = get_registries()
for registry in registries:
check_registry(registry)
print("All {} registry files validated!".format(len(registries)))
|
RobLoach/lutris
|
tests/check_prefixes.py
|
Python
|
gpl-3.0
| 1,465 | 0.002048 |
from __future__ import print_function
from numpy import pi, arange, sin
import numpy as np
import time
from bokeh.browserlib import view
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.models.glyphs import Circle
from bokeh.models import (
Plot, DataRange1d, DatetimeAxis,
ColumnDataSource, PanTool, WheelZoomTool
)
from bokeh.resources import INLINE
x = arange(-2 * pi, 2 * pi, 0.1)
y = sin(x)
# Create an array of times, starting at the current time, and extending
# for len(x) number of hours.
times = np.arange(len(x)) * 3600000 + time.time()
source = ColumnDataSource(
data=dict(x=x, y=y, times=times)
)
xdr = DataRange1d(sources=[source.columns("times")])
ydr = DataRange1d(sources=[source.columns("y")])
plot = Plot(x_range=xdr, y_range=ydr, min_border=80)
circle = Circle(x="times", y="y", fill_color="red", size=5, line_color="black")
plot.add_glyph(source, circle)
plot.add_layout(DatetimeAxis(), 'below')
plot.add_layout(DatetimeAxis(), 'left')
plot.add_tools(PanTool(), WheelZoomTool())
doc = Document()
doc.add(plot)
if __name__ == "__main__":
filename = "dateaxis.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Date Axis Example"))
print("Wrote %s" % filename)
view(filename)
|
zrhans/python
|
exemplos/Examples.lnk/bokeh/glyphs/dateaxis.py
|
Python
|
gpl-2.0
| 1,293 | 0 |
import numpy as np
from square import Square
from constants import SQUARE_SIZE, BOARD_SIZE
class ChessboardFrame():
def __init__(self, img):
self.img = img
def square_at(self, i):
        y = BOARD_SIZE - ((i // 8) % 8) * SQUARE_SIZE - SQUARE_SIZE  # // keeps the row index an int on Python 3
x = (i % 8) * SQUARE_SIZE
return Square(i, self.img[y:y+SQUARE_SIZE, x:x+SQUARE_SIZE, :])
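# Hedged worked example (added comment), assuming SQUARE_SIZE = 60 and
# BOARD_SIZE = 8 * 60 = 480 (the real values come from constants.py):
#   i = 0  -> x = 0,   y = 420   (bottom-left square of the frame)
#   i = 63 -> x = 420, y = 0     (top-right square)
# so index 0 is the bottom row and indices increase left-to-right, row by row.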
|
joeymeyer/raspberryturk
|
raspberryturk/core/vision/chessboard_frame.py
|
Python
|
mit
| 371 | 0.002695 |
class Solution(object):
def removeKdigits(self, num, k):
"""
:type num: str
:type k: int
:rtype: str
"""
stack = []
length = len(num) - k
for c in num:
while k and stack and stack[-1] > c:
stack.pop()
k -= 1
stack.append(c)
return ''.join(stack[:length]).lstrip('0') or '0'
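# Added examples (hedged) of the greedy monotonic-stack approach above:
#   Solution().removeKdigits("1432219", 3)  -> "1219"
#   Solution().removeKdigits("10200", 1)    -> "200"  (leading zeros stripped)
#   Solution().removeKdigits("10", 2)       -> "0"    (everything removed)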
|
Mlieou/leetcode_python
|
leetcode/python/ex_402.py
|
Python
|
mit
| 405 | 0.002469 |
""" Manage the TVTK scenes. """
# Enthought library imports.
from tvtk.pyface.tvtk_scene import TVTKScene
from pyface.workbench.api import WorkbenchWindow
from traits.api import HasTraits, List, Instance, Property
from traits.api import implements, on_trait_change
from tvtk.plugins.scene.scene_editor import SceneEditor
# Local imports.
from i_scene_manager import ISceneManager
class SceneManager(HasTraits):
""" Manage the TVTK scenes. """
implements(ISceneManager)
#### 'SceneManager' interface #############################################
# The currently active scene (None, if no scene is active).
current_scene = Property(Instance(TVTKScene))
# A list of all open scenes.
scenes = List(TVTKScene)
# The workbench window that the manager is in (there is one scene manager
# per workbench window).
window = Instance(WorkbenchWindow)
#### Private interface ####################################################
# Shadow trait for the 'current_scene' property.
_current_scene = Instance(TVTKScene)
###########################################################################
# 'SceneManager' interface.
###########################################################################
#### Trait properties #####################################################
def _get_current_scene(self):
""" Property getter. """
scene_count = len(self.scenes)
if scene_count == 0:
scene = None
elif scene_count == 1:
scene = self.scenes[0]
else:
scene = self._current_scene
return scene
def _set_current_scene(self, scene):
""" Property setter. """
self._current_scene = scene
return
#### Trait change handlers ################################################
@on_trait_change('window:editor_opened')
def _on_editor_opened(self, obj, trait_name, old, new):
""" Dynamic trait change handler. """
if isinstance(new, SceneEditor):
self.scenes.append(new.scene)
return
@on_trait_change('window:editor_closing')
def _on_editor_closed(self, obj, trait_name, old, new):
""" Dynamic trait change handler. """
if isinstance(new, SceneEditor):
self.scenes.remove(new.scene)
return
@on_trait_change('window:active_editor')
def _on_active_editor_changed(self, obj, trait_name, old, new):
""" Dynamic trait change handler. """
if isinstance(new, SceneEditor):
self.current_scene = new.scene
else:
self.current_scene = None
return
#### EOF ######################################################################
|
liulion/mayavi
|
tvtk/plugins/scene/scene_manager.py
|
Python
|
bsd-3-clause
| 2,756 | 0.002177 |
#! /usr/bin/env python
import sys
g = {}
n = {}
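# Added note: each stdin line is expected to carry 7 space-separated fields
# (n1 n2 p q t tg x); t is a count and x a value, and the script prints the
# t-weighted mean of x for every (n1, n2, p, q) key.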
for line in sys.stdin:
(n1, n2, p, q, t, tg, x) = line.strip().split(' ')
t = int(t)
x = float(x)
key = ' '.join((n1,n2,p,q))
if not key in n:
n[key] = 0
g[key] = 0
n[key] += t
g[key] += x*t
for key in n:
print key, n[key], g[key]/n[key]
|
vbeffara/Simulations
|
tools/massage-box.py
|
Python
|
gpl-3.0
| 341 | 0.01173 |
try:
# Python 3
import tkinter as tk
import tkinter.messagebox as tkm
import tkinter.simpledialog as tkd
except ImportError:
# Python 2
import Tkinter as tk
import tkMessageBox as tkm
import tkSimpleDialog as tkd
import networkx as nx
from networkx_viewer.graph_canvas import GraphCanvas
from networkx_viewer.tokens import TkPassthroughEdgeToken, TkPassthroughNodeToken
from networkx_viewer.autocomplete_entry import AutocompleteEntry
class ViewerApp(tk.Tk):
"""Example simple GUI to plot a NetworkX Graph"""
def __init__(self, graph, **kwargs):
"""Additional keyword arguments beyond graph are passed down to the
GraphCanvas. See it's docs for details"""
tk.Tk.__init__(self)
self.geometry('1000x600')
self.title('NetworkX Viewer')
bottom_row = 10
self.columnconfigure(0, weight=1)
self.rowconfigure(bottom_row, weight=1)
self.canvas = GraphCanvas(graph, width=400, height=400, **kwargs)
self.canvas.grid(row=0, column=0, rowspan=bottom_row+2, sticky='NESW')
self.canvas.onNodeSelected = self.onNodeSelected
self.canvas.onEdgeSelected = self.onEdgeSelected
r = 0 # Current row
tk.Label(self, text='Nodes:').grid(row=r, column=1, sticky='W')
self.node_entry = AutocompleteEntry(self.canvas.dataG.nodes)
self.node_entry.bind('<Return>',self.add_node, add='+')
self.node_entry.bind('<Control-Return>', self.buildNewShortcut, add='+')
self.node_entry.grid(row=r, column=2, columnspan=2, sticky='NESW', pady=2)
tk.Button(self, text='+', command=self.add_node, width=2).grid(
row=r, column=4,sticky=tk.NW,padx=2,pady=2)
r += 1
nlsb = tk.Scrollbar(self, orient=tk.VERTICAL)
self.node_list = tk.Listbox(self, yscrollcommand=nlsb.set, height=5)
self.node_list.grid(row=r, column=1, columnspan=3, sticky='NESW')
self.node_list.bind('<Delete>',lambda e: self.node_list.delete(tk.ANCHOR))
nlsb.config(command=self.node_list.yview)
nlsb.grid(row=r, column=4, sticky='NWS')
r += 1
tk.Label(self, text='Neighbors Levels:').grid(row=r, column=1,
columnspan=2, sticky=tk.NW)
self.level_entry = tk.Entry(self, width=4)
self.level_entry.insert(0,'1')
self.level_entry.grid(row=r, column=3, sticky=tk.NW, padx=5)
r += 1
tk.Button(self, text='Build New', command=self.onBuildNew).grid(
row=r, column=1)
tk.Button(self, text='Add to Existing', command=self.onAddToExisting
).grid(row=r, column=2, columnspan=2)
r += 1
line = tk.Canvas(self, height=15, width=200)
line.create_line(0,13,250,13)
line.create_line(0,15,250,15)
line.grid(row=r, column=1, columnspan=4, sticky='NESW')
r += 1
tk.Label(self, text='Filters:').grid(row=r, column=1, sticky=tk.W)
self.filter_entry = tk.Entry(self)
self.filter_entry.bind('<Return>',self.add_filter, add='+')
self.filter_entry.grid(row=r, column=2, columnspan=2, sticky='NESW', pady=2)
tk.Button(self, text='+', command=self.add_filter, width=2).grid(
row=r, column=4,sticky=tk.NW,padx=2,pady=2)
r += 1
flsb = tk.Scrollbar(self, orient=tk.VERTICAL)
self.filter_list = tk.Listbox(self, yscrollcommand=flsb.set, height=5)
self.filter_list.grid(row=r, column=1, columnspan=3, sticky='NESW')
self.filter_list.bind('<Delete>',self.remove_filter)
flsb.config(command=self.node_list.yview)
flsb.grid(row=r, column=4, sticky='NWS')
r += 1
tk.Button(self, text='Clear',command=self.remove_filter).grid(
row=r, column=1, sticky='W')
tk.Button(self, text='?', command=self.filter_help
).grid(row=r, column=4, stick='NESW', padx=2)
r += 1
line2 = tk.Canvas(self, height=15, width=200)
line2.create_line(0,13,250,13)
line2.create_line(0,15,250,15)
line2.grid(row=r, column=1, columnspan=4, sticky='NESW')
r += 1
self.lbl_attr = tk.Label(self, text='Attributes',
wraplength=200, anchor=tk.SW, justify=tk.LEFT)
self.lbl_attr.grid(row=r, column=1, columnspan=4, sticky='NW')
r += 1
self.tbl_attr = PropertyTable(self, {})
self.tbl_attr.grid(row=r, column=1, columnspan=4, sticky='NESW')
assert r == bottom_row, "Set bottom_row to %d" % r
self._build_menu()
def _build_menu(self):
self.menubar = tk.Menu(self)
self.config(menu=self.menubar)
view = tk.Menu(self.menubar, tearoff=0)
view.add_command(label='Undo', command=self.canvas.undo, accelerator="Ctrl+Z")
self.bind_all("<Control-z>", lambda e: self.canvas.undo()) # Implement accelerator
view.add_command(label='Redo', command=self.canvas.redo)
view.add_separator()
view.add_command(label='Center on node...', command=self.center_on_node)
view.add_separator()
view.add_command(label='Reset Node Marks', command=self.reset_node_markings)
view.add_command(label='Reset Edge Marks', command=self.reset_edge_markings)
view.add_command(label='Redraw Plot', command=self.canvas.replot)
view.add_separator()
view.add_command(label='Grow display one level...', command=self.grow_all)
self.menubar.add_cascade(label='View', menu=view)
def center_on_node(self):
node = NodeDialog(self, "Name of node to center on:").result
if node is None: return
self.canvas.center_on_node(node)
def reset_edge_markings(self):
for u,v,k,d in self.canvas.dispG.edges(data=True, keys=True):
token = d['token']
if token.is_marked:
self.canvas.mark_edge(u,v,k)
def reset_node_markings(self):
for u,d in self.canvas.dispG.nodes(data=True):
token = d['token']
if token.is_marked:
self.canvas.mark_node(u)
def add_node(self, event=None):
node = self.node_entry.get()
if node.isdigit() and self.canvas.dataG.has_node(int(node)):
node = int(node)
if self.canvas.dataG.has_node(node):
self.node_list.insert(tk.END, node)
self.node_entry.delete(0, tk.END)
else:
tkm.showerror("Node not found", "Node '%s' not in graph."%node)
def add_filter(self, event=None, filter_lambda=None):
if filter_lambda is None:
filter_lambda = self.filter_entry.get()
if self.canvas.add_filter(filter_lambda):
# We successfully added the filter; add to list and clear entry
self.filter_list.insert(tk.END, filter_lambda)
self.filter_entry.delete(0, tk.END)
    def filter_help(self, event=None):
        msg = ("Enter a lambda function which returns True if you wish\n"
               "to show nodes with ONLY a given property.\n"
               "Parameters are:\n"
               " - u, the node's name, and \n"
               " - d, the data dictionary.\n\n"
               "Example: \n"
               "    d.get('color',None)=='red'\n"
               "would show only red nodes.\n"
               "Example 2:\n"
               "    str(u).isdigit()\n"
"would show only nodes which have a numerical name.\n\n"
"Multiple filters are ANDed together.")
tkm.showinfo("Filter Condition", msg)
def remove_filter(self, event=None):
all_items = self.filter_list.get(0, tk.END)
if event is None:
# When no event passed, this function was called via the "clear"
# button.
items = all_items
else:
# Remove currently selected item
items = (self.filter_list.get(tk.ANCHOR),)
for item in items:
self.canvas.remove_filter(item)
idx = all_items.index(item)
self.filter_list.delete(idx)
all_items = self.filter_list.get(0, tk.END)
def grow_all(self):
"""Grow all visible nodes one level"""
for u, d in self.canvas.dispG.copy().nodes.items():
if not d['token'].is_complete:
self.canvas.grow_node(u)
def get_node_list(self):
"""Get nodes in the node list and clear"""
# See if we forgot to hit the plus sign
if len(self.node_entry.get()) != 0:
self.add_node()
nodes = self.node_list.get(0, tk.END)
self.node_list.delete(0, tk.END)
return nodes
def onBuildNew(self):
nodes = self.get_node_list()
if len(nodes) == 2:
self.canvas.plot_path(nodes[0], nodes[1], levels=self.level)
else:
self.canvas.plot(nodes, levels=self.level)
def onAddToExisting(self):
"""Add nodes to existing plot. Prompt to include link to existing
if possible"""
home_nodes = set(self.get_node_list())
self.canvas.plot_additional(home_nodes, levels=self.level)
def buildNewShortcut(self, event=None):
# Add node intelligently then doe a build new
self.node_entry.event_generate('<Return>') # Resolve current
self.onBuildNew()
def goto_path(self, event):
frm = self.node_entry.get()
to = self.node_entry2.get()
self.node_entry.delete(0, tk.END)
self.node_entry2.delete(0, tk.END)
if frm == '':
tkm.showerror("No From Node", "Please enter a node in both "
"boxes to plot a path. Enter a node in only the first box "
"to bring up nodes immediately adjacent.")
return
if frm.isdigit() and int(frm) in self.canvas.dataG.nodes():
frm = int(frm)
if to.isdigit() and int(to) in self.canvas.dataG.nodes():
to = int(to)
self.canvas.plot_path(frm, to, levels=self.level)
def onNodeSelected(self, node_name, node_dict):
self.tbl_attr.build(node_dict)
self.lbl_attr.config(text="Attributes of node '%s'"%node_name)
def onEdgeSelected(self, edge_name, edge_dict):
self.tbl_attr.build(edge_dict)
self.lbl_attr.config(text="Attributes of edge between '%s' and '%s'"%
edge_name[:2])
@property
def level(self):
try:
l = int(self.level_entry.get())
except ValueError:
tkm.showerror("Invalid Level", "Please specify a level between "
"greater than or equal to 0")
raise
return l
class TkPassthroughViewerApp(ViewerApp):
def __init__(self, graph, **kwargs):
ViewerApp.__init__(self, graph,
NodeTokenClass=TkPassthroughNodeToken,
EdgeTokenClass=TkPassthroughEdgeToken, **kwargs)
class PropertyTable(tk.Frame):
"""A pure Tkinter scrollable frame that actually works!
* Use the 'interior' attribute to place widgets inside the scrollable frame
* Construct and pack/place/grid normally
* This frame only allows vertical scrolling
"""
def __init__(self, parent, property_dict, *args, **kw):
tk.Frame.__init__(self, parent, *args, **kw)
# create a canvas object and a vertical scrollbar for scrolling it
self.vscrollbar = vscrollbar = tk.Scrollbar(self, orient=tk.VERTICAL)
vscrollbar.pack(fill=tk.Y, side=tk.RIGHT, expand=tk.FALSE)
self.canvas = canvas = tk.Canvas(self, bd=0, highlightthickness=0,
yscrollcommand=vscrollbar.set)
canvas.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.TRUE)
vscrollbar.config(command=canvas.yview)
# reset the view
canvas.xview_moveto(0)
canvas.yview_moveto(0)
# create a frame inside the canvas which will be scrolled with it
self.interior = interior = tk.Frame(canvas)
self.interior_id = canvas.create_window(0, 0, window=interior,
anchor='nw')
self.interior.bind('<Configure>', self._configure_interior)
self.canvas.bind('<Configure>', self._configure_canvas)
self.build(property_dict)
def build(self, property_dict):
for c in self.interior.winfo_children():
c.destroy()
# Filter property dict
property_dict = {k: v for k, v in property_dict.items()
if self._key_filter_function(k)}
# Prettify key/value pairs for display
property_dict = {self._make_key_pretty(k): self._make_value_pretty(v)
for k, v in property_dict.items()}
# Sort by key and filter
dict_values = sorted(property_dict.items(), key=lambda x: x[0])
for n,(k,v) in enumerate(dict_values):
tk.Label(self.interior, text=k, borderwidth=1, relief=tk.SOLID,
wraplength=75, anchor=tk.E, justify=tk.RIGHT).grid(
row=n, column=0, sticky='nesw', padx=1, pady=1, ipadx=1)
tk.Label(self.interior, text=v, borderwidth=1,
wraplength=125, anchor=tk.W, justify=tk.LEFT).grid(
row=n, column=1, sticky='nesw', padx=1, pady=1, ipadx=1)
def _make_key_pretty(self, key):
"""Make key of property dictionary displayable
Used by build function to make key displayable on the table.
Args:
key (object)
Key of property dictionary from dataG
Returns:
label (str)
String representation of key. Might be made shorter or with
different name if desired.
"""
return str(key)
    def _make_value_pretty(self, value):
        """Make value of property dictionary displayable
        Used by build function to make the value displayable on the table.
        Args:
            value (object)
                Value of property dictionary from dataG
        Returns:
            label (str)
                String representation of value, truncated to 255 characters
                if necessary.
"""
label = str(value)
if len(label) > 255:
label = label[:253] + '...'
return label
def _key_filter_function(self, key):
"""Function to determine if key should be displayed.
        Called by build for each key in the property dict. Overwrite
with your implementation if you want to hide specific keys (all
starting "_" for example).
Args:
key (object)
Key of property dictionary from dataG
Returns:
display (bool)
True if the key-value pair associate with this key should
be displayed
"""
# Should be more specifically implemented when subclassed
return True # Show all keys
def _configure_interior(self, event):
"""
track changes to the canvas and frame width and sync them,
also updating the scrollbar
"""
# update the scrollbars to match the size of the inner frame
size = (self.interior.winfo_reqwidth(), self.interior.winfo_reqheight())
self.canvas.config(scrollregion="0 0 %s %s" % size)
if self.interior.winfo_reqwidth() != self.canvas.winfo_width():
# update the canvas's width to fit the inner frame
self.canvas.config(width=self.interior.winfo_reqwidth())
def _configure_canvas(self, event):
if self.interior.winfo_reqwidth() != self.canvas.winfo_width():
# update the inner frame's width to fill the canvas
self.canvas.itemconfigure(self.interior_id, width=self.canvas.winfo_width())
class NodeDialog(tk.Toplevel):
def __init__(self, main_window, msg='Please enter a node:'):
tk.Toplevel.__init__(self)
self.main_window = main_window
self.title('Node Entry')
self.geometry('170x160')
self.rowconfigure(3, weight=1)
tk.Label(self, text=msg).grid(row=0, column=0, columnspan=2,
sticky='NESW',padx=5,pady=5)
self.posibilities = [d['dataG_id'] for n,d in
main_window.canvas.dispG.nodes(data=True)]
self.entry = AutocompleteEntry(self.posibilities, self)
self.entry.bind('<Return>', lambda e: self.destroy(), add='+')
self.entry.grid(row=1, column=0, columnspan=2, sticky='NESW',padx=5,pady=5)
tk.Button(self, text='Ok', command=self.destroy).grid(
row=3, column=0, sticky='ESW',padx=5,pady=5)
tk.Button(self, text='Cancel', command=self.cancel).grid(
row=3, column=1, sticky='ESW',padx=5,pady=5)
# Make modal
self.winfo_toplevel().wait_window(self)
def destroy(self):
res = self.entry.get()
if res not in self.posibilities:
res = None
self.result = res
tk.Toplevel.destroy(self)
def cancel(self):
self.entry.delete(0,tk.END)
self.destroy()
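# Hedged usage sketch (not in the original module): both viewer apps are Tk
# root windows, so a minimal session looks like this; the graph is illustrative.
#
#   import networkx as nx
#   G = nx.path_graph(5)
#   app = TkPassthroughViewerApp(G)
#   app.mainloop()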
|
jsexauer/networkx_viewer
|
networkx_viewer/viewer.py
|
Python
|
gpl-3.0
| 17,151 | 0.005073 |
# Copyright (c) OpenMMLab. All rights reserved.
import itertools
import os
from collections import defaultdict
import mmcv
import numpy as np
from mmcv.utils import print_log
from terminaltables import AsciiTable
from mmdet.core import INSTANCE_OFFSET
from .api_wrappers import COCO, pq_compute_multi_core
from .builder import DATASETS
from .coco import CocoDataset
try:
import panopticapi
from panopticapi.evaluation import VOID
from panopticapi.utils import id2rgb
except ImportError:
panopticapi = None
id2rgb = None
VOID = None
__all__ = ['CocoPanopticDataset']
class COCOPanoptic(COCO):
"""This wrapper is for loading the panoptic style annotation file.
The format is shown in the CocoPanopticDataset class.
Args:
annotation_file (str): Path of annotation file.
"""
def __init__(self, annotation_file=None):
if panopticapi is None:
raise RuntimeError(
'panopticapi is not installed, please install it by: '
'pip install git+https://github.com/cocodataset/'
'panopticapi.git.')
super(COCOPanoptic, self).__init__(annotation_file)
def createIndex(self):
# create index
print('creating index...')
# anns stores 'segment_id -> annotation'
anns, cats, imgs = {}, {}, {}
img_to_anns, cat_to_imgs = defaultdict(list), defaultdict(list)
if 'annotations' in self.dataset:
for ann, img_info in zip(self.dataset['annotations'],
self.dataset['images']):
img_info['segm_file'] = ann['file_name']
for seg_ann in ann['segments_info']:
# to match with instance.json
seg_ann['image_id'] = ann['image_id']
seg_ann['height'] = img_info['height']
seg_ann['width'] = img_info['width']
img_to_anns[ann['image_id']].append(seg_ann)
# segment_id is not unique in coco dataset orz...
if seg_ann['id'] in anns.keys():
anns[seg_ann['id']].append(seg_ann)
else:
anns[seg_ann['id']] = [seg_ann]
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
for seg_ann in ann['segments_info']:
cat_to_imgs[seg_ann['category_id']].append(ann['image_id'])
print('index created!')
self.anns = anns
self.imgToAnns = img_to_anns
self.catToImgs = cat_to_imgs
self.imgs = imgs
self.cats = cats
def load_anns(self, ids=[]):
"""Load anns with the specified ids.
self.anns is a list of annotation lists instead of a
list of annotations.
Args:
ids (int array): integer ids specifying anns
Returns:
anns (object array): loaded ann objects
"""
anns = []
if hasattr(ids, '__iter__') and hasattr(ids, '__len__'):
# self.anns is a list of annotation lists instead of
# a list of annotations
for id in ids:
anns += self.anns[id]
return anns
elif type(ids) == int:
return self.anns[ids]
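# Hedged usage sketch (not part of the original file): COCOPanoptic behaves like
# the plain COCO wrapper, except load_anns() returns the flattened segments_info
# entries. The annotation path below is illustrative only.
#
#   coco = COCOPanoptic('annotations/panoptic_val2017.json')
#   img_ids = coco.get_img_ids()
#   segs = coco.load_anns(coco.get_ann_ids(img_ids=img_ids[:1]))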
@DATASETS.register_module()
class CocoPanopticDataset(CocoDataset):
"""Coco dataset for Panoptic segmentation.
The annotation format is shown as follows. The `ann` field is optional
for testing.
.. code-block:: none
[
{
'filename': f'{image_id:012}.png',
'image_id':9
'segments_info': {
[
{
'id': 8345037, (segment_id in panoptic png,
convert from rgb)
'category_id': 51,
'iscrowd': 0,
'bbox': (x1, y1, w, h),
'area': 24315,
'segmentation': list,(coded mask)
},
...
}
}
},
...
]
"""
CLASSES = [
'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
        'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',
'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'banner',
'blanket', 'bridge', 'cardboard', 'counter', 'curtain', 'door-stuff',
'floor-wood', 'flower', 'fruit', 'gravel', 'house', 'light',
'mirror-stuff', 'net', 'pillow', 'platform', 'playingfield',
'railroad', 'river', 'road', 'roof', 'sand', 'sea', 'shelf', 'snow',
'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone', 'wall-tile',
'wall-wood', 'water-other', 'window-blind', 'window-other',
'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged',
'cabinet-merged', 'table-merged', 'floor-other-merged',
'pavement-merged', 'mountain-merged', 'grass-merged', 'dirt-merged',
'paper-merged', 'food-other-merged', 'building-other-merged',
'rock-merged', 'wall-other-merged', 'rug-merged'
]
THING_CLASSES = [
'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',
'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
STUFF_CLASSES = [
'banner', 'blanket', 'bridge', 'cardboard', 'counter', 'curtain',
'door-stuff', 'floor-wood', 'flower', 'fruit', 'gravel', 'house',
'light', 'mirror-stuff', 'net', 'pillow', 'platform', 'playingfield',
'railroad', 'river', 'road', 'roof', 'sand', 'sea', 'shelf', 'snow',
'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone', 'wall-tile',
'wall-wood', 'water-other', 'window-blind', 'window-other',
'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged',
'cabinet-merged', 'table-merged', 'floor-other-merged',
'pavement-merged', 'mountain-merged', 'grass-merged', 'dirt-merged',
'paper-merged', 'food-other-merged', 'building-other-merged',
'rock-merged', 'wall-other-merged', 'rug-merged'
]
PALETTE = [(220, 20, 60), (119, 11, 32), (0, 0, 142), (0, 0, 230),
(106, 0, 228), (0, 60, 100), (0, 80, 100), (0, 0, 70),
(0, 0, 192), (250, 170, 30), (100, 170, 30), (220, 220, 0),
(175, 116, 175), (250, 0, 30), (165, 42, 42), (255, 77, 255),
(0, 226, 252), (182, 182, 255), (0, 82, 0), (120, 166, 157),
(110, 76, 0), (174, 57, 255), (199, 100, 0), (72, 0, 118),
(255, 179, 240), (0, 125, 92), (209, 0, 151), (188, 208, 182),
(0, 220, 176), (255, 99, 164), (92, 0, 73), (133, 129, 255),
(78, 180, 255), (0, 228, 0), (174, 255, 243), (45, 89, 255),
(134, 134, 103), (145, 148, 174), (255, 208, 186),
(197, 226, 255), (171, 134, 1), (109, 63, 54), (207, 138, 255),
(151, 0, 95), (9, 80, 61), (84, 105, 51), (74, 65, 105),
(166, 196, 102), (208, 195, 210), (255, 109, 65), (0, 143, 149),
(179, 0, 194), (209, 99, 106), (5, 121, 0), (227, 255, 205),
(147, 186, 208), (153, 69, 1), (3, 95, 161), (163, 255, 0),
(119, 0, 170), (0, 182, 199), (0, 165, 120), (183, 130, 88),
(95, 32, 0), (130, 114, 135), (110, 129, 133), (166, 74, 118),
(219, 142, 185), (79, 210, 114), (178, 90, 62), (65, 70, 15),
(127, 167, 115), (59, 105, 106), (142, 108, 45), (196, 172, 0),
(95, 54, 80), (128, 76, 255), (201, 57, 1), (246, 0, 122),
(191, 162, 208), (255, 255, 128), (147, 211, 203),
(150, 100, 100), (168, 171, 172), (146, 112, 198),
(210, 170, 100), (92, 136, 89), (218, 88, 184), (241, 129, 0),
(217, 17, 255), (124, 74, 181), (70, 70, 70), (255, 228, 255),
(154, 208, 0), (193, 0, 92), (76, 91, 113), (255, 180, 195),
(106, 154, 176),
(230, 150, 140), (60, 143, 255), (128, 64, 128), (92, 82, 55),
(254, 212, 124), (73, 77, 174), (255, 160, 98), (255, 255, 255),
(104, 84, 109), (169, 164, 131), (225, 199, 255), (137, 54, 74),
(135, 158, 223), (7, 246, 231), (107, 255, 200), (58, 41, 149),
(183, 121, 142), (255, 73, 97), (107, 142, 35), (190, 153, 153),
(146, 139, 141),
(70, 130, 180), (134, 199, 156), (209, 226, 140), (96, 36, 108),
(96, 96, 96), (64, 170, 64), (152, 251, 152), (208, 229, 228),
(206, 186, 171), (152, 161, 64), (116, 112, 0), (0, 114, 143),
(102, 102, 156), (250, 141, 255)]
def load_annotations(self, ann_file):
"""Load annotation from COCO Panoptic style annotation file.
Args:
ann_file (str): Path of annotation file.
Returns:
list[dict]: Annotation info from COCO api.
"""
self.coco = COCOPanoptic(ann_file)
self.cat_ids = self.coco.get_cat_ids()
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.categories = self.coco.cats
self.img_ids = self.coco.get_img_ids()
data_infos = []
for i in self.img_ids:
info = self.coco.load_imgs([i])[0]
info['filename'] = info['file_name']
info['segm_file'] = info['filename'].replace('jpg', 'png')
data_infos.append(info)
return data_infos
def get_ann_info(self, idx):
"""Get COCO annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
# filter out unmatched images
ann_info = [i for i in ann_info if i['image_id'] == img_id]
return self._parse_ann_info(self.data_infos[idx], ann_info)
def _parse_ann_info(self, img_info, ann_info):
"""Parse annotations and load panoptic ground truths.
Args:
            img_info (dict): Image info of an image.
ann_info (list[dict]): Annotation info of an image.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,
labels, masks, seg_map.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_mask_infos = []
for i, ann in enumerate(ann_info):
x1, y1, w, h = ann['bbox']
if ann['area'] <= 0 or w < 1 or h < 1:
continue
bbox = [x1, y1, x1 + w, y1 + h]
category_id = ann['category_id']
contiguous_cat_id = self.cat2label[category_id]
is_thing = self.coco.load_cats(ids=category_id)[0]['isthing']
if is_thing:
is_crowd = ann.get('iscrowd', False)
if not is_crowd:
gt_bboxes.append(bbox)
gt_labels.append(contiguous_cat_id)
else:
gt_bboxes_ignore.append(bbox)
is_thing = False
mask_info = {
'id': ann['id'],
'category': contiguous_cat_id,
'is_thing': is_thing
}
gt_mask_infos.append(mask_info)
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
bboxes_ignore=gt_bboxes_ignore,
masks=gt_mask_infos,
seg_map=img_info['segm_file'])
return ann
def _filter_imgs(self, min_size=32):
"""Filter images too small or without ground truths."""
ids_with_ann = []
# check whether images have legal thing annotations.
for lists in self.coco.anns.values():
for item in lists:
category_id = item['category_id']
is_thing = self.coco.load_cats(ids=category_id)[0]['isthing']
if not is_thing:
continue
ids_with_ann.append(item['image_id'])
ids_with_ann = set(ids_with_ann)
valid_inds = []
valid_img_ids = []
for i, img_info in enumerate(self.data_infos):
img_id = self.img_ids[i]
if self.filter_empty_gt and img_id not in ids_with_ann:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
valid_img_ids.append(img_id)
self.img_ids = valid_img_ids
return valid_inds
def _pan2json(self, results, outfile_prefix):
"""Convert panoptic results to COCO panoptic json style."""
label2cat = dict((v, k) for (k, v) in self.cat2label.items())
pred_annotations = []
outdir = os.path.join(os.path.dirname(outfile_prefix), 'panoptic')
for idx in range(len(self)):
img_id = self.img_ids[idx]
segm_file = self.data_infos[idx]['segm_file']
pan = results[idx]
pan_labels = np.unique(pan)
segm_info = []
for pan_label in pan_labels:
sem_label = pan_label % INSTANCE_OFFSET
# We reserve the length of self.CLASSES for VOID label
if sem_label == len(self.CLASSES):
continue
# convert sem_label to json label
cat_id = label2cat[sem_label]
is_thing = self.categories[cat_id]['isthing']
mask = pan == pan_label
area = mask.sum()
segm_info.append({
'id': int(pan_label),
'category_id': cat_id,
'isthing': is_thing,
'area': int(area)
})
# evaluation script uses 0 for VOID label.
pan[pan % INSTANCE_OFFSET == len(self.CLASSES)] = VOID
pan = id2rgb(pan).astype(np.uint8)
mmcv.imwrite(pan[:, :, ::-1], os.path.join(outdir, segm_file))
record = {
'image_id': img_id,
'segments_info': segm_info,
'file_name': segm_file
}
pred_annotations.append(record)
pan_json_results = dict(annotations=pred_annotations)
return pan_json_results
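    # --- Illustrative sketch, not part of the original mmdetection file ---
    # The panoptic ids handled above are assumed to pack the semantic label and
    # the instance index together as ``pan_id = sem_label + inst_id * INSTANCE_OFFSET``,
    # which is why ``pan % INSTANCE_OFFSET`` recovers the semantic label. A tiny
    # self-contained round-trip check; the offset value 1000 and the helper name
    # are assumptions used only for this illustration:
    @staticmethod
    def _demo_panoptic_id_roundtrip(sem_label=17, inst_id=3, offset=1000):
        """Hypothetical helper: encode and decode a panoptic id."""
        pan_id = sem_label + inst_id * offset
        assert pan_id % offset == sem_label
        assert pan_id // offset == inst_id
        return pan_id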
def results2json(self, results, outfile_prefix):
"""Dump the panoptic results to a COCO panoptic style json file.
Args:
results (dict): Testing results of the dataset.
outfile_prefix (str): The filename prefix of the json files. If the
prefix is "somepath/xxx", the json files will be named
"somepath/xxx.panoptic.json"
Returns:
dict[str: str]: The key is 'panoptic' and the value is
corresponding filename.
"""
result_files = dict()
pan_results = [result['pan_results'] for result in results]
pan_json_results = self._pan2json(pan_results, outfile_prefix)
result_files['panoptic'] = f'{outfile_prefix}.panoptic.json'
mmcv.dump(pan_json_results, result_files['panoptic'])
return result_files
def evaluate_pan_json(self,
result_files,
outfile_prefix,
logger=None,
classwise=False):
"""Evaluate PQ according to the panoptic results json file."""
imgs = self.coco.imgs
gt_json = self.coco.img_ann_map # image to annotations
gt_json = [{
'image_id': k,
'segments_info': v,
'file_name': imgs[k]['segm_file']
} for k, v in gt_json.items()]
pred_json = mmcv.load(result_files['panoptic'])
pred_json = dict(
(el['image_id'], el) for el in pred_json['annotations'])
# match the gt_anns and pred_anns in the same image
matched_annotations_list = []
for gt_ann in gt_json:
img_id = gt_ann['image_id']
if img_id not in pred_json.keys():
raise Exception('no prediction for the image'
' with id: {}'.format(img_id))
matched_annotations_list.append((gt_ann, pred_json[img_id]))
gt_folder = self.seg_prefix
pred_folder = os.path.join(os.path.dirname(outfile_prefix), 'panoptic')
pq_stat = pq_compute_multi_core(matched_annotations_list, gt_folder,
pred_folder, self.categories,
self.file_client)
metrics = [('All', None), ('Things', True), ('Stuff', False)]
pq_results = {}
for name, isthing in metrics:
pq_results[name], classwise_results = pq_stat.pq_average(
self.categories, isthing=isthing)
if name == 'All':
pq_results['classwise'] = classwise_results
classwise_results = None
if classwise:
classwise_results = {
k: v
for k, v in zip(self.CLASSES, pq_results['classwise'].values())
}
print_panoptic_table(pq_results, classwise_results, logger=logger)
return parse_pq_results(pq_results)
def evaluate(self,
results,
metric='PQ',
logger=None,
jsonfile_prefix=None,
classwise=False,
**kwargs):
"""Evaluation in COCO Panoptic protocol.
Args:
results (list[dict]): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. Only
                support 'PQ' at present. 'pq' will be regarded as 'PQ'.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
classwise (bool): Whether to print classwise evaluation results.
Default: False.
Returns:
dict[str, float]: COCO Panoptic style evaluation metric.
"""
metrics = metric if isinstance(metric, list) else [metric]
# Compatible with lowercase 'pq'
metrics = ['PQ' if metric == 'pq' else metric for metric in metrics]
allowed_metrics = ['PQ'] # todo: support other metrics like 'bbox'
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
eval_results = {}
outfile_prefix = os.path.join(tmp_dir.name, 'results') \
if tmp_dir is not None else jsonfile_prefix
if 'PQ' in metrics:
eval_pan_results = self.evaluate_pan_json(result_files,
outfile_prefix, logger,
classwise)
eval_results.update(eval_pan_results)
if tmp_dir is not None:
tmp_dir.cleanup()
return eval_results
def parse_pq_results(pq_results):
"""Parse the Panoptic Quality results."""
result = dict()
result['PQ'] = 100 * pq_results['All']['pq']
result['SQ'] = 100 * pq_results['All']['sq']
result['RQ'] = 100 * pq_results['All']['rq']
result['PQ_th'] = 100 * pq_results['Things']['pq']
result['SQ_th'] = 100 * pq_results['Things']['sq']
result['RQ_th'] = 100 * pq_results['Things']['rq']
result['PQ_st'] = 100 * pq_results['Stuff']['pq']
result['SQ_st'] = 100 * pq_results['Stuff']['sq']
result['RQ_st'] = 100 * pq_results['Stuff']['rq']
return result
def print_panoptic_table(pq_results, classwise_results=None, logger=None):
"""Print the panoptic evaluation results table.
Args:
pq_results(dict): The Panoptic Quality results.
classwise_results(dict | None): The classwise Panoptic Quality results.
The keys are class names and the values are metrics.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
"""
headers = ['', 'PQ', 'SQ', 'RQ', 'categories']
data = [headers]
for name in ['All', 'Things', 'Stuff']:
numbers = [
f'{(pq_results[name][k] * 100):0.3f}' for k in ['pq', 'sq', 'rq']
]
row = [name] + numbers + [pq_results[name]['n']]
data.append(row)
table = AsciiTable(data)
print_log('Panoptic Evaluation Results:\n' + table.table, logger=logger)
if classwise_results is not None:
class_metrics = [(name, ) + tuple(f'{(metrics[k] * 100):0.3f}'
for k in ['pq', 'sq', 'rq'])
for name, metrics in classwise_results.items()]
num_columns = min(8, len(class_metrics) * 4)
results_flatten = list(itertools.chain(*class_metrics))
headers = ['category', 'PQ', 'SQ', 'RQ'] * (num_columns // 4)
results_2d = itertools.zip_longest(
*[results_flatten[i::num_columns] for i in range(num_columns)])
data = [headers]
data += [result for result in results_2d]
table = AsciiTable(data)
print_log(
'Classwise Panoptic Evaluation Results:\n' + table.table,
logger=logger)
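# --- Illustrative usage sketch, not part of the original mmdetection file ---
# A hand-built ``pq_results`` dict showing the structure the two helpers above
# expect; the numbers are made up purely for demonstration, and the demo relies
# on the module's own imports (e.g. AsciiTable, print_log) being available.
if __name__ == '__main__':
    _toy_pq_results = {
        'All': {'pq': 0.40, 'sq': 0.78, 'rq': 0.50, 'n': 133},
        'Things': {'pq': 0.48, 'sq': 0.80, 'rq': 0.58, 'n': 80},
        'Stuff': {'pq': 0.28, 'sq': 0.75, 'rq': 0.36, 'n': 53},
    }
    # parse_pq_results() rescales everything to percentages:
    # {'PQ': 40.0, 'SQ': 78.0, 'RQ': 50.0, 'PQ_th': 48.0, ...}
    print(parse_pq_results(_toy_pq_results))
    # print_panoptic_table() renders the same numbers as an ASCII table.
    print_panoptic_table(_toy_pq_results)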
|
open-mmlab/mmdetection
|
mmdet/datasets/coco_panoptic.py
|
Python
|
apache-2.0
| 24,271 | 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# =================================================================
# =================================================================
# NOTE: notify messages MUST follow these rules:
#
# - Messages must be wrapped with _() for translation
#
# - Replacement variables must be wrapped with brackets
#
# - Replacement variables must be from the following list:
# {instance_id}
# {instance_name}
# {host_name}
# {source_host_name}
# {target_host_name}
# {error}
from paxes_nova import _
PAUSE_SUCCESS = (_("Pause of virtual machine {instance_name} on host "
"{host_name} was successful."))
PAUSE_ERROR = (_("Pause of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
SUSPEND_SUCCESS = (_("Suspend of virtual machine {instance_name} on host "
"{host_name} was successful."))
SUSPEND_ERROR = (_("Suspend of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
RESUME_SUCCESS = (_("Resume of virtual machine {instance_name} on host "
"{host_name} was successful."))
RESUME_ERROR = (_("Resume of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
DEPLOY_SUCCESS = (_("Deploy of virtual machine {instance_name} on host "
"{host_name} was successful."))
DEPLOY_ERROR = (_("Deploy of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
START_SUCCESS = (_("Start of virtual machine {instance_name} on host "
"{host_name} was successful."))
START_ERROR = (_("Start of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
STOP_SUCCESS = (_("Stop of virtual machine {instance_name} on host "
"{host_name} was successful."))
STOP_ERROR = (_("Stop of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
RESTART_SUCCESS = (_("Restart of virtual machine {instance_name} on host "
"{host_name} was successful."))
RESTART_ERROR = (_("Restart of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
LPM_SUCCESS = (_("Migration of virtual machine {instance_name} from host "
"{source_host_name} to host {target_host_name} was "
"successful."))
LPM_ERROR = (_("Migration of virtual machine {instance_name} to host "
"{target_host_name} failed with exception: {error}"))
LPM_ERROR_DEST = (_("Migration of virtual machine {instance_name} to host "
"{host_name} failed with exception: {error}"))
DELETE_ERROR = (_("Delete of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
DELETE_SUCCESS = (_("Delete of virtual machine {instance_name} on host "
"{host_name} was successful. "))
RESIZE_ERROR = (_("Resize of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
RESIZE_SUCCESS = (_("Resize of virtual machine {instance_name} on host "
"{host_name} was successful."))
CAPTURE_SUCCESS = (_("Capture of virtual machine {instance_name} on host "
"{host_name} was successful"))
CAPTURE_ERROR = (_("Capture of virtual machine {instance_name} on host "
"{host_name} failed with exception: {error}"))
ATTACH_SUCCESS = (_("Volume {volume_id} was successfully attached to "
"virtual machine {instance_name}."))
ATTACH_ERROR = (_("Volume {volume_id} could not be attached to "
"virtual machine {instance_name}. Error message: {error}"))
DETACH_SUCCESS = (_("Volume {volume_id} was successfully detached from "
"virtual machine {instance_name}."))
DETACH_ERROR = (_("Volume {volume_id} could not be detached from "
"virtual machine {instance_name}. Error message: {error}"))
|
windskyer/k_nova
|
paxes_nova/compute/notify_messages.py
|
Python
|
apache-2.0
| 4,325 | 0 |
"""
WSGI config for crowd_server project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "crowd_server.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
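# --- Illustrative sketch, not part of the original file ---
# ``application`` is a standard WSGI callable, so any WSGI server can host it.
# A minimal local smoke test using only the standard library (assumes the
# Django settings referenced above are importable):
if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    with make_server('127.0.0.1', 8000, application) as httpd:
        print('Serving crowd_server on http://127.0.0.1:8000 ...')
        httpd.serve_forever()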
|
codeaudit/ampcrowd
|
ampcrowd/crowd_server/wsgi.py
|
Python
|
apache-2.0
| 399 | 0.002506 |
# -*- coding: utf-8 -*-
# author: Alfred
import os
import re
DB_MODULE_PATTERN = re.compile(r'db2charts_models\.(?P<module>.*)_models')
class DB2ChartsRouter(object):
def db_for_module(self, module):
match = DB_MODULE_PATTERN.match(module)
if match:
return match.groupdict()['module']
return None
def db_for_read(self, model, **hints):
return self.db_for_module(model.__module__)
def db_for_write(self, model, **hints):
return self.db_for_module(model.__module__)
def allow_migrate(self, db, app_label, model=None, **hints):
return False
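# --- Illustrative sketch, not part of the original file ---
# The router maps a model module of the form ``db2charts_models.<alias>_models``
# to the database alias ``<alias>``; everything else returns None so Django
# falls back to its default routing. (In a project the router would typically
# be listed in settings.DATABASE_ROUTERS.) The alias 'reports' below is a
# hypothetical example:
if __name__ == '__main__':
    router = DB2ChartsRouter()
    print(router.db_for_module('db2charts_models.reports_models'))  # -> 'reports'
    print(router.db_for_module('myapp.models'))                     # -> None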
|
Alfredx/django-db2charts
|
db2charts/router.py
|
Python
|
mit
| 619 | 0.003231 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Chris Caron <lead2gold@gmail.com>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# To use this service you will need a D7 Networks account from their website
# at https://d7networks.com/
#
# After you've established your account you can get your api login credentials
# (both user and password) from the API Details section from within your
# account profile area: https://d7networks.com/accounts/profile/
import re
import six
import requests
import base64
from json import dumps
from json import loads
from .NotifyBase import NotifyBase
from ..URLBase import PrivacyMode
from ..common import NotifyType
from ..utils import parse_list
from ..utils import parse_bool
from ..AppriseLocale import gettext_lazy as _
# Extend HTTP Error Messages
D7NETWORKS_HTTP_ERROR_MAP = {
401: 'Invalid Argument(s) Specified.',
403: 'Unauthorized - Authentication Failure.',
    412: 'A Routing Error Occurred',
    500: 'A Serverside Error Occurred Handling the Request.',
}
# Some Phone Number Detection
IS_PHONE_NO = re.compile(r'^\+?(?P<phone>[0-9\s)(+-]+)\s*$')
# Priorities
class D7SMSPriority(object):
"""
D7 Networks SMS Message Priority
"""
LOW = 0
MODERATE = 1
NORMAL = 2
HIGH = 3
D7NETWORK_SMS_PRIORITIES = (
D7SMSPriority.LOW,
D7SMSPriority.MODERATE,
D7SMSPriority.NORMAL,
D7SMSPriority.HIGH,
)
class NotifyD7Networks(NotifyBase):
"""
A wrapper for D7 Networks Notifications
"""
# The default descriptive name associated with the Notification
service_name = 'D7 Networks'
# The services URL
service_url = 'https://d7networks.com/'
# All notification requests are secure
secure_protocol = 'd7sms'
# Allow 300 requests per minute.
# 60/300 = 0.2
request_rate_per_sec = 0.20
# A URL that takes you to the setup/help of the specific protocol
setup_url = 'https://github.com/caronc/apprise/wiki/Notify_d7networks'
# D7 Networks batch notification URL
notify_batch_url = 'http://rest-api.d7networks.com/secure/sendbatch'
# D7 Networks single notification URL
notify_url = 'http://rest-api.d7networks.com/secure/send'
# The maximum length of the body
body_maxlen = 160
# A title can not be used for SMS Messages. Setting this to zero will
# cause any title (if defined) to get placed into the message body.
title_maxlen = 0
# Define object templates
templates = (
'{schema}://{user}:{password}@{targets}',
)
# Define our template tokens
template_tokens = dict(NotifyBase.template_tokens, **{
'user': {
'name': _('Username'),
'type': 'string',
'required': True,
},
'password': {
'name': _('Password'),
'type': 'string',
'private': True,
'required': True,
},
'target_phone': {
'name': _('Target Phone No'),
'type': 'string',
'prefix': '+',
'regex': (r'^[0-9\s)(+-]+$', 'i'),
'map_to': 'targets',
},
'targets': {
'name': _('Targets'),
'type': 'list:string',
},
})
# Define our template arguments
template_args = dict(NotifyBase.template_args, **{
'priority': {
'name': _('Priority'),
'type': 'choice:int',
'min': D7SMSPriority.LOW,
'max': D7SMSPriority.HIGH,
'values': D7NETWORK_SMS_PRIORITIES,
# The website identifies that the default priority is low; so
# this plugin will honor that same default
'default': D7SMSPriority.LOW,
},
'batch': {
'name': _('Batch Mode'),
'type': 'bool',
'default': False,
},
'to': {
'alias_of': 'targets',
},
'source': {
            # Originating address, in cases where the rewriting of the sender's
# address is supported or permitted by the SMS-C. This is used to
# transmit the message, this number is transmitted as the
# originating address and is completely optional.
'name': _('Originating Address'),
'type': 'string',
'map_to': 'source',
},
'from': {
'alias_of': 'source',
},
})
def __init__(self, targets=None, priority=None, source=None, batch=False,
**kwargs):
"""
Initialize D7 Networks Object
"""
super(NotifyD7Networks, self).__init__(**kwargs)
# The Priority of the message
if priority not in D7NETWORK_SMS_PRIORITIES:
self.priority = self.template_args['priority']['default']
else:
self.priority = priority
# Prepare Batch Mode Flag
self.batch = batch
# Setup our source address (if defined)
self.source = None \
if not isinstance(source, six.string_types) else source.strip()
# Parse our targets
self.targets = list()
for target in parse_list(targets):
# Validate targets and drop bad ones:
result = IS_PHONE_NO.match(target)
if result:
                # Further check our phone # for its digit count;
                # if it falls outside the 11-14 digit range, assume it's
                # a poorly specified phone no and emit a warning
result = ''.join(re.findall(r'\d+', result.group('phone')))
if len(result) < 11 or len(result) > 14:
self.logger.warning(
'Dropped invalid phone # '
'({}) specified.'.format(target),
)
continue
# store valid phone number
self.targets.append(result)
continue
self.logger.warning(
'Dropped invalid phone # ({}) specified.'.format(target))
if len(self.targets) == 0:
msg = 'There are no valid targets identified to notify.'
self.logger.warning(msg)
raise TypeError(msg)
return
def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
"""
Depending on whether we are set to batch mode or single mode this
redirects to the appropriate handling
"""
# error tracking (used for function return)
has_error = False
auth = '{user}:{password}'.format(
user=self.user, password=self.password)
if six.PY3:
            # Python 3's version of b64encode() expects a byte array and not
            # a string. To accommodate this, we encode the content here
auth = auth.encode('utf-8')
# Prepare our headers
headers = {
'User-Agent': self.app_id,
'Accept': 'application/json',
'Authorization': 'Basic {}'.format(base64.b64encode(auth))
}
# Our URL varies depending if we're doing a batch mode or not
url = self.notify_batch_url if self.batch else self.notify_url
# use the list directly
targets = list(self.targets)
while len(targets):
if self.batch:
# Prepare our payload
payload = {
'globals': {
'priority': self.priority,
'from': self.source if self.source else self.app_id,
},
'messages': [{
'to': self.targets,
'content': body,
}],
}
# Reset our targets so we don't keep going. This is required
# because we're in batch mode; we only need to loop once.
targets = []
else:
# We're not in a batch mode; so get our next target
# Get our target(s) to notify
target = targets.pop(0)
# Prepare our payload
payload = {
'priority': self.priority,
'content': body,
'to': target,
'from': self.source if self.source else self.app_id,
}
# Some Debug Logging
self.logger.debug(
'D7 Networks POST URL: {} (cert_verify={})'.format(
url, self.verify_certificate))
self.logger.debug('D7 Networks Payload: {}' .format(payload))
# Always call throttle before any remote server i/o is made
self.throttle()
try:
r = requests.post(
url,
data=dumps(payload),
headers=headers,
verify=self.verify_certificate,
)
if r.status_code not in (
requests.codes.created, requests.codes.ok):
# We had a problem
status_str = \
NotifyBase.http_response_code_lookup(
r.status_code, D7NETWORKS_HTTP_ERROR_MAP)
try:
# Update our status response if we can
json_response = loads(r.content)
status_str = json_response.get('message', status_str)
except (AttributeError, TypeError, ValueError):
# ValueError = r.content is Unparsable
# TypeError = r.content is None
# AttributeError = r is None
# We could not parse JSON response.
# We will just use the status we already have.
pass
self.logger.warning(
'Failed to send D7 Networks SMS notification to {}: '
'{}{}error={}.'.format(
                            ', '.join(self.targets) if self.batch else target,
status_str,
', ' if status_str else '',
r.status_code))
self.logger.debug(
'Response Details:\r\n{}'.format(r.content))
# Mark our failure
has_error = True
continue
else:
if self.batch:
count = len(self.targets)
try:
# Get our message delivery count if we can
json_response = loads(r.content)
count = int(json_response.get(
'data', {}).get('messageCount', -1))
except (AttributeError, TypeError, ValueError):
# ValueError = r.content is Unparsable
# TypeError = r.content is None
# AttributeError = r is None
# We could not parse JSON response. Assume that
# our delivery is okay for now.
pass
if count != len(self.targets):
has_error = True
self.logger.info(
'Sent D7 Networks batch SMS notification to '
'{} of {} target(s).'.format(
count, len(self.targets)))
else:
self.logger.info(
'Sent D7 Networks SMS notification to {}.'.format(
target))
self.logger.debug(
'Response Details:\r\n{}'.format(r.content))
except requests.RequestException as e:
self.logger.warning(
                    'A Connection error occurred sending D7 Networks:%s ' % (
', '.join(self.targets)) + 'notification.'
)
self.logger.debug('Socket Exception: %s' % str(e))
# Mark our failure
has_error = True
continue
return not has_error
def url(self, privacy=False, *args, **kwargs):
"""
Returns the URL built dynamically based on specified arguments.
"""
# Define any arguments set
args = {
'format': self.notify_format,
'overflow': self.overflow_mode,
'verify': 'yes' if self.verify_certificate else 'no',
'batch': 'yes' if self.batch else 'no',
}
if self.priority != self.template_args['priority']['default']:
args['priority'] = str(self.priority)
if self.source:
args['from'] = self.source
return '{schema}://{user}:{password}@{targets}/?{args}'.format(
schema=self.secure_protocol,
user=NotifyD7Networks.quote(self.user, safe=''),
password=self.pprint(
self.password, privacy, mode=PrivacyMode.Secret, safe=''),
targets='/'.join(
[NotifyD7Networks.quote(x, safe='') for x in self.targets]),
args=NotifyD7Networks.urlencode(args))
@staticmethod
def parse_url(url):
"""
Parses the URL and returns enough arguments that can allow
us to substantiate this object.
"""
results = NotifyBase.parse_url(url, verify_host=False)
if not results:
# We're done early as we couldn't load the results
return results
# Initialize our targets
results['targets'] = list()
        # Store our first target, which is stored in the hostname
results['targets'].append(NotifyD7Networks.unquote(results['host']))
# Get our entries; split_path() looks after unquoting content for us
# by default
results['targets'].extend(
NotifyD7Networks.split_path(results['fullpath']))
# Set our priority
if 'priority' in results['qsd'] and len(results['qsd']['priority']):
_map = {
'l': D7SMSPriority.LOW,
'0': D7SMSPriority.LOW,
'm': D7SMSPriority.MODERATE,
'1': D7SMSPriority.MODERATE,
'n': D7SMSPriority.NORMAL,
'2': D7SMSPriority.NORMAL,
'h': D7SMSPriority.HIGH,
'3': D7SMSPriority.HIGH,
}
try:
results['priority'] = \
_map[results['qsd']['priority'][0].lower()]
except KeyError:
# No priority was set
pass
# Support the 'from' and 'source' variable so that we can support
# targets this way too.
# The 'from' makes it easier to use yaml configuration
if 'from' in results['qsd'] and len(results['qsd']['from']):
results['source'] = \
NotifyD7Networks.unquote(results['qsd']['from'])
if 'source' in results['qsd'] and len(results['qsd']['source']):
results['source'] = \
NotifyD7Networks.unquote(results['qsd']['source'])
# Get Batch Mode Flag
results['batch'] = \
parse_bool(results['qsd'].get('batch', False))
# Support the 'to' variable so that we can support targets this way too
# The 'to' makes it easier to use yaml configuration
if 'to' in results['qsd'] and len(results['qsd']['to']):
results['targets'] += \
NotifyD7Networks.parse_list(results['qsd']['to'])
return results
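# --- Illustrative usage sketch, not part of the original plugin ---
# Within Apprise this plugin is normally driven through a d7sms:// URL rather
# than instantiated directly; the credentials and phone number below are
# placeholders only:
if __name__ == '__main__':
    import apprise
    apobj = apprise.Apprise()
    # user/password come from the D7 Networks API details page; the target is
    # a phone number with 11-14 digits, as validated in __init__() above.
    apobj.add('d7sms://user:password@15551234567?priority=high&batch=no')
    apobj.notify(body='Test message sent through D7 Networks')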
|
SickGear/SickGear
|
lib/apprise/plugins/NotifyD7Networks.py
|
Python
|
gpl-3.0
| 16,906 | 0 |
import platform
import socket
import sys
import os
from mule_local.JobGeneration import *
from mule.JobPlatformResources import *
from . import JobPlatformAutodetect
def _whoami(depth=1):
"""
String of function name to recycle code
https://www.oreilly.com/library/view/python-cookbook/0596001673/ch14s08.html
Returns
-------
string
Return function name
"""
return sys._getframe(depth).f_code.co_name
def p_gen_script_info(jg : JobGeneration):
return """#
# Generating function: """+_whoami(2)+"""
# Platform: """+get_platform_id()+"""
# Job id: """+jg.getUniqueID()+"""
#
"""
def get_platform_autodetect():
"""
Returns
-------
bool
True if current platform matches, otherwise False
"""
return JobPlatformAutodetect.autodetect()
def get_platform_id():
"""
Return platform ID
Returns
-------
string
unique ID of platform
"""
return "cheyenne_intel"
def get_platform_resources():
"""
Return information about hardware
"""
r = JobPlatformResources()
r.num_cores_per_node = 36
# Physical number of nodes, maybe the limit is different
r.num_nodes = 4032
r.num_cores_per_socket = 18
# 12h limit
r.max_wallclock_seconds = 60*60*12
return r
def jobscript_setup(jg : JobGeneration):
"""
Setup data to generate job script
"""
return
def jobscript_get_header(jg : JobGeneration):
"""
    These headers typically contain information on, e.g., job execution, number of compute nodes, etc.
Returns
-------
string
multiline text for scripts
"""
job_id = jg.getUniqueID()
p = jg.parallelization
time_str = p.get_max_wallclock_seconds_hh_mm_ss()
# Available queues:
# premium (only use this in extreme cases)
# regular
# economy
queue = 'economy'
    # Use the premium queue for 32 or more nodes and the regular queue for
    # 16 or more nodes; otherwise, the job doesn't seem to be scheduled
if p.num_nodes >= 32:
queue = 'premium'
elif p.num_nodes >= 16:
queue = 'regular'
#
# See https://www.lrz.de/services/compute/linux-cluster/batch_parallel/example_jobs/
#
content = """#! /bin/bash
#
## project code
#PBS -A NCIS0002
#PBS -q """+queue+"""
## wall-clock time (hrs:mins:secs)
#PBS -l walltime="""+time_str+"""
## select: number of nodes
## ncpus: number of CPUs per node
## mpiprocs: number of ranks per node
#PBS -l select="""+str(p.num_nodes)+""":ncpus="""+str(p.num_cores_per_node)+""":mpiprocs="""+str(p.num_ranks_per_node)+""":ompthreads="""+str(p.num_threads_per_rank)+"\n"
#"default": 2301000
#"turbo": 2301000
#"rated": 2300000
#"slow": 1200000
if p.force_turbo_off:
content += "#PBS -l select=cpufreq=2300000\n"
content += """#
#PBS -N """+job_id[0:100]+"""
#PBS -o """+jg.p_job_stdout_filepath+"""
#PBS -e """+jg.p_job_stderr_filepath+"""
#source /etc/profile.d/modules.sh
#module load openmpi
"""+("module load mkl" if jg.compile.mkl==True or jg.compile.mkl=='enable' else "")+"""
"""+p_gen_script_info(jg)+"""
echo
echo "hostname"
hostname
echo
echo
echo "lscpu -e"
lscpu -e
echo
echo
echo "CPU Frequencies (uniquely reduced):"
cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_cur_freq | sort -u
echo
"""
if jg.compile.threading != 'off':
content += """
export OMP_NUM_THREADS="""+str(p.num_threads_per_rank)+"""
"""
# if jg.compile.sweet_mpi != 'enable':
if True:
#
# https://software.intel.com/en-us/node/522691
if p.core_oversubscription:
if p.core_affinity != None:
if p.core_affinity == 'compact':
content += "export KMP_AFFINITY=granularity=fine,compact\n"
elif p.core_affinity == 'scatter':
content += "export KMP_AFFINITY=granularity=fine,scatter\n"
else:
Exception("Affinity '"+str(p.core_affinity)+"' not supported")
else:
#raise Exception("Please specify core_affinity!")
content += "# No core affinity selected\n"
else:
if p.core_affinity != None:
content += "\necho \"Affnity: "+str(p.core_affinity)+"\"\n"
if p.core_affinity == 'compact':
content += "export KMP_AFFINITY=granularity=fine,compact,1,0\n"
elif p.core_affinity == 'scatter':
content += "export KMP_AFFINITY=granularity=fine,scatter\n"
else:
raise Exception("Affinity '"+str(p.core_affinity)+"' not supported")
else:
#raise Exception("Please specify core_affinity!")
content += "# No core affinity selected\n"
if p.core_affinity != None:
content += "export KMP_AFFINITY=\"verbose,$KMP_AFFINITY\"\n"
return content
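# --- Illustrative sketch, not part of the original platform file ---
# The key line produced by jobscript_get_header() is the PBS resource
# selection; a hypothetical helper showing how the pieces are assembled
# (example values: 4 nodes, 36 cores, 36 MPI ranks, 1 OpenMP thread per rank):
def _demo_pbs_select_line(num_nodes=4, ncpus=36, mpiprocs=36, ompthreads=1):
    """Return an example '#PBS -l select=...' line like the one built above."""
    return "#PBS -l select="+str(num_nodes)+":ncpus="+str(ncpus) + \
           ":mpiprocs="+str(mpiprocs)+":ompthreads="+str(ompthreads)
# _demo_pbs_select_line() -> '#PBS -l select=4:ncpus=36:mpiprocs=36:ompthreads=1'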
def jobscript_get_exec_prefix(jg : JobGeneration):
"""
Prefix before executable
Returns
-------
string
multiline text for scripts
"""
content = ""
content += jg.runtime.get_jobscript_plan_exec_prefix(jg.compile, jg.runtime)
content += """
EXEC=\""""+jg.compile.getProgramPath()+"""\"
PARAMS=\""""+jg.runtime.getRuntimeOptions()+"""\"
"""
return content
def jobscript_get_exec_command(jg : JobGeneration):
"""
Prefix to executable command
Returns
-------
string
multiline text for scripts
"""
p = jg.parallelization
mpiexec = ""
#
# Only use MPI exec if we are allowed to do so
# We shouldn't use mpiexec for validation scripts
#
if not p.mpiexec_disabled:
# Use mpiexec_mpt for Intel MPI
#mpiexec = "mpiexec_mpt -n "+str(p.num_ranks)
# Use mpiexec for GNU
if jg.compile.sweet_mpi == 'enable':
mpiexec = "mpiexec_mpt -n "+str(p.num_ranks)
mpiexec += " omplace "
mpiexec += " -nt "+str(p.num_threads_per_rank)+" "
mpiexec += " -tm intel"
mpiexec += " -vv"
if mpiexec[-1] != ' ':
mpiexec += ' '
#
# Fix the mess on Cheyenne!
#
# We prefix the current LD_LIBRARY_PATH with the one from the shell where the job was submitted
# This is required since Cheyenne scripts mess around with the existing path in a way
# which results in e.g. the system-wide installed fftw to be loaded.
#
# What we basically accomplish here is to suggest to really first
# lookup the MULE local_software/local/lib directory, then the system libraries
#
sweet_ld_library_path = os.getenv('MULE_LD_LIBRARY_PATH')
if sweet_ld_library_path == None:
raise Exception("Environment variable MULE_LD_LIBRARY_PATH not found!")
content = """
# Make sure that MULE library path is really known
export LD_LIBRARY_PATH=\""""+sweet_ld_library_path+""":$LD_LIBRARY_PATH\"
echo
echo "LD_LIBRARY_PATH"
echo "${LD_LIBRARY_PATH}"
echo
echo
echo "ldd"
ldd $EXEC
echo
E=\""""+mpiexec+"""${EXEC} ${PARAMS}\"
echo
echo "Executing..."
echo "$E"
$E || exit 1
"""
return content
def jobscript_get_exec_suffix(jg : JobGeneration):
"""
Suffix before executable
Returns
-------
string
multiline text for scripts
"""
content = """
echo
echo "CPU Frequencies (uniquely reduced):"
cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_cur_freq | sort -u
echo
"""
content += jg.runtime.get_jobscript_plan_exec_suffix(jg.compile, jg.runtime)
return content
def jobscript_get_footer(jg : JobGeneration):
"""
Footer at very end of job script
Returns
-------
string
multiline text for scripts
"""
content = ""
return content
def jobscript_get_compile_command(jg : JobGeneration):
"""
Compile command(s)
This is separated here to put it either
* into the job script (handy for workstations)
or
* into a separate compile file (handy for clusters)
Returns
-------
string
multiline text with compile command to generate executable
"""
content = """
SCONS="scons """+jg.compile.getSConsParams()+' -j 4"'+"""
echo "$SCONS"
$SCONS || exit 1
"""
return content
|
schreiberx/sweet
|
mule/platforms/50_cheyenne_intel/JobPlatform.py
|
Python
|
mit
| 8,312 | 0.006136 |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^(?P<lang>[a-z]{2})?$', views.index, name='index'),
url(r'^sign/$', views.sign, name='sign'),
url(r'^confirm/([0-9a-z]{64})/$', views.confirm, name='confirm'),
]
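# --- Illustrative sketch, not part of the original urls.py ---
# Example paths matched by the patterns above, checked here with plain ``re``
# (outside Django's resolver, purely for illustration):
if __name__ == '__main__':
    import re
    assert re.match(r'^(?P<lang>[a-z]{2})?$', '')      # index, no language prefix
    assert re.match(r'^(?P<lang>[a-z]{2})?$', 'en')    # index with a language code
    assert re.match(r'^confirm/([0-9a-z]{64})/$', 'confirm/' + 'a' * 64 + '/')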
|
sandervenema/netzpolitik
|
petitions/urls.py
|
Python
|
gpl-2.0
| 251 | 0 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [],
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'NontemporalClassification',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
u'field1': {
'fieldname': u'field1',
'n': 121,
'name': u'field1',
'type': 'SDRCategoryEncoder',
'w': 21},
u'classification': {
'classifierOnly': True,
'fieldname': u'classification',
'n': 121,
'name': u'classification',
'type': 'SDRCategoryEncoder',
'w': 21},
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
            # Valid keys are any desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': False,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActivePerInhArea': 40,
'seed': 1956,
# coincInputPoolPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose coincInputPoolPct * (2*coincInputRadius+1)^2
'coincInputPoolPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
'randomSP': 0,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : False,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nta/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '0',
},
'anomalyParams': {
u'anomalyCacheRecords': None,
u'autoDetectThreshold': None,
u'autoDetectWaitRecords': None
},
'trainSPNetOnlyIfRequested': False,
},
'dataSource': 'fillInBySubExperiment',
'errorMetric': 'fillInBySubExperiment'
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
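# --- Illustrative sketch, not part of the original description template ---
# Conceptually, aggregationDivide() answers "how many aggregation periods fit
# into predictAheadTime". A rough stand-in using plain seconds (a hypothetical
# helper, for illustration only):
def _approx_prediction_steps(predict_ahead_seconds, aggregation_seconds):
  """E.g. predicting 1 hour ahead with 15-minute aggregation -> 4 steps."""
  return int(round(float(predict_ahead_seconds) / aggregation_seconds))
# _approx_prediction_steps(3600, 900) == 4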
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'grok',
# Input stream specification per py/nupic/frameworks/opf/jsonschema/stream_def.json.
#
'dataset' : { u'info': u'testSpatialClassification',
u'streams': [ { u'columns': [u'*'],
u'info': u'spatialClassification',
u'source': config['dataSource']}],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# A dictionary containing all the supplementary parameters for inference
"inferenceArgs":{u'predictedField': u'classification', u'predictionSteps': [0]},
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field='classification', metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'errorMetric': config['errorMetric'],
'window': 100,
'steps': 0}),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*'],
}
################################################################################
################################################################################
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
|
tkaitchuck/nupic
|
examples/opf/experiments/spatial_classification/base/description.py
|
Python
|
gpl-3.0
| 14,847 | 0.002694 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for VariableMgr."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as pycoll
import operator
import numpy as np
import tensorflow.compat.v1 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
PS_SHADOW_VAR_PREFIX = 'ps_var'
AutoLossScaleParams = pycoll.namedtuple(
'AutoLossScaleParams',
[
# If true, enable automatic loss scaling.
'enable_auto_loss_scale',
# The value to scale the loss before computing gradients.
'loss_scale',
# Number of normal steps with the current `loss_scale`.
'loss_scale_normal_steps',
# Increase loss scale every n steps.
'inc_loss_scale_every_n',
# If true, the current worker is chief. The current implementation
# relies on the chief to update loss_scale value, but in future, we
# might change this to ask the parameter server to update loss_scales
# for better performance.
# TODO(tanmingxing): remove this if loss_scale is updated in ps.
'is_chief',
])
def get_loss_scale_update_op(loss_scale, loss_scale_normal_steps,
inc_loss_scale_every_n):
"""Returns the update op for loss scaling variables.
We maintain the counter `loss_scale_normal_steps` to count the number of steps
we have been using the current `loss_scale`. In most cases, this function
increments `loss_scale_normal_steps`. However, if `loss_scale_normal_steps` is
greater than the threshold `inc_loss_scale_every_n`, we double `loss_scale`
and reset `loss_scale_normal_steps` to zero.
This op is only called if the gradients don't have any infs or nans. Instead,
  if infs or nans occur in the gradients, we immediately halve `loss_scale` and
reset `loss_scale_normal_steps` to zero.
Args:
    loss_scale: a tf.Variable representing the loss_scale value.
loss_scale_normal_steps: a tf.Variable representing the number of training
steps that have run since the loss_scale last changed.
inc_loss_scale_every_n: a Python integer threshold. `loss_scale` is
increased every `inc_loss_scale_every_n` steps, unless the gradients have
infs or nans.
Returns:
An op for updating `loss_scale` and `loss_scale_normal_steps`.
"""
def increment_loss_scale_normal_steps_func():
return tf.group(loss_scale_normal_steps.assign_add(1))
def increase_loss_scale_func():
return tf.group(
tf.assign(loss_scale_normal_steps, 0),
tf.assign(loss_scale, loss_scale * 2))
# true_fn and false_fn must have the same type.
return tf.cond(loss_scale_normal_steps < inc_loss_scale_every_n,
increment_loss_scale_normal_steps_func,
increase_loss_scale_func)
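# --- Illustrative sketch, not part of the original file ---
# The same automatic loss-scaling policy expressed with plain Python numbers
# (no TF ops), to make the control flow above easier to follow. The helper name
# and example values are hypothetical:
def _loss_scale_policy_demo(loss_scale, normal_steps, inc_every_n, grad_has_inf_nan):
  """Return the updated (loss_scale, normal_steps) pair."""
  if grad_has_inf_nan:
    # Bad gradients: halve the scale and restart the counter.
    return loss_scale / 2., 0
  if normal_steps < inc_every_n:
    # Healthy step: just count it.
    return loss_scale, normal_steps + 1
  # Enough healthy steps in a row: double the scale and restart the counter.
  return loss_scale * 2., 0
# e.g. _loss_scale_policy_demo(1024., 2000, 2000, False) == (2048., 0)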
def append_gradients_with_loss_scale(training_ops, get_apply_gradients_ops_func,
loss_scale_params, grad_has_inf_nan):
"""Selectively appends gradients update ops with loss scaling.
Args:
training_ops: a list of training ops to be executed.
get_apply_gradients_ops_func: a function that returns a list of ops for
applying gradients. Here, we must pass a function instead of the actual
list of ops; otherwise, those ops would be executed unconditionally due to
the semantics of tf.cond.
loss_scale_params: An AutoLossScaleParams tuple.
grad_has_inf_nan: Boolean tensor indicating whether the gradients have infs
or nans.
"""
is_chief = loss_scale_params.is_chief
loss_scale = loss_scale_params.loss_scale
loss_scale_normal_steps = loss_scale_params.loss_scale_normal_steps
inc_loss_scale_every_n = loss_scale_params.inc_loss_scale_every_n
enable_auto_loss_scale = loss_scale_params.enable_auto_loss_scale
if loss_scale is None or not enable_auto_loss_scale or not is_chief:
training_ops.extend(get_apply_gradients_ops_func())
else:
# If nans/infs occurred, skip applying gradients and instead update
# loss_scale (halve loss_scale and reset loss_scale_normal_steps to zero).
def update_op_if_nan_or_inf():
"""Update loss_scale and discard gradients if nans/infs occurred."""
return tf.group(
tf.assign(loss_scale, loss_scale / 2.),
tf.assign(loss_scale_normal_steps, 0))
# Otherwise, apply gradients, and update loss_scale and
# loss_scale_normal_steps.
def update_op_if_no_nan_or_inf():
"""Apply gradients, and update loss scaling."""
return tf.group(
get_loss_scale_update_op(loss_scale, loss_scale_normal_steps,
inc_loss_scale_every_n),
*get_apply_gradients_ops_func())
# TODO(tanmingxing): Add support for independent and distributed all_reduce.
assert grad_has_inf_nan is not None
update_op = tf.cond(
grad_has_inf_nan,
update_op_if_nan_or_inf,
update_op_if_no_nan_or_inf,
name='cond_if_grad_has_inf_nan'
)
training_ops.append(update_op)
# To be used with custom_getter on tf.get_variable.
class OverrideCachingDevice(object):
"""Variable getter which caches variables on the least loaded device.
Variables smaller than a certain threshold are cached on a single specific
device, as specified in the constructor. All other variables are load balanced
across a pool of devices, by caching each variable on the least loaded device.
Note that variable creation only happen when building the model graph on the
first device (see how it sets the 'reuse' parameter in
VariableMgr.*.create_outer_variable_scope()). That means, for all other
devices, the variable scope will reuse the variables created before, which
requires that we set the caching_device correctly as otherwise it may not be
able to find the previously created variable and will create a new one. This
requires when building the model graph on different devices, variables with
the same name should have same size.
TODO(laigd): consider adding tests or verification logic to enforce this, or
refactor it.
"""
def __init__(self, devices, device_for_small_variables,
small_variable_size_threshold):
self.devices = devices
self.sizes = [0] * len(self.devices)
self.device_for_small_variables = device_for_small_variables
self.small_variable_size_threshold = small_variable_size_threshold
def __call__(self, getter, *args, **kwargs):
size = tf.TensorShape(kwargs['shape']).num_elements()
if size < self.small_variable_size_threshold:
device_name = self.device_for_small_variables
else:
device_index, _ = min(enumerate(self.sizes), key=operator.itemgetter(1))
device_name = self.devices[device_index]
self.sizes[device_index] += size
kwargs['caching_device'] = device_name
var = getter(*args, **kwargs)
return var
# To be used with custom_getter on tf.get_variable. Ensures the created variable
# is in LOCAL_VARIABLES and not GLOBAL_VARIABLES collection.
class OverrideToLocalVariableIfNotPsVar(object):
# args and kwargs come from the custom_getter interface for Tensorflow
# variables, and matches tf.get_variable's signature, with the addition of
# 'getter' at the beginning.
def __call__(self, getter, name, *args, **kwargs):
if name.startswith(PS_SHADOW_VAR_PREFIX):
return getter(*args, **kwargs)
if 'collections' in kwargs:
collections = kwargs['collections']
if not collections:
collections = [tf.GraphKeys.GLOBAL_VARIABLES]
else:
collections = collections[:]
collections.remove(tf.GraphKeys.GLOBAL_VARIABLES)
collections.append(tf.GraphKeys.LOCAL_VARIABLES)
kwargs['collections'] = list(collections)
return getter(name, *args, **kwargs)
class ParamServerDeviceSetter(object):
"""Helper class to assign variables on the least loaded ps-device."""
def __init__(self, worker_device, ps_devices):
"""Initializer for ParamServerDevicSetter.
Args:
worker_device: the device to use for computer ops.
ps_devices: a list of device to use for Variable ops. Each variable is
assigned to the least loaded device.
"""
self.ps_devices = ps_devices
self.worker_device = worker_device
self.ps_sizes = [0] * len(self.ps_devices)
def __call__(self, op):
if op.device:
return op.device
if op.type not in ['Variable', 'VariableV2']:
return self.worker_device
device_index, _ = min(enumerate(self.ps_sizes), key=operator.itemgetter(1))
device_name = self.ps_devices[device_index]
var_size = op.outputs[0].get_shape().num_elements()
self.ps_sizes[device_index] += var_size
return device_name
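# --- Illustrative sketch, not part of the original file ---
# Both OverrideCachingDevice and ParamServerDeviceSetter use the same greedy
# "least loaded device" rule: track the number of elements already placed on
# each device and put the next variable on the currently smallest pile. In
# plain Python (the helper name and sizes are hypothetical):
def _least_loaded_assignment_demo(var_sizes, num_devices):
  """Return a device index per variable size, assigned greedily by load."""
  sizes = [0] * num_devices
  assignment = []
  for var_size in var_sizes:
    device_index, _ = min(enumerate(sizes), key=operator.itemgetter(1))
    sizes[device_index] += var_size
    assignment.append(device_index)
  return assignment
# _least_loaded_assignment_demo([100, 80, 60, 40], 2) == [0, 1, 1, 0]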
class StagedModelVariable(object):
"""Staging variable wrapper that decouples reads and updates.
This class represents a variable through a staging buffer. Reads from this
variable directly gets from the staging buffer. Updates are stacked into
another staging buffer, and will be processed later.
"""
def __init__(self, real_var, var_stage_get, variable_mgr):
"""Initializer for the model variables through a staging buffer.
Args:
real_var: the underlying real variable.
var_stage_get: the read op from the staging buffer.
variable_mgr: the parent variable-manager.
"""
self.real_var = real_var
self.var_stage_get = var_stage_get
self.variable_mgr = variable_mgr
def _value(self):
"""The read access of this variable. The content from the staging buffer."""
return self.var_stage_get
def _ref(self):
"""Return the underlying variable ref, required by tf.colocate_with."""
return self.real_var._ref() # pylint: disable=protected-access
def read_value(self):
"""Mimics tf.Variable.read_value()."""
return tf.identity(self.var_stage_get, name='read')
@property
def dtype(self):
"""Return the non-reference dtype."""
return self.var_stage_get.dtype
def assign_sub(self, delta, name=None, read_value=True):
"""Mimic the updates to the variable.
Args:
delta: is pushed into a staging buffer and will be pumped later.
name: currently ignored; names of ops and the StagingArea are
            computed without using this passed name.
read_value: if True, will return something which evaluates to the new
value of the variable; if False will return the assign op.
Returns:
The actual updates. The colocation constraint will be reapplied.
"""
# This parameter is ignored: the StagingArea only supports setting
# the shared name, not the names of individual ops it uses.
del name
# colocate_with(None, True) clears the colocation constraints.
# Push the delta into a staging buffer.
with ops.colocate_with(None, True), tf.device(self.var_stage_get.device):
delta_staging_area = data_flow_ops.StagingArea(
[self.var_stage_get.dtype], shapes=[self.var_stage_get.shape])
delta_put_op = delta_staging_area.put([delta])
self.variable_mgr.staging_delta_ops.append(delta_put_op)
delta_get_op = delta_staging_area.get()[0]
# Return the actual updates. The colocation constraint will be reapplied.
return self.real_var.assign_sub(delta_get_op, read_value=read_value)
@staticmethod
# pylint: disable=bad-staticmethod-argument,invalid-name
def _TensorConversionFunction(self, dtype=None, name=None, as_ref=False):
"""Utility function for converting a StagedModelVariable to a Tensor."""
del dtype, name # unused: this function returns the cached ref or value.
if as_ref:
return self._ref()
else:
return self._value()
ops.register_tensor_conversion_function(
StagedModelVariable, StagedModelVariable._TensorConversionFunction) # pylint: disable=protected-access
class StagedVariableGetter(object):
"""A variable getter through staging buffers on devices.
Instead of a caching device, this getter tracks where the variable is used.
And on each device, it goes through a staging buffer.
"""
def __init__(self, device_num, devices, cpu_device, variable_mgr):
"""Initializer for StagedVariableGetter.
Args:
device_num: the current device index.
devices: a list of all the devices to build towers.
cpu_device: a cpu_device for this replica. If None, no cpu-caching is
done.
variable_mgr: the parent variable manager.
"""
self.device_num = device_num
self.devices = devices
self.cpu_device = cpu_device
self.variable_mgr = variable_mgr
def __call__(self, getter, name, *args, **kwargs):
staging_ops = self.variable_mgr.staging_vars_on_devices[self.device_num]
if name in staging_ops:
put_op, get_op = staging_ops[name]
return get_op
real_var = getter(name, *args, **kwargs)
shape = kwargs['shape']
dtype = kwargs['dtype']
trainable = kwargs['trainable']
if self.cpu_device:
with tf.device(self.cpu_device):
        # This helps copying the weights from the parameter server to this
        # server only once.
if name in self.variable_mgr.staged_vars_on_cpu:
cpu_var = self.variable_mgr.staged_vars_on_cpu[name]
else:
cpu_var = tf.identity(real_var)
self.variable_mgr.staged_vars_on_cpu[name] = cpu_var
var_to_stage = cpu_var
else:
var_to_stage = tf.identity(real_var) # de-reference the variable.
with tf.device(self.devices[self.device_num]):
staging_area = data_flow_ops.StagingArea([dtype], shapes=[shape])
put_op = staging_area.put([var_to_stage])
get_op = staging_area.get()[0]
staging_ops[name] = (put_op, get_op)
if trainable:
      # For trainable variables, they are managed separately through
# apply_gradients.
return get_op
else:
# For other shadow variables, the access is decoupled through a wrapper
# class.
return StagedModelVariable(real_var, get_op, self.variable_mgr)
def trainable_variables_on_device(self, rel_device_num, abs_device_num,
writable):
"""Return the set of trainable variables on the specified device.
Args:
rel_device_num: local worker device index.
abs_device_num: global graph device index.
      writable: whether the returned variables are writable or read-only.
Returns:
Return the set of trainable variables on the specified device.
"""
del abs_device_num
params_refs = tf.trainable_variables()
if writable:
return params_refs
params = []
for param in params_refs:
var_name = param.name.split(':')[0]
_, var_get_op = self.variable_mgr.staging_vars_on_devices[rel_device_num][
var_name]
params.append(var_get_op)
return params
def aggregate_gradients_using_copy_with_device_selection(
benchmark_cnn, tower_grads, use_mean, check_inf_nan):
"""Aggregate gradients, controlling device for the aggregation.
Args:
benchmark_cnn: benchmark_cnn class.
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over towers. The inner list is over individual gradients.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: If true, check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
    the first tower. The has_nan_or_inf indicates whether the grads have nan or inf.
"""
if benchmark_cnn.local_parameter_device_flag == 'gpu':
avail_devices = benchmark_cnn.raw_devices
else:
avail_devices = [benchmark_cnn.param_server_device]
agg_grads = []
has_nan_or_inf_list = []
for i, single_grads in enumerate(zip(*tower_grads)):
with tf.device(avail_devices[i % len(avail_devices)]):
grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
single_grads, use_mean, check_inf_nan)
agg_grads.append(grad_and_var)
has_nan_or_inf_list.append(has_nan_or_inf)
if check_inf_nan:
return agg_grads, tf.reduce_any(has_nan_or_inf_list)
else:
return agg_grads, None
def aggregate_gradients_using_copy_with_variable_colocation(
tower_grads, use_mean, check_inf_nan):
"""Aggregate gradients, colocating computation with the gradient's variable.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over towers. The inner list is over individual gradients. All variables
of the same gradient across towers must be the same (that is,
tower_grads[x][a][1] == tower_grads[y][a][1] for all indices x, y, and a)
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: If true, check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
    the first tower. The has_nan_or_inf indicates whether the grads have nan or inf.
"""
agg_grads = []
has_nan_or_inf_list = []
for single_grads in zip(*tower_grads):
# Note that each single_grads looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
var = single_grads[0][1]
for _, v in single_grads:
assert v == var
with tf.device(var.device):
grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
single_grads, use_mean, check_inf_nan)
agg_grads.append(grad_and_var)
has_nan_or_inf_list.append(has_nan_or_inf)
if check_inf_nan:
return agg_grads, tf.reduce_any(has_nan_or_inf_list)
else:
return agg_grads, None
def aggregate_gradients_using_copy(tower_grads, use_mean, check_inf_nan):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over towers. The inner list is over individual gradients.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
    the first tower. The has_nan_or_inf indicates whether the grads have nan or inf.
"""
agg_grads = []
has_nan_or_inf_list = []
for single_grads in zip(*tower_grads):
grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
single_grads, use_mean, check_inf_nan)
agg_grads.append(grad_and_var)
has_nan_or_inf_list.append(has_nan_or_inf)
if check_inf_nan:
return agg_grads, tf.reduce_any(has_nan_or_inf_list)
else:
return agg_grads, None
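# Illustrative note (added for clarity; not part of the original file) on the
# `tower_grads` structure the aggregation helpers above expect. With two towers
# and two variables the input nests as
#
#   tower_grads = [[(grad0_gpu0, var0), (grad1_gpu0, var1)],   # tower 0
#                  [(grad0_gpu1, var0), (grad1_gpu1, var1)]]   # tower 1
#
# and zip(*tower_grads) regroups it per variable, so each `single_grads` above
# is ((grad0_gpu0, var0), (grad0_gpu1, var0)), and so on.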
# The following two functions are copied from
# tensorflow/python/eager/backprop.py. We do not directly use them as they are
# not exported and subject to change at any time.
def flatten_nested_indexed_slices(grad):
assert isinstance(grad, ops.IndexedSlices)
if isinstance(grad.values, ops.Tensor):
return grad
else:
assert isinstance(grad.values, ops.IndexedSlices)
g = flatten_nested_indexed_slices(grad.values)
return ops.IndexedSlices(g.values, array_ops.gather(grad.indices,
g.indices),
g.dense_shape)
def aggregate_indexed_slices_gradients(grads):
"""Aggregates gradients containing `IndexedSlices`s."""
if len(grads) < 1:
return None
elif len(grads) == 1:
return grads[0]
else:
grads = [g for g in grads if g is not None]
# If any gradient is a `Tensor`, sum them up and return a dense tensor
# object.
if any(isinstance(g, ops.Tensor) for g in grads):
return math_ops.add_n(grads)
# The following `_as_indexed_slices_list` casts ids of IndexedSlices into
    # int64. It is to make sure the inputs of `concat` all have the same data
    # type.
grads = math_ops._as_indexed_slices_list(grads) # pylint: disable=protected-access
grads = [flatten_nested_indexed_slices(x) for x in grads]
# Form IndexedSlices out of the concatenated values and indices.
concat_grad = ops.IndexedSlices(
array_ops.concat([x.values for x in grads], axis=0),
array_ops.concat([x.indices for x in grads], axis=0),
grads[0].dense_shape)
return concat_grad
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
check_inf_nan):
"""Calculate the average gradient for a shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single tower, and the number of pairs
equals the number of towers.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
    the first tower. The has_nan_or_inf indicates whether the grads have nan or inf.
"""
grads = [g for g, _ in grad_and_vars]
if any(isinstance(g, tf.IndexedSlices) for g in grads):
# TODO(reedwm): All-reduce IndexedSlices more effectively.
grad = aggregate_indexed_slices_gradients(grads)
else:
grad = tf.add_n(grads)
if use_mean and len(grads) > 1:
grad = tf.scalar_mul(1.0 / len(grads), grad)
v = grad_and_vars[0][1]
if check_inf_nan:
with tf.name_scope('check_for_inf_and_nan'):
has_nan_or_inf = tf.logical_not(tf.reduce_all(tf.is_finite(grads)))
return (grad, v), has_nan_or_inf
else:
return (grad, v), None
# This class is copied from
# https://github.com/tensorflow/tensorflow/blob/590d6eef7e91a6a7392c8ffffb7b58f2e0c8bc6b/tensorflow/contrib/training/python/training/device_setter.py#L56.
# We copy it since contrib has been removed from TensorFlow.
class GreedyLoadBalancingStrategy(object):
"""Returns the least-loaded ps task for op placement.
The load is calculated by a user-specified load function passed in at
construction. There are no units for load, and the load function is
responsible for providing an internally consistent measure.
Note that this strategy is very sensitive to the exact order in which
ps ops (typically variables) are created, as it greedily places ops
on the least-loaded ps at the point each op is processed.
One reasonable heuristic is the `byte_size_load_fn`, which
estimates load as the number of bytes that would be used to store and
transmit the entire variable. More advanced load functions
could consider the difference in access patterns across ops, or trade
off CPU-intensive ops with RAM-intensive ops with network bandwidth.
This class is intended to be used as a `ps_strategy` in
`tf.compat.v1.train.replica_device_setter`.
"""
def __init__(self, num_tasks, load_fn):
"""Create a new `LoadBalancingStrategy`.
Args:
num_tasks: Number of ps tasks to cycle among.
load_fn: A callable that takes an `Operation` and returns a
numeric load value for that op.
"""
self._num_tasks = num_tasks
self._load_fn = load_fn
self._ps_loads = np.zeros(num_tasks)
def __call__(self, op):
"""Choose a ps task index for the given `Operation`.
Args:
      op: An `Operation` to be placed on ps.
Returns:
The next ps task index to use for the `Operation`. Greedily
places the op on the least-loaded ps task so far, as determined
by the load function.
"""
task = np.argmin(self._ps_loads)
self._ps_loads[task] += self._load_fn(op)
return task
# This function is copied from
# https://github.com/tensorflow/tensorflow/blob/590d6eef7e91a6a7392c8ffffb7b58f2e0c8bc6b/tensorflow/contrib/training/python/training/device_setter.py#L105.
# We copy it since contrib has been removed from TensorFlow.
def byte_size_load_fn(op):
"""Load function that computes the byte size of a single-output `Operation`.
This is intended to be used with `"Variable"` ops, which have a single
`Tensor` output with the contents of the variable. However, it can also be
used for calculating the size of any op that has a single output.
Intended to be used with `GreedyLoadBalancingStrategy`.
Args:
op: An `Operation` with a single output, typically a "Variable" op.
Returns:
The number of bytes in the output `Tensor`.
Raises:
ValueError: if `op` does not have a single output, or if the shape of the
single output is not fully-defined.
"""
if len(op.outputs) != 1:
raise ValueError('Op %s must have a single output' % op)
output = op.outputs[0]
elem_size = output.dtype.size
shape = output.get_shape()
if not shape.is_fully_defined():
# Due to legacy behavior, scalar "Variable" ops have output Tensors that
# have unknown shape when the op is created (and hence passed to this
# load function for placement), even though the scalar shape is set
# explicitly immediately afterward.
shape = tensor_shape.TensorShape(op.get_attr('shape'))
shape.assert_is_fully_defined()
return shape.num_elements() * elem_size
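# Illustrative usage sketch (added for clarity; not part of the original file),
# combining the two pieces above as described in their docstrings. The cluster
# layout (2 ps tasks, one worker) and the variable shape are assumptions.
def _example_greedy_ps_placement():
  strategy = GreedyLoadBalancingStrategy(num_tasks=2, load_fn=byte_size_load_fn)
  setter = tf.train.replica_device_setter(
      ps_tasks=2, worker_device='/job:worker/task:0', ps_strategy=strategy)
  with tf.device(setter):
    # Each new variable is placed on whichever ps task currently holds the
    # fewest bytes, as estimated by byte_size_load_fn.
    w = tf.get_variable('w', shape=[1024, 1024])
  return w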
|
tensorflow/benchmarks
|
scripts/tf_cnn_benchmarks/variable_mgr_util.py
|
Python
|
apache-2.0
| 26,469 | 0.005743 |
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_context import context
CONF = cfg.CONF
class RequestContext(context.RequestContext):
"""User security context object
Stores information about the security context under which the user
accesses the system, as well as additional request information.
"""
def __init__(self, project=None, **kwargs):
if project:
kwargs['tenant'] = project
self.project = project
super(RequestContext, self).__init__(**kwargs)
def to_dict(self):
out_dict = super(RequestContext, self).to_dict()
out_dict['roles'] = self.roles
if out_dict.get('tenant'):
out_dict['project'] = out_dict['tenant']
out_dict.pop('tenant')
return out_dict
@classmethod
def from_dict(cls, values):
return cls(**values)
def get_context():
"""A helper method to get a blank context (useful for tests)."""
return RequestContext(user_id=None,
project_id=None,
roles=[],
is_admin=False,
overwrite=False)
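# Illustrative usage sketch (added for clarity; not part of the original
# module). Constructor keywords depend on the installed oslo.context version,
# and 'demo'/'admin' are made-up values.
#
#   ctx = RequestContext(project='demo', roles=['admin'])
#   data = ctx.to_dict()  # the oslo 'tenant' key is exposed as 'project'
#   ctx2 = RequestContext.from_dict({'project': 'demo', 'roles': ['admin']})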
|
att-comdev/deckhand
|
deckhand/context.py
|
Python
|
apache-2.0
| 1,765 | 0 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Docente'
db.create_table('cadastro_docente', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('matricula', self.gf('django.db.models.fields.CharField')(max_length=7, unique=True)),
('nome', self.gf('django.db.models.fields.CharField')(max_length=100, unique=True)),
))
db.send_create_signal('cadastro', ['Docente'])
# Adding model 'Disciplina'
db.create_table('cadastro_disciplina', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('codigo', self.gf('django.db.models.fields.CharField')(max_length=7)),
('nivel', self.gf('django.db.models.fields.CharField')(max_length=11)),
('multicampia', self.gf('django.db.models.fields.BooleanField')(default=False)),
('tipo', self.gf('django.db.models.fields.CharField')(max_length=11)),
('cargahoraria', self.gf('django.db.models.fields.IntegerField')(max_length=3)),
('estudantes', self.gf('django.db.models.fields.IntegerField')(max_length=3)),
))
db.send_create_signal('cadastro', ['Disciplina'])
# Adding model 'Pesquisa'
db.create_table('cadastro_pesquisa', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('area', self.gf('django.db.models.fields.CharField')(max_length=20)),
('financiador', self.gf('django.db.models.fields.CharField')(max_length=20)),
('estudantes_graduacao', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('estudantes_pos', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('bolsistas_pibic', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('bolsistas_ppq', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('voluntarios', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('parceria', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('parceria_inter', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
))
db.send_create_signal('cadastro', ['Pesquisa'])
# Adding model 'Extensao'
db.create_table('cadastro_extensao', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('area', self.gf('django.db.models.fields.CharField')(max_length=20)),
('financiador', self.gf('django.db.models.fields.CharField')(max_length=20)),
('estudantes_graduacao', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('estudantes_pos', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('bolsistas_pibex', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('bolsistas_ppq', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('voluntarios', self.gf('django.db.models.fields.IntegerField')(max_length=2, blank=True)),
('parceria', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('parceria_inter', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
))
db.send_create_signal('cadastro', ['Extensao'])
# Adding model 'Atividade'
db.create_table('cadastro_atividade', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('docente', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cadastro.Docente'])),
('afastamento', self.gf('django.db.models.fields.BooleanField')(default=True)),
('cargo', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('comissoes', self.gf('django.db.models.fields.IntegerField')()),
('semestre', self.gf('django.db.models.fields.CharField')(max_length=6)),
))
db.send_create_signal('cadastro', ['Atividade'])
# Adding M2M table for field disciplinas on 'Atividade'
m2m_table_name = db.shorten_name('cadastro_atividade_disciplinas')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('atividade', models.ForeignKey(orm['cadastro.atividade'], null=False)),
('disciplina', models.ForeignKey(orm['cadastro.disciplina'], null=False))
))
db.create_unique(m2m_table_name, ['atividade_id', 'disciplina_id'])
# Adding M2M table for field pesquisa on 'Atividade'
m2m_table_name = db.shorten_name('cadastro_atividade_pesquisa')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('atividade', models.ForeignKey(orm['cadastro.atividade'], null=False)),
('pesquisa', models.ForeignKey(orm['cadastro.pesquisa'], null=False))
))
db.create_unique(m2m_table_name, ['atividade_id', 'pesquisa_id'])
# Adding M2M table for field extensao on 'Atividade'
m2m_table_name = db.shorten_name('cadastro_atividade_extensao')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('atividade', models.ForeignKey(orm['cadastro.atividade'], null=False)),
('extensao', models.ForeignKey(orm['cadastro.extensao'], null=False))
))
db.create_unique(m2m_table_name, ['atividade_id', 'extensao_id'])
def backwards(self, orm):
# Deleting model 'Docente'
db.delete_table('cadastro_docente')
# Deleting model 'Disciplina'
db.delete_table('cadastro_disciplina')
# Deleting model 'Pesquisa'
db.delete_table('cadastro_pesquisa')
# Deleting model 'Extensao'
db.delete_table('cadastro_extensao')
# Deleting model 'Atividade'
db.delete_table('cadastro_atividade')
# Removing M2M table for field disciplinas on 'Atividade'
db.delete_table(db.shorten_name('cadastro_atividade_disciplinas'))
# Removing M2M table for field pesquisa on 'Atividade'
db.delete_table(db.shorten_name('cadastro_atividade_pesquisa'))
# Removing M2M table for field extensao on 'Atividade'
db.delete_table(db.shorten_name('cadastro_atividade_extensao'))
models = {
'cadastro.atividade': {
'Meta': {'object_name': 'Atividade'},
'afastamento': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'cargo': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'comissoes': ('django.db.models.fields.IntegerField', [], {}),
'disciplinas': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cadastro.Disciplina']", 'symmetrical': 'False'}),
'docente': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cadastro.Docente']"}),
'extensao': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cadastro.Extensao']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pesquisa': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cadastro.Pesquisa']", 'symmetrical': 'False'}),
'semestre': ('django.db.models.fields.CharField', [], {'max_length': '6'})
},
'cadastro.disciplina': {
'Meta': {'object_name': 'Disciplina'},
'cargahoraria': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'codigo': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'estudantes': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'multicampia': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'nivel': ('django.db.models.fields.CharField', [], {'max_length': '11'}),
'tipo': ('django.db.models.fields.CharField', [], {'max_length': '11'})
},
'cadastro.docente': {
'Meta': {'object_name': 'Docente'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'matricula': ('django.db.models.fields.CharField', [], {'max_length': '7', 'unique': 'True'}),
'nome': ('django.db.models.fields.CharField', [], {'max_length': '100', 'unique': 'True'})
},
'cadastro.extensao': {
'Meta': {'object_name': 'Extensao'},
'area': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'bolsistas_pibex': ('django.db.models.fields.IntegerField', [], {'max_length': '2', 'blank': 'True'}),
'bolsistas_ppq': ('django.db.models.fields.IntegerField', [], {'max_length': '2', 'blank': 'True'}),
'estudantes_graduacao': ('django.db.models.fields.IntegerField', [], {'max_length': '2', 'blank': 'True'}),
'estudantes_pos': ('django.db.models.fields.IntegerField', [], {'max_length': '2', 'blank': 'True'}),
'financiador': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parceria': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'parceria_inter': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'voluntarios': ('django.db.models.fields.IntegerField', [], {'max_length': '2', 'blank': 'True'})
},
'cadastro.pesquisa': {
'Meta': {'object_name': 'Pesquisa'},
'area': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'bolsistas_pibic': ('django.db.models.fields.IntegerField', [], {'max_length': '2', 'blank': 'True'}),
'bolsistas_ppq': ('django.db.models.fields.IntegerField', [], {'max_length': '2', 'blank': 'True'}),
'estudantes_graduacao': ('django.db.models.fields.IntegerField', [], {'max_length': '2', 'blank': 'True'}),
'estudantes_pos': ('django.db.models.fields.IntegerField', [], {'max_length': '2', 'blank': 'True'}),
'financiador': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parceria': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'parceria_inter': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'voluntarios': ('django.db.models.fields.IntegerField', [], {'max_length': '2', 'blank': 'True'})
}
}
complete_apps = ['cadastro']
|
UFRB/chdocente
|
cadastro/migrations/0001_initial.py
|
Python
|
agpl-3.0
| 11,352 | 0.007488 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""NCF framework to train and evaluate the NeuMF model.
The NeuMF model assembles both MF and MLP models under the NCF framework. Check
`neumf_model.py` for more details about the models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import heapq
import json
import logging
import math
import multiprocessing
import os
import signal
import typing
# pylint: disable=g-bad-import-order
import numpy as np
from absl import app as absl_app
from absl import flags
flags.DEFINE_string(name="job-dir", default="/tmp", help="AI Platform Training passes this to the training script.")
import tensorflow as tf
# pylint: enable=g-bad-import-order
from tensorflow.contrib.compiler import xla
from official.datasets import movielens
from official.recommendation import constants as rconst
from official.recommendation import data_pipeline
from official.recommendation import data_preprocessing
from official.recommendation import neumf_model
from official.utils.flags import core as flags_core
from official.utils.logs import hooks_helper
from official.utils.logs import logger
from official.utils.logs import mlperf_helper
from official.utils.misc import distribution_utils
from official.utils.misc import model_helpers
FLAGS = flags.FLAGS
def construct_estimator(model_dir, params):
"""Construct either an Estimator or TPUEstimator for NCF.
Args:
model_dir: The model directory for the estimator
params: The params dict for the estimator
Returns:
An Estimator or TPUEstimator.
"""
if params["use_tpu"]:
# Some of the networking libraries are quite chatty.
for name in ["googleapiclient.discovery", "googleapiclient.discovery_cache",
"oauth2client.transport"]:
logging.getLogger(name).setLevel(logging.ERROR)
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
tpu=params["tpu"],
zone=params["tpu_zone"],
project=params["tpu_gcp_project"],
coordinator_name="coordinator"
)
tf.logging.info("Issuing reset command to TPU to ensure a clean state.")
tf.Session.reset(tpu_cluster_resolver.get_master())
# Estimator looks at the master it connects to for MonitoredTrainingSession
# by reading the `TF_CONFIG` environment variable, and the coordinator
# is used by StreamingFilesDataset.
tf_config_env = {
"session_master": tpu_cluster_resolver.get_master(),
"eval_session_master": tpu_cluster_resolver.get_master(),
"coordinator": tpu_cluster_resolver.cluster_spec()
.as_dict()["coordinator"]
}
os.environ['TF_CONFIG'] = json.dumps(tf_config_env)
distribution = tf.contrib.distribute.TPUStrategy(
tpu_cluster_resolver, steps_per_run=100)
else:
distribution = distribution_utils.get_distribution_strategy(
num_gpus=params["num_gpus"])
run_config = tf.estimator.RunConfig(train_distribute=distribution,
eval_distribute=distribution)
model_fn = neumf_model.neumf_model_fn
if params["use_xla_for_gpu"]:
tf.logging.info("Using XLA for GPU for training and evaluation.")
model_fn = xla.estimator_model_fn(model_fn)
estimator = tf.estimator.Estimator(model_fn=model_fn, model_dir=model_dir,
config=run_config, params=params)
return estimator
def log_and_get_hooks(eval_batch_size):
"""Convenience function for hook and logger creation."""
# Create hooks that log information about the training and metric values
train_hooks = hooks_helper.get_train_hooks(
FLAGS.hooks,
model_dir=FLAGS.model_dir,
batch_size=FLAGS.batch_size, # for ExamplesPerSecondHook
tensors_to_log={"cross_entropy": "cross_entropy"}
)
run_params = {
"batch_size": FLAGS.batch_size,
"eval_batch_size": eval_batch_size,
"number_factors": FLAGS.num_factors,
"hr_threshold": FLAGS.hr_threshold,
"train_epochs": FLAGS.train_epochs,
}
benchmark_logger = logger.get_benchmark_logger()
benchmark_logger.log_run_info(
model_name="recommendation",
dataset_name=FLAGS.dataset,
run_params=run_params,
test_id=FLAGS.benchmark_test_id)
return benchmark_logger, train_hooks
def parse_flags(flags_obj):
"""Convenience function to turn flags into params."""
num_gpus = flags_core.get_num_gpus(flags_obj)
num_devices = FLAGS.num_tpu_shards if FLAGS.tpu else num_gpus or 1
batch_size = (flags_obj.batch_size + num_devices - 1) // num_devices
eval_divisor = (rconst.NUM_EVAL_NEGATIVES + 1) * num_devices
eval_batch_size = flags_obj.eval_batch_size or flags_obj.batch_size
eval_batch_size = ((eval_batch_size + eval_divisor - 1) //
eval_divisor * eval_divisor // num_devices)
return {
"train_epochs": flags_obj.train_epochs,
"batches_per_step": num_devices,
"use_seed": flags_obj.seed is not None,
"batch_size": batch_size,
"eval_batch_size": eval_batch_size,
"learning_rate": flags_obj.learning_rate,
"mf_dim": flags_obj.num_factors,
"model_layers": [int(layer) for layer in flags_obj.layers],
"mf_regularization": flags_obj.mf_regularization,
"mlp_reg_layers": [float(reg) for reg in flags_obj.mlp_regularization],
"num_neg": flags_obj.num_neg,
"num_gpus": num_gpus,
"use_tpu": flags_obj.tpu is not None,
"tpu": flags_obj.tpu,
"tpu_zone": flags_obj.tpu_zone,
"tpu_gcp_project": flags_obj.tpu_gcp_project,
"beta1": flags_obj.beta1,
"beta2": flags_obj.beta2,
"epsilon": flags_obj.epsilon,
"match_mlperf": flags_obj.ml_perf,
"use_xla_for_gpu": flags_obj.use_xla_for_gpu,
"epochs_between_evals": FLAGS.epochs_between_evals,
}
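# Illustrative worked example (added for clarity; not part of the original
# file) of the batch-size arithmetic above, assuming 2 GPUs, --batch_size=1000,
# no --eval_batch_size, and rconst.NUM_EVAL_NEGATIVES == 999 (all assumptions):
#
#   num_devices     = 2
#   batch_size      = (1000 + 2 - 1) // 2               = 500  per device
#   eval_divisor    = (999 + 1) * 2                     = 2000
#   eval_batch_size = (1000 + 1999) // 2000 * 2000 // 2 = 1000 per device
#
# i.e. the eval batch is rounded up so each device always sees whole groups of
# one positive plus NUM_EVAL_NEGATIVES negative candidates.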
def main(_):
with logger.benchmark_context(FLAGS), \
mlperf_helper.LOGGER(FLAGS.output_ml_perf_compliance_logging):
mlperf_helper.set_ncf_root(os.path.split(os.path.abspath(__file__))[0])
run_ncf(FLAGS)
def run_ncf(_):
"""Run NCF training and eval loop."""
if FLAGS.download_if_missing and not FLAGS.use_synthetic_data:
movielens.download(FLAGS.dataset, FLAGS.data_dir)
if FLAGS.seed is not None:
np.random.seed(FLAGS.seed)
params = parse_flags(FLAGS)
total_training_cycle = FLAGS.train_epochs // FLAGS.epochs_between_evals
if FLAGS.use_synthetic_data:
producer = data_pipeline.DummyConstructor()
num_users, num_items = data_preprocessing.DATASET_TO_NUM_USERS_AND_ITEMS[
FLAGS.dataset]
num_train_steps = rconst.SYNTHETIC_BATCHES_PER_EPOCH
num_eval_steps = rconst.SYNTHETIC_BATCHES_PER_EPOCH
else:
num_users, num_items, producer = data_preprocessing.instantiate_pipeline(
dataset=FLAGS.dataset, data_dir=FLAGS.data_dir, params=params,
constructor_type=FLAGS.constructor_type,
deterministic=FLAGS.seed is not None)
num_train_steps = (producer.train_batches_per_epoch //
params["batches_per_step"])
num_eval_steps = (producer.eval_batches_per_epoch //
params["batches_per_step"])
assert not producer.train_batches_per_epoch % params["batches_per_step"]
assert not producer.eval_batches_per_epoch % params["batches_per_step"]
producer.start()
params["num_users"], params["num_items"] = num_users, num_items
model_helpers.apply_clean(flags.FLAGS)
estimator = construct_estimator(model_dir=FLAGS.model_dir, params=params)
benchmark_logger, train_hooks = log_and_get_hooks(params["eval_batch_size"])
target_reached = False
mlperf_helper.ncf_print(key=mlperf_helper.TAGS.TRAIN_LOOP)
for cycle_index in range(total_training_cycle):
assert FLAGS.epochs_between_evals == 1 or not mlperf_helper.LOGGER.enabled
tf.logging.info("Starting a training cycle: {}/{}".format(
cycle_index + 1, total_training_cycle))
mlperf_helper.ncf_print(key=mlperf_helper.TAGS.TRAIN_EPOCH,
value=cycle_index)
train_input_fn = producer.make_input_fn(is_training=True)
estimator.train(input_fn=train_input_fn, hooks=train_hooks,
steps=num_train_steps)
tf.logging.info("Beginning evaluation.")
eval_input_fn = producer.make_input_fn(is_training=False)
mlperf_helper.ncf_print(key=mlperf_helper.TAGS.EVAL_START,
value=cycle_index)
eval_results = estimator.evaluate(eval_input_fn, steps=num_eval_steps)
tf.logging.info("Evaluation complete.")
hr = float(eval_results[rconst.HR_KEY])
ndcg = float(eval_results[rconst.NDCG_KEY])
loss = float(eval_results["loss"])
mlperf_helper.ncf_print(
key=mlperf_helper.TAGS.EVAL_TARGET,
value={"epoch": cycle_index, "value": FLAGS.hr_threshold})
mlperf_helper.ncf_print(key=mlperf_helper.TAGS.EVAL_ACCURACY,
value={"epoch": cycle_index, "value": hr})
mlperf_helper.ncf_print(
key=mlperf_helper.TAGS.EVAL_HP_NUM_NEG,
value={"epoch": cycle_index, "value": rconst.NUM_EVAL_NEGATIVES})
mlperf_helper.ncf_print(key=mlperf_helper.TAGS.EVAL_STOP, value=cycle_index)
# Benchmark the evaluation results
benchmark_logger.log_evaluation_result(eval_results)
# Log the HR and NDCG results.
tf.logging.info(
"Iteration {}: HR = {:.4f}, NDCG = {:.4f}, Loss = {:.4f}".format(
cycle_index + 1, hr, ndcg, loss))
# If some evaluation threshold is met
if model_helpers.past_stop_threshold(FLAGS.hr_threshold, hr):
target_reached = True
break
mlperf_helper.ncf_print(key=mlperf_helper.TAGS.RUN_STOP,
value={"success": target_reached})
producer.stop_loop()
producer.join()
# Clear the session explicitly to avoid session delete error
tf.keras.backend.clear_session()
mlperf_helper.ncf_print(key=mlperf_helper.TAGS.RUN_FINAL)
def define_ncf_flags():
"""Add flags for running ncf_main."""
# Add common flags
flags_core.define_base(export_dir=False)
flags_core.define_performance(
num_parallel_calls=False,
inter_op=False,
intra_op=False,
synthetic_data=True,
max_train_steps=False,
dtype=False,
all_reduce_alg=False
)
flags_core.define_device(tpu=True)
flags_core.define_benchmark()
flags.adopt_module_key_flags(flags_core)
flags_core.set_defaults(
model_dir="/tmp/ncf/",
data_dir="/tmp/movielens-data/",
train_epochs=2,
batch_size=256,
hooks="ProfilerHook",
tpu=None
)
# Add ncf-specific flags
flags.DEFINE_enum(
name="dataset", default="ml-1m",
enum_values=["ml-1m", "ml-20m"], case_sensitive=False,
help=flags_core.help_wrap(
"Dataset to be trained and evaluated."))
flags.DEFINE_boolean(
name="download_if_missing", default=True, help=flags_core.help_wrap(
"Download data to data_dir if it is not already present."))
flags.DEFINE_integer(
name="eval_batch_size", default=None, help=flags_core.help_wrap(
"The batch size used for evaluation. This should generally be larger"
"than the training batch size as the lack of back propagation during"
"evaluation can allow for larger batch sizes to fit in memory. If not"
"specified, the training batch size (--batch_size) will be used."))
flags.DEFINE_integer(
name="num_factors", default=8,
help=flags_core.help_wrap("The Embedding size of MF model."))
# Set the default as a list of strings to be consistent with input arguments
flags.DEFINE_list(
name="layers", default=["64", "32", "16", "8"],
help=flags_core.help_wrap(
"The sizes of hidden layers for MLP. Example "
"to specify different sizes of MLP layers: --layers=32,16,8,4"))
flags.DEFINE_float(
name="mf_regularization", default=0.,
help=flags_core.help_wrap(
"The regularization factor for MF embeddings. The factor is used by "
"regularizer which allows to apply penalties on layer parameters or "
"layer activity during optimization."))
flags.DEFINE_list(
name="mlp_regularization", default=["0.", "0.", "0.", "0."],
help=flags_core.help_wrap(
"The regularization factor for each MLP layer. See mf_regularization "
"help for more info about regularization factor."))
flags.DEFINE_integer(
name="num_neg", default=4,
help=flags_core.help_wrap(
"The Number of negative instances to pair with a positive instance."))
flags.DEFINE_float(
name="learning_rate", default=0.001,
help=flags_core.help_wrap("The learning rate."))
flags.DEFINE_float(
name="beta1", default=0.9,
help=flags_core.help_wrap("beta1 hyperparameter for the Adam optimizer."))
flags.DEFINE_float(
name="beta2", default=0.999,
help=flags_core.help_wrap("beta2 hyperparameter for the Adam optimizer."))
flags.DEFINE_float(
name="epsilon", default=1e-8,
help=flags_core.help_wrap("epsilon hyperparameter for the Adam "
"optimizer."))
flags.DEFINE_float(
name="hr_threshold", default=None,
help=flags_core.help_wrap(
"If passed, training will stop when the evaluation metric HR is "
"greater than or equal to hr_threshold. For dataset ml-1m, the "
"desired hr_threshold is 0.68 which is the result from the paper; "
"For dataset ml-20m, the threshold can be set as 0.95 which is "
"achieved by MLPerf implementation."))
flags.DEFINE_enum(
name="constructor_type", default="bisection",
enum_values=["bisection", "materialized"], case_sensitive=False,
help=flags_core.help_wrap(
"Strategy to use for generating false negatives. materialized has a"
"precompute that scales badly, but a faster per-epoch construction"
"time and can be faster on very large systems."))
flags.DEFINE_bool(
name="ml_perf", default=False,
help=flags_core.help_wrap(
"If set, changes the behavior of the model slightly to match the "
"MLPerf reference implementations here: \n"
"https://github.com/mlperf/reference/tree/master/recommendation/"
"pytorch\n"
"The two changes are:\n"
"1. When computing the HR and NDCG during evaluation, remove "
"duplicate user-item pairs before the computation. This results in "
"better HRs and NDCGs.\n"
"2. Use a different soring algorithm when sorting the input data, "
"which performs better due to the fact the sorting algorithms are "
"not stable."))
flags.DEFINE_bool(
name="output_ml_perf_compliance_logging", default=False,
help=flags_core.help_wrap(
"If set, output the MLPerf compliance logging. This is only useful "
"if one is running the model for MLPerf. See "
"https://github.com/mlperf/policies/blob/master/training_rules.adoc"
"#submission-compliance-logs for details. This uses sudo and so may "
"ask for your password, as root access is needed to clear the system "
"caches, which is required for MLPerf compliance."
)
)
flags.DEFINE_integer(
name="seed", default=None, help=flags_core.help_wrap(
"This value will be used to seed both NumPy and TensorFlow."))
@flags.validator("eval_batch_size", "eval_batch_size must be at least {}"
.format(rconst.NUM_EVAL_NEGATIVES + 1))
def eval_size_check(eval_batch_size):
return (eval_batch_size is None or
int(eval_batch_size) > rconst.NUM_EVAL_NEGATIVES)
flags.DEFINE_bool(
name="use_xla_for_gpu", default=False, help=flags_core.help_wrap(
"If True, use XLA for the model function. Only works when using a "
"GPU. On TPUs, XLA is always used"))
xla_message = "--use_xla_for_gpu is incompatible with --tpu"
@flags.multi_flags_validator(["use_xla_for_gpu", "tpu"], message=xla_message)
def xla_validator(flag_dict):
return not flag_dict["use_xla_for_gpu"] or not flag_dict["tpu"]
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
define_ncf_flags()
absl_app.run(main)
|
GoogleCloudPlatform/ml-on-gcp
|
example_zoo/tensorflow/models/ncf_main/official/recommendation/ncf_main.py
|
Python
|
apache-2.0
| 17,012 | 0.006348 |
#!/usr/bin/env python
# coding:utf-8 vi:et:ts=2
# parabridge persistent settings module.
# Copyright 2013 Grigory Petrov
# See LICENSE for details.
import xmlrpclib
import socket
import sqlite3
import uuid
import info
SQL_CREATE = """
CREATE TABLE IF NOT EXISTS task (
guid TEXT UNIQUE,
name TEXT UNIQUE,
src TEXT,
dst TEXT);
CREATE TABLE IF NOT EXISTS index_last (
guid TEXT,
file TEXT,
index_last INTEGER);
"""
SQL_TASK_ADD = """INSERT INTO task (guid, name, src, dst)
VALUES (:guid, :name, :src, :dst)"""
SQL_TASK_LIST = """SELECT * FROM task"""
SQL_TASK_DEL_BY_NAME = """DELETE FROM task WHERE name = :name"""
SQL_TASK_GUID_BY_NAME = """SELECT guid FROM task WHERE name = :name"""
SQL_INDEX_LAST_DEL = """DELETE FROM index_last WHERE guid = :guid"""
SQL_INDEX_LAST_UPDATE = """UPDATE index_last SET index_last = :index_last
WHERE guid = :guid AND file = :file"""
SQL_INDEX_LAST_ADD = """INSERT INTO index_last (guid, file, index_last)
VALUES (:guid, :file, :index_last)"""
SQL_INDEX_LAST_GET = """SELECT index_last FROM index_last WHERE
guid = :guid AND file = :file"""
class Settings( object ):
def __init__( self ):
self._init_f = False
self._notify_f = False
def init( self, f_notify = False ):
self._notify_f = f_notify
self._init_f = True
with sqlite3.connect( info.FILE_CFG ) as oConn:
oConn.executescript( SQL_CREATE )
## Notify daemon process so it can read updated settings.
def notifyIfNeeded( self ):
if not self._notify_f:
return
try:
xmlrpclib.ServerProxy( info.COMM_ADDR ).cfg_changed()
except socket.error:
pass
def taskAdd( self, s_name, s_src, s_dst ):
with sqlite3.connect( info.FILE_CFG ) as oConn:
try:
mValues = {
'guid': str( uuid.uuid4() ),
'name': s_name,
'src': s_src,
'dst': s_dst }
oConn.execute( SQL_TASK_ADD, mValues )
except sqlite3.IntegrityError:
## Name not unique.
return False
else:
return True
finally:
self.notifyIfNeeded()
def indexLastSet( self, s_guid, s_file, n_index ):
with sqlite3.connect( info.FILE_CFG ) as oConn:
mArgs = {
'guid': s_guid,
'file': s_file,
'index_last': n_index }
oRet = oConn.execute( SQL_INDEX_LAST_UPDATE, mArgs )
if oRet.rowcount > 0:
return
## No record for guid and name pair: add one.
oConn.execute( SQL_INDEX_LAST_ADD, mArgs )
def indexLastGet( self, s_guid, s_file ):
with sqlite3.connect( info.FILE_CFG ) as oConn:
oConn.row_factory = sqlite3.Row
mArgs = { 'guid': s_guid, 'file': s_file }
lRet = oConn.execute( SQL_INDEX_LAST_GET, mArgs ).fetchall()
if 0 == len( lRet ):
return None
if len( lRet ) > 1:
raise Exception( "Consistency error." )
return lRet[ 0 ][ 'index_last' ]
def taskDelByName( self, s_name ):
with sqlite3.connect( info.FILE_CFG ) as oConn:
oConn.row_factory = sqlite3.Row
try:
mArgs = { 'name': s_name }
oRow = oConn.execute( SQL_TASK_GUID_BY_NAME, mArgs ).fetchone()
if oRow is None:
return False
mArgs[ 'guid' ] = oRow[ 'guid' ]
oRet = oConn.execute( SQL_TASK_DEL_BY_NAME, mArgs )
if 0 == oRet.rowcount:
raise Exception( "Consistency error" )
oConn.execute( SQL_INDEX_LAST_DEL, mArgs )
return True
finally:
self.notifyIfNeeded()
def taskList( self ):
with sqlite3.connect( info.FILE_CFG ) as oConn:
try:
oConn.row_factory = sqlite3.Row
return oConn.execute( SQL_TASK_LIST ).fetchall()
finally:
self.notifyIfNeeded()
instance = Settings()
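# Illustrative usage sketch (added for clarity; not part of the original
# module). The task name and paths are made up.
#
#   instance.init( f_notify = True )
#   instance.taskAdd( 'books', '/srv/para/books.db', '/tmp/books.sqlite' )
#   for oTask in instance.taskList():
#     print( '%s: %s -> %s' % ( oTask[ 'name' ], oTask[ 'src' ], oTask[ 'dst' ] ) )
#   instance.taskDelByName( 'books' )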
|
eyeofhell/parabridge
|
parabridge/settings.py
|
Python
|
gpl-3.0
| 3,770 | 0.032891 |
from bt_proximity import BluetoothRSSI
import time
import sys
import datetime
#////////////////////////////////
BT_ADDR = 'xx:xx:xx:xx:xx:xx'#/// Enter your bluetooth address here!
#////////////////////////////////
# ----------------------- DO NOT EDIT ANYTHING BELOW THIS LINE --------------------------- #
def write(records, count):
f = open("test_records.txt", "a+") # open records for append. If not present create
for i in range(count): # write out each record
f.write(str(records[i][0]) + "," + str(records[i][1]) + '\n')
f.close()
def time_diff(start_time):
current_time = datetime.datetime.now() # get current time
diff = (current_time - start_time).total_seconds() # get difference of startime and current time
return str(round(diff,2))
def main(start_time):
records = [] # initialize array of records
count = 0 # initialize count
addr = BT_ADDR # assign BT_ADDR
num = 10 # amount of records to be recorded
while(count < num):
btrssi = BluetoothRSSI(addr=addr)
time_e = time_diff(start_time) # get seconds elapsed
record = (btrssi.get_rssi(), time_e) # create record
records.append(record) # add record to records array
count += 1
time.sleep(.5) # wait time to get next record
write(records, count) # write out records
if __name__ == '__main__':
    main(datetime.datetime.now())  # main() expects the run's start time
|
stan-cap/bt_rssi
|
test/main_test.py
|
Python
|
mit
| 1,713 | 0.010508 |
from Screens.Screen import Screen
from Components.ActionMap import ActionMap
from Components.Label import Label
from Plugins.Plugin import PluginDescriptor
def getUpgradeVersion():
import os
try:
r = os.popen("fpupgrade --version").read()
except IOError:
return None
if r[:16] != "FP update tool v":
return None
else:
return int(r[16:17])
class FPUpgrade(Screen):
skin = """
<screen position="150,200" size="450,200" title="FP upgrade required" >
<widget name="text" position="0,0" size="550,50" font="Regular;20" />
<widget name="oldversion_label" position="10,100" size="290,25" font="Regular;20" />
<widget name="newversion_label" position="10,125" size="290,25" font="Regular;20" />
<widget name="oldversion" position="300,100" size="50,25" font="Regular;20" />
<widget name="newversion" position="300,125" size="50,25" font="Regular;20" />
</screen>"""
def __init__(self, session):
self.skin = FPUpgrade.skin
Screen.__init__(self, session)
from Tools.StbHardware import getFPVersion
version = str(getFPVersion() or "N/A")
newversion = str(getUpgradeVersion() or "N/A")
self["text"] = Label(_("Your frontprocessor firmware must be upgraded.\nPress OK to start upgrade."))
self["oldversion_label"] = Label(_("Current version:"))
self["newversion_label"] = Label(_("New version:"))
self["oldversion"] = Label(version)
self["newversion"] = Label(newversion)
self["actions"] = ActionMap(["OkCancelActions"],
{
"ok": self.ok,
"cancel": self.close,
})
def ok(self):
self.close(4)
class SystemMessage(Screen):
skin = """
<screen position="150,200" size="450,200" title="System Message" >
<widget source="text" position="0,0" size="450,200" font="Regular;20" halign="center" valign="center" render="Label" />
<ePixmap pixmap="icons/input_error.png" position="5,5" size="53,53" alphatest="on" />
</screen>"""
def __init__(self, session, message):
from Components.Sources.StaticText import StaticText
Screen.__init__(self, session)
self["text"] = StaticText(message)
self["actions"] = ActionMap(["OkCancelActions"],
{
"cancel": self.ok,
})
def ok(self):
self.close()
def Plugins(**kwargs):
from Tools.StbHardware import getFPVersion
version = getFPVersion()
newversion = getUpgradeVersion() or 0
list = []
if version is not None and version < newversion:
list.append(PluginDescriptor(name=_("FP Upgrade"), where = PluginDescriptor.WHERE_WIZARD, needsRestart = True, fnc=(8, FPUpgrade)))
try:
msg = open("/proc/stb/message").read()
list.append(PluginDescriptor(name=_("System Message Check"), where = PluginDescriptor.WHERE_WIZARD, needsRestart = True, fnc=(9, SystemMessage, msg)))
except:
pass
return list
|
atvcaptain/enigma2
|
lib/python/Plugins/SystemPlugins/FrontprocessorUpgrade/plugin.py
|
Python
|
gpl-2.0
| 2,732 | 0.032211 |
"""
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import pwd
from django.db import transaction
from django.contrib.auth.models import User as DjangoUser
from storageadmin.models import User
from system import users
@transaction.atomic
def change_password(username, password):
try:
duser = DjangoUser.objects.get(username=username)
duser.set_password(password)
duser.save()
except:
sys.exit('username: %s does not exist in the admin database' %
username)
try:
User.objects.get(username=username)
except:
sys.exit('username: %s does not exist in the database' % username)
try:
pwd.getpwnam(username)
except KeyError:
sys.exit('username: %s does not exist in the system' % username)
try:
users.usermod(username, password)
users.smbpasswd(username, password)
except:
        sys.exit('Low level error occurred while changing password of user: %s'
% username)
def main():
if (len(sys.argv) < 3 or
(len(sys.argv) > 1 and sys.argv[1] == '-h')):
sys.exit('Usage: pwreset <username> <new_password>')
try:
change_password(sys.argv[1], sys.argv[2])
except:
sys.exit('Error changing password for user: %s. Check the username '
'and try again.' % sys.argv[1])
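# Note (added for clarity; not part of the original script): main() reads
# sys.argv directly, matching the usage string above
# ('pwreset <username> <new_password>'); the module is presumably invoked
# through a console-script entry point rather than run directly.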
|
schakrava/rockstor-core
|
src/rockstor/scripts/pwreset.py
|
Python
|
gpl-3.0
| 2,030 | 0.00197 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
from django.views.i18n import JavaScriptCatalog
from demo.apps.app import application
js_info_dict = {
'packages': ('base', ),
}
urlpatterns = [
url(r'^jsi18n/$', JavaScriptCatalog.as_view(), name='javascript_catalog'),
# Admin
url(r'^' + settings.ADMIN_URL, admin.site.urls),
# Apps
url(r'', include(application.urls)),
]
if settings.DEBUG:
# Add the Debug Toolbar’s URLs to the project’s URLconf
import debug_toolbar
urlpatterns += [url(r'^__debug__/', include(debug_toolbar.urls)), ]
# In DEBUG mode, serve media files through Django.
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views import static
urlpatterns += staticfiles_urlpatterns()
# Remove leading and trailing slashes so the regex matches.
media_url = settings.MEDIA_URL.lstrip('/').rstrip('/')
urlpatterns += [
url(r'^%s/(?P<path>.*)$' % media_url, static.serve,
{'document_root': settings.MEDIA_ROOT}),
]
|
reinbach/django-machina
|
example_projects/demo/demo_project/urls.py
|
Python
|
bsd-3-clause
| 1,208 | 0 |
"""
The Netio switch component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.netio/
"""
import logging
from collections import namedtuple
from datetime import timedelta
import voluptuous as vol
from homeassistant.core import callback
from homeassistant import util
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
CONF_HOST, CONF_PORT, CONF_USERNAME, CONF_PASSWORD,
EVENT_HOMEASSISTANT_STOP, STATE_ON)
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pynetio==0.1.6']
_LOGGER = logging.getLogger(__name__)
ATTR_START_DATE = 'start_date'
ATTR_TOTAL_CONSUMPTION_KWH = 'total_energy_kwh'
CONF_OUTLETS = 'outlets'
DEFAULT_PORT = 1234
DEFAULT_USERNAME = 'admin'
DEPENDENCIES = ['http']
Device = namedtuple('device', ['netio', 'entities'])
DEVICES = {}
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
REQ_CONF = [CONF_HOST, CONF_OUTLETS]
URL_API_NETIO_EP = '/api/netio/{host}'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Required(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_OUTLETS): {cv.string: cv.string},
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Netio platform."""
from pynetio import Netio
host = config.get(CONF_HOST)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
port = config.get(CONF_PORT)
if not DEVICES:
hass.http.register_view(NetioApiView)
dev = Netio(host, port, username, password)
DEVICES[host] = Device(dev, [])
# Throttle the update for all Netio switches of one Netio
dev.update = util.Throttle(MIN_TIME_BETWEEN_SCANS)(dev.update)
for key in config[CONF_OUTLETS]:
switch = NetioSwitch(
DEVICES[host].netio, key, config[CONF_OUTLETS][key])
DEVICES[host].entities.append(switch)
add_entities(DEVICES[host].entities)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, dispose)
return True
def dispose(event):
"""Close connections to Netio Devices."""
for _, value in DEVICES.items():
value.netio.stop()
class NetioApiView(HomeAssistantView):
"""WSGI handler class."""
url = URL_API_NETIO_EP
name = 'api:netio'
@callback
def get(self, request, host):
"""Request handler."""
hass = request.app['hass']
data = request.query
states, consumptions, cumulated_consumptions, start_dates = \
[], [], [], []
for i in range(1, 5):
out = 'output%d' % i
states.append(data.get('%s_state' % out) == STATE_ON)
consumptions.append(float(data.get('%s_consumption' % out, 0)))
cumulated_consumptions.append(
float(data.get('%s_cumulatedConsumption' % out, 0)) / 1000)
start_dates.append(data.get('%s_consumptionStart' % out, ""))
_LOGGER.debug('%s: %s, %s, %s since %s', host, states,
consumptions, cumulated_consumptions, start_dates)
ndev = DEVICES[host].netio
ndev.consumptions = consumptions
ndev.cumulated_consumptions = cumulated_consumptions
ndev.states = states
ndev.start_dates = start_dates
for dev in DEVICES[host].entities:
hass.async_add_job(dev.async_update_ha_state())
return self.json(True)
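# Illustrative request (added for clarity; not part of the original component).
# The Netio device is expected to push its state to this endpoint roughly as
#
#   GET /api/netio/<host>?output1_state=on&output1_consumption=12.5
#       &output1_cumulatedConsumption=1234&output1_consumptionStart=...
#       (and the same four parameters for output2..output4)
#
# which the handler above unpacks into on/off states, consumption (W),
# cumulated consumption (kWh) and consumption start dates. The values shown
# are assumptions.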
class NetioSwitch(SwitchDevice):
"""Provide a Netio linked switch."""
def __init__(self, netio, outlet, name):
"""Initialize the Netio switch."""
self._name = name
self.outlet = outlet
self.netio = netio
@property
def name(self):
"""Return the device's name."""
return self._name
@property
def available(self):
"""Return true if entity is available."""
return not hasattr(self, 'telnet')
def turn_on(self, **kwargs):
"""Turn switch on."""
self._set(True)
def turn_off(self, **kwargs):
"""Turn switch off."""
self._set(False)
def _set(self, value):
val = list('uuuu')
val[self.outlet - 1] = '1' if value else '0'
self.netio.get('port list %s' % ''.join(val))
self.netio.states[self.outlet - 1] = value
self.schedule_update_ha_state()
@property
def is_on(self):
"""Return the switch's status."""
return self.netio.states[self.outlet - 1]
def update(self):
"""Update the state."""
self.netio.update()
@property
def state_attributes(self):
"""Return optional state attributes."""
return {
ATTR_TOTAL_CONSUMPTION_KWH: self.cumulated_consumption_kwh,
ATTR_START_DATE: self.start_date.split('|')[0]
}
@property
def current_power_w(self):
"""Return actual power."""
return self.netio.consumptions[self.outlet - 1]
@property
def cumulated_consumption_kwh(self):
"""Return the total enerygy consumption since start_date."""
return self.netio.cumulated_consumptions[self.outlet - 1]
@property
def start_date(self):
"""Point in time when the energy accumulation started."""
return self.netio.start_dates[self.outlet - 1]
|
persandstrom/home-assistant
|
homeassistant/components/switch/netio.py
|
Python
|
apache-2.0
| 5,530 | 0 |
#!/usr/bin/env python3.7
from multiprocessing import Process
import time
import os
from printerState import main as printerStateMain
from server import main as serverMain
from websocket import main as websocketServerMain
servicesTemplate = {
'server': {
'name': 'Server',
'run': serverMain,
'running': False
},
'printerState': {
'name': 'Printer State',
'run': printerStateMain,
'running': False
},
'websocketServer': {
'name': 'Websocket server',
'run': websocketServerMain,
'running': False
}
}
class ServiceManager:
def __init__(self, services, autoStart=False):
self.log('Creating processes')
self.services = services
for serviceName in services:
newProcess = Process(target=self.services[serviceName]['run'])
newProcess.daemon = True
self.services[serviceName]['process'] = newProcess
if (autoStart):
newProcess.start()
self.log('Creating and starting process for {0} with pid {1}'.format(self.services[serviceName]['name'], newProcess.pid))
self.services[serviceName]['running'] = True
else:
self.log('Creating process for {0}'.format(self.services[serviceName]['name']))
self.services[serviceName]['running'] = False
def updateServiceState(self):
servicesRunning = []
servicesStopped = []
for serviceName in self.services:
self.services[serviceName]['running'] = self.services[serviceName]['process'].is_alive()
if(self.services[serviceName]['running']):
servicesRunning.append(self.services[serviceName]['name'])
else:
servicesStopped.append(self.services[serviceName]['name'])
if(len(servicesStopped) != 0):
self.log('Services stopped: {0}'.format(','.join(servicesStopped)))
def restartStoppedServices(self):
for serviceName in self.services:
if (not self.services[serviceName]['running']):
self.startService(serviceName)
def startService(self, serviceName):
if(self.services[serviceName]['running']):
            self.log('Cannot start a service that is already running', 'warning')
else:
self.services[serviceName]['process'].terminate()
self.services[serviceName]['process'] = Process(target=self.services[serviceName]['run'])
self.services[serviceName]['process'].start()
self.log('Creating and starting process for {0} with pid {1}'.format(
self.services[serviceName]['name'],
self.services[serviceName]['process'].pid))
self.services[serviceName]['running'] = True
def loop(self):
while True:
self.updateServiceState()
self.restartStoppedServices()
time.sleep(4)
def log(self, message, level='info'):
print('{0}-[Service Manager][{2}] {1}'.format(round(time.time()), message, level))
def main():
services = ServiceManager(servicesTemplate, autoStart=True)
services.loop()
if __name__ == '__main__':
main()
|
MakersLab/Farm-server
|
server/main.py
|
Python
|
gpl-3.0
| 3,234 | 0.002474 |
# -*- coding: utf-8 -*-
import sublime, sublime_plugin
import os
import shutil
import subprocess
import zipfile
import glob
import sys
import codecs
import re
import json
import xml.etree.ElementTree
###
### Global Value
###
PACKAGE_NAME = 'EPubMaker'
OPEN_COMMAND = 'epub_maker_open'
SAVE_COMMAND = 'epub_maker_save'
PREVIEW_COMMAND = 'epub_maker_preview'
WORKSPACES_PATH = None
SUMMARY_EXTENSION = 'sublime-epub-summary'
IDENTIFIER_EXTENSION = 'sublime-epub-identifier'
PROJECT_EXTENSION = 'sublime-project'
IGNORE_EXTENSIONS = [
SUMMARY_EXTENSION,
IDENTIFIER_EXTENSION,
PROJECT_EXTENSION,
'sublime-workspace'
]
PREVIEW_PREFIX = 'epub-preview-'
SETTINGS = {}
ST3 = sublime.version() >= '3000'
###
### EventListener
###
class EpubMakerEventListener(sublime_plugin.EventListener):
def on_load(self, view):
filename = view.file_name()
        if is_valid_format(filename, [SUMMARY_EXTENSION]): # summary files must not be editable
            view.set_read_only(True)
        elif not is_valid_format(filename): # check for the epub extension
            return
        elif ST3: # make sure we are running Sublime Text 3
            global WORKSPACES_PATH
            if WORKSPACES_PATH is None: # make sure workspaces has been initialized
                return
            else:
                view.run_command(OPEN_COMMAND) # open the epub
def on_post_save(self, view):
if not get_setting('auto_save'):
return
        view.run_command(SAVE_COMMAND) # save the epub
###
### TextCommand
###
class EpubMakerOpenCommand(sublime_plugin.TextCommand):
def is_enabled(self):
return is_valid_format(self.view.file_name())
def run(self, edit):
def extract(workpath, namelist):
os.makedirs(workpath)
for name in namelist:
filepath = os.path.join(workpath, name)
dirname = os.path.dirname(filepath)
                if not os.path.exists(dirname): # create missing directories
                    os.makedirs(dirname)
                if os.path.isdir(filepath): # skip directory entries
continue
else:
with open(filepath, 'wb') as descriptor:
descriptor.write(epub.read(name))
def close_views(workpath, namelist):
activewindow = sublime.active_window()
activeview = activewindow.active_view()
for name in namelist:
                if name.startswith(workpath): # already an absolute path?
filepath = name
else:
filepath = os.path.join(workpath, name)
for window in sublime.windows():
for view in window.views():
if view.file_name() == filepath:
view.set_scratch(True)
window.focus_view(view)
window.run_command('close_file')
break
activewindow.focus_view(activeview)
def close_folders(workpath):
for window in sublime.windows():
for folder in window.folders():
if folder == workpath:
window.run_command('remove_folder', {'dirs': [folder]})
break
window.run_command('refresh_folder_list')
        # extract the archive
        epubpath = self.view.file_name()
        try:
            epub = zipfile.ZipFile(epubpath)
        except Exception as e:
            sublime.error_message('An error occurred while extracting the archive')
            print(PACKAGE_NAME + ':open: an error occurred while extracting \'' + epubpath + '\'')
            return
        # create the workspace
global WORKSPACES_PATH
workpath = os.path.join(WORKSPACES_PATH, os.path.splitext(os.path.basename(epubpath))[0])
namelist = epub.namelist()
close_views(workpath, namelist + [get_sumblime_project_path(workpath), get_epub_identifier_path(workpath), get_epub_summary_path(workpath), get_preview_path(workpath)])
close_folders(workpath)
if not os.path.exists(workpath):
extract(workpath, namelist)
        elif not sublime.ok_cancel_dialog('This ePub has been worked on before.\nDo you want to continue where you left off?'):
            shutil.rmtree(workpath)
            extract(workpath, namelist)
        # create the project files
idpath = create_epub_identifier(workpath, epubpath)
projectpath = create_sublime_project(workpath)
summarypath = create_epub_summary(workpath, epubpath)
        # close the epub view
view = self.view
window = view.window()
view.set_scratch(True)
window.focus_view(view)
window.run_command('close_file')
        # open the generated project
if is_windows():
sumlpath = os.path.join(os.path.dirname(sublime.__file__), 'subl.exe')
else:
sumlpath = os.path.join(os.path.dirname(os.path.dirname(sublime.__file__)), 'SharedSupport', 'bin', 'subl')
cmd = '"' + sumlpath + '" --project "' + projectpath + '" --add "' + summarypath + '"'
if get_setting('new_window'):
cmd += ' --new-window'
subprocess.Popen(cmd, shell=True)
window.run_command('refresh_folder_list')
sublime.status_message('Opend ePub ' + epubpath)
print(PACKAGE_NAME + ':open: \'' + epubpath + '\' -> \'' + workpath + '\'')
class EpubMakerSaveCommand(sublime_plugin.TextCommand):
def run(self, edit):
workpath = get_work_path(self.view)
if workpath is None:
return
        # locate the epub-identifier
        idpath = get_epub_identifier_path(workpath)
        if not os.path.exists(idpath):
            sublime.error_message('Cannot find \'' + idpath + '\'')
            print(PACKAGE_NAME + ':save: cannot find \'' + idpath + '\'')
            return
        if get_setting('require_confirm_save'):
            if not sublime.ok_cancel_dialog('Do you want to apply the changes to the ePub as well?'):
                return
        # read the epub-identifier
idfile = open(idpath, 'r')
epubid = json.loads(idfile.read())
idfile.close()
epubpath = None
if get_setting('overwite_original'):
epubpath = epubid['src_path']
if not epubpath is None and get_setting('backup_original'):
def backup(path):
try:
shutil.copy(path, set_extension(path, get_setting('backup_extension')))
except Exception as e:
                    sublime.error_message('An error occurred while backing up \'' + epubpath + '\'')
                    print(PACKAGE_NAME + ':save: an error occurred while backing up \'' + epubpath + '\'')
backup(epubpath)
if epubpath is None:
epubpath = set_extension(os.path.join(workpath, '..', os.path.basename(workpath)), 'epub')
epub = zipfile.ZipFile(epubpath, 'w')
        # Per the ePub OCF spec, the mimetype file must come first and be stored uncompressed
        epub.writestr('mimetype', 'application/epub+zip', zipfile.ZIP_STORED)
        # then add the remaining directories and files
for root, dirs, files in os.walk(workpath):
if root == workpath:
continue
epub.write(root, root[len(workpath + os.sep):], zipfile.ZIP_STORED)
for f in files:
if is_ignore_file(f) or f == 'mimetype' or f.startswith(PREVIEW_PREFIX):
continue
f = os.path.join(root, f)
epub.write(f, f[len(workpath + os.sep):], zipfile.ZIP_DEFLATED)
epub.close()
sublime.status_message('Saved ePub ' + epubpath)
print(PACKAGE_NAME + ':save: \'' + epubpath + '\'')
class EpubMakerPreviewCommand(sublime_plugin.TextCommand):
def run(self, edit):
workpath = get_work_path(self.view)
if workpath is None:
return
filename = self.view.file_name()
if not is_valid_format(filename, ['html', 'htm', 'xhtml', 'xhtm']):
return
previewfile = open(get_resource_path('preview.html'), 'r')
preview = previewfile.read()
previewfile.close()
preview = preview.replace('#EPUB_NAME#', os.path.basename(workpath))
preview = preview.replace('#EPUB_SPINE_NAME#', os.path.basename(filename))
preview = preview.replace('#EPUB_SPINE_PATH#', filename.replace(workpath + os.sep, ''))
previewpath = get_preview_path(workpath)
with codecs.open(previewpath, 'w', 'utf-8') as html:
html.write(preview)
html.close()
sublime.active_window().run_command('side_bar_open_in_browser', {'browser': 'chromium', 'paths': [previewpath], 'type': 'testing'})
###
### Global Def (utility)
###
def get_platform_name():
return sublime.platform()
def is_windows():
return get_platform_name().startswith('windows')
def is_osx():
return get_platform_name().startswith('osx')
def natural_sort(l):
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(l, key = alphanum_key)
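# Example (follows from the key function above):
#   natural_sort(['page10.xhtml', 'page2.xhtml', 'page1.xhtml'])
#   == ['page1.xhtml', 'page2.xhtml', 'page10.xhtml']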
def set_extension(path=None, extension=None):
if path is None or extension is None:
return None
else:
return path + '.' + extension
def is_valid_format(filename=None, extensions=['epub']):
if filename is None or '.' not in filename:
return False
else:
return filename.rsplit('.', 1)[1] in extensions
def is_ignore_file(filename=None):
if filename is None:
return True
elif is_valid_format(filename, IGNORE_EXTENSIONS):
return True
else:
return False
def get_setting(key):
    return SETTINGS[key]
def load_settings():
settings = sublime.load_settings(PACKAGE_NAME + '.sublime-settings')
SETTINGS['new_window'] = settings.get('new_window', True)
SETTINGS['auto_save'] = settings.get('auto_save', False)
SETTINGS['require_confirm_save'] = settings.get('require_confirm_save', False)
SETTINGS['overwite_original'] = settings.get('overwite_original', True)
SETTINGS['backup_original'] = settings.get('backup_original', True)
SETTINGS['backup_extension'] = settings.get('backup_extension', 'back')
# workpath: the assigned working directory
def create_sublime_project(workpath):
if not os.path.exists(workpath):
return None
else:
projectpath = get_sumblime_project_path(workpath)
with codecs.open(projectpath, 'w', 'utf-8') as project:
project.write(json.dumps({"folders": [{"path": workpath}]}, sort_keys=True, indent=4, separators=(',', ': ')))
project.close()
return projectpath
def get_sumblime_project_path(workpath):
return set_extension(os.path.join(workpath, os.path.basename(workpath)), PROJECT_EXTENSION)
# workpath: the assigned working directory
# epubpath: path of the original ePub file
def create_epub_identifier(workpath, epubpath):
if not os.path.exists(workpath):
return None
else:
idpath = get_epub_identifier_path(workpath)
with codecs.open(idpath, 'w', 'utf-8') as idf:
idf.write(json.dumps({"src_path": epubpath, "work_path": workpath}, sort_keys=True, indent=4, separators=(',', ': ')))
idf.close()
return idpath
def get_epub_identifier_path(workpath):
return set_extension(os.path.join(workpath, os.path.basename(workpath)), IDENTIFIER_EXTENSION)
# workpath: the assigned working directory
# epubpath: path of the original ePub file
def create_epub_summary(workpath, epubpath):
def size_of(filepath, suffix='B'):
if not os.path.exists(filepath):
size = 0
elif os.path.isdir(filepath):
size = 0
for dirpath, dirnames, filenames in os.walk(filepath):
for filename in filenames:
size += os.path.getsize(os.path.join(dirpath, filename))
else:
size = os.path.getsize(filepath)
for unit in ['','K','M','G']:
if abs(size) < 1024.0:
return '%3.1f%s%s' % (size, unit, suffix)
size /= 1024.0
return '%.1f%s' % (size, suffix)
def list_files(startpath):
tree = ''
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, '').count(os.sep)
indent = ' ' * 4 * (level)
tree += '{0}{1}{2}\n'.format(indent, os.path.basename(root), os.sep)
subindent = ' ' * 4 * (level + 1)
for f in natural_sort(files):
if is_ignore_file(f):
continue
tree += '{0}{1} ({2})\n'.format(subindent, f, size_of(os.path.join(root, f)))
return tree
    if not os.path.exists(workpath) or not os.path.exists(epubpath):
        return None
    else:
        summarypath = get_epub_summary_path(workpath)
        with codecs.open(summarypath, 'w', 'utf-8') as summary:
            summary.write(os.path.basename(workpath) + '\n\n')
            summary.write('Source path: ' + epubpath + ' (' + size_of(epubpath) + ')\n')
            summary.write('Work path: ' + workpath + ' (' + size_of(workpath) + ')\n\n')
            summary.write('OPF path: ' + (get_opf_path(workpath) or 'null') + '\n')
            summary.write('OEBPS path: ' + (get_oebps_path(workpath) or 'null') + '\n\n')
            summary.write('ePub structure:\n')
summary.write(list_files(workpath))
summary.close()
return summarypath
def get_epub_summary_path(workpath):
return set_extension(os.path.join(workpath, os.path.basename(workpath)), SUMMARY_EXTENSION)
def get_preview_path(workpath):
return set_extension(os.path.join(workpath, PREVIEW_PREFIX + os.path.basename(workpath)), 'html')
def get_resource_path(subpath):
    return os.path.join(sublime.packages_path(), PACKAGE_NAME, subpath)
def get_work_path(view):
global WORKSPACES_PATH
filename = view.file_name()
if not filename.startswith(WORKSPACES_PATH):
return None
components = filename.replace(WORKSPACES_PATH, '').split(os.sep)
if not len(components[0]) == 0:
return None
workpath = os.path.join(WORKSPACES_PATH, components[1])
if not os.path.exists(workpath):
return None
if not os.path.isdir(workpath):
return None
return workpath
def get_container_path(workpath):
path = os.path.join(workpath, 'META-INF', 'container.xml')
if os.path.exists(path):
return path
else:
return None
def get_opf_path(workpath):
containerpath = get_container_path(workpath)
if containerpath is None:
return None
root = xml.etree.ElementTree.parse(containerpath).getroot()
containerns = {'ns': 'urn:oasis:names:tc:opendocument:xmlns:container'}
rootfiles = root.findall('./ns:rootfiles/ns:rootfile', namespaces=containerns)
for rootfile in rootfiles:
mediatype = rootfile.get('media-type')
if mediatype == 'application/oebps-package+xml':
return os.path.join(workpath, rootfile.get('full-path'))
return None
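# For reference, a typical META-INF/container.xml parsed by get_opf_path()
# looks like this (the full-path value is illustrative):
#   <?xml version="1.0" encoding="UTF-8"?>
#   <container version="1.0" xmlns="urn:oasis:names:tc:opendocument:xmlns:container">
#     <rootfiles>
#       <rootfile full-path="OEBPS/content.opf" media-type="application/oebps-package+xml"/>
#     </rootfiles>
#   </container>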
def get_oebps_path(workpath):
opfpath = get_opf_path(workpath)
if opfpath is None:
return None
return os.path.dirname(opfpath)
###
### Global Def (setup)
###
def init_menu():
menupath = get_resource_path('Main.sublime-menu')
if os.path.exists(menupath):
return
else:
with codecs.open(menupath, 'w', 'utf-8') as menu:
menu.write(json.dumps([
{
"caption": "File",
"id": "file",
"children":
[
{
"caption": "Save As ePub",
"mnemonic": "e",
"command": SAVE_COMMAND
}
]
},
{
"caption": "View",
"id": "view",
"children":
[
{
"caption": "Preview Current Spine In ePub",
"command": PREVIEW_COMMAND
},
{
"caption": "-"
}
]
},
{
"caption": "Preferences",
"mnemonic": "n",
"id": "preferences",
"children":
[
{
"caption": "Package Settings",
"mnemonic": "P",
"id": "package-settings",
"children":
[
{
"caption": PACKAGE_NAME,
"children":
[
{
"command": "open_file",
"args": {
"file": "${packages}/" + PACKAGE_NAME + "/" + PACKAGE_NAME + ".sublime-settings"
},
"caption": "Settings – Default"
},
{
"command": "open_file",
"args": {
"file": "${packages}/User/" + PACKAGE_NAME + ".sublime-settings"
},
"caption": "Settings – User"
},
{
"caption": "-"
}
]
}
]
}
]
}
], sort_keys=True, indent=4, separators=(',', ': ')))
menu.close()
def init_keymap():
windowkeymappath = get_resource_path('Default (Windows).sublime-keymap')
if os.path.exists(windowkeymappath):
return
else:
with codecs.open(windowkeymappath, 'w', 'utf-8') as keymap:
keymap.write(json.dumps([
{"keys": ["ctrl+shift+e"], "command": SAVE_COMMAND},
{"keys": ["f5"], "command": PREVIEW_COMMAND}
], sort_keys=True, indent=4, separators=(',', ': ')))
keymap.close()
osxkeymappath = get_resource_path('Default (OSX).sublime-keymap')
if os.path.exists(osxkeymappath):
return
else:
with codecs.open(osxkeymappath, 'w', 'utf-8') as keymap:
keymap.write(json.dumps([
{"keys": ["super+shift+e"], "command": SAVE_COMMAND},
{"keys": ["f5"], "command": PREVIEW_COMMAND}
], sort_keys=True, indent=4, separators=(',', ': ')))
keymap.close()
def init_settings():
load_settings()
def init_workspaces():
global WORKSPACES_PATH
if is_windows():
WORKSPACES_PATH = os.path.join(os.getenv('HOMEDRIVE'), os.getenv('HOMEPATH'), 'Documents', PACKAGE_NAME, 'workspaces')
else:
WORKSPACES_PATH = os.path.join(os.getenv('HOME'), PACKAGE_NAME, 'workspaces')
if not os.path.exists(WORKSPACES_PATH):
os.makedirs(WORKSPACES_PATH)
print(PACKAGE_NAME + ':init_workspaces: \'' + WORKSPACES_PATH + '\'')
def plugin_loaded():
if not ST3:
return
    if not is_windows() and not is_osx():
return
init_menu()
init_keymap()
init_settings()
init_workspaces()
|
DaVinAhn/EPubMaker
|
EPubMaker.py
|
Python
|
mit
| 16,500 | 0.030364 |
# ==========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==========================================================================*/
import itk
itk.auto_progress(2)
n_channels = 31
# Verify UC addition operation
vector_type = itk.VariableLengthVector[itk.UC]
vector1 = vector_type(n_channels)
vector2 = vector_type(n_channels)
assert len(vector1) == n_channels and len(vector2) == n_channels
vector1.Fill(16)
for idx in range(n_channels):
vector2[idx] = idx
sum = vector1 + vector2
print(f'UC sum: {sum}')
for idx in range(n_channels):
assert sum[idx] == 16 + idx, "Got unexpected result from vector sum"
# Verify float addition operation
vector_float_type = itk.VariableLengthVector[itk.F]
vector3 = vector_float_type(n_channels)
vector4 = vector_float_type(n_channels)
assert len(vector3) == n_channels and len(vector4) == n_channels
vector3.Fill(0.5)
for idx in range(n_channels):
vector4.SetElement(idx, 0.1 * idx)
float_sum = vector3 + vector4
print(f'float sum: {float_sum}')
tolerance = 1e-6
for idx in range(n_channels):
diff = abs(float_sum[idx] - (0.5 + 0.1 * idx))
print(f'float sum[{idx}]: {float_sum[idx]:0.9f} diff: {diff:0.2e}')
assert diff < tolerance, "Got unexpected result from vector float sum"
|
BRAINSia/ITK
|
Modules/Core/Common/wrapping/test/itkVariableLengthVectorTest.py
|
Python
|
apache-2.0
| 1,870 | 0 |
"""
Support for python 2 & 3, ripped pieces from six.py
"""
import sys
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
else:
string_types = basestring,
|
drewrobb/marathon-python
|
marathon/_compat.py
|
Python
|
mit
| 173 | 0 |
# Author: Pontus Laestadius.
# Since: 2nd of March, 2017.
# Maintained since: 17th of April 2017.
from receiver import Receiver
print("Version 2.2")
Receiver("172.24.1.1", 9005)
|
DIT524-V17/group-7
|
TCP raspberry/server.py
|
Python
|
gpl-3.0
| 181 | 0.005525 |
################################################################################
# #
# Copyright (C) 2010,2011,2012,2013,2014, 2015,2016 The ESPResSo project #
# #
# This file is part of ESPResSo. #
# #
# ESPResSo is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# ESPResSo is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
# #
# Active Matter: Rectification System Setup #
# #
################################################################################
from __future__ import print_function
from math import cos, pi, sin
import numpy as np
import os
import sys
import espressomd
from espressomd import assert_features, lb
from espressomd.lbboundaries import LBBoundary
from espressomd.shapes import Cylinder, Wall, HollowCone
assert_features(["LB_GPU","LB_BOUNDARIES_GPU"])
# Setup constants
outdir = "./RESULTS_RECTIFICATION_GEOMETRY/"
try:
os.makedirs(outdir)
except:
print("INFO: Directory \"{}\" exists".format(outdir))
# Setup the box (we pad the diameter to ensure that the LB boundaries,
# and therefore the constraints, are away from the edge of the box)
length = 100
diameter = 20
dt = 0.01
# Setup the MD parameters
system = espressomd.System(box_l=[length, diameter+4, diameter+4])
system.cell_system.skin = 0.1
system.time_step = dt
system.min_global_cut = 0.5
# Setup LB parameters (these are irrelevant here) and fluid
agrid = 1
vskin = 0.1
frict = 20.0
visco = 1.0
densi = 1.0
lbf = lb.LBFluidGPU(agrid=agrid, dens=densi, visc=visco, tau=dt, fric=frict)
system.actors.add(lbf)
################################################################################
#
# Now we set up the three LB boundaries that form the rectifying geometry.
# The cylinder boundary/constraint is actually already capped, but we put
# in two planes for safety's sake. If you want to create a cylinder of
# 'infinite length' using the periodic boundaries, then the cylinder must
# extend over the boundary.
#
################################################################################
# Setup cylinder
cylinder = LBBoundary(shape=Cylinder(center=[length/2.0, (diameter+4)/2.0, (diameter+4)/2.0],
axis=[1,0,0],
radius=diameter/2.0,
length=length,
direction=-1))
system.lbboundaries.add(cylinder)
# Setup walls
wall = LBBoundary(shape=Wall(dist=2, normal=[1,0,0]))
system.lbboundaries.add(wall)
wall = LBBoundary(shape=Wall(dist=-(length - 2), normal=[-1,0,0]))
system.lbboundaries.add(wall)
# Setup cone
irad = 4.0
angle = pi/4.0
orad = (diameter - irad)/sin(angle)
shift = 0.25*orad*cos(angle)
hollow_cone = LBBoundary(shape=HollowCone(position_x=length/2.0 - shift,
position_y=(diameter+4)/2.0,
position_z=(diameter+4)/2.0,
orientation_x=1,
orientation_y=0,
orientation_z=0,
outer_radius=orad,
inner_radius=irad,
width=2.0,
opening_angle=angle,
direction=1))
system.lbboundaries.add(hollow_cone)
################################################################################
# Output the geometry
lbf.print_vtk_boundary("{}/boundary.vtk".format(outdir))
################################################################################
|
KonradBreitsprecher/espresso
|
doc/tutorials/06-active_matter/SOLUTIONS/rectification_geometry.py
|
Python
|
gpl-3.0
| 5,253 | 0.008186 |
def process(target, other):
    # result[i][j] is the length of the longest common substring of target and
    # other that ends at target[i] and other[j]; ret[i] keeps the maximum over j.
    result = [[] for ch in target]
ret = []
for xi, xv in enumerate(target):
for yi, yv in enumerate(other):
if xv != yv:
result[xi].append(0)
elif 0 == xi or 0 == yi:
result[xi].append(1)
else:
result[xi].append(result[xi-1][yi-1]+1)
ret.append(max(result[xi]))
return ret
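# Example: process('abab', 'ab') == [1, 2, 1, 2]; the 2 at index 1 means the
# longest substring shared with 'ab' that ends at target[1] has length 2 ('ab').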
def find_shortest(word_length, sub_map):
    # Return the smallest length l such that some substring of length l of the
    # target word is not shared with any other word (i.e. uniquely identifies it).
for l in range(1, word_length+1):
# print "LEN: ", l
for pos in range(l-1, word_length):
# print "POS: ", pos
flag = True
for other in sub_map:
# print l, other[pos]
if l <= other[pos]:
flag = False
break
if flag:
return l
def solve(n, word_list):
for (xi, xv) in enumerate(word_list):
result = []
for (yi, yv) in enumerate(word_list):
if (xv != yv):
result.append(process(xv, yv))
# print xv, len(xv), result
print find_shortest(len(xv), result)
if __name__ == '__main__':
N = int(raw_input())
WORD = []
for n in xrange(N):
WORD.append(raw_input().strip())
solve(N, WORD)
|
everyevery/programming_study
|
lgecodejam/2014-mar/c/c.py
|
Python
|
mit
| 1,284 | 0.010125 |
# -*- coding: utf-8 -*-
#
# test_enable_multithread.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest
import unittest
__author__ = 'sdiaz'
# Structural plasticity currently does not work with multiple threads.
# An exception should be raised if structural plasticity is enabled
# and multiple threads are set, or if multiple threads are set and
# the enable_structural_plasticity function is called.
HAVE_OPENMP = nest.sli_func("is_threaded")
@unittest.skipIf(not HAVE_OPENMP, 'NEST was compiled without multi-threading')
class TestEnableMultithread(unittest.TestCase):
def setUp(self):
nest.ResetKernel()
nest.set_verbosity('M_ERROR')
def test_enable_multithread(self):
nest.ResetKernel()
nest.EnableStructuralPlasticity()
# Setting multiple threads when structural plasticity is enabled should
# throw an exception
with self.assertRaises(nest.NESTError):
nest.SetKernelStatus(
{
'local_num_threads': 2
}
)
def test_multithread_enable(self):
nest.ResetKernel()
nest.SetKernelStatus(
{
'local_num_threads': 2
}
)
        # Enabling structural plasticity when multiple threads are set should
        # throw an exception
with self.assertRaises(nest.NESTError):
nest.EnableStructuralPlasticity()
def suite():
test_suite = unittest.makeSuite(TestEnableMultithread, 'test')
return test_suite
if __name__ == '__main__':
unittest.main()
|
tobikausk/nest-simulator
|
pynest/nest/tests/test_sp/test_enable_multithread.py
|
Python
|
gpl-2.0
| 2,237 | 0 |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import xml.etree.ElementTree
from xml.etree.cElementTree import ElementTree, Element, SubElement
from xml.etree.cElementTree import fromstring, tostring
import fs_uae_launcher.fsui as fsui
from ..Config import Config
from ..Settings import Settings
from ..I18N import _, ngettext
class XMLControl(fsui.TextArea):
def __init__(self, parent):
fsui.TextArea.__init__(self, parent, horizontal_scroll=True)
self.path = ""
def connect_game(self, info):
tree = self.get_tree()
root = tree.getroot()
if not root.tag == "config":
return
game_node = self.find_or_create_node(root, "game")
game_node.set("uuid", info["uuid"])
game_name_node = self.find_or_create_node(game_node, "name")
game_name_node.text = info["name"]
self.set_tree(tree)
def find_or_create_node(self, element, name):
node = element.find(name)
if node is None:
node = SubElement(element, name)
return node
def set_path(self, path):
if not os.path.exists(path):
path = ""
self.path = path
if path:
self.load_xml(path)
else:
self.set_text("")
def get_tree(self):
text = self.get_text().strip()
try:
root = fromstring(text.encode("UTF-8"))
except Exception:
# FIXME: show message
import traceback
traceback.print_exc()
return
tree = ElementTree(root)
indent_tree(root)
return tree
def set_tree(self, tree):
data = tostring(tree.getroot(), encoding="UTF-8").decode("UTF-8")
std_decl = "<?xml version='1.0' encoding='UTF-8'?>"
if data.startswith(std_decl):
data = data[len(std_decl):].strip()
self.set_text(data)
def load_xml(self, path):
with open(path, "rb") as f:
data = f.read()
self.set_text(data)
def save(self):
if not self.path:
print("no path to save XML to")
return
self.save_xml(self.path)
def save_xml(self, path):
        self.get_tree().write(path)
def indent_tree(elem, level=0):
    # Recursively set element text/tail whitespace so the tree serializes with
    # four-space indentation per nesting level.
    i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent_tree(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
|
cnvogelg/fs-uae-gles
|
launcher/fs_uae_launcher/editor/XMLControl.py
|
Python
|
gpl-2.0
| 2,828 | 0.001061 |
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Descriptors essentially contain exactly the information found in a .proto
file, in types that make this information accessible in Python.
"""
__author__ = 'robinson@google.com (Will Robinson)'
import six
from google.protobuf.internal import api_implementation
_USE_C_DESCRIPTORS = False
if api_implementation.Type() == 'cpp':
# Used by MakeDescriptor in cpp mode
import os
import uuid
from google.protobuf.pyext import _message
_USE_C_DESCRIPTORS = getattr(_message, '_USE_C_DESCRIPTORS', False)
class Error(Exception):
"""Base error for this module."""
class TypeTransformationError(Error):
"""Error transforming between python proto type and corresponding C++ type."""
if _USE_C_DESCRIPTORS:
# This metaclass allows to override the behavior of code like
# isinstance(my_descriptor, FieldDescriptor)
# and make it return True when the descriptor is an instance of the extension
# type written in C++.
class DescriptorMetaclass(type):
def __instancecheck__(cls, obj):
if super(DescriptorMetaclass, cls).__instancecheck__(obj):
return True
if isinstance(obj, cls._C_DESCRIPTOR_CLASS):
return True
return False
else:
# The standard metaclass; nothing changes.
DescriptorMetaclass = type
class DescriptorBase(six.with_metaclass(DescriptorMetaclass)):
"""Descriptors base class.
This class is the base of all descriptor classes. It provides common options
related functionality.
Attributes:
has_options: True if the descriptor has non-default options. Usually it
is not necessary to read this -- just call GetOptions() which will
happily return the default instance. However, it's sometimes useful
for efficiency, and also useful inside the protobuf implementation to
avoid some bootstrapping issues.
"""
if _USE_C_DESCRIPTORS:
# The class, or tuple of classes, that are considered as "virtual
# subclasses" of this descriptor class.
_C_DESCRIPTOR_CLASS = ()
def __init__(self, options, options_class_name):
"""Initialize the descriptor given its options message and the name of the
class of the options message. The name of the class is required in case
the options message is None and has to be created.
"""
self._options = options
self._options_class_name = options_class_name
# Does this descriptor have non-default options?
self.has_options = options is not None
def _SetOptions(self, options, options_class_name):
"""Sets the descriptor's options
This function is used in generated proto2 files to update descriptor
options. It must not be used outside proto2.
"""
self._options = options
self._options_class_name = options_class_name
# Does this descriptor have non-default options?
self.has_options = options is not None
def GetOptions(self):
"""Retrieves descriptor options.
This method returns the options set or creates the default options for the
descriptor.
"""
if self._options:
return self._options
from google.protobuf import descriptor_pb2
try:
options_class = getattr(descriptor_pb2, self._options_class_name)
except AttributeError:
raise RuntimeError('Unknown options class name %s!' %
(self._options_class_name))
self._options = options_class()
return self._options
class _NestedDescriptorBase(DescriptorBase):
"""Common class for descriptors that can be nested."""
def __init__(self, options, options_class_name, name, full_name,
file, containing_type, serialized_start=None,
serialized_end=None):
"""Constructor.
Args:
options: Protocol message options or None
to use default message options.
options_class_name: (str) The class name of the above options.
name: (str) Name of this protocol message type.
full_name: (str) Fully-qualified name of this protocol message type,
which will include protocol "package" name and the name of any
enclosing types.
file: (FileDescriptor) Reference to file info.
containing_type: if provided, this is a nested descriptor, with this
descriptor as parent, otherwise None.
serialized_start: The start index (inclusive) in block in the
file.serialized_pb that describes this descriptor.
serialized_end: The end index (exclusive) in block in the
file.serialized_pb that describes this descriptor.
"""
super(_NestedDescriptorBase, self).__init__(
options, options_class_name)
self.name = name
# TODO(falk): Add function to calculate full_name instead of having it in
# memory?
self.full_name = full_name
self.file = file
self.containing_type = containing_type
self._serialized_start = serialized_start
self._serialized_end = serialized_end
def GetTopLevelContainingType(self):
"""Returns the root if this is a nested type, or itself if its the root."""
desc = self
while desc.containing_type is not None:
desc = desc.containing_type
return desc
def CopyToProto(self, proto):
"""Copies this to the matching proto in descriptor_pb2.
Args:
proto: An empty proto instance from descriptor_pb2.
Raises:
      Error: If self couldn't be serialized, due to too few constructor arguments.
"""
if (self.file is not None and
self._serialized_start is not None and
self._serialized_end is not None):
proto.ParseFromString(self.file.serialized_pb[
self._serialized_start:self._serialized_end])
else:
raise Error('Descriptor does not contain serialization.')
class Descriptor(_NestedDescriptorBase):
"""Descriptor for a protocol message type.
A Descriptor instance has the following attributes:
name: (str) Name of this protocol message type.
full_name: (str) Fully-qualified name of this protocol message type,
which will include protocol "package" name and the name of any
enclosing types.
containing_type: (Descriptor) Reference to the descriptor of the
type containing us, or None if this is top-level.
fields: (list of FieldDescriptors) Field descriptors for all
fields in this type.
fields_by_number: (dict int -> FieldDescriptor) Same FieldDescriptor
objects as in |fields|, but indexed by "number" attribute in each
FieldDescriptor.
fields_by_name: (dict str -> FieldDescriptor) Same FieldDescriptor
objects as in |fields|, but indexed by "name" attribute in each
FieldDescriptor.
fields_by_camelcase_name: (dict str -> FieldDescriptor) Same
FieldDescriptor objects as in |fields|, but indexed by
"camelcase_name" attribute in each FieldDescriptor.
nested_types: (list of Descriptors) Descriptor references
for all protocol message types nested within this one.
nested_types_by_name: (dict str -> Descriptor) Same Descriptor
objects as in |nested_types|, but indexed by "name" attribute
in each Descriptor.
enum_types: (list of EnumDescriptors) EnumDescriptor references
for all enums contained within this type.
enum_types_by_name: (dict str ->EnumDescriptor) Same EnumDescriptor
objects as in |enum_types|, but indexed by "name" attribute
in each EnumDescriptor.
enum_values_by_name: (dict str -> EnumValueDescriptor) Dict mapping
from enum value name to EnumValueDescriptor for that value.
extensions: (list of FieldDescriptor) All extensions defined directly
within this message type (NOT within a nested type).
extensions_by_name: (dict, string -> FieldDescriptor) Same FieldDescriptor
objects as |extensions|, but indexed by "name" attribute of each
FieldDescriptor.
is_extendable: Does this type define any extension ranges?
oneofs: (list of OneofDescriptor) The list of descriptors for oneof fields
in this message.
oneofs_by_name: (dict str -> OneofDescriptor) Same objects as in |oneofs|,
but indexed by "name" attribute.
file: (FileDescriptor) Reference to file descriptor.
"""
if _USE_C_DESCRIPTORS:
_C_DESCRIPTOR_CLASS = _message.Descriptor
def __new__(cls, name, full_name, filename, containing_type, fields,
nested_types, enum_types, extensions, options=None,
is_extendable=True, extension_ranges=None, oneofs=None,
file=None, serialized_start=None, serialized_end=None,
syntax=None):
_message.Message._CheckCalledFromGeneratedFile()
return _message.default_pool.FindMessageTypeByName(full_name)
# NOTE(tmarek): The file argument redefining a builtin is nothing we can
# fix right now since we don't know how many clients already rely on the
# name of the argument.
def __init__(self, name, full_name, filename, containing_type, fields,
nested_types, enum_types, extensions, options=None,
is_extendable=True, extension_ranges=None, oneofs=None,
file=None, serialized_start=None, serialized_end=None,
syntax=None): # pylint:disable=redefined-builtin
"""Arguments to __init__() are as described in the description
of Descriptor fields above.
    Note that filename is an obsolete argument that is not used anymore.
Please use file.name to access this as an attribute.
"""
super(Descriptor, self).__init__(
options, 'MessageOptions', name, full_name, file,
containing_type, serialized_start=serialized_start,
serialized_end=serialized_end)
# We have fields in addition to fields_by_name and fields_by_number,
# so that:
# 1. Clients can index fields by "order in which they're listed."
# 2. Clients can easily iterate over all fields with the terse
# syntax: for f in descriptor.fields: ...
self.fields = fields
for field in self.fields:
field.containing_type = self
self.fields_by_number = dict((f.number, f) for f in fields)
self.fields_by_name = dict((f.name, f) for f in fields)
self._fields_by_camelcase_name = None
self.nested_types = nested_types
for nested_type in nested_types:
nested_type.containing_type = self
self.nested_types_by_name = dict((t.name, t) for t in nested_types)
self.enum_types = enum_types
for enum_type in self.enum_types:
enum_type.containing_type = self
self.enum_types_by_name = dict((t.name, t) for t in enum_types)
self.enum_values_by_name = dict(
(v.name, v) for t in enum_types for v in t.values)
self.extensions = extensions
for extension in self.extensions:
extension.extension_scope = self
self.extensions_by_name = dict((f.name, f) for f in extensions)
self.is_extendable = is_extendable
self.extension_ranges = extension_ranges
self.oneofs = oneofs if oneofs is not None else []
self.oneofs_by_name = dict((o.name, o) for o in self.oneofs)
for oneof in self.oneofs:
oneof.containing_type = self
self.syntax = syntax or "proto2"
@property
def fields_by_camelcase_name(self):
if self._fields_by_camelcase_name is None:
self._fields_by_camelcase_name = dict(
(f.camelcase_name, f) for f in self.fields)
return self._fields_by_camelcase_name
def EnumValueName(self, enum, value):
"""Returns the string name of an enum value.
This is just a small helper method to simplify a common operation.
Args:
enum: string name of the Enum.
value: int, value of the enum.
Returns:
string name of the enum value.
Raises:
KeyError if either the Enum doesn't exist or the value is not a valid
value for the enum.
"""
return self.enum_types_by_name[enum].values_by_number[value].name
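  # Illustrative only: for a message that defines
  #   enum Color { RED = 1; }
  # its descriptor's EnumValueName('Color', 1) would return 'RED'.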
def CopyToProto(self, proto):
"""Copies this to a descriptor_pb2.DescriptorProto.
Args:
proto: An empty descriptor_pb2.DescriptorProto.
"""
    # This function is overridden to give a better doc comment.
super(Descriptor, self).CopyToProto(proto)
# TODO(robinson): We should have aggressive checking here,
# for example:
# * If you specify a repeated field, you should not be allowed
# to specify a default value.
# * [Other examples here as needed].
#
# TODO(robinson): for this and other *Descriptor classes, we
# might also want to lock things down aggressively (e.g.,
# prevent clients from setting the attributes). Having
# stronger invariants here in general will reduce the number
# of runtime checks we must do in reflection.py...
class FieldDescriptor(DescriptorBase):
"""Descriptor for a single field in a .proto file.
A FieldDescriptor instance has the following attributes:
name: (str) Name of this field, exactly as it appears in .proto.
full_name: (str) Name of this field, including containing scope. This is
particularly relevant for extensions.
camelcase_name: (str) Camelcase name of this field.
index: (int) Dense, 0-indexed index giving the order that this
field textually appears within its message in the .proto file.
number: (int) Tag number declared for this field in the .proto file.
type: (One of the TYPE_* constants below) Declared type.
cpp_type: (One of the CPPTYPE_* constants below) C++ type used to
represent this field.
label: (One of the LABEL_* constants below) Tells whether this
field is optional, required, or repeated.
has_default_value: (bool) True if this field has a default value defined,
otherwise false.
default_value: (Varies) Default value of this field. Only
meaningful for non-repeated scalar fields. Repeated fields
should always set this to [], and non-repeated composite
fields should always set this to None.
containing_type: (Descriptor) Descriptor of the protocol message
type that contains this field. Set by the Descriptor constructor
if we're passed into one.
Somewhat confusingly, for extension fields, this is the
descriptor of the EXTENDED message, not the descriptor
of the message containing this field. (See is_extension and
extension_scope below).
message_type: (Descriptor) If a composite field, a descriptor
of the message type contained in this field. Otherwise, this is None.
enum_type: (EnumDescriptor) If this field contains an enum, a
descriptor of that enum. Otherwise, this is None.
is_extension: True iff this describes an extension field.
extension_scope: (Descriptor) Only meaningful if is_extension is True.
Gives the message that immediately contains this extension field.
Will be None iff we're a top-level (file-level) extension field.
options: (descriptor_pb2.FieldOptions) Protocol message field options or
None to use default field options.
containing_oneof: (OneofDescriptor) If the field is a member of a oneof
union, contains its descriptor. Otherwise, None.
"""
# Must be consistent with C++ FieldDescriptor::Type enum in
# descriptor.h.
#
# TODO(robinson): Find a way to eliminate this repetition.
TYPE_DOUBLE = 1
TYPE_FLOAT = 2
TYPE_INT64 = 3
TYPE_UINT64 = 4
TYPE_INT32 = 5
TYPE_FIXED64 = 6
TYPE_FIXED32 = 7
TYPE_BOOL = 8
TYPE_STRING = 9
TYPE_GROUP = 10
TYPE_MESSAGE = 11
TYPE_BYTES = 12
TYPE_UINT32 = 13
TYPE_ENUM = 14
TYPE_SFIXED32 = 15
TYPE_SFIXED64 = 16
TYPE_SINT32 = 17
TYPE_SINT64 = 18
MAX_TYPE = 18
# Must be consistent with C++ FieldDescriptor::CppType enum in
# descriptor.h.
#
# TODO(robinson): Find a way to eliminate this repetition.
CPPTYPE_INT32 = 1
CPPTYPE_INT64 = 2
CPPTYPE_UINT32 = 3
CPPTYPE_UINT64 = 4
CPPTYPE_DOUBLE = 5
CPPTYPE_FLOAT = 6
CPPTYPE_BOOL = 7
CPPTYPE_ENUM = 8
CPPTYPE_STRING = 9
CPPTYPE_MESSAGE = 10
MAX_CPPTYPE = 10
_PYTHON_TO_CPP_PROTO_TYPE_MAP = {
TYPE_DOUBLE: CPPTYPE_DOUBLE,
TYPE_FLOAT: CPPTYPE_FLOAT,
TYPE_ENUM: CPPTYPE_ENUM,
TYPE_INT64: CPPTYPE_INT64,
TYPE_SINT64: CPPTYPE_INT64,
TYPE_SFIXED64: CPPTYPE_INT64,
TYPE_UINT64: CPPTYPE_UINT64,
TYPE_FIXED64: CPPTYPE_UINT64,
TYPE_INT32: CPPTYPE_INT32,
TYPE_SFIXED32: CPPTYPE_INT32,
TYPE_SINT32: CPPTYPE_INT32,
TYPE_UINT32: CPPTYPE_UINT32,
TYPE_FIXED32: CPPTYPE_UINT32,
TYPE_BYTES: CPPTYPE_STRING,
TYPE_STRING: CPPTYPE_STRING,
TYPE_BOOL: CPPTYPE_BOOL,
TYPE_MESSAGE: CPPTYPE_MESSAGE,
TYPE_GROUP: CPPTYPE_MESSAGE
}
# Must be consistent with C++ FieldDescriptor::Label enum in
# descriptor.h.
#
# TODO(robinson): Find a way to eliminate this repetition.
LABEL_OPTIONAL = 1
LABEL_REQUIRED = 2
LABEL_REPEATED = 3
MAX_LABEL = 3
# Must be consistent with C++ constants kMaxNumber, kFirstReservedNumber,
# and kLastReservedNumber in descriptor.h
MAX_FIELD_NUMBER = (1 << 29) - 1
FIRST_RESERVED_FIELD_NUMBER = 19000
LAST_RESERVED_FIELD_NUMBER = 19999
if _USE_C_DESCRIPTORS:
_C_DESCRIPTOR_CLASS = _message.FieldDescriptor
def __new__(cls, name, full_name, index, number, type, cpp_type, label,
default_value, message_type, enum_type, containing_type,
is_extension, extension_scope, options=None,
has_default_value=True, containing_oneof=None):
_message.Message._CheckCalledFromGeneratedFile()
if is_extension:
return _message.default_pool.FindExtensionByName(full_name)
else:
return _message.default_pool.FindFieldByName(full_name)
def __init__(self, name, full_name, index, number, type, cpp_type, label,
default_value, message_type, enum_type, containing_type,
is_extension, extension_scope, options=None,
has_default_value=True, containing_oneof=None):
"""The arguments are as described in the description of FieldDescriptor
attributes above.
Note that containing_type may be None, and may be set later if necessary
(to deal with circular references between message types, for example).
Likewise for extension_scope.
"""
super(FieldDescriptor, self).__init__(options, 'FieldOptions')
self.name = name
self.full_name = full_name
self._camelcase_name = None
self.index = index
self.number = number
self.type = type
self.cpp_type = cpp_type
self.label = label
self.has_default_value = has_default_value
self.default_value = default_value
self.containing_type = containing_type
self.message_type = message_type
self.enum_type = enum_type
self.is_extension = is_extension
self.extension_scope = extension_scope
self.containing_oneof = containing_oneof
if api_implementation.Type() == 'cpp':
if is_extension:
self._cdescriptor = _message.default_pool.FindExtensionByName(full_name)
else:
self._cdescriptor = _message.default_pool.FindFieldByName(full_name)
else:
self._cdescriptor = None
@property
def camelcase_name(self):
if self._camelcase_name is None:
self._camelcase_name = _ToCamelCase(self.name)
return self._camelcase_name
@staticmethod
def ProtoTypeToCppProtoType(proto_type):
"""Converts from a Python proto type to a C++ Proto Type.
The Python ProtocolBuffer classes specify both the 'Python' datatype and the
'C++' datatype - and they're not the same. This helper method should
translate from one to another.
Args:
proto_type: the Python proto type (descriptor.FieldDescriptor.TYPE_*)
Returns:
descriptor.FieldDescriptor.CPPTYPE_*, the C++ type.
Raises:
TypeTransformationError: when the Python proto type isn't known.
"""
try:
return FieldDescriptor._PYTHON_TO_CPP_PROTO_TYPE_MAP[proto_type]
except KeyError:
raise TypeTransformationError('Unknown proto_type: %s' % proto_type)
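# Example, read directly from _PYTHON_TO_CPP_PROTO_TYPE_MAP:
#   FieldDescriptor.ProtoTypeToCppProtoType(FieldDescriptor.TYPE_SINT64)
#   == FieldDescriptor.CPPTYPE_INT64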
class EnumDescriptor(_NestedDescriptorBase):
"""Descriptor for an enum defined in a .proto file.
An EnumDescriptor instance has the following attributes:
name: (str) Name of the enum type.
full_name: (str) Full name of the type, including package name
and any enclosing type(s).
values: (list of EnumValueDescriptors) List of the values
in this enum.
values_by_name: (dict str -> EnumValueDescriptor) Same as |values|,
but indexed by the "name" field of each EnumValueDescriptor.
values_by_number: (dict int -> EnumValueDescriptor) Same as |values|,
but indexed by the "number" field of each EnumValueDescriptor.
containing_type: (Descriptor) Descriptor of the immediate containing
type of this enum, or None if this is an enum defined at the
top level in a .proto file. Set by Descriptor's constructor
if we're passed into one.
file: (FileDescriptor) Reference to file descriptor.
options: (descriptor_pb2.EnumOptions) Enum options message or
None to use default enum options.
"""
if _USE_C_DESCRIPTORS:
_C_DESCRIPTOR_CLASS = _message.EnumDescriptor
def __new__(cls, name, full_name, filename, values,
containing_type=None, options=None, file=None,
serialized_start=None, serialized_end=None):
_message.Message._CheckCalledFromGeneratedFile()
return _message.default_pool.FindEnumTypeByName(full_name)
def __init__(self, name, full_name, filename, values,
containing_type=None, options=None, file=None,
serialized_start=None, serialized_end=None):
"""Arguments are as described in the attribute description above.
    Note that filename is an obsolete argument that is not used anymore.
Please use file.name to access this as an attribute.
"""
super(EnumDescriptor, self).__init__(
options, 'EnumOptions', name, full_name, file,
containing_type, serialized_start=serialized_start,
serialized_end=serialized_end)
self.values = values
for value in self.values:
value.type = self
self.values_by_name = dict((v.name, v) for v in values)
self.values_by_number = dict((v.number, v) for v in values)
def CopyToProto(self, proto):
"""Copies this to a descriptor_pb2.EnumDescriptorProto.
Args:
proto: An empty descriptor_pb2.EnumDescriptorProto.
"""
# This function is overriden to give a better doc comment.
super(EnumDescriptor, self).CopyToProto(proto)
class EnumValueDescriptor(DescriptorBase):
"""Descriptor for a single value within an enum.
name: (str) Name of this value.
index: (int) Dense, 0-indexed index giving the order that this
value appears textually within its enum in the .proto file.
number: (int) Actual number assigned to this enum value.
type: (EnumDescriptor) EnumDescriptor to which this value
belongs. Set by EnumDescriptor's constructor if we're
passed into one.
options: (descriptor_pb2.EnumValueOptions) Enum value options message or
    None to use default enum value options.
"""
if _USE_C_DESCRIPTORS:
_C_DESCRIPTOR_CLASS = _message.EnumValueDescriptor
def __new__(cls, name, index, number, type=None, options=None):
_message.Message._CheckCalledFromGeneratedFile()
# There is no way we can build a complete EnumValueDescriptor with the
# given parameters (the name of the Enum is not known, for example).
# Fortunately generated files just pass it to the EnumDescriptor()
# constructor, which will ignore it, so returning None is good enough.
return None
def __init__(self, name, index, number, type=None, options=None):
"""Arguments are as described in the attribute description above."""
super(EnumValueDescriptor, self).__init__(options, 'EnumValueOptions')
self.name = name
self.index = index
self.number = number
self.type = type
class OneofDescriptor(object):
"""Descriptor for a oneof field.
name: (str) Name of the oneof field.
full_name: (str) Full name of the oneof field, including package name.
index: (int) 0-based index giving the order of the oneof field inside
its containing type.
containing_type: (Descriptor) Descriptor of the protocol message
type that contains this field. Set by the Descriptor constructor
if we're passed into one.
fields: (list of FieldDescriptor) The list of field descriptors this
oneof can contain.
"""
if _USE_C_DESCRIPTORS:
_C_DESCRIPTOR_CLASS = _message.OneofDescriptor
def __new__(cls, name, full_name, index, containing_type, fields):
_message.Message._CheckCalledFromGeneratedFile()
return _message.default_pool.FindOneofByName(full_name)
def __init__(self, name, full_name, index, containing_type, fields):
"""Arguments are as described in the attribute description above."""
self.name = name
self.full_name = full_name
self.index = index
self.containing_type = containing_type
self.fields = fields
class ServiceDescriptor(_NestedDescriptorBase):
"""Descriptor for a service.
name: (str) Name of the service.
full_name: (str) Full name of the service, including package name.
  index: (int) 0-indexed index giving the order that this service's
    definition appears within the .proto file.
methods: (list of MethodDescriptor) List of methods provided by this
service.
options: (descriptor_pb2.ServiceOptions) Service options message or
None to use default service options.
file: (FileDescriptor) Reference to file info.
"""
def __init__(self, name, full_name, index, methods, options=None, file=None,
serialized_start=None, serialized_end=None):
super(ServiceDescriptor, self).__init__(
options, 'ServiceOptions', name, full_name, file,
None, serialized_start=serialized_start,
serialized_end=serialized_end)
self.index = index
self.methods = methods
# Set the containing service for each method in this service.
for method in self.methods:
method.containing_service = self
def FindMethodByName(self, name):
"""Searches for the specified method, and returns its descriptor."""
for method in self.methods:
if name == method.name:
return method
return None
def CopyToProto(self, proto):
"""Copies this to a descriptor_pb2.ServiceDescriptorProto.
Args:
proto: An empty descriptor_pb2.ServiceDescriptorProto.
"""
    # This function is overridden to give a better doc comment.
super(ServiceDescriptor, self).CopyToProto(proto)
class MethodDescriptor(DescriptorBase):
"""Descriptor for a method in a service.
name: (str) Name of the method within the service.
full_name: (str) Full name of method.
index: (int) 0-indexed index of the method inside the service.
containing_service: (ServiceDescriptor) The service that contains this
method.
input_type: The descriptor of the message that this method accepts.
output_type: The descriptor of the message that this method returns.
options: (descriptor_pb2.MethodOptions) Method options message or
None to use default method options.
"""
def __init__(self, name, full_name, index, containing_service,
input_type, output_type, options=None):
"""The arguments are as described in the description of MethodDescriptor
attributes above.
Note that containing_service may be None, and may be set later if necessary.
"""
super(MethodDescriptor, self).__init__(options, 'MethodOptions')
self.name = name
self.full_name = full_name
self.index = index
self.containing_service = containing_service
self.input_type = input_type
self.output_type = output_type
class FileDescriptor(DescriptorBase):
"""Descriptor for a file. Mimics the descriptor_pb2.FileDescriptorProto.
Note that enum_types_by_name, extensions_by_name, and dependencies
fields are only set by the message_factory module, and not by the
generated proto code.
name: name of file, relative to root of source tree.
package: name of the package
syntax: string indicating syntax of the file (can be "proto2" or "proto3")
serialized_pb: (str) Byte string of serialized
descriptor_pb2.FileDescriptorProto.
dependencies: List of other FileDescriptors this FileDescriptor depends on.
  message_types_by_name: Dict of message names to their descriptors.
enum_types_by_name: Dict of enum names and their descriptors.
extensions_by_name: Dict of extension names and their descriptors.
pool: the DescriptorPool this descriptor belongs to. When not passed to the
constructor, the global default pool is used.
"""
if _USE_C_DESCRIPTORS:
_C_DESCRIPTOR_CLASS = _message.FileDescriptor
def __new__(cls, name, package, options=None, serialized_pb=None,
dependencies=None, syntax=None, pool=None):
# FileDescriptor() is called from various places, not only from generated
# files, to register dynamic proto files and messages.
if serialized_pb:
# TODO(amauryfa): use the pool passed as argument. This will work only
# for C++-implemented DescriptorPools.
return _message.default_pool.AddSerializedFile(serialized_pb)
else:
return super(FileDescriptor, cls).__new__(cls)
def __init__(self, name, package, options=None, serialized_pb=None,
dependencies=None, syntax=None, pool=None):
"""Constructor."""
super(FileDescriptor, self).__init__(options, 'FileOptions')
if pool is None:
from google.protobuf import descriptor_pool
pool = descriptor_pool.Default()
self.pool = pool
self.message_types_by_name = {}
self.name = name
self.package = package
self.syntax = syntax or "proto2"
self.serialized_pb = serialized_pb
self.enum_types_by_name = {}
self.extensions_by_name = {}
self.dependencies = (dependencies or [])
if (api_implementation.Type() == 'cpp' and
self.serialized_pb is not None):
_message.default_pool.AddSerializedFile(self.serialized_pb)
def CopyToProto(self, proto):
"""Copies this to a descriptor_pb2.FileDescriptorProto.
Args:
proto: An empty descriptor_pb2.FileDescriptorProto.
"""
proto.ParseFromString(self.serialized_pb)
def _ParseOptions(message, string):
"""Parses serialized options.
This helper function is used to parse serialized options in generated
proto2 files. It must not be used outside proto2.
"""
message.ParseFromString(string)
return message
def _ToCamelCase(name):
"""Converts name to camel-case and returns it."""
capitalize_next = False
result = []
for c in name:
if c == '_':
if result:
capitalize_next = True
elif capitalize_next:
result.append(c.upper())
capitalize_next = False
else:
result += c
# Lower-case the first letter.
if result and result[0].isupper():
result[0] = result[0].lower()
return ''.join(result)
def MakeDescriptor(desc_proto, package='', build_file_if_cpp=True,
syntax=None):
"""Make a protobuf Descriptor given a DescriptorProto protobuf.
Handles nested descriptors. Note that this is limited to the scope of defining
a message inside of another message. Composite fields can currently only be
resolved if the message is defined in the same scope as the field.
Args:
desc_proto: The descriptor_pb2.DescriptorProto protobuf message.
package: Optional package name for the new message Descriptor (string).
build_file_if_cpp: Update the C++ descriptor pool if api matches.
Set to False on recursion, so no duplicates are created.
syntax: The syntax/semantics that should be used. Set to "proto3" to get
proto3 field presence semantics.
Returns:
A Descriptor for protobuf messages.
"""
if api_implementation.Type() == 'cpp' and build_file_if_cpp:
# The C++ implementation requires all descriptors to be backed by the same
# definition in the C++ descriptor pool. To do this, we build a
# FileDescriptorProto with the same definition as this descriptor and build
# it into the pool.
from google.protobuf import descriptor_pb2
file_descriptor_proto = descriptor_pb2.FileDescriptorProto()
file_descriptor_proto.message_type.add().MergeFrom(desc_proto)
# Generate a random name for this proto file to prevent conflicts with any
# imported ones. We need to specify a file name so the descriptor pool
# accepts our FileDescriptorProto, but it is not important what that file
# name is actually set to.
proto_name = str(uuid.uuid4())
if package:
file_descriptor_proto.name = os.path.join(package.replace('.', '/'),
proto_name + '.proto')
file_descriptor_proto.package = package
else:
file_descriptor_proto.name = proto_name + '.proto'
_message.default_pool.Add(file_descriptor_proto)
result = _message.default_pool.FindFileByName(file_descriptor_proto.name)
if _USE_C_DESCRIPTORS:
return result.message_types_by_name[desc_proto.name]
full_message_name = [desc_proto.name]
if package: full_message_name.insert(0, package)
# Create Descriptors for enum types
enum_types = {}
for enum_proto in desc_proto.enum_type:
full_name = '.'.join(full_message_name + [enum_proto.name])
enum_desc = EnumDescriptor(
enum_proto.name, full_name, None, [
EnumValueDescriptor(enum_val.name, ii, enum_val.number)
for ii, enum_val in enumerate(enum_proto.value)])
enum_types[full_name] = enum_desc
# Create Descriptors for nested types
nested_types = {}
for nested_proto in desc_proto.nested_type:
full_name = '.'.join(full_message_name + [nested_proto.name])
# Nested types are just those defined inside of the message, not all types
# used by fields in the message, so no loops are possible here.
nested_desc = MakeDescriptor(nested_proto,
package='.'.join(full_message_name),
build_file_if_cpp=False,
syntax=syntax)
nested_types[full_name] = nested_desc
fields = []
for field_proto in desc_proto.field:
full_name = '.'.join(full_message_name + [field_proto.name])
enum_desc = None
nested_desc = None
if field_proto.HasField('type_name'):
type_name = field_proto.type_name
full_type_name = '.'.join(full_message_name +
[type_name[type_name.rfind('.')+1:]])
if full_type_name in nested_types:
nested_desc = nested_types[full_type_name]
elif full_type_name in enum_types:
enum_desc = enum_types[full_type_name]
# Else type_name references a non-local type, which isn't implemented
field = FieldDescriptor(
field_proto.name, full_name, field_proto.number - 1,
field_proto.number, field_proto.type,
FieldDescriptor.ProtoTypeToCppProtoType(field_proto.type),
field_proto.label, None, nested_desc, enum_desc, None, False, None,
options=field_proto.options, has_default_value=False)
fields.append(field)
desc_name = '.'.join(full_message_name)
return Descriptor(desc_proto.name, desc_name, None, None, fields,
list(nested_types.values()), list(enum_types.values()), [],
options=desc_proto.options)
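# Rough usage sketch (the 'Point' message shape is made up for illustration):
#
#   from google.protobuf import descriptor_pb2
#
#   desc_proto = descriptor_pb2.DescriptorProto()
#   desc_proto.name = 'Point'
#   for number, field_name in enumerate(('x', 'y'), start=1):
#     field = desc_proto.field.add()
#     field.name = field_name
#     field.number = number
#     field.type = descriptor_pb2.FieldDescriptorProto.TYPE_INT32
#     field.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL
#
#   point_descriptor = MakeDescriptor(desc_proto, package='example')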
|
gwq5210/litlib
|
thirdparty/sources/protobuf/python/google/protobuf/descriptor.py
|
Python
|
gpl-3.0
| 37,400 | 0.006364 |
class APIConnectionError(Exception):
pass
class DownloadError(Exception):
pass
class ProducerAPIError(APIConnectionError):
pass
class ConsumerAPIError(APIConnectionError):
pass
|
hlmnrmr/liveblog
|
server/liveblog/syndication/exceptions.py
|
Python
|
agpl-3.0
| 199 | 0 |
"""
A Python interface to the primer3_core executable.
TODO: it is not possible to keep a persistent primer3 process
using subprocess module - communicate() terminates the input
stream and waits for the process to finish
Author: Libor Morkovsky 2012
"""
# This file is a part of Scrimer.
# See LICENSE.txt for details on licensing.
# Copyright (C) 2012, 2013 Libor Morkovsky
class BoulderIO:
"""Provides Python interface for ``BoulderIO`` format used by Primer3.
"""
@classmethod
def parse(self, string):
r"""Parse a BoulderIO string ``(KEY=VAL\n)``
return a list of records, where each record is a dictionary
end of the string implies a single ``'=\n'`` (record separator).
"""
record_strings = string.split("=\n")
return [dict(tuple(line.split("=", 1)) for line in record.split("\n") if len(line) > 3) for record in record_strings if len(record) > 3]
@classmethod
def deparse(self, records):
r"""Accepts a dict or a list of dicts, produces a BoulderIO string ``(KEY=VAL\n)``
with records separated by ``'=\n'``.
"""
# unify the input, create a list with single element
if type(records) == dict:
records = [records]
return "\n=\n".join("\n".join("=".join(kval) for kval in record.iteritems()) for record in records) + "\n=\n"
class Primer3:
"""Wraps Primer3 executable. `kwargs` are converted to strings and used as default parameters
for each call of primer3 binary.
"""
def __init__(self, p3path="primer3_core", **kwargs):
# store path to primer3
self.p3path = p3path
# add stringized versions of all kwargs to default args
self.default_params = {}
str_kw = dict((key, str(val)) for key, val in kwargs.iteritems())
self.default_params.update(str_kw)
def call(self, records):
"""Merge each of the records with `default_params`, the record taking precedence,
call the ``primer3`` binary,
parse the output and return a list of dictionaries,
        ``{RIGHT:[], LEFT:[], PAIR:[], INTERNAL:[]}`` for each input record.
        Uppercase keys (in the result) are the original names from the BoulderIO format;
        lowercase keys have no direct equivalent in primer3 output (``position``, ``other-keys``).
"""
# merge the defaults with current query
full_records = [dict(self.default_params.items() + record.items()) for record in records]
# call primer3
import subprocess
self.child = subprocess.Popen([self.p3path, '-strict_tags'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = self.child.communicate(BoulderIO.deparse(full_records))
# simple check for errors in stderr
if len(err):
raise Exception(err)
results = BoulderIO.parse(out)
# parse the results to {RIGHT:[], LEFT:[], PAIR:[], INTERNAL:[]}
sides = ['RIGHT', 'LEFT', 'PAIR', 'INTERNAL']
primers = []
for result in results:
# primers for current result
res_primers = dict((side, []) for side in sides)
used_keys = []
for side in sides:
nret_key = 'PRIMER_%s_NUM_RETURNED' % side
nret = int(result.get(nret_key, 0))
used_keys.append(nret_key)
# extract the values for each single primer and put those to
# equivalent key
for num in xrange(nret):
template = 'PRIMER_%s_%d_' % (side, num)
primer_keys = filter(lambda k: template in k, result.iterkeys())
primer = dict((key[len(template):], result[key]) for key in primer_keys)
# extract the position, which itself has no extractible name in BoulderIO
# only 'PRIMER_LEFT_0'
if side != 'PAIR':
pos_key = template[:len(template)-1]
primer['position'] = result.get(pos_key, "#error!")
used_keys.append(pos_key)
# keep track of keys used in current record
used_keys.extend(primer_keys)
res_primers[side].append(primer)
# store all the unused keys for current result
res_primers['other-keys'] = dict((key, result[key]) for key in result.iterkeys() if key not in used_keys)
primers.append(res_primers)
return primers
if __name__ == "__main__":
print "Running tests"
import textwrap
record = BoulderIO.parse(textwrap.dedent(
"""
SEQUENCE_ID=example
SEQUENCE_TEMPLATE=GTAGTCAGTAGACGATGACTACTGACGATGCAGACNACACACACACACACAGCACACAGGTATTAGTGGGCCATTCGATCCCGACCCAAATCGATAGCTACGATGACG
SEQUENCE_TARGET=37,21
PRIMER_PICK_INTERNAL_OLIGO=0
PRIMER_OPT_SIZE=18
PRIMER_MIN_SIZE=15
PRIMER_MAX_SIZE=21
PRIMER_MAX_NS_ACCEPTED=3
PRIMER_PRODUCT_SIZE_RANGE=50-100
"""))
record_no_res = BoulderIO.parse(textwrap.dedent(
"""
SEQUENCE_ID=example
SEQUENCE_TEMPLATE=GTAGTCAGTAGACNATGACNACTGACGATGCAGACNACACACACACACACAGCACACAGGTATTAGTGGGCCATTCGATCCCGACCCAAATCGATAGCTACGATGACG
SEQUENCE_TARGET=37,21
PRIMER_TASK=pick_detection_primers
PRIMER_PICK_LEFT_PRIMER=1
PRIMER_PICK_INTERNAL_OLIGO=1
PRIMER_PICK_RIGHT_PRIMER=1
PRIMER_OPT_SIZE=18
PRIMER_MIN_SIZE=15
PRIMER_MAX_SIZE=21
PRIMER_MAX_NS_ACCEPTED=1
PRIMER_PRODUCT_SIZE_RANGE=75-100
SEQUENCE_INTERNAL_EXCLUDED_REGION=37,21
"""))
default_params = BoulderIO.parse(textwrap.dedent(
"""
PRIMER_THERMODYNAMIC_PARAMETERS_PATH=/opt/primer3/bin/primer3_config/
PRIMER_MAX_NS_ACCEPTED=0
PRIMER_EXPLAIN_FLAG=1
"""))[0]
print "Testing BoulderIO, single record:",
record_dp = BoulderIO.deparse(record)
record_reparsed = BoulderIO.parse(record_dp)
if record == record_reparsed:
print "OK"
else:
print "Failed!"
print "Testing BoulderIO, two records:",
two_records = record + record_no_res
record_dp = BoulderIO.deparse(two_records)
record_reparsed = BoulderIO.parse(record_dp)
if two_records == record_reparsed:
print "OK"
else:
print "Failed!"
print "Testing Primer3, single record:",
p3 = Primer3(**default_params)
# test for single record
res = p3.call(record)
if res[0]['RIGHT'][0]['SEQUENCE'] == 'GTCGGGATCGAATGGCCC':
print "OK"
else:
print "Failed!"
# test for multiple records
print "Testing Primer3, two records:",
res = p3.call(two_records)
# second record should produce no results
if len(res[1]['RIGHT']) == 0:
print "OK"
else:
print "Failed!"
# if no exception occurs, the test should be OK
print "Tests ran OK"
|
libor-m/scrimer
|
scrimer/primer3_connector.py
|
Python
|
agpl-3.0
| 7,094 | 0.006061 |
import paho.mqtt.client as mqtt
import os,binascii
import logging
import time
from enum import Enum
from threading import Timer
import json
import random
import math
ID_STRING = binascii.hexlify(os.urandom(15)).decode('utf-8')[:4]
CLIENT_ID = "robot-emulator-" + ID_STRING
BROKER_HOST = "mosquitto"
TOPIC_STATUS = "twin/%s/status" % ID_STRING
TOPIC_PLANS = "twin/%s/plans" % ID_STRING
TOPIC_REGISTRATION = "twins/registration/announce"
TOPIC_HANDSHAKE = "twins/registration/handshake"
class TwinStatus(Enum):
NOT_CONNECTED = 1
SEARCHING = 2
SELECTED = 3
CONNECTED = 4
DISCONNECTED = 5
status = TwinStatus.NOT_CONNECTED
timer = None
def main():
logging.info("Client '%s' is connecting...", CLIENT_ID)
# Client(client_id=””, clean_session=True, userdata=None, protocol=MQTTv311, transport=”tcp”)
client = mqtt.Client(CLIENT_ID)
client.on_connect = on_connect
client.on_message = on_message
try:
client.connect(BROKER_HOST)
logging.info("Client '%s' CONNECTED to '%s'", CLIENT_ID, BROKER_HOST)
except Exception as e:
logging.error("Failed to connect to the MQTT broker on host '%s' (CLIENT_ID='%s')", BROKER_HOST, CLIENT_ID)
logging.debug(e)
client.loop_forever()
def twin_search_timeout(client, n):
if not status == TwinStatus.CONNECTED:
logging.warning("Twin connection is not established (%s)", status)
request_twin(client)
schedule_reconnect(client, n+1)
def on_connect(client, userdata, flags, rc):
print("Connected with result code "+str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
# no need to sub to our own statuses
# sub(client, TOPIC_STATUS)
sub(client, TOPIC_PLANS)
sub(client, TOPIC_HANDSHAKE)
# client.publish(TOPIC_STATUS, "{'status': 'on'}")
request_twin(client)
schedule_reconnect(client, 1)
# TODO also publish some message on the 'registration' topic
def sub(client, topic):
client.subscribe(topic)
logging.info("Subscribed to %s", topic)
def schedule_reconnect(client, n):
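    # Exponential backoff with jitter: the 0.1 s base delay doubles with each
    # attempt n and gets 0-200 ms of random jitter, capped at 10 s between retries.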
delay = min(0.1 * 2 ** (n-1) + (random.randint(0, 200) / 1000), 10)
logging.debug("Next reconnection attempt in %fs", delay)
timer = Timer(delay, twin_search_timeout, [client, n])
timer.start()
def request_twin(client):
    global status
    client.publish(TOPIC_REGISTRATION, json.dumps({'twin': ID_STRING, 'status': 'awaiting'}))
    status = TwinStatus.SEARCHING
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
logging.debug("New message '%s' (topic: '%s', QoS%d)", msg.payload, msg.topic, msg.qos)
if not msg.topic == TOPIC_STATUS:
client.publish(TOPIC_STATUS, json.dumps({'status': 'done'}))
if msg.topic == TOPIC_HANDSHAKE:
reg_reply = json.loads(msg.payload)
process_reg_reply(reg_reply, client, msg)
def process_reg_reply(reg_reply, client, msg):
    global status
    if reg_reply["device"] != ID_STRING:
logging.debug("A registration message for another device received: %s", msg.payload)
else:
t = reg_reply["twin"]
logging.debug("Trying to select the twin '%s'", t)
# TODO do we really need this status?
status = TwinStatus.SELECTED
register_with_twin(t)
def register_with_twin(t):
    global status, twin
    logging.warning("Not implemented yet")
    status = TwinStatus.CONNECTED
    twin = t
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%y-%m-%d %H:%M')
main()
logging.warning("Client '%s' is shutting down", CLIENT_ID)
|
EricssonResearch/scott-eu
|
robot-emulator/main.py
|
Python
|
apache-2.0
| 3,749 | 0.005346 |
import unittest
import mock
from ...management.resource_servers import ResourceServers
class TestResourceServers(unittest.TestCase):
def test_init_with_optionals(self):
t = ResourceServers(domain='domain', token='jwttoken', telemetry=False, timeout=(10, 2))
self.assertEqual(t.client.options.timeout, (10, 2))
telemetry_header = t.client.base_headers.get('Auth0-Client', None)
self.assertEqual(telemetry_header, None)
@mock.patch('auth0.v3.management.resource_servers.RestClient')
def test_create(self, mock_rc):
mock_instance = mock_rc.return_value
r = ResourceServers(domain='domain', token='jwttoken')
r.create({'name': 'TestApi', 'identifier': 'https://test.com/api'})
mock_instance.post.assert_called_with(
'https://domain/api/v2/resource-servers',
data={'name': 'TestApi', 'identifier': 'https://test.com/api'}
)
@mock.patch('auth0.v3.management.resource_servers.RestClient')
def test_get_all(self, mock_rc):
mock_instance = mock_rc.return_value
r = ResourceServers(domain='domain', token='jwttoken')
# with default params
r.get_all()
mock_instance.get.assert_called_with(
'https://domain/api/v2/resource-servers',
params={
'page': None,
'per_page': None,
'include_totals': 'false'
}
)
# with pagination params
r.get_all(page=3, per_page=27, include_totals=True)
mock_instance.get.assert_called_with(
'https://domain/api/v2/resource-servers',
params={
'page': 3,
'per_page': 27,
'include_totals': 'true'
}
)
@mock.patch('auth0.v3.management.resource_servers.RestClient')
def test_get(self, mock_rc):
mock_instance = mock_rc.return_value
r = ResourceServers(domain='domain', token='jwttoken')
r.get('some_id')
mock_instance.get.assert_called_with(
'https://domain/api/v2/resource-servers/some_id'
)
@mock.patch('auth0.v3.management.resource_servers.RestClient')
def test_delete(self, mock_rc):
mock_instance = mock_rc.return_value
r = ResourceServers(domain='domain', token='jwttoken')
r.delete('some_id')
mock_instance.delete.assert_called_with(
'https://domain/api/v2/resource-servers/some_id'
)
@mock.patch('auth0.v3.management.resource_servers.RestClient')
def test_update(self, mock_rc):
mock_instance = mock_rc.return_value
r = ResourceServers(domain='domain', token='jwttoken')
r.update('some_id', {'name': 'TestApi2',
'identifier': 'https://test.com/api2'})
mock_instance.patch.assert_called_with(
'https://domain/api/v2/resource-servers/some_id',
data={'name': 'TestApi2',
'identifier': 'https://test.com/api2'}
)
|
auth0/auth0-python
|
auth0/v3/test/management/test_resource_servers.py
|
Python
|
mit
| 3,056 | 0.000327 |
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
"""Client actions related to plist files."""
import cStringIO
import types
from grr.client import actions
from grr.client import vfs
from grr.lib import plist as plist_lib
from grr.lib import rdfvalue
from grr.parsers import binplist
class PlistQuery(actions.ActionPlugin):
"""Parses the plist request specified and returns the results.
PlistQuery allows you to obtain data from a plist, optionally only if it
matches the given filter.
Querying for a plist is done in two steps. First, its contents are
retrieved.
For plists where the top level element is a dict, you can use the key
parameter of the PlistRequest to specify a path into the dict to retrieve.
  When specifying a key, the requested key values are placed under a dictionary
key called "key".
Whether you've specified a key or not, the query parameter allows you to
  filter based on the values retrieved.
"""
in_rdfvalue = rdfvalue.PlistRequest
out_rdfvalue = rdfvalue.RDFValueArray
MAX_PLIST_SIZE = 1024 * 1024 * 100 # 100 MB
def Run(self, args):
self.context = args.context
self.filter_query = args.query
with vfs.VFSOpen(args.pathspec, progress_callback=self.Progress) as fd:
data = fd.Read(self.MAX_PLIST_SIZE)
plist = binplist.readPlist(cStringIO.StringIO(data))
# Create the query parser
parser = plist_lib.PlistFilterParser(self.filter_query).Parse()
filter_imp = plist_lib.PlistFilterImplementation
matcher = parser.Compile(filter_imp)
if self.context:
# Obtain the values for the context using the value expander
value_expander = filter_imp.FILTERS["ValueExpander"]
iterator = value_expander().Expand(plist, self.context)
else:
# If we didn't get a context, the context is the whole plist
iterator = [plist]
reply = rdfvalue.RDFValueArray()
for item in iterator:
# As we're setting the context manually, we need to account for types
if isinstance(item, types.ListType):
for sub_item in item:
partial_plist = plist_lib.PlistValueToPlainValue(sub_item)
if matcher.Matches(partial_plist):
reply.Append(sub_item)
else:
partial_plist = plist_lib.PlistValueToPlainValue(item)
if matcher.Matches(partial_plist):
reply.Append(partial_plist)
self.SendReply(reply)
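# Rough usage sketch (the PlistRequest field names are assumed to mirror the
# attributes read in Run() above; values are placeholders):
#
#   request = rdfvalue.PlistRequest(pathspec=<pathspec to some .plist file>,
#                                   context="SomeTopLevelKey",
#                                   query="")
#   PlistQuery().Run(request)  # replies with an RDFValueArray of matching values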
|
wandec/grr
|
client/client_actions/plist.py
|
Python
|
apache-2.0
| 2,452 | 0.008564 |
#! -*- coding: utf-8 -*-
from collections import OrderedDict
from sqlalchemy import Column, Date, ForeignKey, Index, String
from sqlalchemy import Integer
from sqlalchemy.orm import relationship
from radar.database import db
from radar.models.common import MetaModelMixin, patient_id_column, patient_relationship, uuid_pk_column
from radar.models.logs import log_changes
COUNTRIES = OrderedDict([
('AF', 'Afghanistan'),
('AX', 'Åland Islands'),
('AL', 'Albania'),
('DZ', 'Algeria'),
('AS', 'American Samoa'),
('AD', 'Andorra'),
('AO', 'Angola'),
('AI', 'Anguilla'),
('AQ', 'Antarctica'),
('AG', 'Antigua and Barbuda'),
('AR', 'Argentina'),
('AM', 'Armenia'),
('AW', 'Aruba'),
('AU', 'Australia'),
('AT', 'Austria'),
('AZ', 'Azerbaijan'),
('BS', 'Bahamas'),
('BH', 'Bahrain'),
('BD', 'Bangladesh'),
('BB', 'Barbados'),
('BY', 'Belarus'),
('BE', 'Belgium'),
('BZ', 'Belize'),
('BJ', 'Benin'),
('BM', 'Bermuda'),
('BT', 'Bhutan'),
('BO', 'Bolivia, Plurinational State of'),
('BQ', 'Bonaire, Sint Eustatius and Saba'),
('BA', 'Bosnia and Herzegovina'),
('BW', 'Botswana'),
('BV', 'Bouvet Island'),
('BR', 'Brazil'),
('IO', 'British Indian Ocean Territory'),
('BN', 'Brunei Darussalam'),
('BG', 'Bulgaria'),
('BF', 'Burkina Faso'),
('BI', 'Burundi'),
('KH', 'Cambodia'),
('CM', 'Cameroon'),
('CA', 'Canada'),
('CV', 'Cape Verde'),
('KY', 'Cayman Islands'),
('CF', 'Central African Republic'),
('TD', 'Chad'),
('CL', 'Chile'),
('CN', 'China'),
('CX', 'Christmas Island'),
('CC', 'Cocos (Keeling) Islands'),
('CO', 'Colombia'),
('KM', 'Comoros'),
('CG', 'Congo'),
('CD', 'Congo, the Democratic Republic of the'),
('CK', 'Cook Islands'),
('CR', 'Costa Rica'),
('CI', 'Côte d\'Ivoire'),
('HR', 'Croatia'),
('CU', 'Cuba'),
('CW', 'Curaçao'),
('CY', 'Cyprus'),
('CZ', 'Czech Republic'),
('DK', 'Denmark'),
('DJ', 'Djibouti'),
('DM', 'Dominica'),
('DO', 'Dominican Republic'),
('EC', 'Ecuador'),
('EG', 'Egypt'),
('SV', 'El Salvador'),
('GQ', 'Equatorial Guinea'),
('ER', 'Eritrea'),
('EE', 'Estonia'),
('ET', 'Ethiopia'),
('FK', 'Falkland Islands (Malvinas)'),
('FO', 'Faroe Islands'),
('FJ', 'Fiji'),
('FI', 'Finland'),
('FR', 'France'),
('GF', 'French Guiana'),
('PF', 'French Polynesia'),
('TF', 'French Southern Territories'),
('GA', 'Gabon'),
('GM', 'Gambia'),
('GE', 'Georgia'),
('DE', 'Germany'),
('GH', 'Ghana'),
('GI', 'Gibraltar'),
('GR', 'Greece'),
('GL', 'Greenland'),
('GD', 'Grenada'),
('GP', 'Guadeloupe'),
('GU', 'Guam'),
('GT', 'Guatemala'),
('GG', 'Guernsey'),
('GN', 'Guinea'),
('GW', 'Guinea-Bissau'),
('GY', 'Guyana'),
('HT', 'Haiti'),
('HM', 'Heard Island and McDonald Islands'),
('VA', 'Holy See (Vatican City State)'),
('HN', 'Honduras'),
('HK', 'Hong Kong'),
('HU', 'Hungary'),
('IS', 'Iceland'),
('IN', 'India'),
('ID', 'Indonesia'),
('IR', 'Iran, Islamic Republic of'),
('IQ', 'Iraq'),
('IE', 'Ireland'),
('IM', 'Isle of Man'),
('IL', 'Israel'),
('IT', 'Italy'),
('JM', 'Jamaica'),
('JP', 'Japan'),
('JE', 'Jersey'),
('JO', 'Jordan'),
('KZ', 'Kazakhstan'),
('KE', 'Kenya'),
('KI', 'Kiribati'),
('KP', 'Korea, Democratic People\'s Republic of'),
('KR', 'Korea, Republic of'),
('KW', 'Kuwait'),
('KG', 'Kyrgyzstan'),
('LA', 'Lao People\'s Democratic Republic'),
('LV', 'Latvia'),
('LB', 'Lebanon'),
('LS', 'Lesotho'),
('LR', 'Liberia'),
('LY', 'Libya'),
('LI', 'Liechtenstein'),
('LT', 'Lithuania'),
('LU', 'Luxembourg'),
('MO', 'Macao'),
('MK', 'Macedonia, the former Yugoslav Republic of'),
('MG', 'Madagascar'),
('MW', 'Malawi'),
('MY', 'Malaysia'),
('MV', 'Maldives'),
('ML', 'Mali'),
('MT', 'Malta'),
('MH', 'Marshall Islands'),
('MQ', 'Martinique'),
('MR', 'Mauritania'),
('MU', 'Mauritius'),
('YT', 'Mayotte'),
('MX', 'Mexico'),
('FM', 'Micronesia, Federated States of'),
('MD', 'Moldova, Republic of'),
('MC', 'Monaco'),
('MN', 'Mongolia'),
('ME', 'Montenegro'),
('MS', 'Montserrat'),
('MA', 'Morocco'),
('MZ', 'Mozambique'),
('MM', 'Myanmar'),
('NA', 'Namibia'),
('NR', 'Nauru'),
('NP', 'Nepal'),
('NL', 'Netherlands'),
('NC', 'New Caledonia'),
('NZ', 'New Zealand'),
('NI', 'Nicaragua'),
('NE', 'Niger'),
('NG', 'Nigeria'),
('NU', 'Niue'),
('NF', 'Norfolk Island'),
('MP', 'Northern Mariana Islands'),
('NO', 'Norway'),
('OM', 'Oman'),
('PK', 'Pakistan'),
('PW', 'Palau'),
('PS', 'Palestinian Territory, Occupied'),
('PA', 'Panama'),
('PG', 'Papua New Guinea'),
('PY', 'Paraguay'),
('PE', 'Peru'),
('PH', 'Philippines'),
('PN', 'Pitcairn'),
('PL', 'Poland'),
('PT', 'Portugal'),
('PR', 'Puerto Rico'),
('QA', 'Qatar'),
('RE', 'Réunion'),
('RO', 'Romania'),
('RU', 'Russian Federation'),
('RW', 'Rwanda'),
('BL', 'Saint Barthélemy'),
('SH', 'Saint Helena, Ascension and Tristan da Cunha'),
('KN', 'Saint Kitts and Nevis'),
('LC', 'Saint Lucia'),
('MF', 'Saint Martin (French part)'),
('PM', 'Saint Pierre and Miquelon'),
('VC', 'Saint Vincent and the Grenadines'),
('WS', 'Samoa'),
('SM', 'San Marino'),
('ST', 'Sao Tome and Principe'),
('SA', 'Saudi Arabia'),
('SN', 'Senegal'),
('RS', 'Serbia'),
('SC', 'Seychelles'),
('SL', 'Sierra Leone'),
('SG', 'Singapore'),
('SX', 'Sint Maarten (Dutch part)'),
('SK', 'Slovakia'),
('SI', 'Slovenia'),
('SB', 'Solomon Islands'),
('SO', 'Somalia'),
('ZA', 'South Africa'),
('GS', 'South Georgia and the South Sandwich Islands'),
('SS', 'South Sudan'),
('ES', 'Spain'),
('LK', 'Sri Lanka'),
('SD', 'Sudan'),
('SR', 'Suriname'),
('SJ', 'Svalbard and Jan Mayen'),
('SZ', 'Swaziland'),
('SE', 'Sweden'),
('CH', 'Switzerland'),
('SY', 'Syrian Arab Republic'),
('TW', 'Taiwan, Province of China'),
('TJ', 'Tajikistan'),
('TZ', 'Tanzania, United Republic of'),
('TH', 'Thailand'),
('TL', 'Timor-Leste'),
('TG', 'Togo'),
('TK', 'Tokelau'),
('TO', 'Tonga'),
('TT', 'Trinidad and Tobago'),
('TN', 'Tunisia'),
('TR', 'Turkey'),
('TM', 'Turkmenistan'),
('TC', 'Turks and Caicos Islands'),
('TV', 'Tuvalu'),
('UG', 'Uganda'),
('UA', 'Ukraine'),
('AE', 'United Arab Emirates'),
('GB', 'United Kingdom'),
('US', 'United States'),
('UM', 'United States Minor Outlying Islands'),
('UY', 'Uruguay'),
('UZ', 'Uzbekistan'),
('VU', 'Vanuatu'),
('VE', 'Venezuela, Bolivarian Republic of'),
('VN', 'Viet Nam'),
('VG', 'Virgin Islands, British'),
('VI', 'Virgin Islands, U.S.'),
('WF', 'Wallis and Futuna'),
('EH', 'Western Sahara'),
('YE', 'Yemen'),
('ZM', 'Zambia'),
('ZW', 'Zimbabwe'),
])
@log_changes
class PatientAddress(db.Model, MetaModelMixin):
__tablename__ = 'patient_addresses'
id = uuid_pk_column()
patient_id = patient_id_column()
patient = patient_relationship('patient_addresses')
source_group_id = Column(Integer, ForeignKey('groups.id'), nullable=False)
source_group = relationship('Group')
source_type = Column(String, nullable=False)
from_date = Column(Date)
to_date = Column(Date)
address1 = Column(String)
address2 = Column(String)
address3 = Column(String)
address4 = Column(String)
postcode = Column(String)
country = Column(String)
@property
def full_address(self):
parts = []
parts.extend([
self.address1,
self.address2,
self.address3,
self.address4,
self.postcode,
self.country,
])
return '\n'.join(x for x in parts if x)
@property
def anonymised_postcode(self):
postcode = self.postcode
if postcode is None:
anonymised_postcode = None
else:
            # Postcode outward code (the part before the space)
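            # e.g. "EC1A 1BB" -> "EC1A"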
anonymised_postcode = postcode.split(' ')[0][:4]
return anonymised_postcode
Index('patient_addresses_patient_idx', PatientAddress.patient_id)
|
renalreg/radar
|
radar/models/patient_addresses.py
|
Python
|
agpl-3.0
| 8,540 | 0.000117 |
# -*- coding: utf-8 -*-
# Dioptas - GUI program for fast processing of 2D X-ray diffraction data
# Principal author: Clemens Prescher (clemens.prescher@gmail.com)
# Copyright (C) 2014-2019 GSECARS, University of Chicago, USA
# Copyright (C) 2015-2018 Institute for Geology and Mineralogy, University of Cologne, Germany
# Copyright (C) 2019 DESY, Hamburg, Germany
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
def smooth_bruckner(y, smooth_points, iterations):
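    # Bruckner-style iterative background estimation: the signal is padded with N
    # points on each side, clipped at y_avg + 2*(y_avg - y_min) to suppress peaks,
    # and every point lying above its running (2*N+1)-point window average is
    # replaced by that average; the pass is repeated `iterations` times.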
y_original = y
N_data = y.size
N = smooth_points
N_float = float(N)
y = np.empty(N_data + N + N)
y[0:N].fill(y_original[0])
y[N:N + N_data] = y_original[0:N_data]
y[N + N_data:N_data + N + N].fill(y_original[-1])
y_avg = np.average(y)
y_min = np.min(y)
y_c = y_avg + 2. * (y_avg - y_min)
y[y > y_c] = y_c
    window_size = N_float * 2 + 1
    for j in range(0, iterations):
        window_avg = np.average(y[0: 2 * N + 1])
        for i in range(N, N_data - 1 - N - 1):
            if y[i] > window_avg:
y_new = window_avg
#updating central value in average (first bracket)
#and shifting average by one index (second bracket)
window_avg += ((window_avg-y[i]) + (y[i+N+1]-y[i - N]))/window_size
y[i] = y_new
else:
#shifting average by one index
window_avg += (y[i+N+1]-y[i - N])/window_size
return y[N:N + N_data]
|
erangre/Dioptas
|
dioptas/model/util/smooth_bruckner_python.py
|
Python
|
gpl-3.0
| 2,059 | 0.004371 |
#!/usr/bin/env python
"""
Standaone Rule
==============
This is a customer spec, parser and rule and can be run
against the local host using the following command::
$ insights-run -p examples.rules.stand_alone
or from the examples/rules directory::
$ ./stand_alone.py
"""
from __future__ import print_function
from collections import namedtuple
from insights import get_active_lines, parser, Parser
from insights import make_fail, make_pass, rule, run
from insights.core.spec_factory import SpecSet, simple_file
from insights.parsers.redhat_release import RedhatRelease
# Error key used in make_fail
ERROR_KEY = "TOO_MANY_HOSTS"
# jinja2 template displayed for rule responses
CONTENT = {
make_fail: """Too many hosts in /etc/hosts: {{num}}""",
make_pass: """Just right"""
}
class Specs(SpecSet):
""" Datasources for collection from local host """
hosts = simple_file("/etc/hosts")
@parser(Specs.hosts)
class HostParser(Parser):
"""
Parses the results of the ``hosts`` Specs
Attributes:
hosts (list): List of the namedtuple Host
which are the contents of the hosts file
including ``.ip``, ``.host``, and ``.aliases``.
"""
Host = namedtuple("Host", ["ip", "host", "aliases"])
def parse_content(self, content):
"""
Method to parse the contents of file ``/etc/hosts``
This method must be implemented by each parser.
Arguments:
content (list): List of strings that are the contents
of the /etc/hosts file.
"""
self.hosts = []
for line in get_active_lines(content):
# remove inline comments
line = line.partition("#")[0].strip()
# break the line into parts
parts = line.split()
ip, host = parts[:2]
aliases = parts[2:]
self.hosts.append(HostParser.Host(ip, host, aliases))
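        # e.g. the line "127.0.0.1 localhost localhost.localdomain" parses to
        # Host(ip='127.0.0.1', host='localhost', aliases=['localhost.localdomain'])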
def __repr__(self):
""" str: Returns string representation of the class """
me = self.__class__.__name__
msg = "%s([" + ", ".join([str(d) for d in self.hosts]) + "])"
return msg % me
@rule(HostParser, RedhatRelease, content=CONTENT)
def report(hp, rhr):
"""
Rule reports a response if there is more than 1 host
entry defined in the /etc/hosts file.
Arguments:
hp (HostParser): Parser object for the custom parser in this
module.
rhr (RedhatRelease): Parser object for the /etc/redhat-release
file.
"""
if len(hp.hosts) > 1:
return make_fail("TOO_MANY_HOSTS", num=len(hp.hosts))
return make_pass("TOO_MANY_HOSTS", num=len(hp.hosts))
if __name__ == "__main__":
run(report, print_summary=True)
|
RedHatInsights/insights-core
|
examples/rules/stand_alone.py
|
Python
|
apache-2.0
| 2,746 | 0 |
from django.shortcuts import render
from django.template.loader import render_to_string
def home(request):
context_dict = {}
return render(request,'ms2ldaviz/index.html',context_dict)
def people(request):
context_dict = {}
return render(request,'ms2ldaviz/people.html',context_dict)
def api(request):
context_dict = {}
return render(request,'ms2ldaviz/api.html',context_dict)
def user_guide(request):
markdown_str = render_to_string('markdowns/user_guide.md')
return render(request, 'markdowns/user_guide.html', {'markdown_str':markdown_str})
def disclaimer(request):
markdown_str = render_to_string('markdowns/disclaimer.md')
return render(request, 'markdowns/disclaimer.html', {'markdown_str':markdown_str})
def confidence(request):
markdown_str = render_to_string('markdowns/confidence.md')
return render(request, 'markdowns/confidence.html', {'markdown_str':markdown_str})
|
sdrogers/ms2ldaviz
|
ms2ldaviz/ms2ldaviz/views.py
|
Python
|
mit
| 936 | 0.013889 |
# Speak.activity
# A simple front end to the espeak text-to-speech engine on the XO laptop
# http://wiki.laptop.org/go/Speak
#
# Copyright (C) 2008 Joshua Minor
# Copyright (C) 2014 Walter Bender
# This file is part of Speak.activity
#
# Parts of Speak.activity are based on code from Measure.activity
# Copyright (C) 2007 Arjun Sarwal - arjun@laptop.org
#
# Speak.activity is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Speak.activity is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Speak.activity. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gdk
from gi.repository import GdkPixbuf
from eye import Eye
from utils import svg_str_to_pixbuf
class Sleepy(Eye):
def __init__(self, fill_color):
Eye.__init__(self, fill_color)
self._pixbuf = svg_str_to_pixbuf(eye_svg())
def draw(self, widget, cr):
bounds = self.get_allocation()
# background
cr.set_source_rgba(*self.fill_color.get_rgba())
cr.rectangle(0, 0, bounds.width, bounds.height)
cr.fill()
w = h = min(bounds.width, bounds.height)
x = int((bounds.width - w) // 2)
y = int((bounds.height - h) // 2)
pixbuf = self._pixbuf.scale_simple(w, h, GdkPixbuf.InterpType.BILINEAR)
cr.translate(x + w / 2., y + h / 2.)
cr.translate(-x - w / 2., -y - h / 2.)
Gdk.cairo_set_source_pixbuf(cr, pixbuf, x, y)
cr.rectangle(x, y, w, h)
cr.fill()
return True
def eye_svg():
return \
'<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n' + \
'<svg\n' + \
' xmlns:svg="http://www.w3.org/2000/svg"\n' + \
' xmlns="http://www.w3.org/2000/svg"\n' + \
' version="1.1"\n' + \
' width="300"\n' + \
' height="300">\n' + \
' <path\n' + \
' d="m 260.26893,151.09803 c -6.07398,14.55176 -15.05894,27.89881 -26.27797,39.03563 -11.21904,11.13683 -24.66333,20.05466 -39.32004,26.08168 -14.65671,6.02702 -30.51431,9.15849 -46.37814,9.15849 -15.86384,0 -31.72144,-3.13147 -46.37815,-9.15849 C 87.257925,210.18832 73.813631,201.27049 62.594594,190.13366 51.375557,178.99684 42.3906,165.64979 36.316616,151.09803"\n' + \
' style="fill:none;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:13.18636799;stroke-linecap:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" />\n' + \
'</svg>\n'
|
walterbender/speak
|
sleepy.py
|
Python
|
gpl-3.0
| 2,928 | 0.000683 |
# -*- coding: utf-8 -*-
from minheap import minheap
class maxheap(minheap):
"""
Heap class - made of keys and items
methods: build_heap, heappush, heappop
"""
MAX_HEAP = True
def __str__(self):
return "Max-heap with %s items" % (len(self.heap))
def heapify(self, i):
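        # Sift-down: swap the value at index i with its larger child until both
        # children are smaller, restoring the max-heap property below i.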
l = self.leftchild(i)
r = self.rightchild(i)
largest = i
if l < self.max_elements() and self.heap[l] > self.heap[largest]:
largest = l
if r < self.max_elements() and self.heap[r] > self.heap[largest]:
largest = r
if largest != i:
self.heap[i], self.heap[largest] = self.heap[largest], self.heap[i]
self.heapify(largest)
def heappush(self, x):
""" Adds a new item x in the heap"""
i = len(self.heap)
self.heap.append(x)
parent = self.parent(i)
while parent != -1 and self.heap[int(i)] > self.heap[int(parent)]:
self.heap[int(i)], self.heap[int(parent)] = self.heap[
int(parent)], self.heap[int(i)]
i = parent
parent = self.parent(i)
|
NicovincX2/Python-3.5
|
Algorithmique/Algorithme/Algorithme de tri/Tri par tas (Heapsort)/maxheap.py
|
Python
|
gpl-3.0
| 1,134 | 0.000882 |
from pygame import Rect
from widget import Widget
class GridView(Widget):
# cell_size (width, height) size of each cell
#
# Abstract methods:
#
# num_rows() --> no. of rows
# num_cols() --> no. of columns
# draw_cell(surface, row, col, rect)
# click_cell(row, col, event)
def __init__(self, cell_size, nrows, ncols, **kwds):
"""nrows, ncols are for calculating initial size of widget"""
Widget.__init__(self, **kwds)
self.cell_size = cell_size
w, h = cell_size
d = 2 * self.margin
self.size = (w * ncols + d, h * nrows + d)
self.cell_size = cell_size
def draw(self, surface):
for row in xrange(self.num_rows()):
for col in xrange(self.num_cols()):
r = self.cell_rect(row, col)
self.draw_cell(surface, row, col, r)
def cell_rect(self, row, col):
w, h = self.cell_size
d = self.margin
x = col * w + d
y = row * h + d
return Rect(x, y, w, h)
def draw_cell(self, surface, row, col, rect):
pass
def mouse_down(self, event):
x, y = event.local
w, h = self.cell_size
W, H = self.size
d = self.margin
if d <= x < W - d and d <= y < H - d:
row = (y - d) // h
col = (x - d) // w
self.click_cell(row, col, event)
def click_cell(self, row, col, event):
pass
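# Minimal subclass sketch (grid size, colours and cell size are made up):
#
#   class DemoGrid(GridView):
#       def num_rows(self): return 3
#       def num_cols(self): return 3
#       def draw_cell(self, surface, row, col, rect):
#           surface.fill((200, 200, 200), rect.inflate(-2, -2))
#       def click_cell(self, row, col, event):
#           print "clicked cell", (row, col)
#
#   grid = DemoGrid(cell_size=(40, 40), nrows=3, ncols=3)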
|
vejmelkam/emotiv-reader
|
albow/grid_view.py
|
Python
|
gpl-3.0
| 1,254 | 0.039075 |
# -*- coding: utf-8 -*-
import logging
from chisch.common.retwrapper import RetWrapper
import cores
logger = logging.getLogger('django')
def signature_url(request):
params_query_dict = request.GET
params = {k: v for k, v in params_query_dict.items()}
try:
url = cores.get_url()
except Exception, e:
return RetWrapper.wrap_and_return(e)
result = {'url': url}
return RetWrapper.wrap_and_return(result)
|
zhaowenxiang/chisch
|
vod/views.py
|
Python
|
mit
| 446 | 0 |
from django.conf.urls import patterns, url
from django.views.generic import RedirectView
from django.conf import settings
from . import views
products = r'/products/(?P<product>\w+)'
versions = r'/versions/(?P<versions>[;\w\.()]+)'
version = r'/versions/(?P<version>[;\w\.()]+)'
perm_legacy_redirect = settings.PERMANENT_LEGACY_REDIRECTS
urlpatterns = patterns(
'', # prefix
url('^robots\.txt$',
views.robots_txt,
name='robots_txt'),
url(r'^status/json/$',
views.status_json,
name='status_json'),
url(r'^status/revision/$',
views.status_revision,
name='status_revision'),
url(r'^crontabber-state/$',
views.crontabber_state,
name='crontabber_state'),
url('^crashes-per-day/$',
views.crashes_per_day,
name='crashes_per_day'),
url(r'^exploitability/$',
views.exploitability_report,
name='exploitability_report'),
url(r'^report/index/(?P<crash_id>[\w-]+)$',
views.report_index,
name='report_index'),
url(r'^search/quick/$',
views.quick_search,
name='quick_search'),
url(r'^buginfo/bug', views.buginfo,
name='buginfo'),
url(r'^rawdumps/(?P<crash_id>[\w-]{36})-(?P<name>\w+)\.'
r'(?P<extension>json|dmp|json\.gz)$',
views.raw_data,
name='raw_data_named'),
url(r'^rawdumps/(?P<crash_id>[\w-]{36}).(?P<extension>json|dmp)$',
views.raw_data,
name='raw_data'),
url(r'^login/$',
views.login,
name='login'),
url(r'^graphics_report/$',
views.graphics_report,
name='graphics_report'),
url(r'^about/throttling/$',
views.about_throttling,
name='about_throttling'),
# if we do a permanent redirect, the browser will "cache" the redirect and
# it will make it very hard to ever change the DEFAULT_PRODUCT
url(r'^$',
RedirectView.as_view(
url='/home/product/%s' % settings.DEFAULT_PRODUCT,
permanent=False # this is not a legacy URL
)),
# redirect deceased Advanced Search URL to Super Search
url(r'^query/$',
RedirectView.as_view(
url='/search/',
query_string=True,
permanent=True
)),
# redirect deceased Report List URL to Signature report
url(r'^report/list$',
RedirectView.as_view(
pattern_name='signature:signature_report',
query_string=True,
permanent=True
)),
# redirect deceased Daily Crashes URL to Crasher per Day
url(r'^daily$',
RedirectView.as_view(
pattern_name='crashstats:crashes_per_day',
query_string=True,
permanent=True
)),
    # Redirect old independent pages to the unified Profile page.
url(r'^your-crashes/$',
RedirectView.as_view(
url='/profile/',
permanent=perm_legacy_redirect
)),
url(r'^permissions/$',
RedirectView.as_view(
url='/profile/',
permanent=perm_legacy_redirect
)),
# Redirect deleted status page to monitoring page.
url(
r'^status/$',
RedirectView.as_view(
pattern_name='monitoring:index',
permanent=not settings.DEBUG,
),
name='status_redirect',
),
# handle old-style URLs
url(r'^products/(?P<product>\w+)/$',
RedirectView.as_view(
url='/home/products/%(product)s',
permanent=perm_legacy_redirect
)),
url(r'^products/(?P<product>\w+)/versions/(?P<versions>[;\w\.()]+)/$',
RedirectView.as_view(
url='/home/products/%(product)s/versions/%(versions)s',
permanent=perm_legacy_redirect
)),
url('^home' + products + '/versions/$',
RedirectView.as_view(
url='/home/products/%(product)s',
permanent=perm_legacy_redirect
)),
)
|
AdrianGaudebert/socorro
|
webapp-django/crashstats/crashstats/urls.py
|
Python
|
mpl-2.0
| 3,962 | 0.000252 |
"""Main view for geo locator application"""
from django.shortcuts import render
def index(request):
if request.location:
location = request.location
else:
location = None
return render(request, "homepage.html", {'location': location})
|
mindcube/mindcube-django-cookiecutter
|
{{cookiecutter.repo_name}}/project/apps/geo_locator/views.py
|
Python
|
mit
| 265 | 0.003774 |
# Generated file. Do not edit
__author__="drone"
from Abs import Abs
from And import And
from Average import Average
from Ceil import Ceil
from Cube import Cube
from Divide import Divide
from Double import Double
from Equal import Equal
from Even import Even
from Floor import Floor
from Greaterorequal import Greaterorequal
from Greaterthan import Greaterthan
from Half import Half
from If import If
from Increment import Increment
from Lessorequal import Lessorequal
from Lessthan import Lessthan
from Max import Max
from Min import Min
from Module import Module
from Multiply import Multiply
from Negate import Negate
from Not import Not
from Odd import Odd
from One import One
from Positive import Positive
from Quadruple import Quadruple
from Sign import Sign
from Sub import Sub
from Sum import Sum
from Two import Two
from Zero import Zero
__all__ = ['Abs', 'And', 'Average', 'Ceil', 'Cube', 'Divide', 'Double', 'Equal', 'Even', 'Floor', 'Greaterorequal', 'Greaterthan', 'Half', 'If', 'Increment', 'Lessorequal', 'Lessthan', 'Max', 'Min', 'Module', 'Multiply', 'Negate', 'Not', 'Odd', 'One', 'Positive', 'Quadruple', 'Sign', 'Sub', 'Sum', 'Two', 'Zero']
|
gcobos/rft
|
app/primitives/__init__.py
|
Python
|
agpl-3.0
| 1,163 | 0.029235 |
#!/usr/bin/env python
# Copyright (C) 2014-2017 Shea G Craig
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""misc_endpoints.py
Classes representing API endpoints that don't subclass JSSObject
"""
from __future__ import print_function
from __future__ import absolute_import
import mimetypes
import os
import sys
from xml.etree import ElementTree
from .exceptions import MethodNotAllowedError, PostError
from .tools import error_handler
__all__ = ('CommandFlush', 'FileUpload', 'LogFlush')
# Map Python 2 basestring type for Python 3.
if sys.version_info.major == 3:
basestring = str
# pylint: disable=missing-docstring
# pylint: disable=too-few-public-methods
class CommandFlush(object):
_endpoint_path = "commandflush"
can_get = False
can_put = False
can_post = False
def __init__(self, jss):
"""Initialize a new CommandFlush
Args:
jss: JSS object.
"""
self.jss = jss
@property
def url(self):
"""Return the path subcomponent of the url to this object."""
return self._endpoint_path
def command_flush_with_xml(self, data):
"""Flush commands for devices with a supplied xml string.
From the Casper API docs:
Status and devices specified in an XML file. Id lists may be
specified for <computers>, <computer_groups>, <mobile_devices>,
<mobile_device_groups>. Sample file:
<commandflush>
<status>Pending+Failed</status>
<mobile_devices>
<mobile_device>
<id>1</id>
</mobile_device>
<mobile_device>
<id>2</id>
</mobile_device>
</mobile_devices>
</commandflush>
Args:
data (string): XML string following the above structure or
an ElementTree/Element.
Raises:
DeleteError if provided url_path has a >= 400 response.
"""
if not isinstance(data, basestring):
data = ElementTree.tostring(data, encoding='UTF-8')
self.jss.delete(self.url, data)
def command_flush_for(self, id_type, command_id, status):
"""Flush commands for an individual device.
Args:
id_type (str): One of 'computers', 'computergroups',
'mobiledevices', or 'mobiledevicegroups'.
            command_id (str, int, list): ID value(s) for the devices to
flush. More than one device should be passed as IDs
in a list or tuple.
status (str): One of 'Pending', 'Failed', 'Pending+Failed'.
Raises:
DeleteError if provided url_path has a >= 400 response.
"""
id_types = ('computers', 'computergroups', 'mobiledevices',
'mobiledevicegroups')
status_types = ('Pending', 'Failed', 'Pending+Failed')
if id_type not in id_types or status not in status_types:
raise ValueError("Invalid arguments.")
if isinstance(command_id, list):
command_id = ",".join(str(item) for item in command_id)
flush_url = "{}/{}/id/{}/status/{}".format(
self.url, id_type, command_id, status)
self.jss.delete(flush_url)
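    # e.g. flush all pending and failed commands for two computers (ids assumed):
    #   CommandFlush(jss).command_flush_for('computers', [52, 53], 'Pending+Failed')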
# pylint: disable=too-few-public-methods
class FileUpload(object):
"""FileUploads are a special case in the API. They allow you to add
file resources to a number of objects on the JSS.
To use, instantiate a new FileUpload object, then use the save()
method to upload.
Once the upload has been posted you may only interact with it
through the web interface. You cannot list/get it or delete it
through the API.
However, you can reuse the FileUpload object if you wish, by
changing the parameters, and issuing another save().
"""
_endpoint_path = "fileuploads"
allowed_kwargs = ('subset',)
def __init__(self, j, resource_type, id_type, _id, resource):
"""Prepare a new FileUpload.
Args:
j: A JSS object to POST the upload to.
resource_type:
String. Acceptable Values:
Attachments:
computers
mobiledevices
enrollmentprofiles
peripherals
mobiledeviceenrollmentprofiles
Icons:
policies
ebooks
mobiledeviceapplicationsicon
Mobile Device Application:
mobiledeviceapplicationsipa
Disk Encryption
diskencryptionconfigurations
diskencryptions (synonymous)
PPD
printers
id_type:
String of desired ID type:
id
name
_id: Int or String referencing the identity value of the
resource to add the FileUpload to.
resource: String path to the file to upload.
"""
resource_types = ["computers", "mobiledevices", "enrollmentprofiles",
"peripherals", "mobiledeviceenrollmentprofiles",
"policies", "ebooks", "mobiledeviceapplicationsicon",
"mobiledeviceapplicationsipa",
"diskencryptionconfigurations", "printers"]
id_types = ["id", "name"]
self.jss = j
# Do some basic error checking on parameters.
if resource_type in resource_types:
self.resource_type = resource_type
else:
raise TypeError(
"resource_type must be one of: %s" % ', '.join(resource_types))
if id_type in id_types:
self.id_type = id_type
else:
raise TypeError("id_type must be one of: %s" % ', '.join(id_types))
self._id = str(_id)
basename = os.path.basename(resource)
content_type = mimetypes.guess_type(basename)[0]
self.resource = {"name": (basename, open(resource, "rb"),
content_type)}
self._set_upload_url()
def _set_upload_url(self):
"""Generate the full URL for a POST."""
# pylint: disable=protected-access
self._upload_url = "/".join([
self.jss._url, self._endpoint_path, self.resource_type,
self.id_type, str(self._id)])
# pylint: enable=protected-access
def save(self):
"""POST the object to the JSS."""
try:
response = self.jss.session.post(
self._upload_url, files=self.resource)
except PostError as error:
if error.status_code == 409:
raise PostError(error)
else:
raise MethodNotAllowedError(self.__class__.__name__)
if response.status_code == 201:
if self.jss.verbose:
print("POST: Success")
print(response.content)
elif response.status_code >= 400:
error_handler(PostError, response)
class LogFlush(object):
_endpoint_path = "logflush"
def __init__(self, jss):
"""Initialize a new LogFlush
Args:
jss: JSS object.
"""
self.jss = jss
@property
def url(self):
"""Return the path subcomponent of the url to this object."""
return self._endpoint_path
def log_flush_with_xml(self, data):
"""Flush logs for devices with a supplied xml string.
From the Casper API docs:
log, log_id, interval, and devices specified in an XML file.
Sample file:
<logflush>
<log>policy</log>
<log_id>2</log_id>
<interval>THREE MONTHS</interval>
<computers>
<computer>
<id>1</id>
</computer>
<computer>
<id>2</id>
</computer>
</computers>
</logflush>
Args:
data (string): XML string following the above structure or
an ElementTree/Element.
Elements:
logflush (root)
log (Unknown; "policy" is the only one listed in
docs).
log_id: Log ID value.
interval: Combination of "Zero", "One", "Two",
"Three", "Six", and "Day", "Week", "Month",
"Year". e.g. ("Three+Months")
Please note: The documentation for this
specifies the singular form (e.g. "Month"),
and plural ("Months") at different times, and
further the construction is listed as
"THREE MONTHS" elsewhere. Limited testing
indicates that pluralization does not matter,
nor does capitalization. The "+" seems optional
as well.
Please test!
Device Arrays:
Again, acceptable values are not listed in the
docs, aside from the example ("computers").
Presumably "mobiledevices", and possibly
"computergroups" and "mobiledevicegroups" work.
Raises:
DeleteError if provided url_path has a >= 400 response.
"""
if not isinstance(data, basestring):
data = ElementTree.tostring(data, encoding='UTF-8')
self.jss.delete(self.url, data)
def log_flush_for_interval(self, log_type, interval):
"""Flush logs for an interval of time.
Args:
log_type (str): Only documented type is "policies". This
will be applied by default if nothing is passed.
interval (str): Combination of "Zero", "One", "Two",
"Three", "Six", and "Day", "Week", "Month", "Year". e.g.
("Three+Months") Please note: The documentation for this
specifies the singular form (e.g. "Month"), and plural
("Months") at different times, and further the
construction is listed as "THREE MONTHS" elsewhere.
Limited testing indicates that pluralization does not
matter, nor does capitalization.
Please test!
No validation is performed on this prior to the request
being made.
Raises:
DeleteError if provided url_path has a >= 400 response.
"""
if not log_type:
log_type = "policies"
# The XML for the /logflush basic endpoint allows spaces
# instead of "+", so do a replace here just in case.
interval = interval.replace(" ", "+")
flush_url = "{}/{}/interval/{}".format(
self.url, log_type, interval)
self.jss.delete(flush_url)
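    # e.g. LogFlush(jss).log_flush_for_interval('policies', 'Three Months')
    # (spaces in the interval are converted to '+' before the DELETE request)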
def log_flush_for_obj_for_interval(self, log_type, obj_id, interval):
"""Flush logs for an interval of time for a specific object.
Please note, log_type is a variable according to the API docs,
but acceptable values are not listed. Only "policies" is
demonstrated as an acceptable value.
Args:
log_type (str): Only documented type is "policies". This
will be applied by default if nothing is passed.
obj_id (str or int): ID of the object to have logs flushed.
interval (str): Combination of "Zero", "One", "Two",
"Three", "Six", and "Day", "Week", "Month", "Year". e.g.
("Three+Months") Please note: The documentation for this
specifies the singular form (e.g. "Month"), and plural
("Months") at different times, and further the
construction is listed as "THREE MONTHS" elsewhere.
Limited testing indicates that pluralization does not
matter, nor does capitalization.
Please test!
No validation is performed on this prior to the request
being made.
Raises:
DeleteError if provided url_path has a >= 400 response.
"""
if not log_type:
log_type = "policies"
# The XML for the /logflush basic endpoint allows spaces
# instead of "+", so do a replace here just in case.
interval = interval.replace(" ", "+")
flush_url = "{}/{}/id/{}/interval/{}".format(
self.url, log_type, obj_id, interval)
self.jss.delete(flush_url)
# pylint: enable=missing-docstring
# pylint: enable=too-few-public-methods
|
sheagcraig/python-jss
|
jss/misc_endpoints.py
|
Python
|
gpl-3.0
| 13,525 | 0.000074 |
from landscape.client.tests.helpers import LandscapeTest
from landscape.client.patch import UpgradeManager
from landscape.client.upgraders import monitor
class TestMonitorUpgraders(LandscapeTest):
def test_monitor_upgrade_manager(self):
self.assertEqual(type(monitor.upgrade_manager), UpgradeManager)
|
CanonicalLtd/landscape-client
|
landscape/client/upgraders/tests/test_monitor.py
|
Python
|
gpl-2.0
| 317 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""
##############################################################################
# Configuration parameters for Google App Engine
##############################################################################
KEEP_CACHED = False # request a dummy url every 10secs to force caching app
LOG_STATS = False # web2py level log statistics
APPSTATS = True # GAE level usage statistics and profiling
DEBUG = False # debug mode
AUTO_RETRY = True # force gae to retry commit on failure
#
# Read more about APPSTATS here
# http://googleappengine.blogspot.com/2010/03/easy-performance-profiling-with.html
# can be accessed from:
# http://localhost:8080/_ah/stats
##############################################################################
# All tricks in this file developed by Robin Bhattacharyya
##############################################################################
import time
import os
import sys
import logging
import cPickle
import pickle
import wsgiref.handlers
import datetime
path = os.path.dirname(os.path.abspath(__file__))
sys.path = [path]+[p for p in sys.path if not p==path]
sys.modules['cPickle'] = sys.modules['pickle']
from gluon.settings import global_settings
from google.appengine.api.labs import taskqueue
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
global_settings.web2py_runtime_gae = True
global_settings.db_sessions = True
if os.environ.get('SERVER_SOFTWARE', '').startswith('Devel'):
(global_settings.web2py_runtime, DEBUG) = \
('gae:development', True)
else:
(global_settings.web2py_runtime, DEBUG) = \
('gae:production', False)
import gluon.main
def log_stats(fun):
"""Function that will act as a decorator to make logging"""
def newfun(env, res):
"""Log the execution time of the passed function"""
timer = lambda t: (t.time(), t.clock())
(t0, c0) = timer(time)
executed_function = fun(env, res)
(t1, c1) = timer(time)
log_info = """**** Request: %.2fms/%.2fms (real time/cpu time)"""
log_info = log_info % ((t1 - t0) * 1000, (c1 - c0) * 1000)
logging.info(log_info)
return executed_function
return newfun
logging.basicConfig(level=logging.INFO)
def wsgiapp(env, res):
"""Return the wsgiapp"""
if env['PATH_INFO'] == '/_ah/queue/default':
if KEEP_CACHED:
delta = datetime.timedelta(seconds=10)
taskqueue.add(eta=datetime.datetime.now() + delta)
res('200 OK',[('Content-Type','text/plain')])
return ['']
env['PATH_INFO'] = env['PATH_INFO'].encode('utf8')
return gluon.main.wsgibase(env, res)
if LOG_STATS or DEBUG:
wsgiapp = log_stats(wsgiapp)
if AUTO_RETRY:
from gluon.contrib.gae_retry import autoretry_datastore_timeouts
autoretry_datastore_timeouts()
def main():
"""Run the wsgi app"""
if APPSTATS:
run_wsgi_app(wsgiapp)
else:
wsgiref.handlers.CGIHandler().run(wsgiapp)
if __name__ == '__main__':
main()
|
stryder199/RyarkAssignments
|
Assignment2/web2py/gaehandler.py
|
Python
|
mit
| 3,279 | 0.005489 |
from typing import (Tuple,
List)
import matplotlib
# More info at
# http://matplotlib.org/faq/usage_faq.html#what-is-a-backend for details
# TODO: use this: https://stackoverflow.com/a/37605654/7851470
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib.patches import Ellipse
from matplotlib.axes import Axes
import numpy as np
import pandas as pd
from .utils import to_cartesian_from_equatorial
# Kinematic properties of the thin disk taken from the paper of
# N.Rowell and N.C.Hambly (mean motions are relative to the Sun):
# "White dwarfs in the SuperCOSMOS Sky Survey: the thin disc,
# thick disc and spheroid luminosity functions"
# Mon. Not. R. Astron. Soc. 417, 93–113 (2011)
# doi:10.1111/j.1365-2966.2011.18976.x
AVERAGE_POPULATION_VELOCITY_U = -8.62
AVERAGE_POPULATION_VELOCITY_V = -20.04
AVERAGE_POPULATION_VELOCITY_W = -7.1
STD_POPULATION_U = 32.4
STD_POPULATION_V = 23
STD_POPULATION_W = 18.1
def plot(stars: pd.DataFrame,
*,
filename: str = 'velocity_clouds.ps',
figure_size: Tuple[float, float] = (8, 12),
spacing: float = 0.25,
u_label: str = '$U(km/s)$',
v_label: str = '$V(km/s)$',
w_label: str = '$W(km/s)$',
u_limits: Tuple[float, float] = (-150, 150),
v_limits: Tuple[float, float] = (-150, 150),
w_limits: Tuple[float, float] = (-150, 150)) -> None:
figure, (uv_subplot,
uw_subplot,
vw_subplot) = plt.subplots(nrows=3,
figsize=figure_size)
draw_subplot(subplot=uv_subplot,
xlabel=u_label,
ylabel=v_label,
xlim=u_limits,
ylim=v_limits,
x=stars['u_velocity'],
y=stars['v_velocity'],
x_avg=AVERAGE_POPULATION_VELOCITY_U,
y_avg=AVERAGE_POPULATION_VELOCITY_V,
x_std=STD_POPULATION_U,
y_std=STD_POPULATION_V)
draw_subplot(subplot=uw_subplot,
xlabel=u_label,
ylabel=w_label,
xlim=u_limits,
ylim=w_limits,
x=stars['u_velocity'],
y=stars['w_velocity'],
x_avg=AVERAGE_POPULATION_VELOCITY_U,
y_avg=AVERAGE_POPULATION_VELOCITY_W,
x_std=STD_POPULATION_U,
y_std=STD_POPULATION_W)
draw_subplot(subplot=vw_subplot,
xlabel=v_label,
ylabel=w_label,
xlim=v_limits,
ylim=w_limits,
x=stars['v_velocity'],
y=stars['w_velocity'],
x_avg=AVERAGE_POPULATION_VELOCITY_V,
y_avg=AVERAGE_POPULATION_VELOCITY_W,
x_std=STD_POPULATION_V,
y_std=STD_POPULATION_W)
figure.subplots_adjust(hspace=spacing)
plt.savefig(filename)
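# Usage sketch, added for illustration (only the column names are taken from the
# code above; the data and file name are arbitrary):
#
#     stars = pd.DataFrame({'u_velocity': [...], 'v_velocity': [...],
#                           'w_velocity': [...]})
#     plot(stars, filename='clouds.ps')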
def plot_lepine_case(stars: pd.DataFrame,
*,
filename: str = 'velocity_clouds.ps',
figure_size: Tuple[float, float] = (8, 12),
spacing: float = 0.25,
u_label: str = '$U(km/s)$',
v_label: str = '$V(km/s)$',
w_label: str = '$W(km/s)$',
u_limits: Tuple[float, float] = (-150, 150),
v_limits: Tuple[float, float] = (-150, 150),
w_limits: Tuple[float, float] = (-150, 150)) -> None:
x_coordinates, y_coordinates, z_coordinates = to_cartesian_from_equatorial(
stars)
highest_coordinates = np.maximum.reduce([np.abs(x_coordinates),
np.abs(y_coordinates),
np.abs(z_coordinates)])
uv_cloud_stars = stars[(highest_coordinates == z_coordinates)]
uw_cloud_stars = stars[(highest_coordinates == y_coordinates)]
vw_cloud_stars = stars[(highest_coordinates == x_coordinates)]
figure, (uv_subplot,
uw_subplot,
vw_subplot) = plt.subplots(nrows=3,
figsize=figure_size)
draw_subplot(subplot=uv_subplot,
xlabel=u_label,
ylabel=v_label,
xlim=u_limits,
ylim=v_limits,
x=uv_cloud_stars['u_velocity'],
y=uv_cloud_stars['v_velocity'],
x_avg=AVERAGE_POPULATION_VELOCITY_U,
y_avg=AVERAGE_POPULATION_VELOCITY_V,
x_std=STD_POPULATION_U,
y_std=STD_POPULATION_V)
draw_subplot(subplot=uw_subplot,
xlabel=u_label,
ylabel=w_label,
xlim=u_limits,
ylim=w_limits,
x=uw_cloud_stars['u_velocity'],
y=uw_cloud_stars['w_velocity'],
x_avg=AVERAGE_POPULATION_VELOCITY_U,
y_avg=AVERAGE_POPULATION_VELOCITY_W,
x_std=STD_POPULATION_U,
y_std=STD_POPULATION_W)
draw_subplot(subplot=vw_subplot,
xlabel=v_label,
ylabel=w_label,
xlim=v_limits,
ylim=w_limits,
x=vw_cloud_stars['v_velocity'],
y=vw_cloud_stars['w_velocity'],
x_avg=AVERAGE_POPULATION_VELOCITY_V,
y_avg=AVERAGE_POPULATION_VELOCITY_W,
x_std=STD_POPULATION_V,
y_std=STD_POPULATION_W)
figure.subplots_adjust(hspace=spacing)
plt.savefig(filename)
def draw_subplot(*,
subplot: Axes,
xlabel: str,
ylabel: str,
xlim: Tuple[float, float],
ylim: Tuple[float, float],
x: List[float],
y: List[float],
cloud_color: str = 'k',
point_size: float = 0.5,
x_avg: float,
y_avg: float,
x_std: float,
y_std: float,
ratio: float = 10 / 13) -> None:
subplot.set(xlabel=xlabel,
ylabel=ylabel,
xlim=xlim,
ylim=ylim)
subplot.scatter(x=x,
y=y,
color=cloud_color,
s=point_size)
plot_ellipses(subplot=subplot,
x_avg=x_avg,
y_avg=y_avg,
x_std=x_std,
y_std=y_std)
subplot.minorticks_on()
subplot.xaxis.set_ticks_position('both')
subplot.yaxis.set_ticks_position('both')
subplot.set_aspect(ratio / subplot.get_data_ratio())
def plot_ellipses(subplot: Axes,
x_avg: float,
y_avg: float,
x_std: float,
y_std: float,
ellipse_color: str = 'b') -> None:
std_ellipse = Ellipse(xy=(x_avg, y_avg),
width=x_std * 2,
height=y_std * 2,
fill=False,
edgecolor=ellipse_color,
linestyle='dashed')
double_std_ellipse = Ellipse(xy=(x_avg, y_avg),
width=x_std * 4,
height=y_std * 4,
fill=False,
edgecolor=ellipse_color)
subplot.add_artist(std_ellipse)
subplot.add_artist(double_std_ellipse)
|
wolvespack/alcor
|
alcor/services/plots/velocity_clouds.py
|
Python
|
mit
| 7,567 | 0.000793 |
#!/usr/bin/python
from typing import List, Optional
"""
16. 3Sum Closest
https://leetcode.com/problems/3sum-closest/
"""
def bsearch(nums, left, right, res, i, j, target):
while left <= right:
middle = (left + right) // 2
candidate = nums[i] + nums[j] + nums[middle]
if res is None or abs(candidate - target) < abs(res - target):
res = candidate
if candidate == target:
return res
elif candidate > target:
right = middle - 1
else:
left = middle + 1
return res
class Solution:
def threeSumClosest(self, nums: List[int], target: int) -> Optional[int]:
res = None
nums = sorted(nums)
for i in range(len(nums)):
for j in range(i + 1, len(nums)):
res = bsearch(nums, j + 1, len(nums) - 1, res, i, j, target)
return res
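# Complexity note (added): sorting costs O(n log n); the two nested loops with a
# binary search over the remaining suffix give O(n^2 log n) overall, versus
# O(n^2) for the usual two-pointer variant.
# Quick sanity check: Solution().threeSumClosest([-1, 2, 1, -4], 1) == 2,
# because -1 + 2 + 1 = 2 is the achievable sum closest to the target.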
def main():
sol = Solution()
print(sol.threeSumClosest([-111, -111, 3, 6, 7, 16, 17, 18, 19], 13))
return 0
if __name__ == '__main__':
raise SystemExit(main())
|
pisskidney/leetcode
|
medium/16.py
|
Python
|
mit
| 1,070 | 0 |
"""Generate test data for IDTxl network comparison unit and system tests.
Generate test data for IDTxl network comparison unit and system tests. Simulate
discrete and continuous data from three correlated Gaussian data sets. Perform
network inference using bivariate/multivariate mutual information (MI)/transfer
entropy (TE) analysis. Results are saved and used for unit and system testing of
network comparison (systemtest_network_comparison.py).
A coupling is simulated as a lagged, linear correlation between three Gaussian
variables and looks like this:
1 -> 2 -> 3 with a delay of 1 sample for each coupling
"""
import pickle
import numpy as np
from idtxl.multivariate_te import MultivariateTE
from idtxl.bivariate_te import BivariateTE
from idtxl.multivariate_mi import MultivariateMI
from idtxl.bivariate_mi import BivariateMI
from idtxl.estimators_jidt import JidtDiscreteCMI
from idtxl.data import Data
# path = os.path.join(os.path.dirname(__file__) + '/data/')
path = 'data/'
def analyse_mute_te_data():
    # Generate example data: the following was run once to generate example
# data, which is now in the data sub-folder of the test-folder.
data = Data()
data.generate_mute_data(100, 5)
# analysis settings
settings = {
'cmi_estimator': 'JidtKraskovCMI',
'n_perm_max_stat': 50,
'n_perm_min_stat': 50,
'n_perm_omnibus': 200,
'n_perm_max_seq': 50,
'max_lag_target': 5,
'max_lag_sources': 5,
'min_lag_sources': 1,
'permute_in_time': True
}
# network inference for individual data sets
nw_0 = MultivariateTE()
res_0 = nw_0.analyse_network(
settings, data, targets=[0, 1], sources='all')
pickle.dump(res_0, open(path + 'mute_results_0.p', 'wb'))
res_1 = nw_0.analyse_network(
settings, data, targets=[1, 2], sources='all')
pickle.dump(res_1, open(path + 'mute_results_1.p', 'wb'))
res_2 = nw_0.analyse_network(
settings, data, targets=[0, 2], sources='all')
pickle.dump(res_2, open(path + 'mute_results_2.p', 'wb'))
res_3 = nw_0.analyse_network(
settings, data, targets=[0, 1, 2], sources='all')
pickle.dump(res_3, open(path + 'mute_results_3.p', 'wb'))
res_4 = nw_0.analyse_network(
settings, data, targets=[1, 2], sources='all')
pickle.dump(res_4, open(path + 'mute_results_4.p', 'wb'))
res_5 = nw_0.analyse_network(settings, data)
pickle.dump(res_5, open(path + 'mute_results_full.p', 'wb'))
def generate_discrete_data(n_replications=1):
"""Generate Gaussian test data: 1 -> 2 -> 3, delay 1."""
d = generate_gauss_data(n_replications=n_replications, discrete=True)
data = Data(d, dim_order='psr', normalise=False)
return data
def generate_continuous_data(n_replications=1):
"""Generate Gaussian test data: 1 -> 2 -> 3, delay 1."""
d = generate_gauss_data(n_replications=n_replications, discrete=False)
data = Data(d, dim_order='psr', normalise=True)
return data
def generate_gauss_data(n_replications=1, discrete=False):
settings = {'discretise_method': 'equal',
'n_discrete_bins': 5}
est = JidtDiscreteCMI(settings)
covariance_1 = 0.4
covariance_2 = 0.3
n = 10000
delay = 1
if discrete:
d = np.zeros((3, n - 2*delay, n_replications), dtype=int)
else:
d = np.zeros((3, n - 2*delay, n_replications))
for r in range(n_replications):
proc_1 = np.random.normal(0, 1, size=n)
proc_2 = (covariance_1 * proc_1 + (1 - covariance_1) *
np.random.normal(0, 1, size=n))
proc_3 = (covariance_2 * proc_2 + (1 - covariance_2) *
np.random.normal(0, 1, size=n))
proc_1 = proc_1[(2*delay):]
proc_2 = proc_2[delay:-delay]
proc_3 = proc_3[:-(2*delay)]
if discrete: # discretise data
proc_1_dis, proc_2_dis = est._discretise_vars(
var1=proc_1, var2=proc_2)
proc_1_dis, proc_3_dis = est._discretise_vars(
var1=proc_1, var2=proc_3)
d[0, :, r] = proc_1_dis
d[1, :, r] = proc_2_dis
d[2, :, r] = proc_3_dis
else:
d[0, :, r] = proc_1
d[1, :, r] = proc_2
d[2, :, r] = proc_3
return d
def analyse_discrete_data():
"""Run network inference on discrete data."""
data = generate_discrete_data()
settings = {
'cmi_estimator': 'JidtDiscreteCMI',
'discretise_method': 'none',
'n_discrete_bins': 5, # alphabet size of the variables analysed
'min_lag_sources': 1,
'max_lag_sources': 3,
'max_lag_target': 1}
nw = MultivariateTE()
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}discrete_results_mte_{1}.p'.format(
path, settings['cmi_estimator']), 'wb'))
nw = BivariateTE()
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}discrete_results_bte_{1}.p'.format(
path, settings['cmi_estimator']), 'wb'))
nw = MultivariateMI()
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}discrete_results_mmi_{1}.p'.format(
path, settings['cmi_estimator']), 'wb'))
nw = BivariateMI()
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}discrete_results_bmi_{1}.p'.format(
path, settings['cmi_estimator']), 'wb'))
def analyse_continuous_data():
"""Run network inference on continuous data."""
data = generate_continuous_data()
settings = {
'min_lag_sources': 1,
'max_lag_sources': 3,
'max_lag_target': 1}
nw = MultivariateTE()
for estimator in ['JidtGaussianCMI', 'JidtKraskovCMI']:
settings['cmi_estimator'] = estimator
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}continuous_results_mte_{1}.p'.format(
path, estimator), 'wb'))
nw = BivariateTE()
for estimator in ['JidtGaussianCMI', 'JidtKraskovCMI']:
settings['cmi_estimator'] = estimator
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}continuous_results_bte_{1}.p'.format(
path, estimator), 'wb'))
nw = MultivariateMI()
for estimator in ['JidtGaussianCMI', 'JidtKraskovCMI']:
settings['cmi_estimator'] = estimator
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}continuous_results_mmi_{1}.p'.format(
path, estimator), 'wb'))
nw = BivariateMI()
for estimator in ['JidtGaussianCMI', 'JidtKraskovCMI']:
settings['cmi_estimator'] = estimator
res = nw.analyse_network(settings=settings, data=data)
pickle.dump(res, open('{0}continuous_results_bmi_{1}.p'.format(
path, estimator), 'wb'))
def assert_results():
for algo in ['mmi', 'mte', 'bmi', 'bte']:
# Test continuous data:
for estimator in ['JidtGaussianCMI', 'JidtKraskovCMI']:
res = pickle.load(open(
'data/continuous_results_{0}_{1}.p'.format(
algo, estimator), 'rb'))
print('\nInference algorithm: {0} (estimator: {1})'.format(
algo, estimator))
_print_result(res)
# Test discrete data:
estimator = 'JidtDiscreteCMI'
res = pickle.load(open(
'data/discrete_results_{0}_{1}.p'.format(
algo, estimator), 'rb'))
print('\nInference algorithm: {0} (estimator: {1})'.format(
algo, estimator))
_print_result(res)
def _print_result(res):
res.adjacency_matrix.print_matrix()
tp = 0
fp = 0
if res.adjacency_matrix._edge_matrix[0, 1] == True: tp += 1
if res.adjacency_matrix._edge_matrix[1, 2] == True: tp += 1
if res.adjacency_matrix._edge_matrix[0, 2] == True: fp += 1
fn = 2 - tp
print('TP: {0}, FP: {1}, FN: {2}'.format(tp, fp, fn))
if __name__ == '__main__':
analyse_discrete_data()
analyse_mute_te_data()
analyse_continuous_data()
assert_results()
|
pwollstadt/IDTxl
|
test/generate_test_data.py
|
Python
|
gpl-3.0
| 8,187 | 0.000733 |
import sublime
from . import SblmCmmnFnctns
class Spinner:
SYMBOLS_ROW = u'←↑→↓'
SYMBOLS_BOX = u'⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏'
def __init__(self, symbols, view, startStr, endStr):
self.symbols = symbols
self.length = len(symbols)
self.position = 0
self.stopFlag = False
self.view = view
self.startStr = startStr
self.endStr = endStr
def __next__(self):
self.position = self.position + 1
return self.startStr + self.symbols[self.position % self.length] + self.endStr
def start(self):
if not self.stopFlag:
self.view.set_status(SblmCmmnFnctns.SUBLIME_STATUS_SPINNER, self.__next__())
sublime.set_timeout(lambda: self.start(), 300)
def stop(self):
self.view.erase_status(SblmCmmnFnctns.SUBLIME_STATUS_SPINNER)
self.stopFlag = True
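# Usage sketch (illustrative; `view` stands for a sublime.View and the status
# strings are arbitrary):
#
#     spinner = Spinner(Spinner.SYMBOLS_BOX, view, 'BSScript ', ' building')
#     spinner.start()          # updates the status bar every 300 ms
#     ...                      # long-running work elsewhere
#     spinner.stop()           # erases the status entry and stops further updates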
|
rusiv/BSScript
|
bsscript/bsscriptSblm/Spinner.py
|
Python
|
mit
| 784 | 0.034392 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2009 Ars Aperta, Itaapy, Pierlis, Talend.
#
# Authors: David Versmisse <david.versmisse@itaapy.com>
#
# This file is part of Lpod (see: http://lpod-project.org).
# Lpod is free software; you can redistribute it and/or modify it under
# the terms of either:
#
# a) the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option)
# any later version.
# Lpod is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Lpod. If not, see <http://www.gnu.org/licenses/>.
#
# b) the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Import from the Standard Library
from sys import stdout
# Import from docutils
from docutils import nodes
from docutils.core import publish_doctree
# Import from imaging
from PIL import Image
# Import from lpod
from document import odf_new_document_from_type
from frame import odf_create_image_frame, odf_create_text_frame
from heading import odf_create_heading
from link import odf_create_link
from list import odf_create_list, odf_create_list_item
from note import odf_create_note
from paragraph import odf_create_paragraph, odf_create_line_break
from paragraph import odf_create_undividable_space
from span import odf_create_span
from scriptutils import printwarn
from style import odf_create_style
from table import odf_create_cell, odf_create_table, odf_create_row
from table import odf_create_column, odf_create_header_rows
from toc import odf_create_toc
DPI = 72
def convert_text(node, context):
context["top"].append(node.astext())
def convert_section(node, context):
# Inc the heading level
context["heading-level"] += 1
# Reset the top to body
context["top"] = context["body"]
# Convert
for child in node:
convert_node(child, context)
# Restore the heading level
context["heading-level"] -= 1
def convert_title(node, context):
level = context["heading-level"]
if level == 0:
# The document did not start with a section
level = 1
heading = odf_create_heading(level, node.astext(),
style='Heading_20_%s' % level)
context["body"].append(heading)
def convert_paragraph(node, context):
# Search for a default style
style = context['styles'].get('paragraph')
paragraph = odf_create_paragraph(style=style)
context["top"].append(paragraph)
# Save the current top
old_top = context["top"]
# Convert
context["top"] = paragraph
for child in node:
convert_node(child, context)
# And restore the top
context["top"] = old_top
def convert_list(node, context, list_type):
# Reuse template styles
if list_type == "enumerated":
style_name = "Numbering_20_1"
else:
style_name = "List_20_1"
odf_list = odf_create_list(style=style_name)
context["top"].append(odf_list)
# Save the current top
old_top = context["top"]
for item in node:
if item.tagname != "list_item":
printwarn("node not supported: %s" % item.tagname)
continue
# Create a new item
odf_item = odf_create_list_item()
odf_list.append(odf_item)
# A new top
context["top"] = odf_item
for child in item:
convert_node(child, context)
# And restore the top
context["top"] = old_top
def convert_list_enumerated(node, context):
return convert_list(node, context, "enumerated")
def convert_list_bullet(node, context):
return convert_list(node, context, "bullet")
def convert_topic(node, context):
# Reset the top to body
context["top"] = context["body"]
# Yet an other TOC ?
if context["skip_toc"]:
return
if context["toc"] is not None:
printwarn("a TOC is already inserted")
return
title = node.next_node(condition=nodes.title).astext()
toc = odf_create_toc(title=title)
context["body"].append(toc)
context["toc"] = toc
def convert_footnote(node, context):
# XXX ids is a list ??
refid = node.get("ids")[0]
# Find the footnote
footnotes = context["footnotes"]
if refid not in footnotes:
printwarn('unknown footnote "%s"' % refid)
return
footnote_body = footnotes[refid].get_element("text:note-body")
# Save the current top
old_top = context["top"]
# Fill the note
context["top"] = footnote_body
for child in node:
# We skip the label (already added)
if child.tagname == "label":
continue
convert_node(child, context)
# And restore the top
context["top"] = old_top
def convert_footnote_reference(node, context):
refid = node.get("refid")
citation = node.astext()
footnote = odf_create_note(note_id=refid, citation=citation)
context["top"].append(footnote)
context["footnotes"][refid] = footnote
def _convert_style_like(node, context, style_name):
# Create the span
span = odf_create_span(style=style_name)
context["top"].append(span)
# Save the current top
old_top = context["top"]
# Convert
context["top"] = span
for child in node:
convert_node(child, context)
# And restore the top
context["top"] = old_top
def _get_emphasis_style(context):
styles = context['styles']
emphasis_style = styles.get('emphasis')
if emphasis_style is not None:
return emphasis_style
emphasis_style = odf_create_style("text", italic=True)
context['doc'].insert_style(emphasis_style, automatic=True)
styles['emphasis'] = emphasis_style
return emphasis_style
def convert_emphasis(node, context):
emphasis_style = _get_emphasis_style(context).get_style_name()
# Convert
_convert_style_like(node, context, emphasis_style)
def _get_strong_style(context):
styles = context['styles']
strong_style = styles.get('strong')
if strong_style is not None:
return strong_style
strong_style = odf_create_style("text", bold=True)
context['doc'].insert_style(strong_style, automatic=True)
styles['strong'] = strong_style
return strong_style
def convert_strong(node, context):
strong_style = _get_strong_style(context).get_style_name()
# Convert
_convert_style_like(node, context, strong_style)
def convert_literal(node, context):
# Convert
_convert_style_like(node, context, "Example")
def convert_literal_block(node, context):
paragraph = odf_create_paragraph(style="Preformatted_20_Text")
context["top"].append(paragraph)
# Convert
for child in node:
# Only text
if child.tagname != "#text":
printwarn('node "%s" not supported in literal block' % (
child.tagname))
continue
text = child.astext()
tmp = []
spaces = 0
for c in text:
if c == '\n':
if tmp:
tmp = u"".join(tmp)
paragraph.append(tmp)
tmp = []
spaces = 0
paragraph.append(odf_create_line_break())
elif c == '\r':
continue
elif c == ' ':
spaces += 1
elif c == '\t':
# Tab = 4 spaces
spaces += 4
else:
if spaces >= 2:
if tmp:
tmp = u"".join(tmp)
paragraph.append(tmp)
tmp = []
paragraph.append(
odf_create_undividable_space(spaces))
spaces = 0
elif spaces == 1:
tmp.append(' ')
spaces = 0
tmp.append(c)
if tmp:
tmp = u"".join(tmp)
paragraph.append(tmp)
def convert_reference(node, context):
refuri = node.get("refuri")
text = node.astext()
link = odf_create_link(refuri)
link.set_text(text)
context["top"].append(link)
def _get_term_style(context):
styles = context['styles']
term_style = styles.get('term')
if term_style is not None:
return term_style
# Reuse template style if any
doc = context['doc']
term_style = doc.get_style('paragraph',
u"Definition_20_List_20_Term")
if term_style is None:
# Create default one
term_style = odf_create_style('paragraph',
name=u"Definition_20_List_20_Term",
display_name=u"Definition List Term", parent="Standard",
font_weight=u"bold", area='text')
doc.insert_style(term_style, automatic=False)
styles['term'] = term_style
return term_style
def _get_definition_style(context):
styles = context['styles']
definition_style = styles.get('definition')
if definition_style is not None:
return definition_style
# Reuse template style if any
doc = context['doc']
definition_style = doc.get_style('paragraph',
u"Definition_20_List_20_Definition")
if definition_style is None:
# Create default one
definition_style = odf_create_style('paragraph',
name=u"Definition_20_List_20_Definition",
display_name=u"Definition List Definition",
parent="Standard", margin_left=u"0.5cm",
margin_right=u"0cm", text_indent=u"0cm",
**{'style:auto-text-indent': u"false"})
doc.insert_style(definition_style, automatic=False)
styles['definition'] = definition_style
return definition_style
def convert_definition_list(node, context):
"""Convert a list of term/definition pairs to styled paragraphs.
The "Definition List Term" style is looked for term paragraphs, and the
"Definition List Definition" style is looked for definition paragraphs.
"""
styles = context['styles']
term_style = _get_term_style(context).get_style_name()
definition_style = _get_definition_style(context).get_style_name()
for item in node:
if item.tagname != "definition_list_item":
printwarn('node "%s" not supported in definition_list' % (
item.tagname))
continue
for child in item:
tagname = child.tagname
if tagname == "term":
paragraph = odf_create_paragraph(text=child.astext(),
style=term_style)
context["top"].append(paragraph)
elif tagname == "definition":
# Push a style on the stack for next paragraphs to use
styles['paragraph'] = definition_style
for subchildren in child:
convert_node(subchildren, context)
# Pop the paragraph style
del styles['paragraph']
else:
printwarn('node "%s" not supported in definition_list_item' %
tagname)
def convert_block_quote(node, context):
# TODO Add the style
for child in node:
convert_node(child, context)
def _get_caption_style(context):
styles = context['styles']
caption_style = styles.get('caption')
if caption_style is not None:
return caption_style
caption_style = odf_create_style('graphic', parent=u"Frame",
**{'style:wrap': u"none", 'style:vertical-pos': u"top",
'style:vertical-rel': u"paragraph-content",
'style:horizontal-pos': u"center",
'style:horizontal-rel': u"paragraph-content",
'fo:padding': u"0.25cm", 'fo:border': u"0cm solid #000000"})
context['doc'].insert_style(caption_style, automatic=True)
styles['caption'] = caption_style
return caption_style
def _get_image_style(context):
styles = context['styles']
image_style = styles.get('image')
if image_style is not None:
return image_style
image_style = odf_create_style('graphic', parent="Graphics",
**{'style:horizontal-pos': u"center",
'style:horizontal-rel': u"paragraph"})
context['doc'].insert_style(image_style, automatic=True)
styles['image'] = image_style
return image_style
def _add_image(image, caption, context, width=None, height=None):
# Load the image to find its size
encoding = stdout.encoding if stdout.encoding is not None else "utf-8"
try:
image_file = open(image.encode(encoding), 'rb')
image_object = Image.open(image_file)
except (UnicodeEncodeError, IOError, OverflowError), e:
printwarn('unable to insert the image "%s": %s' % (image, e))
return
size = image_object.size
# Convert pixels to inches
if width:
try:
width = int(width.replace('px', ''))
except ValueError:
raise NotImplementedError, 'only pixel units supported'
if height:
try:
                height = int(height.replace('px', ''))
except ValueError:
raise NotImplementedError, 'only pixel units supported'
else:
height = int(width / (float(size[0]) / float(size[1])))
size = (width, height)
elif height:
try:
height = int(height.replace('px', ''))
except ValueError:
raise NotImplementedError, 'only pixel units supported'
width = int(height * (float(size[0]) / float(size[1])))
size = (width, height)
size = ("%sin" % (float(size[0]) / DPI), "%sin" % (float(size[1]) / DPI))
# Add the image
local_uri = context["doc"].add_file(image)
# Frame style for the caption frame
caption_style = _get_caption_style(context).get_style_name()
# Frame style for the image frame
image_style = _get_image_style(context).get_style_name()
# In text application, image must be inserted in a paragraph
if context["top"].get_tag() == "office:text":
container = odf_create_paragraph()
context["top"].append(container)
else:
container = context["top"]
if caption:
paragraph = odf_create_paragraph()
image_frame = odf_create_image_frame(local_uri, size=size,
style=image_style)
paragraph.append(image_frame)
paragraph.append(caption)
# A new frame, we fix only the width
text_frame = odf_create_text_frame(paragraph, size=(size[0], None),
style=caption_style)
container.append(text_frame)
else:
image_frame = odf_create_image_frame(local_uri, size=size,
style=image_style)
container.append(image_frame)
def convert_image(node, context):
image = node.get("uri")
width = node.get('width')
height = node.get('height')
_add_image(image, None, context, width=width, height=height)
def convert_figure(node, context):
image = None
caption = None
width = None
height = None
for child in node:
tagname = child.tagname
if tagname == "image":
if image is not None:
printwarn("unexpected duplicate image in a figure")
continue
image = child.get("uri")
width = child.get('width')
height = child.get('height')
elif tagname == "caption":
if caption is not None:
printwarn("unexpected duplicate caption in a figure")
continue
caption = child.astext()
_add_image(image, caption, context, width=width, height=height)
def _convert_table_rows(container, node, context, cell_style=None):
for row in node:
if row.tagname != "row":
printwarn('node "%s" not supported in thead/tbody' % row.tagname)
continue
odf_row = odf_create_row()
container.append(odf_row)
for entry in row:
if entry.tagname != "entry":
printwarn('node "%s" not supported in row' % entry.tagname)
continue
# Create a new odf_cell
odf_cell = odf_create_cell(cell_type="string", style=cell_style)
odf_row.append(odf_cell)
# XXX We don't add table:covered-table-cell !
# It's bad but OO can nevertheless load the file
morecols = entry.get("morecols")
if morecols is not None:
morecols = int(morecols) + 1
odf_cell.set_attribute('table:number-columns-spanned',
str(morecols))
morerows = entry.get("morerows")
if morerows is not None:
morerows = int(morerows) + 1
odf_cell.set_attribute('table:number-rows-spanned',
str(morerows))
# Save the current top
old_top = context["top"]
# Convert
context["top"] = odf_cell
for child in entry:
convert_node(child, context)
# And restore the top
context["top"] = old_top
def _get_cell_style(context):
styles = context['styles']
cell_style = styles.get('cell')
if cell_style is not None:
return cell_style
# Give borders to cells
cell_style = odf_create_style('table-cell', u"odf_table.A1",
padding=u"0.049cm", border=u"0.002cm solid #000000")
context['doc'].insert_style(cell_style, automatic=True)
styles['cell'] = cell_style
return cell_style
def convert_table(node, context):
cell_style = _get_cell_style(context).get_style_name()
for tgroup in node:
if tgroup.tagname != "tgroup":
printwarn('node "%s" not supported in table' % tgroup.tagname)
continue
columns_number = 0
odf_table = None
for child in tgroup:
tagname = child.tagname
if tagname == "thead" or tagname == "tbody":
# Create a new table with the info columns_number
if odf_table is None:
context["tables_number"] += 1
# TODO Make it possible directly with odf_create_table
odf_table = odf_create_table(name="table%d" %
context["tables_number"])
columns = odf_create_column(repeated=columns_number)
odf_table.append(columns)
# Convert!
if tagname == "thead":
header = odf_create_header_rows()
odf_table.append(header)
_convert_table_rows(header, child, context,
cell_style=cell_style)
else:
_convert_table_rows(odf_table, child, context,
cell_style=cell_style)
elif tagname == "colspec":
columns_number += 1
else:
printwarn('node "%s" not supported in tgroup' % (
child.tagname))
continue
context["top"].append(odf_table)
convert_methods = {
'#text': convert_text,
'block_quote': convert_block_quote,
'bullet_list': convert_list_bullet,
'definition_list': convert_definition_list,
'emphasis': convert_emphasis,
'enumerated_list': convert_list_enumerated,
'figure': convert_figure,
'footnote': convert_footnote,
'footnote_reference': convert_footnote_reference,
'image': convert_image,
'literal': convert_literal,
'literal_block': convert_literal_block,
'paragraph': convert_paragraph,
'reference': convert_reference,
'section': convert_section,
'strong': convert_strong,
'table': convert_table,
'title': convert_title,
'subtitle': convert_title,
'topic': convert_topic
}
def convert_node(node, context):
tagname = node.tagname
convert_method = convert_methods.get(tagname)
if convert_method is not None:
convert_method(node, context)
else:
printwarn("node not supported: %s" % tagname)
def convert(document, doctree, heading_level=1, skip_toc=False):
"""Convert a reStructuredText source into an existing document.
If the document contains its own TOC, you can ignore others with
"skip_toc".
Arguments:
document -- odf_document
doctree -- docutils node (reST str accepted)
heading_level -- int
skip_toc -- bool
Return: odf_document
"""
# Init a context
body = document.get_body()
context = {"doc": document, "body": body, "top": body, "styles": {},
"heading-level": heading_level, "toc": None,
"skip_toc": skip_toc, "footnotes": {}, "tables_number": 0}
# Go!
if isinstance(doctree, str):
doctree = publish_doctree(doctree)
for child in doctree:
convert_node(child, context)
# Finish the work
toc = context["toc"]
if toc is not None:
toc.toc_fill()
return document
def rst2odt(rst_body, template=None, heading_level=1):
"""Convert a reStructuredText source to a new document.
The template is a document to reuse instead of the default lpOD template.
Arguments:
rst_body -- reST str (docutils node accepted)
template -- odf_document
Return: odf_document
"""
# Use an existing document structure
if template is not None:
document = template.clone()
# Clean the body
document.get_body().clear()
# Or create a new document
else:
document = odf_new_document_from_type("text")
return convert(document, rst_body, heading_level=heading_level)
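# Usage sketch (added; the file handling is illustrative, only rst2odt/convert
# come from this module):
#
#     rst_body = open('report.rst', 'rb').read()
#     document = rst2odt(rst_body)                  # default lpOD text template
#     # document = rst2odt(rst_body, template=existing_odf_document)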
|
uliss/quneiform
|
tests/py/lpod/rst2odt.py
|
Python
|
gpl-3.0
| 22,265 | 0.00265 |
"""Provides all the generic data related to the address."""
COUNTRY_CODES = {
"a2": [
"AD",
"AE",
"AF",
"AG",
"AI",
"AL",
"AM",
"AN",
"AO",
"AQ",
"AR",
"AS",
"AT",
"AU",
"AW",
"AX",
"AZ",
"BA",
"BB",
"BD",
"BE",
"BF",
"BG",
"BH",
"BI",
"BJ",
"BL",
"BM",
"BN",
"BO",
"BR",
"BS",
"BT",
"BV",
"BW",
"BY",
"BZ",
"CA",
"CC",
"CD",
"CF",
"CG",
"CH",
"CI",
"CK",
"CL",
"CM",
"CN",
"CO",
"CR",
"CU",
"CV",
"CX",
"CY",
"CZ",
"DE",
"DJ",
"DK",
"DM",
"DO",
"DZ",
"EC",
"EE",
"EG",
"EH",
"ER",
"ES",
"ET",
"FI",
"FJ",
"FK",
"FM",
"FO",
"FR",
"GA",
"GB",
"GD",
"GE",
"GF",
"GG",
"GH",
"GI",
"GL",
"GM",
"GN",
"GP",
"GQ",
"GR",
"GS",
"GT",
"GU",
"GW",
"GY",
"HK",
"HM",
"HN",
"HR",
"HT",
"HU",
"ID",
"IE",
"IL",
"IM",
"IN",
"IO",
"IQ",
"IR",
"IS",
"IT",
"JE",
"JM",
"JO",
"JP",
"KE",
"KG",
"KH",
"KI",
"KM",
"KN",
"KP",
"KR",
"KW",
"KY",
"KZ",
"LA",
"LB",
"LC",
"LI",
"LK",
"LR",
"LS",
"LT",
"LU",
"LV",
"LY",
"MA",
"MC",
"MD",
"ME",
"MF",
"MG",
"MH",
"MK",
"ML",
"MM",
"MN",
"MO",
"MP",
"MQ",
"MR",
"MS",
"MT",
"MU",
"MV",
"MW",
"MX",
"MY",
"MZ",
"NA",
"NC",
"NE",
"NF",
"NG",
"NI",
"NL",
"NO",
"NP",
"NR",
"NU",
"NZ",
"OM",
"PA",
"PE",
"PF",
"PG",
"PH",
"PK",
"PL",
"PM",
"PN",
"PR",
"PS",
"PT",
"PW",
"PY",
"QA",
"RE",
"RO",
"RS",
"RU",
"RW",
"SA",
"SB",
"SC",
"SD",
"SE",
"SG",
"SH",
"SI",
"SJ",
"SK",
"SL",
"SM",
"SN",
"SO",
"SR",
"SS",
"ST",
"SV",
"SY",
"SZ",
"TC",
"TD",
"TF",
"TG",
"TH",
"TJ",
"TK",
"TL",
"TM",
"TN",
"TO",
"TR",
"TT",
"TV",
"TW",
"TZ",
"UA",
"UG",
"UM",
"US",
"UY",
"UZ",
"VA",
"VC",
"VE",
"VG",
"VI",
"VN",
"VU",
"WF",
"WS",
"YE",
"YT",
"ZA",
"ZM",
"ZW",
],
"a3": [
"AND",
"ARE",
"AFG",
"ATG",
"AIA",
"ALB",
"ARM",
"ANT",
"AGO",
"ATA",
"ARG",
"ASM",
"AUT",
"AUS",
"ABW",
"ALA",
"AZE",
"BIH",
"BRB",
"BGD",
"BEL",
"BFA",
"BGR",
"BHR",
"BDI",
"BEN",
"BLM",
"BMU",
"BRN",
"BOL",
"BRA",
"BHS",
"BTN",
"BVT",
"BWA",
"BLR",
"BLZ",
"CAN",
"CCK",
"COD",
"CAF",
"COG",
"CHE",
"CIV",
"COK",
"CHL",
"CMR",
"CHN",
"COL",
"CRI",
"CUB",
"CPV",
"CXR",
"CYP",
"CZE",
"DEU",
"DJI",
"DNK",
"DMA",
"DOM",
"DZA",
"ECU",
"EST",
"EGY",
"ESH",
"ERI",
"ESP",
"ETH",
"FIN",
"FJI",
"FLK",
"FSM",
"FRO",
"FRA",
"GAB",
"GBR",
"GRD",
"GEO",
"GUF",
"GGY",
"GHA",
"GIB",
"GRL",
"GMB",
"GIN",
"GLP",
"GNQ",
"GRC",
"SGS",
"GTM",
"GUM",
"GNB",
"GUY",
"HKG",
"HMD",
"HND",
"HRV",
"HTI",
"HUN",
"IDN",
"IRL",
"ISR",
"IMN",
"IND",
"IOT",
"IRQ",
"IRN",
"ISL",
"ITA",
"JEY",
"JAM",
"JOR",
"JPN",
"KEN",
"KGZ",
"KHM",
"KIR",
"COM",
"KNA",
"PRK",
"KOR",
"KWT",
"CYM",
"KAZ",
"LAO",
"LBN",
"LCA",
"LIE",
"LKA",
"LBR",
"LSO",
"LTU",
"LUX",
"LVA",
"LBY",
"MAR",
"MCO",
"MDA",
"MNE",
"MAF",
"MDG",
"MHL",
"MKD",
"MLI",
"MMR",
"MNG",
"MAC",
"MNP",
"MTQ",
"MRT",
"MSR",
"MLT",
"MUS",
"MDV",
"MWI",
"MEX",
"MYS",
"MOZ",
"NAM",
"NCL",
"NER",
"NFK",
"NGA",
"NIC",
"NLD",
"NOR",
"NPL",
"NRU",
"NIU",
"NZL",
"OMN",
"PAN",
"PER",
"PYF",
"PNG",
"PHL",
"PAK",
"POL",
"SPM",
"PCN",
"PRI",
"PSE",
"PRT",
"PLW",
"PRY",
"QAT",
"REU",
"ROU",
"SRB",
"RUS",
"RWA",
"SAU",
"SLB",
"SYC",
"SDN",
"SWE",
"SGP",
"SHN",
"SVN",
"SJM",
"SVK",
"SLE",
"SMR",
"SEN",
"SOM",
"SUR",
"SSD",
"STP",
"SLV",
"SYR",
"SWZ",
"TCA",
"TCD",
"ATF",
"TGO",
"THA",
"TJK",
"TKL",
"TLS",
"TKM",
"TUN",
"TON",
"TUR",
"TTO",
"TUV",
"TWN",
"TZA",
"UKR",
"UGA",
"UMI",
"USA",
"URY",
"UZB",
"VAT",
"VCT",
"VEN",
"VGB",
"VIR",
"VNM",
"VUT",
"WLF",
"WSM",
"YEM",
"MYT",
"ZAF",
"ZMB",
"ZWE",
],
"fifa": [
"AFG",
"AIA",
"ALB",
"ALG",
"AND",
"ANG",
"ARG",
"ARM",
"ARU",
"ARU",
"ASA",
"ATG",
"AUT",
"AZE",
"BAH",
"BAN",
"BDI",
"BEL",
"BEN",
"BER",
"BFA",
"BHR",
"BHU",
"BIH",
"BLR",
"BLZ",
"BOE",
"BOL",
"BOT",
"BRA",
"BRB",
"BRU",
"BUL",
"CAM",
"CAN",
"CAY",
"CGO",
"CHA",
"CHI",
"CHN",
"CIV",
"CMR",
"COD",
"COK",
"COL",
"COM",
"CPV",
"CRC",
"CRO",
"CTA",
"CUB",
"CUW",
"CYP",
"CZE",
"DEN",
"DJI",
"DMA",
"DOM",
"ECU",
"EGY",
"ENG",
"EQG",
"ERI",
"ESP",
"EST",
"ETH",
"FIJ",
"FIN",
"FRA",
"FRO",
"GAB",
"GAM",
"GEO",
"GER",
"GHA",
"GIB",
"GNB",
"GPE",
"GRE",
"GRN",
"GUA",
"GUI",
"GUM",
"GUY",
"GYF",
"HAI",
"HKG",
"HON",
"HUN",
"IDN",
"IND",
"IRL",
"IRN",
"IRQ",
"ISL",
"ISR",
"ITA",
"JAM",
"JOR",
"JPN",
"KAZ",
"KEN",
"KGZ",
"KIR",
"KOR",
"KSA",
"KUW",
"LAO",
"LBR",
"LBY",
"LCA",
"LES",
"LIB",
"LIE",
"LTU",
"LUX",
"LVA",
"MAC",
"MAD",
"MAR",
"MAS",
"MDA",
"MDV",
"MEX",
"MKD",
"MLI",
"MLT",
"MNE",
"MNG",
"MOZ",
"MRI",
"MSR",
"MTN",
"MTQ",
"MWI",
"MYA",
"NAM",
"NCA",
"NCL",
"NED",
"NEP",
"NGA",
"NIG",
"NIR",
"NIU",
"NMI",
"NOR",
"NZL",
"OMA",
"PAK",
"PAN",
"PAR",
"PER",
"PHI",
"PLE",
"PNG",
"POL",
"POR",
"PRK",
"PUR",
"QAT",
"REU",
"ROU",
"RSA",
"RUS",
"RWA",
"SAM",
"SCO",
"SDN",
"SEN",
"SEY",
"SIN",
"SKN",
"SLE",
"SLV",
"SMR",
"SMT",
"SOL",
"SOM",
"SRB",
"SRI",
"SSD",
"STP",
"SUI",
"SUR",
"SVK",
"SVN",
"SWE",
"SWZ",
"SXM",
"SYR",
"TAH",
"TAN",
"TCA",
"TGA",
"THA",
"TJK",
"TKM",
"TLS",
"TOG",
"TPE",
"TRI",
"TUN",
"TUR",
"TUV",
"UAE",
"UGA",
"UKR",
"URU",
"USA",
"UZB",
"VAN",
"VEN",
"VGB",
"VIE",
"VIN",
"VIR",
"WAL",
"YEM",
"ZAM",
"ZAN",
"ZIM",
],
"ioc": [
"AFG",
"ALB",
"ALG",
"AND",
"ANG",
"ANT",
"ARG",
"ARM",
"ARU",
"ASA",
"AUS",
"AUT",
"AZE",
"BAH",
"BAN",
"BAR",
"BDI",
"BEL",
"BEN",
"BER",
"BHU",
"BIH",
"BIZ",
"BLR",
"BOL",
"BOT",
"BRA",
"BRN",
"BRU",
"BUL",
"BUR",
"CAF",
"CAM",
"CAN",
"CAY",
"CGO",
"CHA",
"CHI",
"CHN",
"CIV",
"CMR",
"COD",
"COK",
"COL",
"COM",
"CPV",
"CRC",
"CRO",
"CUB",
"CYP",
"CZE",
"DEN",
"DJI",
"DMA",
"DOM",
"ECU",
"EGY",
"ERI",
"ESA",
"ESP",
"EST",
"ETH",
"FIJ",
"FIN",
"FRA",
"FSM",
"GAB",
"GAM",
"GBR",
"GBS",
"GEO",
"GEQ",
"GER",
"GHA",
"GRE",
"GRN",
"GUA",
"GUI",
"GUM",
"GUY",
"HAI",
"HKG",
"HON",
"HUN",
"INA",
"IND",
"IRI",
"IRL",
"IRQ",
"ISL",
"ISR",
"ISV",
"ITA",
"IVB",
"JAM",
"JOR",
"JPN",
"KAZ",
"KEN",
"KGZ",
"KIR",
"KOR",
"KSA",
"KUW",
"LAO",
"LAT",
"LBA",
"LBR",
"LCA",
"LES",
"LIB",
"LIE",
"LTU",
"LUX",
"MAD",
"MAR",
"MAS",
"MAW",
"MDA",
"MDV",
"MEX",
"MGL",
"MHL",
"MKD",
"MLI",
"MLT",
"MNE",
"MON",
"MOZ",
"MRI",
"MTN",
"MYA",
"NAM",
"NCA",
"NED",
"NEP",
"NGR",
"NIG",
"NOR",
"NRU",
"NZL",
"OMA",
"PAK",
"PAN",
"PAR",
"PER",
"PHI",
"PLE",
"PLW",
"PNG",
"POL",
"POR",
"PRK",
"PUR",
"QAT",
"ROU",
"RSA",
"RUS",
"RWA",
"SAM",
"SEN",
"SEY",
"SIN",
"SKN",
"SLE",
"SLO",
"SMR",
"SOL",
"SOM",
"SRB",
"SRI",
"STP",
"SUD",
"SUI",
"SUR",
"SVK",
"SWE",
"SWZ",
"SYR",
"TAN",
"TGA",
"THA",
"TJK",
"TKM",
"TLS",
"TOG",
"TPE",
"TTO",
"TUN",
"TUR",
"TUV",
"UAE",
"UGA",
"UKR",
"URU",
"USA",
"UZB",
"VAN",
"VEN",
"VIE",
"VIN",
"YEM",
"ZAM",
"ZIM",
],
"numeric": [
"020",
"784",
"004",
"028",
"660",
"008",
"051",
"530",
"024",
"010",
"032",
"016",
"040",
"036",
"533",
"248",
"031",
"070",
"052",
"050",
"056",
"854",
"100",
"048",
"108",
"204",
"652",
"060",
"096",
"068",
"076",
"044",
"064",
"074",
"072",
"112",
"084",
"124",
"166",
"180",
"140",
"178",
"756",
"384",
"184",
"152",
"120",
"156",
"170",
"188",
"192",
"132",
"162",
"196",
"203",
"276",
"262",
"208",
"212",
"214",
"012",
"218",
"233",
"818",
"732",
"232",
"724",
"231",
"246",
"242",
"238",
"583",
"234",
"250",
"266",
"826",
"308",
"268",
"254",
"831",
"288",
"292",
"304",
"270",
"324",
"312",
"226",
"300",
"239",
"320",
"316",
"624",
"328",
"344",
"334",
"340",
"191",
"332",
"348",
"360",
"372",
"376",
"833",
"356",
"086",
"368",
"364",
"352",
"380",
"832",
"388",
"400",
"392",
"404",
"417",
"116",
"296",
"174",
"659",
"408",
"410",
"414",
"136",
"398",
"418",
"422",
"662",
"438",
"144",
"430",
"426",
"440",
"442",
"428",
"434",
"504",
"492",
"498",
"499",
"663",
"450",
"584",
"807",
"466",
"104",
"496",
"446",
"580",
"474",
"478",
"500",
"470",
"480",
"462",
"454",
"484",
"458",
"508",
"516",
"540",
"562",
"574",
"566",
"558",
"528",
"578",
"524",
"520",
"570",
"554",
"512",
"591",
"604",
"258",
"598",
"608",
"586",
"616",
"666",
"612",
"630",
"275",
"620",
"585",
"600",
"634",
"638",
"642",
"688",
"643",
"646",
"682",
"090",
"690",
"736",
"752",
"702",
"654",
"705",
"744",
"703",
"694",
"674",
"686",
"706",
"740",
"728",
"678",
"222",
"760",
"748",
"796",
"148",
"260",
"768",
"764",
"762",
"772",
"626",
"795",
"788",
"776",
"792",
"780",
"798",
"158",
"834",
"804",
"800",
"581",
"840",
"858",
"860",
"336",
"670",
"862",
"092",
"850",
"704",
"548",
"876",
"882",
"887",
"175",
"710",
"894",
"716",
],
}
SHORTENED_ADDRESS_FMT = [
"cs",
"da",
"de",
"de-at",
"de-ch",
"el",
"es",
"fi",
"is",
"nl",
"nl-be",
"no",
"sk",
"sv",
]
CONTINENT_CODES = ["AF", "NA", "OC", "AN", "AS", "EU", "SA"]
CALLING_CODES = [
"+1",
"+7",
"+20",
"+27",
"+30",
"+31",
"+32",
"+33",
"+34",
"+36",
"+39",
"+40",
"+41",
"+43",
"+44",
"+44",
"+44",
"+44",
"+45",
"+46",
"+47",
"+48",
"+49",
"+51",
"+52",
"+53",
"+54",
"+55",
"+56",
"+56",
"+57",
"+58",
"+60",
"+61",
"+61",
"+61",
"+62",
"+63",
"+64",
"+64",
"+64",
"+65",
"+66",
"+77",
"+81",
"+82",
"+84",
"+86",
"+90",
"+91",
"+92",
"+93",
"+94",
"+95",
"+98",
"+211",
"+212",
"+213",
"+216",
"+218",
"+220",
"+221",
"+222",
"+223",
"+224",
"+225",
"+226",
"+227",
"+228",
"+229",
"+230",
"+231",
"+232",
"+233",
"+234",
"+235",
"+236",
"+237",
"+238",
"+239",
"+240",
"+241",
"+242",
"+243",
"+244",
"+245",
"+246",
"+246",
"+247",
"+248",
"+249",
"+250",
"+251",
"+252",
"+253",
"+254",
"+255",
"+255",
"+256",
"+257",
"+258",
"+260",
"+261",
"+262",
"+262",
"+263",
"+264",
"+265",
"+266",
"+267",
"+268",
"+269",
"+290",
"+291",
"+297",
"+298",
"+299",
"+350",
"+351",
"+352",
"+353",
"+354",
"+355",
"+356",
"+357",
"+358",
"+359",
"+370",
"+371",
"+372",
"+373",
"+374",
"+375",
"+376",
"+377",
"+378",
"+379",
"+380",
"+381",
"+382",
"+383",
"+385",
"+386",
"+387",
"+389",
"+420",
"+421",
"+423",
"+500",
"+500",
"+501",
"+502",
"+503",
"+504",
"+505",
"+506",
"+507",
"+508",
"+509",
"+590",
"+590",
"+590",
"+591",
"+592",
"+593",
"+594",
"+595",
"+596",
"+596",
"+597",
"+598",
"+670",
"+672",
"+672",
"+673",
"+674",
"+675",
"+676",
"+677",
"+678",
"+679",
"+680",
"+681",
"+682",
"+683",
"+685",
"+686",
"+687",
"+688",
"+689",
"+690",
"+691",
"+692",
"+800",
"+808",
"+850",
"+852",
"+853",
"+855",
"+856",
"+870",
"+878",
"+880",
"+881",
"+886",
"+960",
"+961",
"+962",
"+963",
"+964",
"+965",
"+966",
"+967",
"+968",
"+970",
"+971",
"+972",
"+973",
"+974",
"+975",
"+976",
"+977",
"+992",
"+993",
"+994",
"+995",
"+996",
"+998",
"+1242",
"+1246",
"+1264",
"+1268",
"+1268",
"+1284",
"+1340",
"+1345",
"+1441",
"+1473",
"+1649",
"+1664",
"+1670",
"+1671",
"+1684",
"+1721",
"+1758",
"+1767",
"+1784",
"+1808",
"+1808",
"+1849",
"+1868",
"+1869",
"+1869",
"+1876",
"+1939",
"+2908",
"+4779",
"+4779",
"+5399",
"+5993",
"+5994",
"+5997",
"+5997",
"+5999",
"+8810",
"+8813",
"+8817",
"+8818",
"+35818",
"+88213",
"+88216",
"+90392",
"+99534",
"+99544",
]
|
lk-geimfari/elizabeth
|
mimesis/data/int/address.py
|
Python
|
mit
| 20,986 | 0 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import argparse
import asyncio
def parse_args():
usage = """usage: %prog [options] [hostname]:port ...
python3 select_get_poetry3.py port1 port2 port3 ...
"""
parser = argparse.ArgumentParser(usage)
parser.add_argument('port', nargs='+')
args = vars(parser.parse_args())
addresses = args['port']
if not addresses:
print(parser.format_help())
parser.exit()
def parse_address(addr):
if ':' not in addr:
host = '127.0.0.1'
port = addr
else:
host, port = addr.split(':', 1)
if not port.isdigit():
parser.error('Ports must be integers.')
return host, int(port)
return map(parse_address, addresses)
class PoetryClientProtocol(asyncio.Protocol):
def __init__(self, infile):
self.infile = infile
def connection_made(self, transport):
print(transport.get_extra_info('peername'))
self.transport = transport
self.transport.write(b'poems')
def data_received(self, data):
if data:
print(data)
print('writing to {}'.format(self.infile.name))
self.infile.write(data)
self.transport.write(b'poems')
def eof_received(self):
print('end of writing')
self.infile.close()
def main():
addresses = parse_args()
eventloop = asyncio.get_event_loop()
for address in addresses:
host, port = address
filename = str(port) + '.txt'
infile = open(filename, 'wb')
coro = eventloop.create_connection(
lambda: PoetryClientProtocol(infile), host, port)
t, p = eventloop.run_until_complete(coro)
print(t, p)
try:
eventloop.run_forever()
finally:
eventloop.close()
if __name__ == '__main__':
main()
|
a358003542/python-guide-book
|
codes/ch12/asyncio_get_poetry2.py
|
Python
|
gpl-2.0
| 1,899 | 0 |
from .design_inputs import *
|
samcoveney/GP_emu_UQSA
|
gp_emu_uqsa/design_inputs/__init__.py
|
Python
|
gpl-3.0
| 29 | 0 |
#!/usr/bin/env python
# A bag contains one red disc and one blue disc. In a game of chance a player
# takes a disc at random and its colour is noted. After each turn the disc is
# returned to the bag, an extra red disc is added, and another disc is
# taken at random.
# The player... wins if they have taken more blue discs than red discs at
# the end of the game.
# ------------------------------------------------------------------------
# P_n = prob(disc n is blue) = 1/(n + 1)
# For n discs, let C_1-C_2-...-C_n be the colors drawn, let i_1,...,i_k be the
# indices j such that disk i_j was drawn red. The probability of this event
# is (i_1 * ... * i_k)/factorial(n + 1)
# We can enumeratively define n_{j,k} to be the aggregate numerator
# of all possible draws with j blues drawn out of k draws
#
# The initial conditions are n_{0,1} = 1, n_{1,1} = 1
# The recurrence is defined by the fact that the n_{j + 1,k + 1} is
# can only have the (k + 1)'st element be blue or red, hence
# n_{j + 1,k + 1} = numer(blue)*n_{j,k} + numer(red)*n_{j + 1,k}
# = n_{j,k} + (k + 1)*n_{j + 1,k}
# except for the cases j = k, where n_{j,k} = numer(all blue) = 1
# except for the cases j = 0, where n_{0,k} = k!
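# Worked check for n = 2 (added): n_{0,2} = 2! = 2, n_{1,2} = n_{0,1} + 2*n_{1,1}
# = 3 and n_{2,2} = 1; the numerators sum to 6 = 3!, so the probabilities sum to
# 1. Winning needs 2 blues out of 2 draws, so P(win) = 1/6 and the maximum
# non-losing payout is floor(3!/1) = 6, matching max_payout(2) below.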
from math import factorial
from python.decorators import euler_timer
def iterative_numerator(n):
numerators = {}
for k in range(1, n + 1):
for j in range(k + 1):
if j == 0:
numerators[(j, k)] = factorial(k)
elif j == k:
numerators[(j, k)] = 1
else:
numerators[(j, k)] = (numerators[(j - 1, k - 1)] +
k * numerators[(j, k - 1)])
min_blue = (n / 2) + 1
count = 0
for blue in range(min_blue, n + 1):
count += numerators[(blue, n)]
return count
def max_payout(n):
    # Integer division already floors, so no explicit floor is needed
return factorial(n + 1) / iterative_numerator(n)
def main(verbose=False):
return max_payout(15)
if __name__ == '__main__':
print euler_timer(121)(main)(verbose=True)
|
dhermes/project-euler
|
python/complete/no121.py
|
Python
|
apache-2.0
| 2,079 | 0.000481 |
##############################################################################
#
# Copyright (C) 2018 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Nicolas Bornand
#
# The licence is in the file __manifest__.py
#
##############################################################################
from mock import patch
from .onramp_base_test import TestOnramp
mock_oauth = (
"odoo.addons.message_center_compassion.models.ir_http.IrHTTP._oauth_validation"
)
class TestOnRampController(TestOnramp):
def setUp(self):
super().setUp()
def test_no_token(self):
""" Check we have an access denied if token is not provided
"""
del self.opener.headers["Authorization"]
response = self._send_post({"nothing": "nothing"})
self.assertEqual(response.status_code, 401)
error = response.json()
self.assertEqual(error["ErrorMethod"], "ValidateToken")
def test_bad_token(self):
""" Check we have an access denied if token is not valid
"""
self.opener.headers["Authorization"] = "Bearer notrealtoken"
response = self._send_post({"nothing": "nothing"})
self.assertEqual(response.status_code, 401)
@patch(mock_oauth)
def test_wrong_client_id(self, oauth_patch):
""" Check that if we get a token with unrecognized client_id,
access is denied. """
oauth_patch.return_value = "wrong_user"
response = self._send_post({"nothing": "nothing"})
self.assertEqual(response.status_code, 401)
@patch(mock_oauth)
def test_good_client_id(self, oauth_patch):
""" Check that if we connect with admin as client_id,
access is granted. """
oauth_patch.return_value = "admin"
response = self._send_post({"nothing": "nothing"})
json_result = response.json()
self.assertEqual(response.status_code, 200)
self.assertEqual(
json_result["Message"], "Unknown message type - not processed."
)
|
CompassionCH/compassion-modules
|
message_center_compassion/tests/test_onramp_controller.py
|
Python
|
agpl-3.0
| 2,068 | 0.000484 |
########################################################################
# #
# Anomalous Diffusion #
# #
########################################################################
import steps.interface
########################################################################
# Create Model
from steps.model import *
from steps.geom import *
from steps.rng import *
from steps.sim import *
from steps.saving import *
from steps.visual import *
import time
mdl = Model()
r = ReactionManager()
with mdl:
X = Species.Create()
vsys = VolumeSystem.Create()
with vsys:
dif_X = Diffusion.Create(X, 2e-09)
########################################################################
# Create Geometry
tetmesh = TetMesh.LoadAbaqus('2_20_0.7.inp', scale=1e-06, ebs=None, shadow_mesh="2_20_0.7_conf")
########################################################################
# Create Random number generator
rng = RNG('mt19937', 512, int(time.time()%4294967295))
########################################################################
# Initialize simulation
sim = Simulation('Tetexact', mdl, tetmesh, rng)
sim.injection.X.Count = 2000
########################################################################
# Visualization
rs = ResultSelector(sim)
# Create control
sc = SimControl(end_time = 1.0, upd_interval = 0.00001)
with sc:
with SimDisplay('Show Spine Species'):
# Static mesh element
ElementDisplay(rs.dend, color=[0, 0, 1, 0.2])
# Dynamic element
ElementDisplay(rs.LIST('dend', 'shaft').X, color=[1.0, 0.0, 0.0, 1.0], spec_size=0.1)
with SimDisplay('Hide Spine Species'):
ElementDisplay(rs.dend, color=[0, 0, 1, 0.2])
ElementDisplay(rs.shaft.X, color=[1.0, 0.0, 0.0, 1.0], spec_size=0.1)
with PlotDisplay('Plots'):
SpatialPlot(rs.TETS(tetmesh.shaft.tets).X.Count, axis=[0, 0, 1], nbins=100)
# Enter visualization loop
sc.run()
|
CNS-OIST/STEPS_Example
|
publication_models/API_2/Chen_FNeuroinf_2014/AD/AD_single.py
|
Python
|
gpl-2.0
| 2,125 | 0.004706 |
from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtUiTools import *
import plugin.databaseConnect as database
from datetime import datetime
class sendMessageUI(QMainWindow):
def __init__(self, id = None, bulk = None, parent = None):
QMainWindow.__init__(self,None)
self.setMinimumSize(626,380)
self.setWindowTitle("Message")
self.parent = parent
self.id = id
self.bulk = bulk
self.UIinit()
def UIinit(self):
loader = QUiLoader()
form = loader.load("resources/UI/sendMessage.ui",None)
self.setCentralWidget(form)
#QPushButton
self.send_button = form.findChild(QPushButton,"sendButton")
self.close_button = form.findChild(QPushButton,"closeButton")
#LineEdit
self.to_user = form.findChild(QLineEdit,"to")
self.message = form.findChild(QTextEdit,"message")
#Connect
self.send_button.clicked.connect(self.sendMes)
self.close_button.clicked.connect(self.closeWindow)
if(self.id != None):
self.to_user.setText(self.id)
def closeWindow(self):
self.close()
    ## Create a message and send it to another user ##
def sendMes(self):
db = database.databaseMessage()
toUser = self.to_user.text()
message = self.message.toPlainText()
time = datetime.now()
if(self.bulk == None):
data = self.parent.getCurrentUser()
fromUser = data.getID()
if(db.sendMessage(toUser, fromUser, message, time)):
db.disconnect()
self.parent.showOK("Message Sent", "The message has been sent to the user!")
self.closeWindow()
else:
self.parent.showERROR("UserID Not Found", "The UserID you entered does not exists.")
else:
data = self.parent.parent.getCurrentUser()
fromUser = data.getID()
val = 0
for id in self.bulk:
val = db.sendMessage(id, fromUser, message, time)
if (val):
db.disconnect()
                self.parent.parent.showOK("All Messages Sent", "The message has been sent to all users!")
self.closeWindow()
else:
self.parent.parent.showERROR("ERROR!", "Some Messages are not delivered.")
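# Usage sketch (illustrative; `main_window` stands for the parent widget that
# provides getCurrentUser()/showOK()/showERROR() as used above):
#
#     win = sendMessageUI(id="user42", parent=main_window)
#     win.show()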
|
Poom1997/GMan
|
sendMessageForm.py
|
Python
|
mit
| 2,434 | 0.012736 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2020 T. Zachary Laine
#
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
prop_lookup_test_form = decls = '''\
// Copyright (C) 2020 T. Zachary Laine
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Warning! This file is autogenerated.
#include <boost/text/{0}_break.hpp>
#include <gtest/gtest.h>
TEST({0}, prop_lookups_{2})
{{{1}
}}
'''
prop_lookup_perf_test_form = decls = '''\
// Copyright (C) 2020 T. Zachary Laine
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Warning! This file is autogenerated.
#include <boost/text/{0}_break.hpp>
#include <benchmark/benchmark.h>
#include <iostream>
void BM_{0}_prop(benchmark::State & state)
{{
while (state.KeepRunning()) {{
{1}
}}
std::cout << "Divide result by {2} to get mean time.\\n";
}}
BENCHMARK(BM_{0}_prop);
BENCHMARK_MAIN()
'''
break_test_form = decls = '''\
// Copyright (C) 2020 T. Zachary Laine
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Warning! This file is autogenerated.
#include <boost/text/{0}_break.hpp>
#include <gtest/gtest.h>
#include <algorithm>
TEST({0}, breaks_{2})
{{{1}
}}
'''
grapheme_iterator_test_form = decls = '''\
// Copyright (C) 2020 T. Zachary Laine
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Warning! This file is autogenerated.
#include <boost/text/grapheme_iterator.hpp>
#include <boost/text/transcode_iterator.hpp>
#include <gtest/gtest.h>
#include <algorithm>
{0}
'''
bidi_test_file_form = decls = '''\
// Copyright (C) 2020 T. Zachary Laine
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Warning! This file is autogenerated.
#include <boost/text/bidirectional.hpp>
#include "bidi_tests.hpp"
#include <gtest/gtest.h>
#include <algorithm>
std::vector<int> expected_levels;
std::vector<int> expected_reordered_indices;
TEST(bidi, bidi_{1:03}_000)
{{
{0}
}}
'''
bidi_test_form = '''
{{
// {0} ('{5}') (line {3})
std::vector<uint32_t> const cps = {{ {1} }};
std::vector<int> const levels =
bidi_levels(cps.begin(), cps.end(), {4});
int i = 0;
for (int l : expected_levels) {{
if (0 <= l) {{
EXPECT_EQ(levels[i], l) << "i=" << i;
++i;
}}
}}
EXPECT_EQ((int)levels.size(), i);
std::vector<int> const reordered =
bidi_reordered_indices(cps.begin(), cps.end(), {4});
i = 0;
for (int idx : expected_reordered_indices) {{
// Skip FSI, LRI, RLI, and PDI.
if (cps[idx] < 0x2066 || 0x2069 < cps[idx]) {{
EXPECT_EQ(reordered[i], (int)cps[idx])
<< std::hex
<< " 0x" << reordered[i]
<< " 0x" << cps[idx]
<< std::dec << " i=" << i;
}}
++i;
}}
std::vector<int> reordered_2;
for (auto subrange :
boost::text::bidirectional_subranges(cps, {4})) {{
for (auto cp : subrange) {{
reordered_2.push_back(cp);
}}
}}
i = 0;
for (int idx : expected_reordered_indices) {{
if (cps[idx] < 0x2066 || 0x2069 < cps[idx]) {{
EXPECT_EQ(reordered_2[i], (int)cps[idx])
<< std::hex
<< " 0x" << reordered_2[i]
<< " 0x" << cps[idx]
<< std::dec << " i=" << i;
}}
++i;
}}
EXPECT_EQ(i, (int)reordered_2.size());
}}
'''
bidi_character_test_file_form = decls = '''\
// Copyright (C) 2020 T. Zachary Laine
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Warning! This file is autogenerated.
#include <boost/text/bidirectional.hpp>
#include "bidi_tests.hpp"
#include <gtest/gtest.h>
#include <algorithm>
TEST(bidi_character, bidi_character_{1:03}_000)
{{
{0}
}}
'''
bidi_character_test_form = '''
{{
// line {4}
std::vector<uint32_t> const cps = {{ {0} }};
std::vector<int> const expected_levels =
{{ {2} }};
std::vector<int> const levels =
bidi_levels(cps.begin(), cps.end(), {1});
int i = 0;
for (int l : expected_levels) {{
if (0 <= l) {{
EXPECT_EQ(levels[i], l) << "i=" << i;
++i;
}}
}}
EXPECT_EQ((int)levels.size(), i);
std::vector<uint32_t> const expected_reordered_indices =
{{ {3} }};
std::vector<int> const reordered =
bidi_reordered_indices(cps.begin(), cps.end(), {1});
i = 0;
for (int idx : expected_reordered_indices) {{
EXPECT_EQ(reordered[i], (int)cps[idx])
<< std::hex
<< " 0x" << reordered[i]
<< " 0x" << cps[idx]
<< std::dec << " i=" << i;
++i;
}}
}}
'''
def extract_cps_and_breaks(filename, batch_size = 50):
current_batch = []
retval = []
lines = open(filename, 'r').readlines()
num_lines = 0
for line in lines:
if num_lines == batch_size:
retval.append(current_batch)
current_batch = []
num_lines = 0
line = line[:-1]
if not line.startswith('#') and len(line) != 0:
comment_start = line.find('#')
comment = ''
if comment_start != -1:
comment = line[comment_start + 1:].strip()
line = line[:comment_start]
fields = line.split(' ')[1:-1]
cps = []
active_break = True
for i in range(len(fields)):
f = fields[i]
if f[0] in '0123456789ABCDEF':
cps.append((f, active_break))
else:
active_break = f == '÷'
current_batch.append((cps, line, comment))
num_lines += 1
if len(current_batch):
retval.append(current_batch)
return retval
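# For illustration only (the data line below is a made-up example in the
# GraphemeBreakTest.txt style, not copied from the real file): a line such as
#
#     ÷ 0020 × 0308 ÷ 0020 ÷    # sample comment
#
# is parsed by extract_cps_and_breaks() into one batch entry of the form
#
#     ([('0020', True), ('0308', False), ('0020', True)], line, comment)
#
# where the boolean records whether a break ('÷') immediately precedes that
# code point.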
def generate_prop_lookup_tests(cps_and_breaks, prop_, prop_names):
for i in range(len(cps_and_breaks)):
prop_lookup_tests = ''
chunk = cps_and_breaks[i]
for elem in chunk:
(cps, line, comment) = elem
comment_fields = comment.split(' ')
j = 0
for f in comment_fields:
if f.startswith('(') and f.endswith(')'):
prop_lookup_tests += \
'\n EXPECT_EQ(boost::text::{0}_prop(0x{1}), {2});'.format(
prop_, cps[j][0], prop_names[f[1:-1]]
)
j += 1
cpp_file = open('{}_prop_lookup_{:02}.cpp'.format(prop_, i), 'w')
cpp_file.write(prop_lookup_test_form.format(prop_, prop_lookup_tests, i))
def generate_prop_lookup_perf_tests(cps_and_breaks, prop_):
prop_lookup_perf_tests = ''
lines = 0
for i in range(len(cps_and_breaks)):
chunk = cps_and_breaks[i]
for elem in chunk:
(cps, line, comment) = elem
comment_fields = comment.split(' ')
j = 0
for f in comment_fields:
if f.startswith('(') and f.endswith(')'):
prop_lookup_perf_tests += \
' benchmark::DoNotOptimize(boost::text::{0}_prop(0x{1}));\n'.format(
prop_, cps[j][0]
)
j += 1
lines += 1
    cpp_file = open('{}_prop_lookup_perf.cpp'.format(prop_), 'w')
cpp_file.write(prop_lookup_perf_test_form.format(prop_, prop_lookup_perf_tests, lines))
def generate_break_tests_2(cps_and_breaks, prop_, prop_prefix = '', call_suffix = ''):
for i in range(len(cps_and_breaks)):
break_tests = ''
chunk = cps_and_breaks[i]
for elem in chunk:
(cps, line, comment) = elem
comment_fields = comment.split(' ')
break_tests += '''
// {0}
// {1}
{{
std::array<uint32_t, {3}> cps = {{{{ {2} }}}};
'''.format(line, comment, ', '.join(map(lambda x: hex(int(x[0], 16)), cps)), len(cps))
for j in range(len(cps) + 1):
prev_break = j
while prev_break == len(cps) or prev_break != 0 and not cps[prev_break][1]:
prev_break -= 1
next_break = min(j + 1, len(cps))
while next_break != len(cps) and not cps[next_break][1]:
next_break += 1
break_tests += '''\
EXPECT_EQ(boost::text::prev_{4}{3}_break(cps.begin(), cps.begin() + {0}, cps.end()){5} - cps.begin(), {1});
EXPECT_EQ(boost::text::next_{4}{3}_break(cps.begin() + {1}, cps.end()){5} - cps.begin(), {2});
'''.format(j, prev_break, next_break, prop_, prop_prefix, call_suffix)
break_tests += ' }\n\n'
cpp_file = open('{}_break_{:02}.cpp'.format(prop_, i), 'w')
cpp_file.write(break_test_form.format(prop_, break_tests, i))
def contains_surrogate(cps):
for cp in cps:
if int(cp[0], 16) == 0xD800:
return True
return False
def generate_iterator_tests(cps_and_breaks, prop_):
for i in range(len(cps_and_breaks)):
iterator_tests = ''
chunk = cps_and_breaks[i]
elem_index = -1
for elem in chunk:
elem_index += 1
(cps, line, comment) = elem
comment_fields = comment.split(' ')
break_cp_indices = []
for j in range(len(cps)):
if cps[j][1]: # if break
break_cp_indices.append(j)
graphemes_and_end = []
code_unit_graphemes_and_end = []
for j in range(len(break_cp_indices)):
last_cp = j == len(break_cp_indices) - 1
first = break_cp_indices[j]
last = last_cp and len(cps) or break_cp_indices[j + 1]
graphemes_and_end.append('''\
EXPECT_EQ(it.base(), cps + {0});
EXPECT_EQ((*it).begin(), cps + {0});
EXPECT_EQ((*it).end(), cps + {1});'''.format(first, last))
code_unit_grapheme = '''\
EXPECT_EQ(*it.base(), cps[{0}]);
EXPECT_EQ(*it->begin(), cps[{0}]);'''.format(first)
if not last_cp:
code_unit_grapheme += '''
EXPECT_EQ(*it->end(), cps[{0}]);'''.format(last)
code_unit_grapheme += '''
EXPECT_EQ(it.base().base(), cus + cp_indices[{0}]);
EXPECT_EQ(it->begin().base(), cus + cp_indices[{0}]);
EXPECT_EQ(it->end().base(), cus + cp_indices[{1}]);'''.format(first, last)
code_unit_graphemes_and_end.append(code_unit_grapheme)
graphemes_and_end.append('''\
EXPECT_EQ(it.base(), cps + {});
EXPECT_EQ((*it).begin(), (*it).end());'''.format(len(cps)))
code_unit_graphemes_and_end.append('''\
EXPECT_EQ(it.base().base(), cus + cp_indices[{}]);
EXPECT_EQ(it->begin(), (*it).end());'''.format(len(cps)))
# forward
iterator_tests += '''
TEST({3}, iterator_{5:02}_{6}_fwd)
{{
// {0}
// {1}
{{
uint32_t const cps[] = {{ {2} }};
boost::text::{3}_iterator<uint32_t const *> it(cps, cps, cps + {4});
'''.format(line, comment, '0x' + ', 0x'.join(map(lambda x: x[0], cps)), prop_, len(cps), i, elem_index)
iterator_tests += '\n\n ++it;\n\n'.join(graphemes_and_end)
iterator_tests += '\n }\n}\n'
# reverse
iterator_tests += '''\
TEST({1}, iterator_{3:02}_{4}_rev)
{{
{{
// reverse
uint32_t const cps[] = {{ {0} }};
boost::text::{1}_iterator<uint32_t const *> it(cps, cps + {2}, cps + {2});
'''.format('0x' + ', 0x'.join(map(lambda x: x[0], cps)), prop_, len(cps), i, elem_index)
iterator_tests += '\n\n --it;\n\n'.join(reversed(graphemes_and_end))
iterator_tests += '\n }\n}\n'
# forth and back
iterator_tests += '''\
TEST({1}, iterator_{3:02}_{4}_fab)
{{
{{
// forth and back
uint32_t const cps[] = {{ {0} }};
boost::text::{1}_iterator<uint32_t const *> it(cps, cps, cps + {2});
'''.format('0x' + ', 0x'.join(map(lambda x: x[0], cps)), prop_, len(cps), i, elem_index)
idx = 0
iterator_tests += graphemes_and_end[idx]
for j in range(len(graphemes_and_end)):
for k in range(j):
iterator_tests += '\n\n ++it;\n\n'
idx += 1
iterator_tests += graphemes_and_end[idx]
for k in range(j):
iterator_tests += '\n\n --it;\n\n'
idx -= 1
iterator_tests += graphemes_and_end[idx]
iterator_tests += '\n }\n}\n'
# back and forth
iterator_tests += '''\
TEST({1}, iterator_{3:02}_{4}_baf)
{{
{{
// back and forth
uint32_t const cps[] = {{ {0} }};
boost::text::{1}_iterator<uint32_t const *> it(cps, cps + {2}, cps + {2});
'''.format('0x' + ', 0x'.join(map(lambda x: x[0], cps)), prop_, len(cps), i, elem_index)
idx = len(graphemes_and_end) - 1
iterator_tests += graphemes_and_end[idx]
for j in range(len(graphemes_and_end)):
for k in range(j):
iterator_tests += '\n\n --it;\n\n'
idx -= 1
iterator_tests += graphemes_and_end[idx]
for k in range(j):
iterator_tests += '\n\n ++it;\n\n'
idx += 1
iterator_tests += graphemes_and_end[idx]
iterator_tests += '\n }\n}\n'
# from UTF8
if contains_surrogate(cps):
iterator_tests += \
'// Skipping from-utf8 test due to presence of surrogate code point.\n'
else:
iterator_tests += '''\
TEST({1}, iterator_{3:02}_{4}_utf8)
{{
{{
// from UTF8
uint32_t const cps[] = {{ {0} }};
char cus[1024] = {{ 0 }};
int cp_indices[1024] = {{ 0 }};
std::copy(
boost::text::utf_32_to_8_iterator<uint32_t const *>(cps, cps, cps + {2}),
boost::text::utf_32_to_8_iterator<uint32_t const *>(cps, cps + {2}, cps + {2}),
cus);
boost::text::null_sentinel sentinel;
int * index_it = cp_indices;
for (boost::text::utf_8_to_32_iterator<char const *, boost::text::null_sentinel> it(cus, cus, boost::text::null_sentinel{{}}); ; ++it) {{
*index_it++ = it.base() - cus;
if (it == sentinel)
break;
}}
using iter_t = boost::text::utf_8_to_32_iterator<char const *, boost::text::null_sentinel>;
boost::text::{1}_iterator<iter_t, boost::text::null_sentinel> it(
iter_t{{cus, cus, boost::text::null_sentinel{{}}}}, iter_t{{cus, cus, boost::text::null_sentinel{{}}}}, sentinel);
'''.format('0x' + ', 0x'.join(map(lambda x: x[0], cps)), prop_, len(cps), i, elem_index)
iterator_tests += '\n\n ++it;\n\n'.join(code_unit_graphemes_and_end)
iterator_tests += '\n }\n}\n'
cpp_file = open('{}_iterator_{:02}.cpp'.format(prop_, i), 'w')
cpp_file.write(grapheme_iterator_test_form.format(iterator_tests, i))
bidi_property_cps = {
'L': '0x0041',
'R': '0x05BE',
'EN': '0x0030',
'ES': '0x002B',
'ET': '0x0023',
'AN': '0x0660',
'CS': '0x002C',
'B': '0x2029',
'S': '0x0009',
'WS': '0x0020',
'ON': '0x0021',
'BN': '0x00AD',
'NSM': '0x0300',
'AL': '0x0608',
'LRO': '0x202D',
'RLO': '0x202E',
'LRE': '0x202A',
'RLE': '0x202B',
'PDF': '0x202C',
'LRI': '0x2066',
'RLI': '0x2067',
'FSI': '0x2068',
'PDI': '0x2069'
}
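# For illustration only: each token of a BidiTest.txt input sequence names a
# bidi character class, and generate_bidi_tests() below substitutes the
# representative code point from the table above, so an input of "L R AN"
# is emitted into the generated C++ as the initializer list
# 0x0041, 0x05BE, 0x0660.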
def generate_bidi_tests(filename, batch_size):
current_batch = []
test_data = []
lines = open(filename, 'r').readlines()
num_lines = 0
curr_levels = []
curr_reorder = []
line_number = 0
for line in lines:
line_number += 1
if num_lines == batch_size:
test_data.append(current_batch)
current_batch = []
num_lines = 0
line = line[:-1]
if not line.startswith('#') and len(line) != 0:
comment_start = line.find('#')
comment = ''
if comment_start != -1:
comment = line[comment_start + 1:].strip()
line = line[:comment_start]
if line.startswith('@Levels:'):
curr_levels = line[len('@Levels:'):].strip().split(' ')
elif line.startswith('@Reorder:'):
curr_reorder = line[len('@Reorder:'):].strip().split(' ')
elif line.startswith('@'):
pass
else:
input_,bitset = line.split(';')
input_ = input_.split(' ')
bitset = int(bitset)
test_cases = {'auto': bool(bitset & 1), 'LTR': bool(bitset & 2), 'RTL' : bool(bitset & 4)}
current_batch.append((input_, curr_levels, curr_reorder, test_cases, line, line_number))
num_lines += 1
if len(current_batch):
test_data.append(current_batch)
i = 0
for batch in test_data:
cpp_file = open('bidi_test_{:03}.cpp'.format(i), 'w')
tests = ''
curr_levels = []
curr_reorder = []
test_case_idx = 0
test_idx = 0
for test in batch:
if test_case_idx == 10:
test_idx += 1
tests += '''\
}}
TEST(bidi, bidi_{:03}_{:03})
{{
'''.format(i, test_idx)
curr_levels = []
curr_reorder = []
test_case_idx = 0
test_case_idx += 1
if all(map(lambda x: x == 'x', test[1])):
continue
if test[1] != curr_levels:
levels = ', '.join(map(lambda x: x == 'x' and '-1' or x, test[1]))
indices = ', '.join(test[2])
tests += '''
expected_levels = {{ {} }};
expected_reordered_indices = {{ {} }};
'''.format(levels, indices)
curr_levels = test[1]
cps = ', '.join(map(lambda x: bidi_property_cps[x], test[0]))
if test[3]['auto']:
tests += bidi_test_form.format(test[4], cps, levels, test[5], -1, 'auto')
if test[3]['LTR']:
tests += bidi_test_form.format(test[4], cps, levels, test[5], 0, 'LTR')
if test[3]['RTL']:
tests += bidi_test_form.format(test[4], cps, levels, test[5], 1, 'RTL')
cpp_file.write(bidi_test_file_form.format(tests, i))
i += 1
def generate_bidi_character_tests(filename, batch_size):
current_batch = []
test_data = []
lines = open(filename, 'r').readlines()
num_lines = 0
line_number = 0
for line in lines:
line_number += 1
if num_lines == batch_size:
test_data.append(current_batch)
current_batch = []
num_lines = 0
line = line[:-1]
if not line.startswith('#') and len(line) != 0:
fields = line.split(';')
cps = fields[0].split(' ')
pel = fields[1]
if pel == '2':
pel = '-1'
levels = fields[3].split(' ')
reorder = fields[4].split(' ')
current_batch.append((cps, pel, levels, reorder, line_number))
num_lines += 1
if len(current_batch):
test_data.append(current_batch)
i = 0
for batch in test_data:
cpp_file = open('bidi_character_test_{:03}.cpp'.format(i), 'w')
tests = ''
test_case_idx = 0
test_idx = 0
for test in batch:
if test_case_idx == 10:
test_idx += 1
tests += '''\
}}
TEST(bidi_character, bidi_character_{:03}_{:03})
{{
'''.format(i, test_idx)
test_case_idx = 0
test_case_idx += 1
            # Skip entries whose levels are all 'x' (removed by rule X9),
            # mirroring the check in generate_bidi_tests(); in this tuple the
            # levels sit at index 2, not index 1.
            if all(map(lambda x: x == 'x', test[2])):
                continue
tests += bidi_character_test_form.format(
', '.join(map(lambda x: '0x' + x, test[0])), test[1],
', '.join(map(lambda x: x == 'x' and '-1' or x, test[2])),
', '.join(test[3]),
test[4]
)
cpp_file.write(bidi_character_test_file_form.format(tests, i))
i += 1
import sys
grapheme_cps_and_breaks = extract_cps_and_breaks('GraphemeBreakTest.txt')
if '--perf' in sys.argv:
generate_prop_lookup_perf_tests(grapheme_cps_and_breaks, 'grapheme')
exit(0)
generate_break_tests_2(grapheme_cps_and_breaks, 'grapheme')
generate_iterator_tests(grapheme_cps_and_breaks, 'grapheme')
word_cps_and_breaks = extract_cps_and_breaks('WordBreakTest.txt')
generate_break_tests_2(word_cps_and_breaks, 'word')
sentence_cps_and_breaks = extract_cps_and_breaks('SentenceBreakTest.txt')
generate_break_tests_2(sentence_cps_and_breaks, 'sentence')
line_cps_and_breaks = extract_cps_and_breaks('LineBreakTest.txt', 100)
generate_break_tests_2(line_cps_and_breaks, 'line', 'allowed_', '.iter')
generate_bidi_tests('BidiTest.txt', 500)
generate_bidi_character_tests('BidiCharacterTest.txt', 700)
|
wiltonlazary/arangodb
|
3rdParty/iresearch/external/text/scripts/generate_unicode_break_tests.py
|
Python
|
apache-2.0
| 22,214 | 0.002206 |
#!/usr/bin/env python
path="/var/lib/gpu/gpu_locked.txt"
import os,sys
import ast
import socket
def getHost():
return socket.gethostname()
def getlocked():
hostname=getHost()
#print path
fp=open(path, "r")
info=fp.read()
#print info
d=ast.literal_eval(info)
#print len(d)
print "%s,nvidia0,%d" % (hostname, (9999 - d['nvidia0']['available_count']))
print "%s,nvidia1,%d" % (hostname, (9999 - d['nvidia1']['available_count']))
print "%s,nvidia2,%d" % (hostname, (9999 - d['nvidia2']['available_count']))
print "%s,nvidia3,%d" % (hostname, (9999 - d['nvidia3']['available_count']))
fp.close()
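# For illustration only (the counts below are made up): the script expects
# /var/lib/gpu/gpu_locked.txt to contain a Python dict literal roughly like
#
#     {'nvidia0': {'available_count': 9999},
#      'nvidia1': {'available_count': 9997}, ...}
#
# so each printed line is "<hostname>,<device>,<9999 - available_count>",
# presumably the number of locked slots per GPU.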
if __name__ == "__main__":
getlocked()
|
linzhaolover/myansible
|
openstackfile/getgpulocked.py
|
Python
|
apache-2.0
| 696 | 0.027299 |
#!/usr/bin/python
__author__ = 'anson'
import optparse
import re
import sys
from utils.utils_cmd import execute_sys_cmd
from lib_monitor.monitor_default_format import nagios_state_to_id
class messages_check():
def __init__(self, rex, config, type):
self.rex = rex
self.config = config
self.type = type
def run(self):
result, infos = execute_sys_cmd('/usr/local/nagios/libexec/check_logfiles -f ' + self.config)
v_protocol = None
exit_state = 3
if len(infos) > 0:
state = infos[0].split()[0]
if state not in nagios_state_to_id.keys():
print infos
sys.exit(exit_state)
exit_state = nagios_state_to_id[state]
if nagios_state_to_id[state] > 0:
m_protocol = re.search(r'\(\d+ errors in ([^ ]+)\)', infos[0])
v_protocol = m_protocol.group(1) if m_protocol else None
else:
sys.exit(exit_state)
if v_protocol is not None:
rex_dict = []
with open(self.rex, buffering=2000000) as rex_all:
for rex_split in rex_all:
rex_dict.append(rex_split)
with open('/tmp/' + v_protocol, buffering=2000000) as file_to_check:
for part in file_to_check:
for rex_rule in rex_dict:
m_iface = re.search(rex_rule, part)
v_dev = m_iface.group(1) if m_iface else 'none'
print v_dev
sys.exit(exit_state)
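# For illustration only (the protocol file name below is hypothetical): a
# check_logfiles summary line such as
#
#     WARNING - (2 errors in check_log.protocol-2024-01-01-12-00-00) ...
#
# makes run() extract "check_log.protocol-2024-01-01-12-00-00" through the
# regex r'\(\d+ errors in ([^ ]+)\)', then scan /tmp/<that name> against every
# pattern in the --rex rule file, printing the first capture group of each
# match ('none' when a rule does not match a line).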
def main():
"""
messages_monitor.py
unit test example
python messages_monitor.py
"""
parser = optparse.OptionParser(
usage="%prog [options] [--parameter]",
description="To monitor system log file."
)
parser.add_option("--config",
dest="config",
help="Config file for error extraction",
type="string",
default="/usr/local/nagios/libexec/check_log.log"
)
parser.add_option("--type",
dest="type",
help="Event type",
type="string",
default="disk"
)
parser.add_option("--rex",
dest="rex",
help="Regular Expression",
type="string",
default="/usr/local/nagios/libexec/rule.conf"
)
(options, args) = parser.parse_args()
check = messages_check(options.rex, options.config, options.type)
check.run()
if __name__ == '__main__':
main()
|
AnsonShie/system_monitor
|
messages_monitor.py
|
Python
|
apache-2.0
| 2,645 | 0.003403 |
#!/usr/bin/env python2
"""
COSMO TECHNICAL TESTSUITE
General purpose script to compare two files containing tables
Only lines with given table pattern are considered
"""
# built-in modules
import os, sys, string
# information
__author__ = "Xavier Lapillonne"
__maintainer__ = "xavier.lapillonne@meteoswiss.ch"
def cmp_table(file1,file2,colpattern,minval,threshold,verbose=1,maxcompline=-1):
# General purpose script to compare two files containing tables
    # Only lines matching the given table column pattern are considered. Columns
    # to be compared are marked with 'c', columns to discard with 'x'.
#init
ncomp=0
nerror=0
lerror=False
epsilon=1e-16 #used to avoid division by zero in case minval is zero
# check file existence
if not(os.path.exists(file1)):
print('File %s does not exist' %(file1))
return -1
elif not(os.path.exists(file2)):
        print('File %s does not exist' %(file2))
return -1
# convert input
colpattern=[x=='c' for x in list(colpattern)]
threshold=float(threshold)
minval=float(minval)
# open file
data1=open(file1).readlines()
data2=open(file2).readlines()
# get max record
nd1=len(data1)
nd2=len(data2)
# check that files are not empty
if nd1==0:
print('file %s is empty!' %(file1))
return -1
if nd2==0:
print('file %s is empty!' %(file2))
return -1
if nd1!=nd2 and verbose>1:
        print('Warning: %s and %s have different sizes, comparing the common subset only \n' %(file1,file2))
ncdata=min(nd1,nd2)
if (maxcompline>0):
ncdata=min(ncdata,maxcompline)
# Iterates through the lines
for il in range(ncdata):
l1=data1[il].split()
l2=data2[il].split()
l1match=matchColPattern(l1,colpattern)
l2match=matchColPattern(l2,colpattern)
# compare values if both lines are compatible
if l1match and l2match:
for ic in range(len(colpattern)):
if colpattern[ic]:
v1=float(l1[ic])
v2=float(l2[ic])
val_abs_max=max(abs(v1),abs(v2))
if val_abs_max > minval:
ncomp+=1
diff=abs(v1-v2)/(val_abs_max+epsilon)
if diff>threshold:
nerror+=1
# Print error
if verbose>1:
                                print('Error %2.2e above %2.2e threshold at line %i, col %i' %(diff,threshold,il+1,ic+1))
print('> %s' %(file1))
print(data1[il])
print('< %s' %(file2))
print(data2[il])
#save line for first error
if not lerror:
differ=diff
linerr=il+1
colerr=ic+1
linerr1=data1[il]
linerr2=data2[il]
lerror=True
if ncomp==0:
        print('Warning: no lines to compare')
nerror=-2
if lerror and verbose>0:
print('Compared values: %i, errors above threshold: %i ; %i %% ' %(ncomp,nerror,nerror*100./ncomp))
if verbose==1:
            print('First error %2.2e above %2.2e threshold at line %i, col %i' %(differ,threshold,linerr,colerr))
print('> %s' %(file1))
print(linerr1)
print('< %s' %(file2))
print(linerr2)
return nerror
#----------------------------------------------------------------------------
# Local functions
def matchColPattern(line,colpattern):
if len(line)!=len(colpattern):
return False
try:
for i in range(len(colpattern)):
if colpattern[i]: f=float(line[i])
except ValueError:
return False
return True
#-----------------------------------
#execute as a script
if __name__ == "__main__":
if len(sys.argv)==6:
cmp_table(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4], \
sys.argv[5])
elif len(sys.argv)==7:
cmp_table(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4], \
sys.argv[5],sys.argv[6])
elif len(sys.argv)==8:
cmp_table(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4], \
sys.argv[5],sys.argv[6],sys.argv[7])
else:
        print('''USAGE : ./comp_table file1 file2 colpattern minval threshold [verbose maxcompline]
    General purpose script to compare two files containing tables.
    Only lines matching the given column pattern are compared. Columns to be
    compared must contain numbers and are marked with 'c'; columns to discard
    are marked with 'x'.
    colpattern: 'c' to compare or 'x' to ignore, e.g. xccx discards the first
    and last columns of a 4-column table.
        ''')
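        # For illustration only (file names and tolerances are hypothetical):
        #     ./comp_table.py ref_output.txt run_output.txt xccx 1e-12 1e-6
        # compares columns 2 and 3 of every 4-column numeric line, skipping
        # values whose magnitude is at most 1e-12 and reporting relative
        # differences larger than 1e-6.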
|
C2SM-RCM/testsuite
|
tools/comp_table.py
|
Python
|
mit
| 5,041 | 0.026384 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import json
import frappe
from erpnext.accounts.party import get_party_account_currency
from erpnext.controllers.accounts_controller import get_taxes_and_charges
from erpnext.setup.utils import get_exchange_rate
from erpnext.stock.get_item_details import get_pos_profile
from frappe import _
from frappe.core.doctype.communication.email import make
from frappe.utils import nowdate, cint
from six import string_types, iteritems
@frappe.whitelist()
def get_pos_data():
doc = frappe.new_doc('Sales Invoice')
doc.is_pos = 1
pos_profile = get_pos_profile(doc.company) or {}
if not pos_profile:
frappe.throw(_("POS Profile is required to use Point-of-Sale"))
if not doc.company:
doc.company = pos_profile.get('company')
doc.update_stock = pos_profile.get('update_stock')
if pos_profile.get('name'):
pos_profile = frappe.get_doc('POS Profile', pos_profile.get('name'))
pos_profile.validate()
company_data = get_company_data(doc.company)
update_pos_profile_data(doc, pos_profile, company_data)
update_multi_mode_option(doc, pos_profile)
default_print_format = pos_profile.get('print_format') or "Point of Sale"
print_template = frappe.db.get_value('Print Format', default_print_format, 'html')
items_list = get_items_list(pos_profile, doc.company)
customers = get_customers_list(pos_profile)
doc.plc_conversion_rate = update_plc_conversion_rate(doc, pos_profile)
return {
'doc': doc,
'default_customer': pos_profile.get('customer'),
'items': items_list,
'item_groups': get_item_groups(pos_profile),
'customers': customers,
'address': get_customers_address(customers),
'contacts': get_contacts(customers),
'serial_no_data': get_serial_no_data(pos_profile, doc.company),
'batch_no_data': get_batch_no_data(),
'barcode_data': get_barcode_data(items_list),
'tax_data': get_item_tax_data(),
'price_list_data': get_price_list_data(doc.selling_price_list, doc.plc_conversion_rate),
'customer_wise_price_list': get_customer_wise_price_list(),
'bin_data': get_bin_data(pos_profile),
'pricing_rules': get_pricing_rule_data(doc),
'print_template': print_template,
'pos_profile': pos_profile,
'meta': get_meta()
}
def update_plc_conversion_rate(doc, pos_profile):
conversion_rate = 1.0
price_list_currency = frappe.get_cached_value("Price List", doc.selling_price_list, "currency")
if pos_profile.get("currency") != price_list_currency:
conversion_rate = get_exchange_rate(price_list_currency,
pos_profile.get("currency"), nowdate(), args="for_selling") or 1.0
return conversion_rate
def get_meta():
doctype_meta = {
'customer': frappe.get_meta('Customer'),
'invoice': frappe.get_meta('Sales Invoice')
}
for row in frappe.get_all('DocField', fields=['fieldname', 'options'],
filters={'parent': 'Sales Invoice', 'fieldtype': 'Table'}):
doctype_meta[row.fieldname] = frappe.get_meta(row.options)
return doctype_meta
def get_company_data(company):
return frappe.get_all('Company', fields=["*"], filters={'name': company})[0]
def update_pos_profile_data(doc, pos_profile, company_data):
doc.campaign = pos_profile.get('campaign')
if pos_profile and not pos_profile.get('country'):
pos_profile.country = company_data.country
doc.write_off_account = pos_profile.get('write_off_account') or \
company_data.write_off_account
doc.change_amount_account = pos_profile.get('change_amount_account') or \
company_data.default_cash_account
doc.taxes_and_charges = pos_profile.get('taxes_and_charges')
if doc.taxes_and_charges:
update_tax_table(doc)
doc.currency = pos_profile.get('currency') or company_data.default_currency
doc.conversion_rate = 1.0
if doc.currency != company_data.default_currency:
doc.conversion_rate = get_exchange_rate(doc.currency, company_data.default_currency, doc.posting_date, args="for_selling")
doc.selling_price_list = pos_profile.get('selling_price_list') or \
frappe.db.get_value('Selling Settings', None, 'selling_price_list')
doc.naming_series = pos_profile.get('naming_series') or 'SINV-'
doc.letter_head = pos_profile.get('letter_head') or company_data.default_letter_head
doc.ignore_pricing_rule = pos_profile.get('ignore_pricing_rule') or 0
doc.apply_discount_on = pos_profile.get('apply_discount_on') or 'Grand Total'
doc.customer_group = pos_profile.get('customer_group') or get_root('Customer Group')
doc.territory = pos_profile.get('territory') or get_root('Territory')
doc.terms = frappe.db.get_value('Terms and Conditions', pos_profile.get('tc_name'), 'terms') or doc.terms or ''
doc.offline_pos_name = ''
def get_root(table):
root = frappe.db.sql(""" select name from `tab%(table)s` having
min(lft)""" % {'table': table}, as_dict=1)
return root[0].name
def update_multi_mode_option(doc, pos_profile):
from frappe.model import default_fields
if not pos_profile or not pos_profile.get('payments'):
for payment in get_mode_of_payment(doc):
payments = doc.append('payments', {})
payments.mode_of_payment = payment.parent
payments.account = payment.default_account
payments.type = payment.type
return
for payment_mode in pos_profile.payments:
payment_mode = payment_mode.as_dict()
for fieldname in default_fields:
if fieldname in payment_mode:
del payment_mode[fieldname]
doc.append('payments', payment_mode)
def get_mode_of_payment(doc):
return frappe.db.sql("""
select mpa.default_account, mpa.parent, mp.type as type
from `tabMode of Payment Account` mpa,`tabMode of Payment` mp
where mpa.parent = mp.name and mpa.company = %(company)s and mp.enabled = 1""",
{'company': doc.company}, as_dict=1)
def update_tax_table(doc):
taxes = get_taxes_and_charges('Sales Taxes and Charges Template', doc.taxes_and_charges)
for tax in taxes:
doc.append('taxes', tax)
def get_items_list(pos_profile, company):
cond = ""
args_list = []
if pos_profile.get('item_groups'):
# Get items based on the item groups defined in the POS profile
for d in pos_profile.get('item_groups'):
args_list.extend([d.name for d in get_child_nodes('Item Group', d.item_group)])
if args_list:
cond = "and i.item_group in (%s)" % (', '.join(['%s'] * len(args_list)))
return frappe.db.sql("""
select
i.name, i.item_code, i.item_name, i.description, i.item_group, i.has_batch_no,
i.has_serial_no, i.is_stock_item, i.brand, i.stock_uom, i.image,
id.expense_account, id.selling_cost_center, id.default_warehouse,
i.sales_uom, c.conversion_factor, it.item_tax_template, it.valid_from
from
`tabItem` i
left join `tabItem Default` id on id.parent = i.name and id.company = %s
left join `tabItem Tax` it on it.parent = i.name
left join `tabUOM Conversion Detail` c on i.name = c.parent and i.sales_uom = c.uom
where
i.disabled = 0 and i.has_variants = 0 and i.is_sales_item = 1
{cond}
group by i.item_code
""".format(cond=cond), tuple([company] + args_list), as_dict=1)
def get_item_groups(pos_profile):
item_group_dict = {}
item_groups = frappe.db.sql("""Select name,
lft, rgt from `tabItem Group` order by lft""", as_dict=1)
for data in item_groups:
item_group_dict[data.name] = [data.lft, data.rgt]
return item_group_dict
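# For illustration only (group names and lft/rgt values are hypothetical):
# get_item_groups() returns the nested-set bounds of every Item Group, e.g.
#
#     {'All Item Groups': [1, 20], 'Electronics': [2, 9], 'Mobiles': [3, 4]}
#
# a shape that allows "is this group inside that one" checks with a simple
# lft/rgt range comparison, the usual nested-set technique.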
def get_customers_list(pos_profile={}):
cond = "1=1"
customer_groups = []
if pos_profile.get('customer_groups'):
# Get customers based on the customer groups defined in the POS profile
for d in pos_profile.get('customer_groups'):
customer_groups.extend([d.get('name') for d in get_child_nodes('Customer Group', d.get('customer_group'))])
cond = "customer_group in (%s)" % (', '.join(['%s'] * len(customer_groups)))
return frappe.db.sql(""" select name, customer_name, customer_group,
territory, customer_pos_id from tabCustomer where disabled = 0
and {cond}""".format(cond=cond), tuple(customer_groups), as_dict=1) or {}
def get_customers_address(customers):
customer_address = {}
if isinstance(customers, string_types):
customers = [frappe._dict({'name': customers})]
for data in customers:
address = frappe.db.sql(""" select name, address_line1, address_line2, city, state,
email_id, phone, fax, pincode from `tabAddress` where is_primary_address =1 and name in
(select parent from `tabDynamic Link` where link_doctype = 'Customer' and link_name = %s
and parenttype = 'Address')""", data.name, as_dict=1)
address_data = {}
if address:
address_data = address[0]
address_data.update({'full_name': data.customer_name, 'customer_pos_id': data.customer_pos_id})
customer_address[data.name] = address_data
return customer_address
def get_contacts(customers):
customer_contact = {}
if isinstance(customers, string_types):
customers = [frappe._dict({'name': customers})]
for data in customers:
contact = frappe.db.sql(""" select email_id, phone, mobile_no from `tabContact`
where is_primary_contact=1 and name in
(select parent from `tabDynamic Link` where link_doctype = 'Customer' and link_name = %s
and parenttype = 'Contact')""", data.name, as_dict=1)
if contact:
customer_contact[data.name] = contact[0]
return customer_contact
def get_child_nodes(group_type, root):
lft, rgt = frappe.db.get_value(group_type, root, ["lft", "rgt"])
return frappe.db.sql(""" Select name, lft, rgt from `tab{tab}` where
lft >= {lft} and rgt <= {rgt} order by lft""".format(tab=group_type, lft=lft, rgt=rgt), as_dict=1)
def get_serial_no_data(pos_profile, company):
# get itemwise serial no data
# example {'Nokia Lumia 1020': {'SN0001': 'Pune'}}
# where Nokia Lumia 1020 is item code, SN0001 is serial no and Pune is warehouse
cond = "1=1"
if pos_profile.get('update_stock') and pos_profile.get('warehouse'):
cond = "warehouse = %(warehouse)s"
serial_nos = frappe.db.sql("""select name, warehouse, item_code
from `tabSerial No` where {0} and company = %(company)s """.format(cond),{
'company': company, 'warehouse': frappe.db.escape(pos_profile.get('warehouse'))
}, as_dict=1)
itemwise_serial_no = {}
for sn in serial_nos:
if sn.item_code not in itemwise_serial_no:
itemwise_serial_no.setdefault(sn.item_code, {})
itemwise_serial_no[sn.item_code][sn.name] = sn.warehouse
return itemwise_serial_no
def get_batch_no_data():
# get itemwise batch no data
	# example: {'LED-GRE': ['Batch001', 'Batch002']}
	# where LED-GRE is the item code and the list holds its non-expired batch numbers
itemwise_batch = {}
batches = frappe.db.sql("""select name, item from `tabBatch`
where ifnull(expiry_date, '4000-10-10') >= curdate()""", as_dict=1)
for batch in batches:
if batch.item not in itemwise_batch:
itemwise_batch.setdefault(batch.item, [])
itemwise_batch[batch.item].append(batch.name)
return itemwise_batch
def get_barcode_data(items_list):
	# get itemwise barcode data
	# example: {'LED-GRE': ['barcode1', 'barcode2']}
	# where LED-GRE is the item code and the list holds the barcodes defined for it
itemwise_barcode = {}
for item in items_list:
barcodes = frappe.db.sql("""
select barcode from `tabItem Barcode` where parent = %s
""", item.item_code, as_dict=1)
for barcode in barcodes:
if item.item_code not in itemwise_barcode:
itemwise_barcode.setdefault(item.item_code, [])
itemwise_barcode[item.item_code].append(barcode.get("barcode"))
return itemwise_barcode
def get_item_tax_data():
# get default tax of an item
# example: {'Consulting Services': {'Excise 12 - TS': '12.000'}}
itemwise_tax = {}
taxes = frappe.db.sql(""" select parent, tax_type, tax_rate from `tabItem Tax Template Detail`""", as_dict=1)
for tax in taxes:
if tax.parent not in itemwise_tax:
itemwise_tax.setdefault(tax.parent, {})
itemwise_tax[tax.parent][tax.tax_type] = tax.tax_rate
return itemwise_tax
def get_price_list_data(selling_price_list, conversion_rate):
itemwise_price_list = {}
price_lists = frappe.db.sql("""Select ifnull(price_list_rate, 0) as price_list_rate,
item_code from `tabItem Price` ip where price_list = %(price_list)s""",
{'price_list': selling_price_list}, as_dict=1)
for item in price_lists:
itemwise_price_list[item.item_code] = item.price_list_rate * conversion_rate
return itemwise_price_list
def get_customer_wise_price_list():
customer_wise_price = {}
customer_price_list_mapping = frappe._dict(frappe.get_all('Customer',fields = ['default_price_list', 'name'], as_list=1))
price_lists = frappe.db.sql(""" Select ifnull(price_list_rate, 0) as price_list_rate,
item_code, price_list from `tabItem Price` """, as_dict=1)
for item in price_lists:
if item.price_list and customer_price_list_mapping.get(item.price_list):
customer_wise_price.setdefault(customer_price_list_mapping.get(item.price_list),{}).setdefault(
item.item_code, item.price_list_rate
)
return customer_wise_price
def get_bin_data(pos_profile):
itemwise_bin_data = {}
filters = { 'actual_qty': ['>', 0] }
if pos_profile.get('warehouse'):
filters.update({ 'warehouse': pos_profile.get('warehouse') })
bin_data = frappe.db.get_all('Bin', fields = ['item_code', 'warehouse', 'actual_qty'], filters=filters)
for bins in bin_data:
if bins.item_code not in itemwise_bin_data:
itemwise_bin_data.setdefault(bins.item_code, {})
itemwise_bin_data[bins.item_code][bins.warehouse] = bins.actual_qty
return itemwise_bin_data
def get_pricing_rule_data(doc):
pricing_rules = ""
if doc.ignore_pricing_rule == 0:
pricing_rules = frappe.db.sql(""" Select * from `tabPricing Rule` where docstatus < 2
and ifnull(for_price_list, '') in (%(price_list)s, '') and selling = 1
and ifnull(company, '') in (%(company)s, '') and disable = 0 and %(date)s
between ifnull(valid_from, '2000-01-01') and ifnull(valid_upto, '2500-12-31')
order by priority desc, name desc""",
{'company': doc.company, 'price_list': doc.selling_price_list, 'date': nowdate()}, as_dict=1)
return pricing_rules
@frappe.whitelist()
def make_invoice(pos_profile, doc_list={}, email_queue_list={}, customers_list={}):
import json
if isinstance(doc_list, string_types):
doc_list = json.loads(doc_list)
if isinstance(email_queue_list, string_types):
email_queue_list = json.loads(email_queue_list)
if isinstance(customers_list, string_types):
customers_list = json.loads(customers_list)
customers_list = make_customer_and_address(customers_list)
name_list = []
for docs in doc_list:
for name, doc in iteritems(docs):
if not frappe.db.exists('Sales Invoice', {'offline_pos_name': name}):
if isinstance(doc, dict):
validate_records(doc)
si_doc = frappe.new_doc('Sales Invoice')
si_doc.offline_pos_name = name
si_doc.update(doc)
si_doc.set_posting_time = 1
si_doc.customer = get_customer_id(doc)
si_doc.due_date = doc.get('posting_date')
name_list = submit_invoice(si_doc, name, doc, name_list)
else:
doc.due_date = doc.get('posting_date')
doc.customer = get_customer_id(doc)
doc.set_posting_time = 1
doc.offline_pos_name = name
name_list = submit_invoice(doc, name, doc, name_list)
else:
name_list.append(name)
email_queue = make_email_queue(email_queue_list)
if isinstance(pos_profile, string_types):
pos_profile = json.loads(pos_profile)
customers = get_customers_list(pos_profile)
return {
'invoice': name_list,
'email_queue': email_queue,
'customers': customers_list,
'synced_customers_list': customers,
'synced_address': get_customers_address(customers),
'synced_contacts': get_contacts(customers)
}
def validate_records(doc):
validate_item(doc)
def get_customer_id(doc, customer=None):
cust_id = None
if doc.get('customer_pos_id'):
cust_id = frappe.db.get_value('Customer',{'customer_pos_id': doc.get('customer_pos_id')}, 'name')
if not cust_id:
customer = customer or doc.get('customer')
if frappe.db.exists('Customer', customer):
cust_id = customer
else:
cust_id = add_customer(doc)
return cust_id
def make_customer_and_address(customers):
customers_list = []
for customer, data in iteritems(customers):
data = json.loads(data)
cust_id = get_customer_id(data, customer)
if not cust_id:
cust_id = add_customer(data)
else:
frappe.db.set_value("Customer", cust_id, "customer_name", data.get('full_name'))
make_contact(data, cust_id)
make_address(data, cust_id)
customers_list.append(customer)
frappe.db.commit()
return customers_list
def add_customer(data):
customer = data.get('full_name') or data.get('customer')
if frappe.db.exists("Customer", customer.strip()):
return customer.strip()
customer_doc = frappe.new_doc('Customer')
customer_doc.customer_name = data.get('full_name') or data.get('customer')
customer_doc.customer_pos_id = data.get('customer_pos_id')
customer_doc.customer_type = 'Company'
customer_doc.customer_group = get_customer_group(data)
customer_doc.territory = get_territory(data)
customer_doc.flags.ignore_mandatory = True
customer_doc.save(ignore_permissions=True)
frappe.db.commit()
return customer_doc.name
def get_territory(data):
if data.get('territory'):
return data.get('territory')
return frappe.db.get_single_value('Selling Settings','territory') or _('All Territories')
def get_customer_group(data):
if data.get('customer_group'):
return data.get('customer_group')
return frappe.db.get_single_value('Selling Settings', 'customer_group') or frappe.db.get_value('Customer Group', {'is_group': 0}, 'name')
def make_contact(args, customer):
if args.get('email_id') or args.get('phone'):
name = frappe.db.get_value('Dynamic Link',
{'link_doctype': 'Customer', 'link_name': customer, 'parenttype': 'Contact'}, 'parent')
args = {
'first_name': args.get('full_name'),
'email_id': args.get('email_id'),
'phone': args.get('phone')
}
doc = frappe.new_doc('Contact')
if name:
doc = frappe.get_doc('Contact', name)
doc.update(args)
doc.is_primary_contact = 1
if not name:
doc.append('links', {
'link_doctype': 'Customer',
'link_name': customer
})
doc.flags.ignore_mandatory = True
doc.save(ignore_permissions=True)
def make_address(args, customer):
if not args.get('address_line1'):
return
name = args.get('name')
if not name:
data = get_customers_address(customer)
name = data[customer].get('name') if data else None
if name:
address = frappe.get_doc('Address', name)
else:
address = frappe.new_doc('Address')
if args.get('company'):
address.country = frappe.get_cached_value('Company',
args.get('company'), 'country')
address.append('links', {
'link_doctype': 'Customer',
'link_name': customer
})
address.is_primary_address = 1
address.is_shipping_address = 1
address.update(args)
address.flags.ignore_mandatory = True
address.save(ignore_permissions=True)
def make_email_queue(email_queue):
name_list = []
for key, data in iteritems(email_queue):
name = frappe.db.get_value('Sales Invoice', {'offline_pos_name': key}, 'name')
if not name: continue
data = json.loads(data)
sender = frappe.session.user
print_format = "POS Invoice" if not cint(frappe.db.get_value('Print Format', 'POS Invoice', 'disabled')) else None
attachments = [frappe.attach_print('Sales Invoice', name, print_format=print_format)]
make(subject=data.get('subject'), content=data.get('content'), recipients=data.get('recipients'),
sender=sender, attachments=attachments, send_email=True,
doctype='Sales Invoice', name=name)
name_list.append(key)
return name_list
def validate_item(doc):
for item in doc.get('items'):
if not frappe.db.exists('Item', item.get('item_code')):
item_doc = frappe.new_doc('Item')
item_doc.name = item.get('item_code')
item_doc.item_code = item.get('item_code')
item_doc.item_name = item.get('item_name')
item_doc.description = item.get('description')
item_doc.stock_uom = item.get('stock_uom')
item_doc.uom = item.get('uom')
item_doc.item_group = item.get('item_group')
item_doc.append('item_defaults', {
"company": doc.get("company"),
"default_warehouse": item.get('warehouse')
})
item_doc.save(ignore_permissions=True)
frappe.db.commit()
def submit_invoice(si_doc, name, doc, name_list):
try:
si_doc.insert()
si_doc.submit()
frappe.db.commit()
name_list.append(name)
except Exception as e:
if frappe.message_log:
frappe.message_log.pop()
frappe.db.rollback()
frappe.log_error(frappe.get_traceback())
name_list = save_invoice(doc, name, name_list)
return name_list
def save_invoice(doc, name, name_list):
try:
if not frappe.db.exists('Sales Invoice', {'offline_pos_name': name}):
si = frappe.new_doc('Sales Invoice')
si.update(doc)
si.set_posting_time = 1
si.customer = get_customer_id(doc)
si.due_date = doc.get('posting_date')
si.flags.ignore_mandatory = True
si.insert(ignore_permissions=True)
frappe.db.commit()
name_list.append(name)
except Exception:
frappe.db.rollback()
frappe.log_error(frappe.get_traceback())
return name_list
|
neilLasrado/erpnext
|
erpnext/accounts/doctype/sales_invoice/pos.py
|
Python
|
gpl-3.0
| 21,154 | 0.02496 |
#!/usr/bin/env python
# Copyright (C) 2010-2011 Hideo Hattori
# Copyright (C) 2011-2013 Hideo Hattori, Steven Myint
# Copyright (C) 2013-2015 Hideo Hattori, Steven Myint, Bill Wendling
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Automatically formats Python code to conform to the PEP 8 style guide.
Fixes that only need be done once can be added by adding a function of the form
"fix_<code>(source)" to this module. They should return the fixed source code.
These fixes are picked up by apply_global_fixes().
Fixes that depend on pep8 should be added as methods to FixPEP8. See the class
documentation for more information.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import codecs
import collections
import copy
import difflib
import fnmatch
import inspect
import io
import keyword
import locale
import os
import re
import signal
import sys
import textwrap
import token
import tokenize
import pep8
try:
unicode
except NameError:
unicode = str
__version__ = '1.2.1a0'
CR = '\r'
LF = '\n'
CRLF = '\r\n'
PYTHON_SHEBANG_REGEX = re.compile(r'^#!.*\bpython[23]?\b\s*$')
# For generating line shortening candidates.
SHORTEN_OPERATOR_GROUPS = frozenset([
frozenset([',']),
frozenset(['%']),
frozenset([',', '(', '[', '{']),
frozenset(['%', '(', '[', '{']),
frozenset([',', '(', '[', '{', '%', '+', '-', '*', '/', '//']),
frozenset(['%', '+', '-', '*', '/', '//']),
])
DEFAULT_IGNORE = 'E24'
DEFAULT_INDENT_SIZE = 4
# W602 is handled separately due to the need to avoid "with_traceback".
CODE_TO_2TO3 = {
'E231': ['ws_comma'],
'E721': ['idioms'],
'W601': ['has_key'],
'W603': ['ne'],
'W604': ['repr'],
'W690': ['apply',
'except',
'exitfunc',
'numliterals',
'operator',
'paren',
'reduce',
'renames',
'standarderror',
'sys_exc',
'throw',
'tuple_params',
'xreadlines']}
if sys.platform == 'win32': # pragma: no cover
DEFAULT_CONFIG = os.path.expanduser(r'~\.pep8')
else:
DEFAULT_CONFIG = os.path.join(os.getenv('XDG_CONFIG_HOME') or
os.path.expanduser('~/.config'), 'pep8')
PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep8')
def open_with_encoding(filename, encoding=None, mode='r'):
"""Return opened file with a specific encoding."""
if not encoding:
encoding = detect_encoding(filename)
return io.open(filename, mode=mode, encoding=encoding,
newline='') # Preserve line endings
def detect_encoding(filename):
"""Return file encoding."""
try:
with open(filename, 'rb') as input_file:
from lib2to3.pgen2 import tokenize as lib2to3_tokenize
encoding = lib2to3_tokenize.detect_encoding(input_file.readline)[0]
# Check for correctness of encoding
with open_with_encoding(filename, encoding) as test_file:
test_file.read()
return encoding
except (LookupError, SyntaxError, UnicodeDecodeError):
return 'latin-1'
def readlines_from_file(filename):
"""Return contents of file."""
with open_with_encoding(filename) as input_file:
return input_file.readlines()
def extended_blank_lines(logical_line,
blank_lines,
blank_before,
indent_level,
previous_logical):
"""Check for missing blank lines after class declaration."""
if previous_logical.startswith('class '):
if logical_line.startswith(('def ', 'class ', '@')):
if indent_level and not blank_lines and not blank_before:
yield (0, 'E309 expected 1 blank line after class declaration')
elif previous_logical.startswith('def '):
if blank_lines and pep8.DOCSTRING_REGEX.match(logical_line):
yield (0, 'E303 too many blank lines ({0})'.format(blank_lines))
elif pep8.DOCSTRING_REGEX.match(previous_logical):
# Missing blank line between class docstring and method declaration.
if (
indent_level and
not blank_lines and
not blank_before and
logical_line.startswith(('def ')) and
'(self' in logical_line
):
yield (0, 'E301 expected 1 blank line, found 0')
pep8.register_check(extended_blank_lines)
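# For illustration only (class and method names are hypothetical): the E309
# branch of extended_blank_lines() above flags a method that directly follows
# its class declaration with no blank line in between, e.g.
#
#     class Widget(object):
#         def render(self):   # E309 expected 1 blank line after class declaration
#             pass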
def continued_indentation(logical_line, tokens, indent_level, indent_char,
noqa):
"""Override pep8's function to provide indentation information."""
first_row = tokens[0][2][0]
nrows = 1 + tokens[-1][2][0] - first_row
if noqa or nrows == 1:
return
# indent_next tells us whether the next block is indented. Assuming
# that it is indented by 4 spaces, then we should not allow 4-space
# indents on the final continuation line. In turn, some other
# indents are allowed to have an extra 4 spaces.
indent_next = logical_line.endswith(':')
row = depth = 0
valid_hangs = (
(DEFAULT_INDENT_SIZE,)
if indent_char != '\t' else (DEFAULT_INDENT_SIZE,
2 * DEFAULT_INDENT_SIZE)
)
# Remember how many brackets were opened on each line.
parens = [0] * nrows
# Relative indents of physical lines.
rel_indent = [0] * nrows
# For each depth, collect a list of opening rows.
open_rows = [[0]]
# For each depth, memorize the hanging indentation.
hangs = [None]
# Visual indents.
indent_chances = {}
last_indent = tokens[0][2]
indent = [last_indent[1]]
last_token_multiline = None
line = None
last_line = ''
last_line_begins_with_multiline = False
for token_type, text, start, end, line in tokens:
newline = row < start[0] - first_row
if newline:
row = start[0] - first_row
newline = (not last_token_multiline and
token_type not in (tokenize.NL, tokenize.NEWLINE))
last_line_begins_with_multiline = last_token_multiline
if newline:
# This is the beginning of a continuation line.
last_indent = start
# Record the initial indent.
rel_indent[row] = pep8.expand_indent(line) - indent_level
# Identify closing bracket.
close_bracket = (token_type == tokenize.OP and text in ']})')
# Is the indent relative to an opening bracket line?
for open_row in reversed(open_rows[depth]):
hang = rel_indent[row] - rel_indent[open_row]
hanging_indent = hang in valid_hangs
if hanging_indent:
break
if hangs[depth]:
hanging_indent = (hang == hangs[depth])
visual_indent = (not close_bracket and hang > 0 and
indent_chances.get(start[1]))
if close_bracket and indent[depth]:
# Closing bracket for visual indent.
if start[1] != indent[depth]:
yield (start, 'E124 {0}'.format(indent[depth]))
elif close_bracket and not hang:
pass
elif indent[depth] and start[1] < indent[depth]:
# Visual indent is broken.
yield (start, 'E128 {0}'.format(indent[depth]))
elif (hanging_indent or
(indent_next and
rel_indent[row] == 2 * DEFAULT_INDENT_SIZE)):
# Hanging indent is verified.
if close_bracket:
yield (start, 'E123 {0}'.format(indent_level +
rel_indent[open_row]))
hangs[depth] = hang
elif visual_indent is True:
# Visual indent is verified.
indent[depth] = start[1]
elif visual_indent in (text, unicode):
# Ignore token lined up with matching one from a previous line.
pass
else:
one_indented = (indent_level + rel_indent[open_row] +
DEFAULT_INDENT_SIZE)
# Indent is broken.
if hang <= 0:
error = ('E122', one_indented)
elif indent[depth]:
error = ('E127', indent[depth])
elif hang > DEFAULT_INDENT_SIZE:
error = ('E126', one_indented)
else:
hangs[depth] = hang
error = ('E121', one_indented)
yield (start, '{0} {1}'.format(*error))
# Look for visual indenting.
if (
parens[row] and
token_type not in (tokenize.NL, tokenize.COMMENT) and
not indent[depth]
):
indent[depth] = start[1]
indent_chances[start[1]] = True
# Deal with implicit string concatenation.
elif (token_type in (tokenize.STRING, tokenize.COMMENT) or
text in ('u', 'ur', 'b', 'br')):
indent_chances[start[1]] = unicode
# Special case for the "if" statement because len("if (") is equal to
# 4.
elif not indent_chances and not row and not depth and text == 'if':
indent_chances[end[1] + 1] = True
elif text == ':' and line[end[1]:].isspace():
open_rows[depth].append(row)
# Keep track of bracket depth.
if token_type == tokenize.OP:
if text in '([{':
depth += 1
indent.append(0)
hangs.append(None)
if len(open_rows) == depth:
open_rows.append([])
open_rows[depth].append(row)
parens[row] += 1
elif text in ')]}' and depth > 0:
# Parent indents should not be more than this one.
prev_indent = indent.pop() or last_indent[1]
hangs.pop()
for d in range(depth):
if indent[d] > prev_indent:
indent[d] = 0
for ind in list(indent_chances):
if ind >= prev_indent:
del indent_chances[ind]
del open_rows[depth + 1:]
depth -= 1
if depth:
indent_chances[indent[depth]] = True
for idx in range(row, -1, -1):
if parens[idx]:
parens[idx] -= 1
break
assert len(indent) == depth + 1
if (
start[1] not in indent_chances and
# This is for purposes of speeding up E121 (GitHub #90).
not last_line.rstrip().endswith(',')
):
# Allow to line up tokens.
indent_chances[start[1]] = text
last_token_multiline = (start[0] != end[0])
if last_token_multiline:
rel_indent[end[0] - first_row] = rel_indent[row]
last_line = line
if (
indent_next and
not last_line_begins_with_multiline and
pep8.expand_indent(line) == indent_level + DEFAULT_INDENT_SIZE
):
pos = (start[0], indent[0] + 4)
yield (pos, 'E125 {0}'.format(indent_level +
2 * DEFAULT_INDENT_SIZE))
del pep8._checks['logical_line'][pep8.continued_indentation]
pep8.register_check(continued_indentation)
class FixPEP8(object):
"""Fix invalid code.
Fixer methods are prefixed "fix_". The _fix_source() method looks for these
automatically.
The fixer method can take either one or two arguments (in addition to
self). The first argument is "result", which is the error information from
pep8. The second argument, "logical", is required only for logical-line
fixes.
The fixer method can return the list of modified lines or None. An empty
list would mean that no changes were made. None would mean that only the
line reported in the pep8 error was modified. Note that the modified line
numbers that are returned are indexed at 1. This typically would correspond
with the line number reported in the pep8 error information.
[fixed method list]
- e121,e122,e123,e124,e125,e126,e127,e128,e129
- e201,e202,e203
- e211
- e221,e222,e223,e224,e225
- e231
- e251
- e261,e262
- e271,e272,e273,e274
- e301,e302,e303
- e401
- e502
- e701,e702
- e711
- w291
"""
def __init__(self, filename,
options,
contents=None,
long_line_ignore_cache=None):
self.filename = filename
if contents is None:
self.source = readlines_from_file(filename)
else:
sio = io.StringIO(contents)
self.source = sio.readlines()
self.options = options
self.indent_word = _get_indentword(''.join(self.source))
self.long_line_ignore_cache = (
set() if long_line_ignore_cache is None
else long_line_ignore_cache)
# Many fixers are the same even though pep8 categorizes them
# differently.
self.fix_e115 = self.fix_e112
self.fix_e116 = self.fix_e113
self.fix_e121 = self._fix_reindent
self.fix_e122 = self._fix_reindent
self.fix_e123 = self._fix_reindent
self.fix_e124 = self._fix_reindent
self.fix_e126 = self._fix_reindent
self.fix_e127 = self._fix_reindent
self.fix_e128 = self._fix_reindent
self.fix_e129 = self._fix_reindent
self.fix_e202 = self.fix_e201
self.fix_e203 = self.fix_e201
self.fix_e211 = self.fix_e201
self.fix_e221 = self.fix_e271
self.fix_e222 = self.fix_e271
self.fix_e223 = self.fix_e271
self.fix_e226 = self.fix_e225
self.fix_e227 = self.fix_e225
self.fix_e228 = self.fix_e225
self.fix_e241 = self.fix_e271
self.fix_e242 = self.fix_e224
self.fix_e261 = self.fix_e262
self.fix_e272 = self.fix_e271
self.fix_e273 = self.fix_e271
self.fix_e274 = self.fix_e271
self.fix_e309 = self.fix_e301
self.fix_e501 = (
self.fix_long_line_logically if
options and (options.aggressive >= 2 or options.experimental) else
self.fix_long_line_physically)
self.fix_e703 = self.fix_e702
self.fix_w293 = self.fix_w291
def _fix_source(self, results):
try:
(logical_start, logical_end) = _find_logical(self.source)
logical_support = True
except (SyntaxError, tokenize.TokenError): # pragma: no cover
logical_support = False
completed_lines = set()
for result in sorted(results, key=_priority_key):
if result['line'] in completed_lines:
continue
fixed_methodname = 'fix_' + result['id'].lower()
if hasattr(self, fixed_methodname):
fix = getattr(self, fixed_methodname)
line_index = result['line'] - 1
original_line = self.source[line_index]
is_logical_fix = len(inspect.getargspec(fix).args) > 2
if is_logical_fix:
logical = None
if logical_support:
logical = _get_logical(self.source,
result,
logical_start,
logical_end)
if logical and set(range(
logical[0][0] + 1,
logical[1][0] + 1)).intersection(
completed_lines):
continue
modified_lines = fix(result, logical)
else:
modified_lines = fix(result)
if modified_lines is None:
# Force logical fixes to report what they modified.
assert not is_logical_fix
if self.source[line_index] == original_line:
modified_lines = []
if modified_lines:
completed_lines.update(modified_lines)
elif modified_lines == []: # Empty list means no fix
if self.options.verbose >= 2:
print(
'---> Not fixing {f} on line {l}'.format(
f=result['id'], l=result['line']),
file=sys.stderr)
else: # We assume one-line fix when None.
completed_lines.add(result['line'])
else:
if self.options.verbose >= 3:
print(
"---> '{0}' is not defined.".format(fixed_methodname),
file=sys.stderr)
info = result['info'].strip()
print('---> {0}:{1}:{2}:{3}'.format(self.filename,
result['line'],
result['column'],
info),
file=sys.stderr)
def fix(self):
"""Return a version of the source code with PEP 8 violations fixed."""
pep8_options = {
'ignore': self.options.ignore,
'select': self.options.select,
'max_line_length': self.options.max_line_length,
}
results = _execute_pep8(pep8_options, self.source)
if self.options.verbose:
progress = {}
for r in results:
if r['id'] not in progress:
progress[r['id']] = set()
progress[r['id']].add(r['line'])
print('---> {n} issue(s) to fix {progress}'.format(
n=len(results), progress=progress), file=sys.stderr)
if self.options.line_range:
start, end = self.options.line_range
results = [r for r in results
if start <= r['line'] <= end]
self._fix_source(filter_results(source=''.join(self.source),
results=results,
aggressive=self.options.aggressive))
if self.options.line_range:
# If number of lines has changed then change line_range.
count = sum(sline.count('\n')
for sline in self.source[start - 1:end])
self.options.line_range[1] = start + count - 1
return ''.join(self.source)
def _fix_reindent(self, result):
"""Fix a badly indented line.
This is done by adding or removing from its initial indent only.
"""
num_indent_spaces = int(result['info'].split()[1])
line_index = result['line'] - 1
target = self.source[line_index]
self.source[line_index] = ' ' * num_indent_spaces + target.lstrip()
def fix_e112(self, result):
"""Fix under-indented comments."""
line_index = result['line'] - 1
target = self.source[line_index]
if not target.lstrip().startswith('#'):
# Don't screw with invalid syntax.
return []
self.source[line_index] = self.indent_word + target
def fix_e113(self, result):
"""Fix over-indented comments."""
line_index = result['line'] - 1
target = self.source[line_index]
indent = _get_indentation(target)
stripped = target.lstrip()
if not stripped.startswith('#'):
# Don't screw with invalid syntax.
return []
self.source[line_index] = indent[1:] + stripped
def fix_e125(self, result):
"""Fix indentation undistinguish from the next logical line."""
num_indent_spaces = int(result['info'].split()[1])
line_index = result['line'] - 1
target = self.source[line_index]
spaces_to_add = num_indent_spaces - len(_get_indentation(target))
indent = len(_get_indentation(target))
modified_lines = []
while len(_get_indentation(self.source[line_index])) >= indent:
self.source[line_index] = (' ' * spaces_to_add +
self.source[line_index])
modified_lines.append(1 + line_index) # Line indexed at 1.
line_index -= 1
return modified_lines
def fix_e201(self, result):
"""Remove extraneous whitespace."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column'] - 1
if is_probably_part_of_multiline(target):
return []
fixed = fix_whitespace(target,
offset=offset,
replacement='')
self.source[line_index] = fixed
def fix_e224(self, result):
"""Remove extraneous whitespace around operator."""
target = self.source[result['line'] - 1]
offset = result['column'] - 1
fixed = target[:offset] + target[offset:].replace('\t', ' ')
self.source[result['line'] - 1] = fixed
def fix_e225(self, result):
"""Fix missing whitespace around operator."""
target = self.source[result['line'] - 1]
offset = result['column'] - 1
fixed = target[:offset] + ' ' + target[offset:]
# Only proceed if non-whitespace characters match.
# And make sure we don't break the indentation.
if (
fixed.replace(' ', '') == target.replace(' ', '') and
_get_indentation(fixed) == _get_indentation(target)
):
self.source[result['line'] - 1] = fixed
else:
return []
def fix_e231(self, result):
"""Add missing whitespace."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column']
fixed = target[:offset] + ' ' + target[offset:]
self.source[line_index] = fixed
def fix_e251(self, result):
"""Remove whitespace around parameter '=' sign."""
line_index = result['line'] - 1
target = self.source[line_index]
        # This is necessary since pep8 sometimes reports columns that go
        # past the end of the physical line. This happens in cases like:
        # foo(bar\n=None)
c = min(result['column'] - 1,
len(target) - 1)
if target[c].strip():
fixed = target
else:
fixed = target[:c].rstrip() + target[c:].lstrip()
# There could be an escaped newline
#
# def foo(a=\
# 1)
if fixed.endswith(('=\\\n', '=\\\r\n', '=\\\r')):
self.source[line_index] = fixed.rstrip('\n\r \t\\')
self.source[line_index + 1] = self.source[line_index + 1].lstrip()
return [line_index + 1, line_index + 2] # Line indexed at 1
self.source[result['line'] - 1] = fixed
def fix_e262(self, result):
"""Fix spacing after comment hash."""
target = self.source[result['line'] - 1]
offset = result['column']
code = target[:offset].rstrip(' \t#')
comment = target[offset:].lstrip(' \t#')
fixed = code + (' # ' + comment if comment.strip() else '\n')
self.source[result['line'] - 1] = fixed
def fix_e271(self, result):
"""Fix extraneous whitespace around keywords."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column'] - 1
if is_probably_part_of_multiline(target):
return []
fixed = fix_whitespace(target,
offset=offset,
replacement=' ')
if fixed == target:
return []
else:
self.source[line_index] = fixed
def fix_e301(self, result):
"""Add missing blank line."""
cr = '\n'
self.source[result['line'] - 1] = cr + self.source[result['line'] - 1]
def fix_e302(self, result):
"""Add missing 2 blank lines."""
add_linenum = 2 - int(result['info'].split()[-1])
cr = '\n' * add_linenum
self.source[result['line'] - 1] = cr + self.source[result['line'] - 1]
def fix_e303(self, result):
"""Remove extra blank lines."""
delete_linenum = int(result['info'].split('(')[1].split(')')[0]) - 2
delete_linenum = max(1, delete_linenum)
# We need to count because pep8 reports an offset line number if there
# are comments.
cnt = 0
line = result['line'] - 2
modified_lines = []
while cnt < delete_linenum and line >= 0:
if not self.source[line].strip():
self.source[line] = ''
modified_lines.append(1 + line) # Line indexed at 1
cnt += 1
line -= 1
return modified_lines
def fix_e304(self, result):
"""Remove blank line following function decorator."""
line = result['line'] - 2
if not self.source[line].strip():
self.source[line] = ''
def fix_e401(self, result):
"""Put imports on separate lines."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column'] - 1
if not target.lstrip().startswith('import'):
return []
indentation = re.split(pattern=r'\bimport\b',
string=target, maxsplit=1)[0]
fixed = (target[:offset].rstrip('\t ,') + '\n' +
indentation + 'import ' + target[offset:].lstrip('\t ,'))
self.source[line_index] = fixed
def fix_long_line_logically(self, result, logical):
"""Try to make lines fit within --max-line-length characters."""
if (
not logical or
len(logical[2]) == 1 or
self.source[result['line'] - 1].lstrip().startswith('#')
):
return self.fix_long_line_physically(result)
start_line_index = logical[0][0]
end_line_index = logical[1][0]
logical_lines = logical[2]
previous_line = get_item(self.source, start_line_index - 1, default='')
next_line = get_item(self.source, end_line_index + 1, default='')
single_line = join_logical_line(''.join(logical_lines))
try:
fixed = self.fix_long_line(
target=single_line,
previous_line=previous_line,
next_line=next_line,
original=''.join(logical_lines))
except (SyntaxError, tokenize.TokenError):
return self.fix_long_line_physically(result)
if fixed:
for line_index in range(start_line_index, end_line_index + 1):
self.source[line_index] = ''
self.source[start_line_index] = fixed
return range(start_line_index + 1, end_line_index + 1)
else:
return []
def fix_long_line_physically(self, result):
"""Try to make lines fit within --max-line-length characters."""
line_index = result['line'] - 1
target = self.source[line_index]
previous_line = get_item(self.source, line_index - 1, default='')
next_line = get_item(self.source, line_index + 1, default='')
try:
fixed = self.fix_long_line(
target=target,
previous_line=previous_line,
next_line=next_line,
original=target)
except (SyntaxError, tokenize.TokenError):
return []
if fixed:
self.source[line_index] = fixed
return [line_index + 1]
else:
return []
def fix_long_line(self, target, previous_line,
next_line, original):
cache_entry = (target, previous_line, next_line)
if cache_entry in self.long_line_ignore_cache:
return []
if target.lstrip().startswith('#'):
# Wrap commented lines.
return shorten_comment(
line=target,
max_line_length=self.options.max_line_length,
last_comment=not next_line.lstrip().startswith('#'))
fixed = get_fixed_long_line(
target=target,
previous_line=previous_line,
original=original,
indent_word=self.indent_word,
max_line_length=self.options.max_line_length,
aggressive=self.options.aggressive,
experimental=self.options.experimental,
verbose=self.options.verbose)
if fixed and not code_almost_equal(original, fixed):
return fixed
else:
self.long_line_ignore_cache.add(cache_entry)
return None
def fix_e502(self, result):
"""Remove extraneous escape of newline."""
(line_index, _, target) = get_index_offset_contents(result,
self.source)
self.source[line_index] = target.rstrip('\n\r \t\\') + '\n'
def fix_e701(self, result):
"""Put colon-separated compound statement on separate lines."""
line_index = result['line'] - 1
target = self.source[line_index]
c = result['column']
fixed_source = (target[:c] + '\n' +
_get_indentation(target) + self.indent_word +
target[c:].lstrip('\n\r \t\\'))
self.source[result['line'] - 1] = fixed_source
return [result['line'], result['line'] + 1]
def fix_e702(self, result, logical):
"""Put semicolon-separated compound statement on separate lines."""
if not logical:
return [] # pragma: no cover
logical_lines = logical[2]
line_index = result['line'] - 1
target = self.source[line_index]
if target.rstrip().endswith('\\'):
# Normalize '1; \\\n2' into '1; 2'.
self.source[line_index] = target.rstrip('\n \r\t\\')
self.source[line_index + 1] = self.source[line_index + 1].lstrip()
return [line_index + 1, line_index + 2]
if target.rstrip().endswith(';'):
self.source[line_index] = target.rstrip('\n \r\t;') + '\n'
return [line_index + 1]
offset = result['column'] - 1
first = target[:offset].rstrip(';').rstrip()
second = (_get_indentation(logical_lines[0]) +
target[offset:].lstrip(';').lstrip())
        # Find an inline comment, if any.
inline_comment = None
if '# ' == target[offset:].lstrip(';').lstrip()[:2]:
inline_comment = target[offset:].lstrip(';')
if inline_comment:
self.source[line_index] = first + inline_comment
else:
self.source[line_index] = first + '\n' + second
return [line_index + 1]
def fix_e711(self, result):
"""Fix comparison with None."""
(line_index, offset, target) = get_index_offset_contents(result,
self.source)
right_offset = offset + 2
if right_offset >= len(target):
return []
left = target[:offset].rstrip()
center = target[offset:right_offset]
right = target[right_offset:].lstrip()
if not right.startswith('None'):
return []
if center.strip() == '==':
new_center = 'is'
elif center.strip() == '!=':
new_center = 'is not'
else:
return []
self.source[line_index] = ' '.join([left, new_center, right])
def fix_e712(self, result):
"""Fix (trivial case of) comparison with boolean."""
(line_index, offset, target) = get_index_offset_contents(result,
self.source)
# Handle very easy "not" special cases.
if re.match(r'^\s*if [\w.]+ == False:$', target):
self.source[line_index] = re.sub(r'if ([\w.]+) == False:',
r'if not \1:', target, count=1)
elif re.match(r'^\s*if [\w.]+ != True:$', target):
self.source[line_index] = re.sub(r'if ([\w.]+) != True:',
r'if not \1:', target, count=1)
else:
right_offset = offset + 2
if right_offset >= len(target):
return []
left = target[:offset].rstrip()
center = target[offset:right_offset]
right = target[right_offset:].lstrip()
# Handle simple cases only.
new_right = None
if center.strip() == '==':
if re.match(r'\bTrue\b', right):
new_right = re.sub(r'\bTrue\b *', '', right, count=1)
elif center.strip() == '!=':
if re.match(r'\bFalse\b', right):
new_right = re.sub(r'\bFalse\b *', '', right, count=1)
if new_right is None:
return []
if new_right[0].isalnum():
new_right = ' ' + new_right
self.source[line_index] = left + new_right
def fix_e713(self, result):
"""Fix (trivial case of) non-membership check."""
(line_index, _, target) = get_index_offset_contents(result,
self.source)
# Handle very easy case only.
if re.match(r'^\s*if not [\w.]+ in [\w.]+:$', target):
self.source[line_index] = re.sub(r'if not ([\w.]+) in ([\w.]+):',
r'if \1 not in \2:',
target,
count=1)
def fix_w291(self, result):
"""Remove trailing whitespace."""
fixed_line = self.source[result['line'] - 1].rstrip()
self.source[result['line'] - 1] = fixed_line + '\n'
def fix_w391(self, _):
"""Remove trailing blank lines."""
blank_count = 0
for line in reversed(self.source):
line = line.rstrip()
if line:
break
else:
blank_count += 1
original_length = len(self.source)
self.source = self.source[:original_length - blank_count]
return range(1, 1 + original_length)
def get_index_offset_contents(result, source):
"""Return (line_index, column_offset, line_contents)."""
line_index = result['line'] - 1
return (line_index,
result['column'] - 1,
source[line_index])
def get_fixed_long_line(target, previous_line, original,
indent_word=' ', max_line_length=79,
aggressive=False, experimental=False, verbose=False):
"""Break up long line and return result.
Do this by generating multiple reformatted candidates and then
ranking the candidates to heuristically select the best option.
"""
indent = _get_indentation(target)
source = target[len(indent):]
assert source.lstrip() == source
# Check for partial multiline.
tokens = list(generate_tokens(source))
candidates = shorten_line(
tokens, source, indent,
indent_word,
max_line_length,
aggressive=aggressive,
experimental=experimental,
previous_line=previous_line)
# Also sort alphabetically as a tie breaker (for determinism).
candidates = sorted(
sorted(set(candidates).union([target, original])),
key=lambda x: line_shortening_rank(
x,
indent_word,
max_line_length,
experimental=experimental))
if verbose >= 4:
print(('-' * 79 + '\n').join([''] + candidates + ['']),
file=wrap_output(sys.stderr, 'utf-8'))
if candidates:
best_candidate = candidates[0]
# Don't allow things to get longer.
if longest_line_length(best_candidate) > longest_line_length(original):
return None
else:
return best_candidate
def longest_line_length(code):
"""Return length of longest line."""
return max(len(line) for line in code.splitlines())
def join_logical_line(logical_line):
"""Return single line based on logical line input."""
indentation = _get_indentation(logical_line)
return indentation + untokenize_without_newlines(
generate_tokens(logical_line.lstrip())) + '\n'
def untokenize_without_newlines(tokens):
"""Return source code based on tokens."""
text = ''
last_row = 0
last_column = -1
for t in tokens:
token_string = t[1]
(start_row, start_column) = t[2]
(end_row, end_column) = t[3]
if start_row > last_row:
last_column = 0
if (
(start_column > last_column or token_string == '\n') and
not text.endswith(' ')
):
text += ' '
if token_string != '\n':
text += token_string
last_row = end_row
last_column = end_column
return text.rstrip()
def _find_logical(source_lines):
    # Record the positions where each logical line starts and ends.
logical_start = []
logical_end = []
last_newline = True
parens = 0
for t in generate_tokens(''.join(source_lines)):
if t[0] in [tokenize.COMMENT, tokenize.DEDENT,
tokenize.INDENT, tokenize.NL,
tokenize.ENDMARKER]:
continue
if not parens and t[0] in [tokenize.NEWLINE, tokenize.SEMI]:
last_newline = True
logical_end.append((t[3][0] - 1, t[2][1]))
continue
if last_newline and not parens:
logical_start.append((t[2][0] - 1, t[2][1]))
last_newline = False
if t[0] == tokenize.OP:
if t[1] in '([{':
parens += 1
elif t[1] in '}])':
parens -= 1
return (logical_start, logical_end)
def _get_logical(source_lines, result, logical_start, logical_end):
"""Return the logical line corresponding to the result.
Assumes input is already E702-clean.
"""
row = result['line'] - 1
col = result['column'] - 1
ls = None
le = None
for i in range(0, len(logical_start), 1):
assert logical_end
x = logical_end[i]
if x[0] > row or (x[0] == row and x[1] > col):
le = x
ls = logical_start[i]
break
if ls is None:
return None
original = source_lines[ls[0]:le[0] + 1]
return ls, le, original
def get_item(items, index, default=None):
if 0 <= index < len(items):
return items[index]
else:
return default
def reindent(source, indent_size):
"""Reindent all lines."""
reindenter = Reindenter(source)
return reindenter.run(indent_size)
def code_almost_equal(a, b):
"""Return True if code is similar.
Ignore whitespace when comparing specific line.
"""
split_a = split_and_strip_non_empty_lines(a)
split_b = split_and_strip_non_empty_lines(b)
if len(split_a) != len(split_b):
return False
for index in range(len(split_a)):
if ''.join(split_a[index].split()) != ''.join(split_b[index].split()):
return False
return True
def split_and_strip_non_empty_lines(text):
"""Return lines split by newline.
Ignore empty lines.
"""
return [line.strip() for line in text.splitlines() if line.strip()]
def fix_e265(source, aggressive=False): # pylint: disable=unused-argument
"""Format block comments."""
if '#' not in source:
# Optimization.
return source
ignored_line_numbers = multiline_string_lines(
source,
include_docstrings=True) | set(commented_out_code_lines(source))
fixed_lines = []
sio = io.StringIO(source)
for (line_number, line) in enumerate(sio.readlines(), start=1):
if (
line.lstrip().startswith('#') and
line_number not in ignored_line_numbers
):
indentation = _get_indentation(line)
line = line.lstrip()
# Normalize beginning if not a shebang.
if len(line) > 1:
pos = next((index for index, c in enumerate(line)
if c != '#'))
if (
# Leave multiple spaces like '# ' alone.
(line[:pos].count('#') > 1 or line[1].isalnum()) and
# Leave stylistic outlined blocks alone.
not line.rstrip().endswith('#')
):
line = '# ' + line.lstrip('# \t')
fixed_lines.append(indentation + line)
else:
fixed_lines.append(line)
return ''.join(fixed_lines)
def refactor(source, fixer_names, ignore=None, filename=''):
"""Return refactored code using lib2to3.
Skip if ignore string is produced in the refactored code.
"""
from lib2to3 import pgen2
try:
new_text = refactor_with_2to3(source,
fixer_names=fixer_names,
filename=filename)
except (pgen2.parse.ParseError,
SyntaxError,
UnicodeDecodeError,
UnicodeEncodeError):
return source
if ignore:
if ignore in new_text and ignore not in source:
return source
return new_text
def code_to_2to3(select, ignore):
fixes = set()
for code, fix in CODE_TO_2TO3.items():
if code_match(code, select=select, ignore=ignore):
fixes |= set(fix)
return fixes
def fix_2to3(source,
aggressive=True, select=None, ignore=None, filename=''):
"""Fix various deprecated code (via lib2to3)."""
if not aggressive:
return source
select = select or []
ignore = ignore or []
return refactor(source,
code_to_2to3(select=select,
ignore=ignore),
filename=filename)
def fix_w602(source, aggressive=True):
"""Fix deprecated form of raising exception."""
if not aggressive:
return source
return refactor(source, ['raise'],
ignore='with_traceback')
def find_newline(source):
"""Return type of newline used in source.
Input is a list of lines.
"""
assert not isinstance(source, unicode)
counter = collections.defaultdict(int)
for line in source:
if line.endswith(CRLF):
counter[CRLF] += 1
elif line.endswith(CR):
counter[CR] += 1
elif line.endswith(LF):
counter[LF] += 1
return (sorted(counter, key=counter.get, reverse=True) or [LF])[0]
def _get_indentword(source):
"""Return indentation type."""
indent_word = ' ' # Default in case source has no indentation
try:
for t in generate_tokens(source):
if t[0] == token.INDENT:
indent_word = t[1]
break
except (SyntaxError, tokenize.TokenError):
pass
return indent_word
def _get_indentation(line):
"""Return leading whitespace."""
if line.strip():
non_whitespace_index = len(line) - len(line.lstrip())
return line[:non_whitespace_index]
else:
return ''
def get_diff_text(old, new, filename):
"""Return text of unified diff between old and new."""
newline = '\n'
diff = difflib.unified_diff(
old, new,
'original/' + filename,
'fixed/' + filename,
lineterm=newline)
text = ''
for line in diff:
text += line
# Work around missing newline (http://bugs.python.org/issue2142).
if text and not line.endswith(newline):
text += newline + r'\ No newline at end of file' + newline
return text
def _priority_key(pep8_result):
"""Key for sorting PEP8 results.
Global fixes should be done first. This is important for things like
indentation.
"""
priority = [
# Fix multiline colon-based before semicolon based.
'e701',
# Break multiline statements early.
'e702',
# Things that make lines longer.
'e225', 'e231',
# Remove extraneous whitespace before breaking lines.
'e201',
# Shorten whitespace in comment before resorting to wrapping.
'e262'
]
middle_index = 10000
lowest_priority = [
# We need to shorten lines last since the logical fixer can get in a
# loop, which causes us to exit early.
'e501'
]
key = pep8_result['id'].lower()
try:
return priority.index(key)
except ValueError:
try:
return middle_index + lowest_priority.index(key) + 1
except ValueError:
return middle_index
def shorten_line(tokens, source, indentation, indent_word, max_line_length,
aggressive=False, experimental=False, previous_line=''):
"""Separate line at OPERATOR.
Multiple candidates will be yielded.
"""
for candidate in _shorten_line(tokens=tokens,
source=source,
indentation=indentation,
indent_word=indent_word,
aggressive=aggressive,
previous_line=previous_line):
yield candidate
if aggressive:
for key_token_strings in SHORTEN_OPERATOR_GROUPS:
shortened = _shorten_line_at_tokens(
tokens=tokens,
source=source,
indentation=indentation,
indent_word=indent_word,
key_token_strings=key_token_strings,
aggressive=aggressive)
if shortened is not None and shortened != source:
yield shortened
if experimental:
for shortened in _shorten_line_at_tokens_new(
tokens=tokens,
source=source,
indentation=indentation,
max_line_length=max_line_length):
yield shortened
def _shorten_line(tokens, source, indentation, indent_word,
aggressive=False, previous_line=''):
"""Separate line at OPERATOR.
The input is expected to be free of newlines except for inside multiline
strings and at the end.
Multiple candidates will be yielded.
"""
for (token_type,
token_string,
start_offset,
end_offset) in token_offsets(tokens):
if (
token_type == tokenize.COMMENT and
not is_probably_part_of_multiline(previous_line) and
not is_probably_part_of_multiline(source) and
not source[start_offset + 1:].strip().lower().startswith(
('noqa', 'pragma:', 'pylint:'))
):
# Move inline comments to previous line.
first = source[:start_offset]
second = source[start_offset:]
yield (indentation + second.strip() + '\n' +
indentation + first.strip() + '\n')
elif token_type == token.OP and token_string != '=':
# Don't break on '=' after keyword as this violates PEP 8.
assert token_type != token.INDENT
first = source[:end_offset]
second_indent = indentation
if first.rstrip().endswith('('):
second_indent += indent_word
elif '(' in first:
second_indent += ' ' * (1 + first.find('('))
else:
second_indent += indent_word
second = (second_indent + source[end_offset:].lstrip())
if (
not second.strip() or
second.lstrip().startswith('#')
):
continue
# Do not begin a line with a comma
if second.lstrip().startswith(','):
continue
            # Do not end a line with a dot.
if first.rstrip().endswith('.'):
continue
if token_string in '+-*/':
fixed = first + ' \\' + '\n' + second
else:
fixed = first + '\n' + second
# Only fix if syntax is okay.
if check_syntax(normalize_multiline(fixed)
if aggressive else fixed):
yield indentation + fixed
# A convenient way to handle tokens.
Token = collections.namedtuple('Token', ['token_type', 'token_string',
'spos', 'epos', 'line'])
class ReformattedLines(object):
"""The reflowed lines of atoms.
Each part of the line is represented as an "atom." They can be moved
around when need be to get the optimal formatting.
"""
###########################################################################
# Private Classes
class _Indent(object):
"""Represent an indentation in the atom stream."""
def __init__(self, indent_amt):
self._indent_amt = indent_amt
def emit(self):
return ' ' * self._indent_amt
@property
def size(self):
return self._indent_amt
class _Space(object):
"""Represent a space in the atom stream."""
def emit(self):
return ' '
@property
def size(self):
return 1
class _LineBreak(object):
"""Represent a line break in the atom stream."""
def emit(self):
return '\n'
@property
def size(self):
return 0
def __init__(self, max_line_length):
self._max_line_length = max_line_length
self._lines = []
self._bracket_depth = 0
self._prev_item = None
self._prev_prev_item = None
def __repr__(self):
return self.emit()
###########################################################################
# Public Methods
def add(self, obj, indent_amt, break_after_open_bracket):
if isinstance(obj, Atom):
self._add_item(obj, indent_amt)
return
self._add_container(obj, indent_amt, break_after_open_bracket)
def add_comment(self, item):
num_spaces = 2
if len(self._lines) > 1:
if isinstance(self._lines[-1], self._Space):
num_spaces -= 1
if len(self._lines) > 2:
if isinstance(self._lines[-2], self._Space):
num_spaces -= 1
while num_spaces > 0:
self._lines.append(self._Space())
num_spaces -= 1
self._lines.append(item)
def add_indent(self, indent_amt):
self._lines.append(self._Indent(indent_amt))
def add_line_break(self, indent):
self._lines.append(self._LineBreak())
self.add_indent(len(indent))
def add_line_break_at(self, index, indent_amt):
self._lines.insert(index, self._LineBreak())
self._lines.insert(index + 1, self._Indent(indent_amt))
def add_space_if_needed(self, curr_text, equal=False):
if (
not self._lines or isinstance(
self._lines[-1], (self._LineBreak, self._Indent, self._Space))
):
return
prev_text = unicode(self._prev_item)
prev_prev_text = (
unicode(self._prev_prev_item) if self._prev_prev_item else '')
if (
# The previous item was a keyword or identifier and the current
# item isn't an operator that doesn't require a space.
((self._prev_item.is_keyword or self._prev_item.is_string or
self._prev_item.is_name or self._prev_item.is_number) and
(curr_text[0] not in '([{.,:}])' or
(curr_text[0] == '=' and equal))) or
# Don't place spaces around a '.', unless it's in an 'import'
# statement.
((prev_prev_text != 'from' and prev_text[-1] != '.' and
curr_text != 'import') and
# Don't place a space before a colon.
curr_text[0] != ':' and
# Don't split up ending brackets by spaces.
((prev_text[-1] in '}])' and curr_text[0] not in '.,}])') or
# Put a space after a colon or comma.
prev_text[-1] in ':,' or
# Put space around '=' if asked to.
(equal and prev_text == '=') or
# Put spaces around non-unary arithmetic operators.
((self._prev_prev_item and
(prev_text not in '+-' and
(self._prev_prev_item.is_name or
self._prev_prev_item.is_number or
self._prev_prev_item.is_string)) and
prev_text in ('+', '-', '%', '*', '/', '//', '**', 'in')))))
):
self._lines.append(self._Space())
def previous_item(self):
"""Return the previous non-whitespace item."""
return self._prev_item
def fits_on_current_line(self, item_extent):
return self.current_size() + item_extent <= self._max_line_length
def current_size(self):
"""The size of the current line minus the indentation."""
size = 0
for item in reversed(self._lines):
size += item.size
if isinstance(item, self._LineBreak):
break
return size
def line_empty(self):
return (self._lines and
isinstance(self._lines[-1],
(self._LineBreak, self._Indent)))
def emit(self):
string = ''
for item in self._lines:
if isinstance(item, self._LineBreak):
string = string.rstrip()
string += item.emit()
return string.rstrip() + '\n'
###########################################################################
# Private Methods
def _add_item(self, item, indent_amt):
"""Add an item to the line.
Reflow the line to get the best formatting after the item is
inserted. The bracket depth indicates if the item is being
inserted inside of a container or not.
"""
if self._prev_item and self._prev_item.is_string and item.is_string:
# Place consecutive string literals on separate lines.
self._lines.append(self._LineBreak())
self._lines.append(self._Indent(indent_amt))
item_text = unicode(item)
if self._lines and self._bracket_depth:
# Adding the item into a container.
self._prevent_default_initializer_splitting(item, indent_amt)
if item_text in '.,)]}':
self._split_after_delimiter(item, indent_amt)
elif self._lines and not self.line_empty():
# Adding the item outside of a container.
if self.fits_on_current_line(len(item_text)):
self._enforce_space(item)
else:
# Line break for the new item.
self._lines.append(self._LineBreak())
self._lines.append(self._Indent(indent_amt))
self._lines.append(item)
self._prev_item, self._prev_prev_item = item, self._prev_item
if item_text in '([{':
self._bracket_depth += 1
elif item_text in '}])':
self._bracket_depth -= 1
assert self._bracket_depth >= 0
def _add_container(self, container, indent_amt, break_after_open_bracket):
actual_indent = indent_amt + 1
if (
unicode(self._prev_item) != '=' and
not self.line_empty() and
not self.fits_on_current_line(
container.size + self._bracket_depth + 2)
):
if unicode(container)[0] == '(' and self._prev_item.is_name:
# Don't split before the opening bracket of a call.
break_after_open_bracket = True
actual_indent = indent_amt + 4
elif (
break_after_open_bracket or
unicode(self._prev_item) not in '([{'
):
# If the container doesn't fit on the current line and the
# current line isn't empty, place the container on the next
# line.
self._lines.append(self._LineBreak())
self._lines.append(self._Indent(indent_amt))
break_after_open_bracket = False
else:
actual_indent = self.current_size() + 1
break_after_open_bracket = False
if isinstance(container, (ListComprehension, IfExpression)):
actual_indent = indent_amt
# Increase the continued indentation only if recursing on a
# container.
container.reflow(self, ' ' * actual_indent,
break_after_open_bracket=break_after_open_bracket)
def _prevent_default_initializer_splitting(self, item, indent_amt):
"""Prevent splitting between a default initializer.
When there is a default initializer, it's best to keep it all on
the same line. It's nicer and more readable, even if it goes
over the maximum allowable line length. This goes back along the
current line to determine if we have a default initializer, and,
if so, to remove extraneous whitespaces and add a line
break/indent before it if needed.
"""
if unicode(item) == '=':
# This is the assignment in the initializer. Just remove spaces for
# now.
self._delete_whitespace()
return
if (not self._prev_item or not self._prev_prev_item or
unicode(self._prev_item) != '='):
return
self._delete_whitespace()
prev_prev_index = self._lines.index(self._prev_prev_item)
if (
isinstance(self._lines[prev_prev_index - 1], self._Indent) or
self.fits_on_current_line(item.size + 1)
):
# The default initializer is already the only item on this line.
# Don't insert a newline here.
return
# Replace the space with a newline/indent combo.
if isinstance(self._lines[prev_prev_index - 1], self._Space):
del self._lines[prev_prev_index - 1]
self.add_line_break_at(self._lines.index(self._prev_prev_item),
indent_amt)
def _split_after_delimiter(self, item, indent_amt):
"""Split the line only after a delimiter."""
self._delete_whitespace()
if self.fits_on_current_line(item.size):
return
last_space = None
for item in reversed(self._lines):
if (
last_space and
(not isinstance(item, Atom) or not item.is_colon)
):
break
else:
last_space = None
if isinstance(item, self._Space):
last_space = item
if isinstance(item, (self._LineBreak, self._Indent)):
return
if not last_space:
return
self.add_line_break_at(self._lines.index(last_space), indent_amt)
def _enforce_space(self, item):
"""Enforce a space in certain situations.
There are cases where we will want a space where normally we
wouldn't put one. This just enforces the addition of a space.
"""
if isinstance(self._lines[-1],
(self._Space, self._LineBreak, self._Indent)):
return
if not self._prev_item:
return
item_text = unicode(item)
prev_text = unicode(self._prev_item)
# Prefer a space around a '.' in an import statement, and between the
# 'import' and '('.
if (
(item_text == '.' and prev_text == 'from') or
(item_text == 'import' and prev_text == '.') or
(item_text == '(' and prev_text == 'import')
):
self._lines.append(self._Space())
def _delete_whitespace(self):
"""Delete all whitespace from the end of the line."""
while isinstance(self._lines[-1], (self._Space, self._LineBreak,
self._Indent)):
del self._lines[-1]
class Atom(object):
"""The smallest unbreakable unit that can be reflowed."""
def __init__(self, atom):
self._atom = atom
def __repr__(self):
return self._atom.token_string
def __len__(self):
return self.size
def reflow(
self, reflowed_lines, continued_indent, extent,
break_after_open_bracket=False,
is_list_comp_or_if_expr=False,
next_is_dot=False
):
if self._atom.token_type == tokenize.COMMENT:
reflowed_lines.add_comment(self)
return
total_size = extent if extent else self.size
if self._atom.token_string not in ',:([{}])':
# Some atoms will need an extra 1-sized space token after them.
total_size += 1
prev_item = reflowed_lines.previous_item()
if (
not is_list_comp_or_if_expr and
not reflowed_lines.fits_on_current_line(total_size) and
not (next_is_dot and
reflowed_lines.fits_on_current_line(self.size + 1)) and
not reflowed_lines.line_empty() and
not self.is_colon and
not (prev_item and prev_item.is_name and
unicode(self) == '(')
):
# Start a new line if there is already something on the line and
# adding this atom would make it go over the max line length.
reflowed_lines.add_line_break(continued_indent)
else:
reflowed_lines.add_space_if_needed(unicode(self))
reflowed_lines.add(self, len(continued_indent),
break_after_open_bracket)
def emit(self):
return self.__repr__()
@property
def is_keyword(self):
return keyword.iskeyword(self._atom.token_string)
@property
def is_string(self):
return self._atom.token_type == tokenize.STRING
@property
def is_name(self):
return self._atom.token_type == tokenize.NAME
@property
def is_number(self):
return self._atom.token_type == tokenize.NUMBER
@property
def is_comma(self):
return self._atom.token_string == ','
@property
def is_colon(self):
return self._atom.token_string == ':'
@property
def size(self):
return len(self._atom.token_string)
class Container(object):
"""Base class for all container types."""
def __init__(self, items):
self._items = items
def __repr__(self):
string = ''
last_was_keyword = False
for item in self._items:
if item.is_comma:
string += ', '
elif item.is_colon:
string += ': '
else:
item_string = unicode(item)
if (
string and
(last_was_keyword or
(not string.endswith(tuple('([{,.:}]) ')) and
not item_string.startswith(tuple('([{,.:}])'))))
):
string += ' '
string += item_string
last_was_keyword = item.is_keyword
return string
def __iter__(self):
for element in self._items:
yield element
def __getitem__(self, idx):
return self._items[idx]
def reflow(self, reflowed_lines, continued_indent,
break_after_open_bracket=False):
last_was_container = False
for (index, item) in enumerate(self._items):
next_item = get_item(self._items, index + 1)
if isinstance(item, Atom):
is_list_comp_or_if_expr = (
isinstance(self, (ListComprehension, IfExpression)))
item.reflow(reflowed_lines, continued_indent,
self._get_extent(index),
is_list_comp_or_if_expr=is_list_comp_or_if_expr,
next_is_dot=(next_item and
unicode(next_item) == '.'))
if last_was_container and item.is_comma:
reflowed_lines.add_line_break(continued_indent)
last_was_container = False
else: # isinstance(item, Container)
reflowed_lines.add(item, len(continued_indent),
break_after_open_bracket)
last_was_container = not isinstance(item, (ListComprehension,
IfExpression))
if (
break_after_open_bracket and index == 0 and
# Prefer to keep empty containers together instead of
# separating them.
unicode(item) == self.open_bracket and
(not next_item or unicode(next_item) != self.close_bracket) and
(len(self._items) != 3 or not isinstance(next_item, Atom))
):
reflowed_lines.add_line_break(continued_indent)
break_after_open_bracket = False
else:
next_next_item = get_item(self._items, index + 2)
if (
unicode(item) not in ['.', '%', 'in'] and
next_item and not isinstance(next_item, Container) and
unicode(next_item) != ':' and
next_next_item and (not isinstance(next_next_item, Atom) or
unicode(next_item) == 'not') and
not reflowed_lines.line_empty() and
not reflowed_lines.fits_on_current_line(
self._get_extent(index + 1) + 2)
):
reflowed_lines.add_line_break(continued_indent)
def _get_extent(self, index):
"""The extent of the full element.
E.g., the length of a function call or keyword.
"""
extent = 0
prev_item = get_item(self._items, index - 1)
seen_dot = prev_item and unicode(prev_item) == '.'
while index < len(self._items):
item = get_item(self._items, index)
index += 1
if isinstance(item, (ListComprehension, IfExpression)):
break
if isinstance(item, Container):
if prev_item and prev_item.is_name:
if seen_dot:
extent += 1
else:
extent += item.size
prev_item = item
continue
elif (unicode(item) not in ['.', '=', ':', 'not'] and
not item.is_name and not item.is_string):
break
if unicode(item) == '.':
seen_dot = True
extent += item.size
prev_item = item
return extent
@property
def is_string(self):
return False
@property
def size(self):
return len(self.__repr__())
@property
def is_keyword(self):
return False
@property
def is_name(self):
return False
@property
def is_comma(self):
return False
@property
def is_colon(self):
return False
@property
def open_bracket(self):
return None
@property
def close_bracket(self):
return None
class Tuple(Container):
"""A high-level representation of a tuple."""
@property
def open_bracket(self):
return '('
@property
def close_bracket(self):
return ')'
class List(Container):
"""A high-level representation of a list."""
@property
def open_bracket(self):
return '['
@property
def close_bracket(self):
return ']'
class DictOrSet(Container):
"""A high-level representation of a dictionary or set."""
@property
def open_bracket(self):
return '{'
@property
def close_bracket(self):
return '}'
class ListComprehension(Container):
"""A high-level representation of a list comprehension."""
@property
def size(self):
length = 0
for item in self._items:
if isinstance(item, IfExpression):
break
length += item.size
return length
class IfExpression(Container):
"""A high-level representation of an if-expression."""
def _parse_container(tokens, index, for_or_if=None):
"""Parse a high-level container, such as a list, tuple, etc."""
# Store the opening bracket.
items = [Atom(Token(*tokens[index]))]
index += 1
num_tokens = len(tokens)
while index < num_tokens:
tok = Token(*tokens[index])
if tok.token_string in ',)]}':
# First check if we're at the end of a list comprehension or
# if-expression. Don't add the ending token as part of the list
# comprehension or if-expression, because they aren't part of those
# constructs.
if for_or_if == 'for':
return (ListComprehension(items), index - 1)
elif for_or_if == 'if':
return (IfExpression(items), index - 1)
# We've reached the end of a container.
items.append(Atom(tok))
            # Dispatch on the closing bracket type, if this token closes one.
if tok.token_string == ')':
# The end of a tuple.
return (Tuple(items), index)
elif tok.token_string == ']':
# The end of a list.
return (List(items), index)
elif tok.token_string == '}':
# The end of a dictionary or set.
return (DictOrSet(items), index)
elif tok.token_string in '([{':
# A sub-container is being defined.
(container, index) = _parse_container(tokens, index)
items.append(container)
elif tok.token_string == 'for':
(container, index) = _parse_container(tokens, index, 'for')
items.append(container)
elif tok.token_string == 'if':
(container, index) = _parse_container(tokens, index, 'if')
items.append(container)
else:
items.append(Atom(tok))
index += 1
return (None, None)
def _parse_tokens(tokens):
"""Parse the tokens.
This converts the tokens into a form where we can manipulate them
more easily.
"""
index = 0
parsed_tokens = []
num_tokens = len(tokens)
while index < num_tokens:
tok = Token(*tokens[index])
assert tok.token_type != token.INDENT
if tok.token_type == tokenize.NEWLINE:
# There's only one newline and it's at the end.
break
if tok.token_string in '([{':
(container, index) = _parse_container(tokens, index)
if not container:
return None
parsed_tokens.append(container)
else:
parsed_tokens.append(Atom(tok))
index += 1
return parsed_tokens
def _reflow_lines(parsed_tokens, indentation, max_line_length,
start_on_prefix_line):
"""Reflow the lines so that it looks nice."""
if unicode(parsed_tokens[0]) == 'def':
# A function definition gets indented a bit more.
continued_indent = indentation + ' ' * 2 * DEFAULT_INDENT_SIZE
else:
continued_indent = indentation + ' ' * DEFAULT_INDENT_SIZE
break_after_open_bracket = not start_on_prefix_line
lines = ReformattedLines(max_line_length)
lines.add_indent(len(indentation.lstrip('\r\n')))
if not start_on_prefix_line:
# If splitting after the opening bracket will cause the first element
# to be aligned weirdly, don't try it.
first_token = get_item(parsed_tokens, 0)
second_token = get_item(parsed_tokens, 1)
if (
first_token and second_token and
unicode(second_token)[0] == '(' and
len(indentation) + len(first_token) + 1 == len(continued_indent)
):
return None
for item in parsed_tokens:
lines.add_space_if_needed(unicode(item), equal=True)
save_continued_indent = continued_indent
if start_on_prefix_line and isinstance(item, Container):
start_on_prefix_line = False
continued_indent = ' ' * (lines.current_size() + 1)
item.reflow(lines, continued_indent, break_after_open_bracket)
continued_indent = save_continued_indent
return lines.emit()
def _shorten_line_at_tokens_new(tokens, source, indentation,
max_line_length):
"""Shorten the line taking its length into account.
The input is expected to be free of newlines except for inside
multiline strings and at the end.
"""
    # Yield the original source first so we can see if it's a better choice
    # than the shortened candidate lines we generate here.
yield indentation + source
parsed_tokens = _parse_tokens(tokens)
if parsed_tokens:
# Perform two reflows. The first one starts on the same line as the
# prefix. The second starts on the line after the prefix.
fixed = _reflow_lines(parsed_tokens, indentation, max_line_length,
start_on_prefix_line=True)
if fixed and check_syntax(normalize_multiline(fixed.lstrip())):
yield fixed
fixed = _reflow_lines(parsed_tokens, indentation, max_line_length,
start_on_prefix_line=False)
if fixed and check_syntax(normalize_multiline(fixed.lstrip())):
yield fixed
def _shorten_line_at_tokens(tokens, source, indentation, indent_word,
key_token_strings, aggressive):
"""Separate line by breaking at tokens in key_token_strings.
The input is expected to be free of newlines except for inside
multiline strings and at the end.
"""
offsets = []
for (index, _t) in enumerate(token_offsets(tokens)):
(token_type,
token_string,
start_offset,
end_offset) = _t
assert token_type != token.INDENT
if token_string in key_token_strings:
# Do not break in containers with zero or one items.
unwanted_next_token = {
'(': ')',
'[': ']',
'{': '}'}.get(token_string)
if unwanted_next_token:
if (
get_item(tokens,
index + 1,
default=[None, None])[1] == unwanted_next_token or
get_item(tokens,
index + 2,
default=[None, None])[1] == unwanted_next_token
):
continue
if (
index > 2 and token_string == '(' and
tokens[index - 1][1] in ',(%['
):
# Don't split after a tuple start, or before a tuple start if
# the tuple is in a list.
continue
if end_offset < len(source) - 1:
# Don't split right before newline.
offsets.append(end_offset)
else:
# Break at adjacent strings. These were probably meant to be on
# separate lines in the first place.
previous_token = get_item(tokens, index - 1)
if (
token_type == tokenize.STRING and
previous_token and previous_token[0] == tokenize.STRING
):
offsets.append(start_offset)
current_indent = None
fixed = None
for line in split_at_offsets(source, offsets):
if fixed:
fixed += '\n' + current_indent + line
for symbol in '([{':
if line.endswith(symbol):
current_indent += indent_word
else:
# First line.
fixed = line
assert not current_indent
current_indent = indent_word
assert fixed is not None
if check_syntax(normalize_multiline(fixed)
if aggressive > 1 else fixed):
return indentation + fixed
else:
return None
def token_offsets(tokens):
"""Yield tokens and offsets."""
end_offset = 0
previous_end_row = 0
previous_end_column = 0
for t in tokens:
token_type = t[0]
token_string = t[1]
(start_row, start_column) = t[2]
(end_row, end_column) = t[3]
# Account for the whitespace between tokens.
end_offset += start_column
if previous_end_row == start_row:
end_offset -= previous_end_column
# Record the start offset of the token.
start_offset = end_offset
# Account for the length of the token itself.
end_offset += len(token_string)
yield (token_type,
token_string,
start_offset,
end_offset)
previous_end_row = end_row
previous_end_column = end_column
def normalize_multiline(line):
"""Normalize multiline-related code that will cause syntax error.
This is for purposes of checking syntax.
"""
if line.startswith('def ') and line.rstrip().endswith(':'):
return line + ' pass'
elif line.startswith('return '):
return 'def _(): ' + line
elif line.startswith('@'):
return line + 'def _(): pass'
elif line.startswith('class '):
return line + ' pass'
elif line.startswith(('if ', 'elif ', 'for ', 'while ')):
return line + ' pass'
else:
return line
def fix_whitespace(line, offset, replacement):
"""Replace whitespace at offset and return fixed line."""
# Replace escaped newlines too
left = line[:offset].rstrip('\n\r \t\\')
right = line[offset:].lstrip('\n\r \t\\')
if right.startswith('#'):
return line
else:
return left + replacement + right
def _execute_pep8(pep8_options, source):
"""Execute pep8 via python method calls."""
class QuietReport(pep8.BaseReport):
"""Version of checker that does not print."""
def __init__(self, options):
super(QuietReport, self).__init__(options)
self.__full_error_results = []
def error(self, line_number, offset, text, check):
"""Collect errors."""
code = super(QuietReport, self).error(line_number,
offset,
text,
check)
if code:
self.__full_error_results.append(
{'id': code,
'line': line_number,
'column': offset + 1,
'info': text})
def full_error_results(self):
"""Return error results in detail.
Results are in the form of a list of dictionaries. Each
dictionary contains 'id', 'line', 'column', and 'info'.
"""
return self.__full_error_results
checker = pep8.Checker('', lines=source,
reporter=QuietReport, **pep8_options)
checker.check_all()
return checker.report.full_error_results()
def _remove_leading_and_normalize(line):
return line.lstrip().rstrip(CR + LF) + '\n'
class Reindenter(object):
"""Reindents badly-indented code to uniformly use four-space indentation.
Released to the public domain, by Tim Peters, 03 October 2000.
"""
def __init__(self, input_text):
sio = io.StringIO(input_text)
source_lines = sio.readlines()
self.string_content_line_numbers = multiline_string_lines(input_text)
# File lines, rstripped & tab-expanded. Dummy at start is so
# that we can use tokenize's 1-based line numbering easily.
# Note that a line is all-blank iff it is a newline.
self.lines = []
for line_number, line in enumerate(source_lines, start=1):
# Do not modify if inside a multiline string.
if line_number in self.string_content_line_numbers:
self.lines.append(line)
else:
# Only expand leading tabs.
self.lines.append(_get_indentation(line).expandtabs() +
_remove_leading_and_normalize(line))
self.lines.insert(0, None)
self.index = 1 # index into self.lines of next line
self.input_text = input_text
def run(self, indent_size=DEFAULT_INDENT_SIZE):
"""Fix indentation and return modified line numbers.
Line numbers are indexed at 1.
"""
if indent_size < 1:
return self.input_text
try:
stats = _reindent_stats(tokenize.generate_tokens(self.getline))
except (SyntaxError, tokenize.TokenError):
return self.input_text
# Remove trailing empty lines.
lines = self.lines
# Sentinel.
stats.append((len(lines), 0))
# Map count of leading spaces to # we want.
have2want = {}
# Program after transformation.
after = []
# Copy over initial empty lines -- there's nothing to do until
# we see a line with *something* on it.
i = stats[0][0]
after.extend(lines[1:i])
for i in range(len(stats) - 1):
thisstmt, thislevel = stats[i]
nextstmt = stats[i + 1][0]
have = _leading_space_count(lines[thisstmt])
want = thislevel * indent_size
if want < 0:
# A comment line.
if have:
# An indented comment line. If we saw the same
# indentation before, reuse what it most recently
# mapped to.
want = have2want.get(have, -1)
if want < 0:
# Then it probably belongs to the next real stmt.
for j in range(i + 1, len(stats) - 1):
jline, jlevel = stats[j]
if jlevel >= 0:
if have == _leading_space_count(lines[jline]):
want = jlevel * indent_size
break
if want < 0: # Maybe it's a hanging
# comment like this one,
# in which case we should shift it like its base
# line got shifted.
for j in range(i - 1, -1, -1):
jline, jlevel = stats[j]
if jlevel >= 0:
want = (have + _leading_space_count(
after[jline - 1]) -
_leading_space_count(lines[jline]))
break
if want < 0:
# Still no luck -- leave it alone.
want = have
else:
want = 0
assert want >= 0
have2want[have] = want
diff = want - have
if diff == 0 or have == 0:
after.extend(lines[thisstmt:nextstmt])
else:
for line_number, line in enumerate(lines[thisstmt:nextstmt],
start=thisstmt):
if line_number in self.string_content_line_numbers:
after.append(line)
elif diff > 0:
if line == '\n':
after.append(line)
else:
after.append(' ' * diff + line)
else:
remove = min(_leading_space_count(line), -diff)
after.append(line[remove:])
return ''.join(after)
def getline(self):
"""Line-getter for tokenize."""
if self.index >= len(self.lines):
line = ''
else:
line = self.lines[self.index]
self.index += 1
return line
def _reindent_stats(tokens):
"""Return list of (lineno, indentlevel) pairs.
One for each stmt and comment line. indentlevel is -1 for comment lines, as
a signal that tokenize doesn't know what to do about them; indeed, they're
our headache!
"""
find_stmt = 1 # Next token begins a fresh stmt?
level = 0 # Current indent level.
stats = []
for t in tokens:
token_type = t[0]
sline = t[2][0]
line = t[4]
if token_type == tokenize.NEWLINE:
# A program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
find_stmt = 1
elif token_type == tokenize.INDENT:
find_stmt = 1
level += 1
elif token_type == tokenize.DEDENT:
find_stmt = 1
level -= 1
elif token_type == tokenize.COMMENT:
if find_stmt:
stats.append((sline, -1))
# But we're still looking for a new stmt, so leave
# find_stmt alone.
elif token_type == tokenize.NL:
pass
elif find_stmt:
# This is the first "real token" following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER.
find_stmt = 0
if line: # Not endmarker.
stats.append((sline, level))
return stats
def _leading_space_count(line):
"""Return number of leading spaces in line."""
i = 0
while i < len(line) and line[i] == ' ':
i += 1
return i
def refactor_with_2to3(source_text, fixer_names, filename=''):
"""Use lib2to3 to refactor the source.
Return the refactored source code.
"""
from lib2to3.refactor import RefactoringTool
fixers = ['lib2to3.fixes.fix_' + name for name in fixer_names]
tool = RefactoringTool(fixer_names=fixers, explicit=fixers)
from lib2to3.pgen2 import tokenize as lib2to3_tokenize
try:
# The name parameter is necessary particularly for the "import" fixer.
return unicode(tool.refactor_string(source_text, name=filename))
except lib2to3_tokenize.TokenError:
return source_text
def check_syntax(code):
"""Return True if syntax is okay."""
try:
return compile(code, '<string>', 'exec')
except (SyntaxError, TypeError, UnicodeDecodeError):
return False
def filter_results(source, results, aggressive):
"""Filter out spurious reports from pep8.
If aggressive is True, we allow possibly unsafe fixes (E711, E712).
"""
non_docstring_string_line_numbers = multiline_string_lines(
source, include_docstrings=False)
all_string_line_numbers = multiline_string_lines(
source, include_docstrings=True)
commented_out_code_line_numbers = commented_out_code_lines(source)
has_e901 = any(result['id'].lower() == 'e901' for result in results)
for r in results:
issue_id = r['id'].lower()
if r['line'] in non_docstring_string_line_numbers:
if issue_id.startswith(('e1', 'e501', 'w191')):
continue
if r['line'] in all_string_line_numbers:
if issue_id in ['e501']:
continue
# We must offset by 1 for lines that contain the trailing contents of
# multiline strings.
if not aggressive and (r['line'] + 1) in all_string_line_numbers:
            # Do not modify multiline strings in non-aggressive mode. Removing
            # trailing whitespace could break doctests.
if issue_id.startswith(('w29', 'w39')):
continue
if aggressive <= 0:
if issue_id.startswith(('e711', 'w6')):
continue
if aggressive <= 1:
if issue_id.startswith(('e712', 'e713')):
continue
if r['line'] in commented_out_code_line_numbers:
if issue_id.startswith(('e26', 'e501')):
continue
# Do not touch indentation if there is a token error caused by
# incomplete multi-line statement. Otherwise, we risk screwing up the
# indentation.
if has_e901:
if issue_id.startswith(('e1', 'e7')):
continue
yield r
def multiline_string_lines(source, include_docstrings=False):
"""Return line numbers that are within multiline strings.
The line numbers are indexed at 1.
    Docstrings are ignored unless include_docstrings is True.
"""
line_numbers = set()
previous_token_type = ''
try:
for t in generate_tokens(source):
token_type = t[0]
start_row = t[2][0]
end_row = t[3][0]
if token_type == tokenize.STRING and start_row != end_row:
if (
include_docstrings or
previous_token_type != tokenize.INDENT
):
# We increment by one since we want the contents of the
# string.
line_numbers |= set(range(1 + start_row, 1 + end_row))
previous_token_type = token_type
except (SyntaxError, tokenize.TokenError):
pass
return line_numbers
def commented_out_code_lines(source):
"""Return line numbers of comments that are likely code.
Commented-out code is bad practice, but modifying it just adds even more
clutter.
"""
line_numbers = []
try:
for t in generate_tokens(source):
token_type = t[0]
token_string = t[1]
start_row = t[2][0]
line = t[4]
# Ignore inline comments.
if not line.lstrip().startswith('#'):
continue
if token_type == tokenize.COMMENT:
stripped_line = token_string.lstrip('#').strip()
if (
' ' in stripped_line and
'#' not in stripped_line and
check_syntax(stripped_line)
):
line_numbers.append(start_row)
except (SyntaxError, tokenize.TokenError):
pass
return line_numbers
def shorten_comment(line, max_line_length, last_comment=False):
"""Return trimmed or split long comment line.
If there are no comments immediately following it, do a text wrap.
Doing this wrapping on all comments in general would lead to jagged
comment text.
"""
assert len(line) > max_line_length
line = line.rstrip()
# PEP 8 recommends 72 characters for comment text.
indentation = _get_indentation(line) + '# '
max_line_length = min(max_line_length,
len(indentation) + 72)
MIN_CHARACTER_REPEAT = 5
if (
len(line) - len(line.rstrip(line[-1])) >= MIN_CHARACTER_REPEAT and
not line[-1].isalnum()
):
# Trim comments that end with things like ---------
return line[:max_line_length] + '\n'
elif last_comment and re.match(r'\s*#+\s*\w+', line):
split_lines = textwrap.wrap(line.lstrip(' \t#'),
initial_indent=indentation,
subsequent_indent=indentation,
width=max_line_length,
break_long_words=False,
break_on_hyphens=False)
return '\n'.join(split_lines) + '\n'
else:
return line + '\n'
def normalize_line_endings(lines, newline):
"""Return fixed line endings.
All lines will be modified to use the most common line ending.
"""
return [line.rstrip('\n\r') + newline for line in lines]
def mutual_startswith(a, b):
return b.startswith(a) or a.startswith(b)
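# code_match() treats codes as mutual prefixes: selecting or ignoring 'E7'
# matches 'E711' and vice versa; the ignore list is consulted before select.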
def code_match(code, select, ignore):
if ignore:
assert not isinstance(ignore, unicode)
for ignored_code in [c.strip() for c in ignore]:
if mutual_startswith(code.lower(), ignored_code.lower()):
return False
if select:
assert not isinstance(select, unicode)
for selected_code in [c.strip() for c in select]:
if mutual_startswith(code.lower(), selected_code.lower()):
return True
return False
return True
def fix_code(source, options=None, encoding=None, apply_config=False):
"""Return fixed source code.
"encoding" will be used to decode "source" if it is a byte string.
"""
options = _get_options(options, apply_config)
if not isinstance(source, unicode):
source = source.decode(encoding or get_encoding())
sio = io.StringIO(source)
return fix_lines(sio.readlines(), options=options)
def _get_options(raw_options, apply_config):
"""Return parsed options."""
if not raw_options:
return parse_args([''], apply_config=apply_config)
if isinstance(raw_options, dict):
options = parse_args([''], apply_config=apply_config)
for name, value in raw_options.items():
if not hasattr(options, name):
raise ValueError("No such option '{}'".format(name))
# Check for very basic type errors.
expected_type = type(getattr(options, name))
if not isinstance(expected_type, (str, unicode)):
if isinstance(value, (str, unicode)):
raise ValueError(
"Option '{}' should not be a string".format(name))
setattr(options, name, value)
else:
options = raw_options
return options
def fix_lines(source_lines, options, filename=''):
"""Return fixed source code."""
    # Normalize every line ending to a line feed, then change them back to
    # the original ending before returning the fixed source code.
original_newline = find_newline(source_lines)
tmp_source = ''.join(normalize_line_endings(source_lines, '\n'))
# Keep a history to break out of cycles.
previous_hashes = set()
if options.line_range:
# Disable "apply_local_fixes()" for now due to issue #175.
fixed_source = tmp_source
else:
# Apply global fixes only once (for efficiency).
fixed_source = apply_global_fixes(tmp_source,
options,
filename=filename)
passes = 0
long_line_ignore_cache = set()
while hash(fixed_source) not in previous_hashes:
if options.pep8_passes >= 0 and passes > options.pep8_passes:
break
passes += 1
previous_hashes.add(hash(fixed_source))
tmp_source = copy.copy(fixed_source)
fix = FixPEP8(
filename,
options,
contents=tmp_source,
long_line_ignore_cache=long_line_ignore_cache)
fixed_source = fix.fix()
sio = io.StringIO(fixed_source)
return ''.join(normalize_line_endings(sio.readlines(), original_newline))
def fix_file(filename, options=None, output=None, apply_config=False):
if not options:
options = parse_args([filename], apply_config=apply_config)
original_source = readlines_from_file(filename)
fixed_source = original_source
if options.in_place or output:
encoding = detect_encoding(filename)
if output:
output = LineEndingWrapper(wrap_output(output, encoding=encoding))
fixed_source = fix_lines(fixed_source, options, filename=filename)
if options.diff:
new = io.StringIO(fixed_source)
new = new.readlines()
diff = get_diff_text(original_source, new, filename)
if output:
output.write(diff)
output.flush()
else:
return diff
elif options.in_place:
fp = open_with_encoding(filename, encoding=encoding,
mode='w')
fp.write(fixed_source)
fp.close()
else:
if output:
output.write(fixed_source)
output.flush()
else:
return fixed_source
def global_fixes():
"""Yield multiple (code, function) tuples."""
for function in list(globals().values()):
if inspect.isfunction(function):
arguments = inspect.getargspec(function)[0]
if arguments[:1] != ['source']:
continue
code = extract_code_from_function(function)
if code:
yield (code, function)
def apply_global_fixes(source, options, where='global', filename=''):
"""Run global fixes on source code.
These are fixes that only need be done once (unlike those in
FixPEP8, which are dependent on pep8).
"""
if any(code_match(code, select=options.select, ignore=options.ignore)
for code in ['E101', 'E111']):
source = reindent(source,
indent_size=options.indent_size)
for (code, function) in global_fixes():
if code_match(code, select=options.select, ignore=options.ignore):
if options.verbose:
print('---> Applying {0} fix for {1}'.format(where,
code.upper()),
file=sys.stderr)
source = function(source,
aggressive=options.aggressive)
source = fix_2to3(source,
aggressive=options.aggressive,
select=options.select,
ignore=options.ignore,
filename=filename)
return source
def extract_code_from_function(function):
"""Return code handled by function."""
if not function.__name__.startswith('fix_'):
return None
code = re.sub('^fix_', '', function.__name__)
if not code:
return None
try:
int(code[1:])
except ValueError:
return None
return code
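# For example, a fixer named "fix_e712" yields the code "e712", while names
# without a numeric suffix (such as "fix_2to3") return None and are skipped.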
def create_parser():
"""Return command-line parser."""
# Do import locally to be friendly to those who use autopep8 as a library
# and are supporting Python 2.6.
import argparse
parser = argparse.ArgumentParser(description=docstring_summary(__doc__),
prog='autopep8')
parser.add_argument('--version', action='version',
version='%(prog)s ' + __version__)
parser.add_argument('-v', '--verbose', action='count',
default=0,
help='print verbose messages; '
'multiple -v result in more verbose messages')
parser.add_argument('-d', '--diff', action='store_true',
help='print the diff for the fixed source')
parser.add_argument('-i', '--in-place', action='store_true',
help='make changes to files in place')
parser.add_argument('--global-config', metavar='filename',
default=DEFAULT_CONFIG,
help='path to a global pep8 config file; if this file '
'does not exist then this is ignored '
'(default: {0})'.format(DEFAULT_CONFIG))
parser.add_argument('--ignore-local-config', action='store_true',
help="don't look for and apply local config files; "
'if not passed, defaults are updated with any '
"config files in the project's root directory")
parser.add_argument('-r', '--recursive', action='store_true',
help='run recursively over directories; '
'must be used with --in-place or --diff')
parser.add_argument('-j', '--jobs', type=int, metavar='n', default=1,
help='number of parallel jobs; '
'match CPU count if value is less than 1')
parser.add_argument('-p', '--pep8-passes', metavar='n',
default=-1, type=int,
help='maximum number of additional pep8 passes '
'(default: infinite)')
parser.add_argument('-a', '--aggressive', action='count', default=0,
help='enable non-whitespace changes; '
'multiple -a result in more aggressive changes')
parser.add_argument('--experimental', action='store_true',
help='enable experimental fixes')
parser.add_argument('--exclude', metavar='globs',
help='exclude file/directory names that match these '
'comma-separated globs')
parser.add_argument('--list-fixes', action='store_true',
help='list codes for fixes; '
'used by --ignore and --select')
parser.add_argument('--ignore', metavar='errors', default='',
help='do not fix these errors/warnings '
'(default: {0})'.format(DEFAULT_IGNORE))
parser.add_argument('--select', metavar='errors', default='',
help='fix only these errors/warnings (e.g. E4,W)')
parser.add_argument('--max-line-length', metavar='n', default=79, type=int,
help='set maximum allowed line length '
'(default: %(default)s)')
parser.add_argument('--line-range', '--range', metavar='line',
default=None, type=int, nargs=2,
help='only fix errors found within this inclusive '
'range of line numbers (e.g. 1 99); '
'line numbers are indexed at 1')
parser.add_argument('--indent-size', default=DEFAULT_INDENT_SIZE,
type=int, metavar='n',
help='number of spaces per indent level '
'(default %(default)s)')
parser.add_argument('files', nargs='*',
help="files to format or '-' for standard in")
return parser
def parse_args(arguments, apply_config=False):
"""Parse command-line options."""
parser = create_parser()
args = parser.parse_args(arguments)
if not args.files and not args.list_fixes:
parser.error('incorrect number of arguments')
args.files = [decode_filename(name) for name in args.files]
if apply_config:
parser = read_config(args, parser)
args = parser.parse_args(arguments)
args.files = [decode_filename(name) for name in args.files]
if '-' in args.files:
if len(args.files) > 1:
parser.error('cannot mix stdin and regular files')
if args.diff:
parser.error('--diff cannot be used with standard input')
if args.in_place:
parser.error('--in-place cannot be used with standard input')
if args.recursive:
parser.error('--recursive cannot be used with standard input')
if len(args.files) > 1 and not (args.in_place or args.diff):
parser.error('autopep8 only takes one filename as argument '
'unless the "--in-place" or "--diff" args are '
'used')
if args.recursive and not (args.in_place or args.diff):
parser.error('--recursive must be used with --in-place or --diff')
if args.in_place and args.diff:
parser.error('--in-place and --diff are mutually exclusive')
if args.max_line_length <= 0:
parser.error('--max-line-length must be greater than 0')
if args.select:
args.select = _split_comma_separated(args.select)
if args.ignore:
args.ignore = _split_comma_separated(args.ignore)
elif not args.select:
if args.aggressive:
# Enable everything by default if aggressive.
args.select = ['E', 'W']
else:
args.ignore = _split_comma_separated(DEFAULT_IGNORE)
if args.exclude:
args.exclude = _split_comma_separated(args.exclude)
else:
args.exclude = []
if args.jobs < 1:
# Do not import multiprocessing globally in case it is not supported
# on the platform.
import multiprocessing
args.jobs = multiprocessing.cpu_count()
if args.jobs > 1 and not args.in_place:
parser.error('parallel jobs requires --in-place')
if args.line_range:
if args.line_range[0] <= 0:
parser.error('--range must be positive numbers')
if args.line_range[0] > args.line_range[1]:
parser.error('First value of --range should be less than or equal '
'to the second')
return args
def read_config(args, parser):
"""Read both user configuration and local configuration."""
try:
from configparser import ConfigParser as SafeConfigParser
from configparser import Error
except ImportError:
from ConfigParser import SafeConfigParser
from ConfigParser import Error
config = SafeConfigParser()
try:
config.read(args.global_config)
if not args.ignore_local_config:
parent = tail = args.files and os.path.abspath(
os.path.commonprefix(args.files))
while tail:
if config.read([os.path.join(parent, fn)
for fn in PROJECT_CONFIG]):
break
(parent, tail) = os.path.split(parent)
defaults = dict((k.lstrip('-').replace('-', '_'), v)
for k, v in config.items('pep8'))
parser.set_defaults(**defaults)
except Error:
# Ignore for now.
pass
return parser
def _split_comma_separated(string):
"""Return a set of strings."""
return set(text.strip() for text in string.split(',') if text.strip())
def decode_filename(filename):
"""Return Unicode filename."""
if isinstance(filename, unicode):
return filename
else:
return filename.decode(sys.getfilesystemencoding())
def supported_fixes():
"""Yield pep8 error codes that autopep8 fixes.
Each item we yield is a tuple of the code followed by its
description.
"""
yield ('E101', docstring_summary(reindent.__doc__))
instance = FixPEP8(filename=None, options=None, contents='')
for attribute in dir(instance):
code = re.match('fix_([ew][0-9][0-9][0-9])', attribute)
if code:
yield (
code.group(1).upper(),
re.sub(r'\s+', ' ',
docstring_summary(getattr(instance, attribute).__doc__))
)
for (code, function) in sorted(global_fixes()):
yield (code.upper() + (4 - len(code)) * ' ',
re.sub(r'\s+', ' ', docstring_summary(function.__doc__)))
for code in sorted(CODE_TO_2TO3):
yield (code.upper() + (4 - len(code)) * ' ',
re.sub(r'\s+', ' ', docstring_summary(fix_2to3.__doc__)))
def docstring_summary(docstring):
"""Return summary of docstring."""
return docstring.split('\n')[0]
def line_shortening_rank(candidate, indent_word, max_line_length,
experimental=False):
"""Return rank of candidate.
This is for sorting candidates.
"""
if not candidate.strip():
return 0
rank = 0
lines = candidate.rstrip().split('\n')
offset = 0
if (
not lines[0].lstrip().startswith('#') and
lines[0].rstrip()[-1] not in '([{'
):
for (opening, closing) in ('()', '[]', '{}'):
# Don't penalize empty containers that aren't split up. Things like
# this "foo(\n )" aren't particularly good.
opening_loc = lines[0].find(opening)
closing_loc = lines[0].find(closing)
if opening_loc >= 0:
if closing_loc < 0 or closing_loc != opening_loc + 1:
offset = max(offset, 1 + opening_loc)
current_longest = max(offset + len(x.strip()) for x in lines)
rank += 4 * max(0, current_longest - max_line_length)
rank += len(lines)
# Too much variation in line length is ugly.
rank += 2 * standard_deviation(len(line) for line in lines)
    bad_starting_symbol = {
        '(': ')',
        '[': ']',
        '{': '}'}.get(lines[0][-1])
    if len(lines) > 1:
        if (
            bad_starting_symbol and
            lines[1].lstrip().startswith(bad_starting_symbol)
        ):
rank += 20
for lineno, current_line in enumerate(lines):
current_line = current_line.strip()
if current_line.startswith('#'):
continue
for bad_start in ['.', '%', '+', '-', '/']:
if current_line.startswith(bad_start):
rank += 100
# Do not tolerate operators on their own line.
if current_line == bad_start:
rank += 1000
if (
current_line.endswith(('.', '%', '+', '-', '/')) and
"': " in current_line
):
rank += 1000
if current_line.endswith(('(', '[', '{', '.')):
# Avoid lonely opening. They result in longer lines.
if len(current_line) <= len(indent_word):
rank += 100
# Avoid the ugliness of ", (\n".
if (
current_line.endswith('(') and
current_line[:-1].rstrip().endswith(',')
):
rank += 100
# Also avoid the ugliness of "foo.\nbar"
if current_line.endswith('.'):
rank += 100
if has_arithmetic_operator(current_line):
rank += 100
# Avoid breaking at unary operators.
if re.match(r'.*[(\[{]\s*[\-\+~]$', current_line.rstrip('\\ ')):
rank += 1000
if re.match(r'.*lambda\s*\*$', current_line.rstrip('\\ ')):
rank += 1000
if current_line.endswith(('%', '(', '[', '{')):
rank -= 20
# Try to break list comprehensions at the "for".
if current_line.startswith('for '):
rank -= 50
if current_line.endswith('\\'):
# If a line ends in \-newline, it may be part of a
# multiline string. In that case, we would like to know
# how long that line is without the \-newline. If it's
# longer than the maximum, or has comments, then we assume
# that the \-newline is an okay candidate and only
# penalize it a bit.
total_len = len(current_line)
lineno += 1
while lineno < len(lines):
total_len += len(lines[lineno])
if lines[lineno].lstrip().startswith('#'):
total_len = max_line_length
break
if not lines[lineno].endswith('\\'):
break
lineno += 1
if total_len < max_line_length:
rank += 10
else:
rank += 100 if experimental else 1
# Prefer breaking at commas rather than colon.
if ',' in current_line and current_line.endswith(':'):
rank += 10
# Avoid splitting dictionaries between key and value.
if current_line.endswith(':'):
rank += 100
rank += 10 * count_unbalanced_brackets(current_line)
return max(0, rank)
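# Lower rank indicates a more desirable reflow candidate; the penalties above
# push awkward line breaks toward the end of the sort.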
def standard_deviation(numbers):
"""Return standard devation."""
numbers = list(numbers)
if not numbers:
return 0
mean = sum(numbers) / len(numbers)
return (sum((n - mean) ** 2 for n in numbers) /
len(numbers)) ** .5
def has_arithmetic_operator(line):
"""Return True if line contains any arithmetic operators."""
for operator in pep8.ARITHMETIC_OP:
if operator in line:
return True
return False
def count_unbalanced_brackets(line):
"""Return number of unmatched open/close brackets."""
count = 0
for opening, closing in ['()', '[]', '{}']:
count += abs(line.count(opening) - line.count(closing))
return count
def split_at_offsets(line, offsets):
"""Split line at offsets.
Return list of strings.
"""
result = []
previous_offset = 0
current_offset = 0
for current_offset in sorted(offsets):
if current_offset < len(line) and previous_offset != current_offset:
result.append(line[previous_offset:current_offset].strip())
previous_offset = current_offset
result.append(line[current_offset:])
return result
class LineEndingWrapper(object):
r"""Replace line endings to work with sys.stdout.
It seems that sys.stdout expects only '\n' as the line ending, no matter
the platform. Otherwise, we get repeated line endings.
"""
def __init__(self, output):
self.__output = output
def write(self, s):
self.__output.write(s.replace('\r\n', '\n').replace('\r', '\n'))
def flush(self):
self.__output.flush()
def match_file(filename, exclude):
"""Return True if file is okay for modifying/recursing."""
base_name = os.path.basename(filename)
if base_name.startswith('.'):
return False
for pattern in exclude:
if fnmatch.fnmatch(base_name, pattern):
return False
if fnmatch.fnmatch(filename, pattern):
return False
if not os.path.isdir(filename) and not is_python_file(filename):
return False
return True
def find_files(filenames, recursive, exclude):
"""Yield filenames."""
while filenames:
name = filenames.pop(0)
if recursive and os.path.isdir(name):
for root, directories, children in os.walk(name):
filenames += [os.path.join(root, f) for f in children
if match_file(os.path.join(root, f),
exclude)]
directories[:] = [d for d in directories
if match_file(os.path.join(root, d),
exclude)]
else:
yield name
def _fix_file(parameters):
"""Helper function for optionally running fix_file() in parallel."""
if parameters[1].verbose:
print('[file:{0}]'.format(parameters[0]), file=sys.stderr)
try:
fix_file(*parameters)
except IOError as error:
print(unicode(error), file=sys.stderr)
def fix_multiple_files(filenames, options, output=None):
"""Fix list of files.
Optionally fix files recursively.
"""
filenames = find_files(filenames, options.recursive, options.exclude)
if options.jobs > 1:
import multiprocessing
pool = multiprocessing.Pool(options.jobs)
pool.map(_fix_file,
[(name, options) for name in filenames])
else:
for name in filenames:
_fix_file((name, options, output))
def is_python_file(filename):
"""Return True if filename is Python file."""
if filename.endswith('.py'):
return True
try:
with open_with_encoding(filename) as f:
first_line = f.readlines(1)[0]
except (IOError, IndexError):
return False
if not PYTHON_SHEBANG_REGEX.match(first_line):
return False
return True
def is_probably_part_of_multiline(line):
"""Return True if line is likely part of a multiline string.
When multiline strings are involved, pep8 reports the error as being
at the start of the multiline string, which doesn't work for us.
"""
return (
'"""' in line or
"'''" in line or
line.rstrip().endswith('\\')
)
def wrap_output(output, encoding):
"""Return output with specified encoding."""
return codecs.getwriter(encoding)(output.buffer
if hasattr(output, 'buffer')
else output)
def get_encoding():
"""Return preferred encoding."""
return locale.getpreferredencoding() or sys.getdefaultencoding()
def main(argv=None, apply_config=True):
"""Command-line entry."""
if argv is None:
argv = sys.argv
try:
# Exit on broken pipe.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError: # pragma: no cover
# SIGPIPE is not available on Windows.
pass
try:
args = parse_args(argv[1:], apply_config=apply_config)
if args.list_fixes:
for code, description in sorted(supported_fixes()):
print('{code} - {description}'.format(
code=code, description=description))
return 0
if args.files == ['-']:
assert not args.in_place
encoding = sys.stdin.encoding or get_encoding()
# LineEndingWrapper is unnecessary here due to the symmetry between
# standard in and standard out.
wrap_output(sys.stdout, encoding=encoding).write(
fix_code(sys.stdin.read(), args, encoding=encoding))
else:
if args.in_place or args.diff:
args.files = list(set(args.files))
else:
assert len(args.files) == 1
assert not args.recursive
fix_multiple_files(args.files, args, sys.stdout)
except KeyboardInterrupt:
return 1 # pragma: no cover
class CachedTokenizer(object):
"""A one-element cache around tokenize.generate_tokens().
Original code written by Ned Batchelder, in coverage.py.
"""
def __init__(self):
self.last_text = None
self.last_tokens = None
def generate_tokens(self, text):
"""A stand-in for tokenize.generate_tokens()."""
if text != self.last_text:
string_io = io.StringIO(text)
self.last_tokens = list(
tokenize.generate_tokens(string_io.readline)
)
self.last_text = text
return self.last_tokens
_cached_tokenizer = CachedTokenizer()
generate_tokens = _cached_tokenizer.generate_tokens
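# Module-level alias: repeated tokenization of the same source text reuses the
# cached token list instead of calling tokenize.generate_tokens() again.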
if __name__ == '__main__':
sys.exit(main())
|
JetChars/vim
|
vim/bundle/python-mode/pymode/autopep8.py
|
Python
|
apache-2.0
| 120,700 | 0.000033 |
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rds
version_added: "1.3"
short_description: create, delete, or modify an Amazon rds instance
description:
- Creates, deletes, or modifies rds instances. When creating an instance it can be either a new instance or a read-only replica of an existing
instance. This module has a dependency on python-boto >= 2.5. The 'promote' command requires boto >= 2.18.0. Certain features such as tags rely
on boto.rds2 (boto >= 2.26.0)
options:
command:
description:
- Specifies the action to take. The 'reboot' option is available starting at version 2.0
required: true
choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'reboot', 'restore' ]
instance_name:
description:
- Database instance identifier. Required except when using command=facts or command=delete on just a snapshot
required: false
default: null
source_instance:
description:
- Name of the database to replicate. Used only when command=replicate.
required: false
default: null
db_engine:
description:
- The type of database. Used only when command=create.
- mariadb was added in version 2.2
required: false
default: null
choices: ['mariadb', 'MySQL', 'oracle-se1', 'oracle-se2', 'oracle-se', 'oracle-ee',
'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres', 'aurora']
size:
description:
- Size in gigabytes of the initial storage for the DB instance. Used only when command=create or command=modify.
required: false
default: null
instance_type:
description:
- The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore.
If not specified then the replica inherits the same instance type as the source instance.
required: false
default: null
username:
description:
- Master database username. Used only when command=create.
required: false
default: null
password:
description:
- Password for the master database username. Used only when command=create or command=modify.
required: false
default: null
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: true
aliases: [ 'aws_region', 'ec2_region' ]
db_name:
description:
- Name of a database to create within the instance. If not specified then no database is created. Used only when command=create.
required: false
default: null
engine_version:
description:
- Version number of the database engine to use. Used only when command=create. If not specified then the current Amazon RDS default engine version is used
required: false
default: null
parameter_group:
description:
- Name of the DB parameter group to associate with this instance. If omitted then the RDS default DBParameterGroup will be used. Used only
when command=create or command=modify.
required: false
default: null
license_model:
description:
- The license model for this DB instance. Used only when command=create or command=restore.
required: false
default: null
choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ]
multi_zone:
description:
- Specifies if this is a Multi-availability-zone deployment. Can not be used in conjunction with zone parameter. Used only when command=create or
command=modify.
choices: [ "yes", "no" ]
required: false
default: null
iops:
description:
- Specifies the number of IOPS for the instance. Used only when command=create or command=modify. Must be an integer greater than 1000.
required: false
default: null
security_groups:
description:
- Comma separated list of one or more security groups. Used only when command=create or command=modify.
required: false
default: null
vpc_security_groups:
description:
- Comma separated list of one or more vpc security group ids. Also requires `subnet` to be specified. Used only when command=create or command=modify.
required: false
default: null
port:
description:
- Port number that the DB instance uses for connections. Used only when command=create or command=replicate.
      - Prior to 2.0 it always defaulted to null and the API would use 3306, so it had to be set to the other DB engines' default values when not using MySQL.
Starting at 2.0 it automatically defaults to what is expected for each C(db_engine).
required: false
default: 3306 for mysql, 1521 for Oracle, 1433 for SQL Server, 5432 for PostgreSQL.
upgrade:
description:
- Indicates that minor version upgrades should be applied automatically. Used only when command=create or command=replicate.
required: false
default: no
choices: [ "yes", "no" ]
option_group:
description:
- The name of the option group to use. If not specified then the default option group is used. Used only when command=create.
required: false
default: null
maint_window:
description:
- >
Maintenance window in format of ddd:hh24:mi-ddd:hh24:mi. (Example: Mon:22:00-Mon:23:15) If not specified then a random maintenance window is
assigned. Used only when command=create or command=modify.
required: false
default: null
backup_window:
description:
- Backup window in format of hh24:mi-hh24:mi. If not specified then a random backup window is assigned. Used only when command=create or command=modify.
required: false
default: null
backup_retention:
description:
- >
Number of days backups are retained. Set to 0 to disable backups. Default is 1 day. Valid range: 0-35. Used only when command=create or
command=modify.
required: false
default: null
zone:
description:
- availability zone in which to launch the instance. Used only when command=create, command=replicate or command=restore.
required: false
default: null
aliases: ['aws_zone', 'ec2_zone']
subnet:
description:
- VPC subnet group. If specified then a VPC instance is created. Used only when command=create.
required: false
default: null
snapshot:
description:
- Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. If used with command=delete with
no instance_name, the snapshot is deleted. Used with command=facts, command=delete or command=snapshot.
required: false
default: null
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
wait:
description:
- When command=create, replicate, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for
the database to be terminated.
required: false
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
apply_immediately:
description:
- Used only when command=modify. If enabled, the modifications will be applied as soon as possible rather than waiting for the next
preferred maintenance window.
default: no
choices: [ "yes", "no" ]
force_failover:
description:
- Used only when command=reboot. If enabled, the reboot is done using a MultiAZ failover.
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "2.0"
new_instance_name:
description:
- Name to rename an instance to. Used only when command=modify.
required: false
default: null
version_added: "1.5"
character_set_name:
description:
- Associate the DB instance with a specified character set. Used with command=create.
required: false
default: null
version_added: "1.9"
publicly_accessible:
description:
- explicitly set whether the resource should be publicly accessible or not. Used with command=create, command=replicate. Requires boto >= 2.26.0
required: false
default: null
version_added: "1.9"
tags:
description:
- tags dict to apply to a resource. Used with command=create, command=replicate, command=restore. Requires boto >= 2.26.0
required: false
default: null
version_added: "1.9"
requirements:
- "python >= 2.6"
- "boto"
author:
- "Bruce Pennypacker (@bpennypacker)"
- "Will Thames (@willthames)"
extends_documentation_fragment:
- aws
- ec2
'''
# FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD
EXAMPLES = '''
# Basic mysql provisioning example
- rds:
command: create
instance_name: new-database
db_engine: MySQL
size: 10
instance_type: db.m1.small
username: mysql_admin
password: 1nsecure
tags:
Environment: testing
Application: cms
# Create a read-only replica and wait for it to become available
- rds:
command: replicate
instance_name: new-database-replica
source_instance: new_database
wait: yes
wait_timeout: 600
# Delete an instance, but create a snapshot before doing so
- rds:
command: delete
instance_name: new-database
snapshot: new_database_snapshot
# Get facts about an instance
- rds:
command: facts
instance_name: new-database
register: new_database_facts
# Rename an instance and wait for the change to take effect
- rds:
command: modify
instance_name: new-database
new_instance_name: renamed-database
wait: yes
# Reboot an instance and wait for it to become available again
- rds:
command: reboot
instance_name: database
wait: yes
# Restore a Postgres db instance from a snapshot, wait for it to become available again, and
# then modify it to add your security group. Also, display the new endpoint.
# Note that the "publicly_accessible" option is allowed here just as it is in the AWS CLI
- local_action:
module: rds
command: restore
snapshot: mypostgres-snapshot
instance_name: MyNewInstanceName
region: us-west-2
zone: us-west-2b
subnet: default-vpc-xx441xxx
publicly_accessible: yes
wait: yes
wait_timeout: 600
tags:
Name: pg1_test_name_tag
register: rds
- local_action:
module: rds
command: modify
instance_name: MyNewInstanceName
region: us-west-2
vpc_security_groups: sg-xxx945xx
- debug:
msg: "The new db endpoint is {{ rds.instance.endpoint }}"
'''
RETURN = '''
engine:
description: the name of the database engine
returned: when RDS instance exists
type: string
sample: "oracle-se"
engine_version:
description: the version of the database engine
returned: when RDS instance exists
type: string
sample: "11.2.0.4.v6"
license_model:
description: the license model information
returned: when RDS instance exists
type: string
sample: "bring-your-own-license"
character_set_name:
description: the name of the character set that this instance is associated with
returned: when RDS instance exists
type: string
sample: "AL32UTF8"
allocated_storage:
description: the allocated storage size in gigabytes (GB)
returned: when RDS instance exists
type: string
sample: "100"
publicly_accessible:
description: the accessibility options for the DB instance
returned: when RDS instance exists
type: boolean
sample: "true"
latest_restorable_time:
description: the latest time to which a database can be restored with point-in-time restore
returned: when RDS instance exists
type: string
sample: "1489707802.0"
secondary_avaialbility_zone:
description: the name of the secondary AZ for a DB instance with multi-AZ support
  returned: when RDS instance exists and is multi-AZ
type: string
sample: "eu-west-1b"
backup_window:
description: the daily time range during which automated backups are created if automated backups are enabled
returned: when RDS instance exists and automated backups are enabled
type: string
sample: "03:00-03:30"
auto_minor_version_upgrade:
description: indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window
returned: when RDS instance exists
type: boolean
sample: "true"
read_replica_source_dbinstance_identifier:
description: the identifier of the source DB instance if this RDS instance is a read replica
returned: when read replica RDS instance exists
type: string
sample: "null"
db_name:
description: the name of the database to create when the DB instance is created
returned: when RDS instance exists
type: string
sample: "ASERTG"
parameter_groups:
description: the list of DB parameter groups applied to this RDS instance
returned: when RDS instance exists and parameter groups are defined
type: complex
contains:
parameter_apply_status:
description: the status of parameter updates
returned: when RDS instance exists
type: string
sample: "in-sync"
parameter_group_name:
description: the name of the DP parameter group
returned: when RDS instance exists
type: string
sample: "testawsrpprodb01spfile-1ujg7nrs7sgyz"
option_groups:
description: the list of option group memberships for this RDS instance
returned: when RDS instance exists
type: complex
contains:
option_group_name:
description: the option group name for this RDS instance
returned: when RDS instance exists
type: string
sample: "default:oracle-se-11-2"
status:
description: the status of the RDS instance's option group membership
returned: when RDS instance exists
type: string
sample: "in-sync"
pending_modified_values:
description: a dictionary of changes to the RDS instance that are pending
returned: when RDS instance exists
type: complex
contains:
db_instance_class:
description: the new DB instance class for this RDS instance that will be applied or is in progress
returned: when RDS instance exists
type: string
sample: "null"
db_instance_identifier:
description: the new DB instance identifier this RDS instance that will be applied or is in progress
returned: when RDS instance exists
type: string
sample: "null"
allocated_storage:
description: the new allocated storage size for this RDS instance that will be applied or is in progress
returned: when RDS instance exists
type: string
sample: "null"
backup_retention_period:
description: the pending number of days for which automated backups are retained
returned: when RDS instance exists
type: string
sample: "null"
engine_version:
description: indicates the database engine version
returned: when RDS instance exists
type: string
sample: "null"
iops:
description: the new provisioned IOPS value for this RDS instance that will be applied or is being applied
returned: when RDS instance exists
type: string
sample: "null"
master_user_password:
description: the pending or in-progress change of the master credentials for this RDS instance
returned: when RDS instance exists
type: string
sample: "null"
multi_az:
description: indicates that the single-AZ RDS instance is to change to a multi-AZ deployment
returned: when RDS instance exists
type: string
sample: "null"
port:
description: specifies the pending port for this RDS instance
returned: when RDS instance exists
type: string
sample: "null"
db_subnet_groups:
description: information on the subnet group associated with this RDS instance
returned: when RDS instance exists
type: complex
contains:
description:
description: the subnet group associated with the DB instance
returned: when RDS instance exists
type: string
sample: "Subnets for the UAT RDS SQL DB Instance"
name:
description: the name of the DB subnet group
returned: when RDS instance exists
type: string
sample: "samplesubnetgrouprds-j6paiqkxqp4z"
status:
description: the status of the DB subnet group
returned: when RDS instance exists
type: string
sample: "complete"
subnets:
description: the description of the DB subnet group
returned: when RDS instance exists
type: complex
contains:
availability_zone:
description: subnet availability zone information
returned: when RDS instance exists
type: complex
contains:
name:
              description: availability zone
returned: when RDS instance exists
type: string
sample: "eu-west-1b"
provisioned_iops_capable:
description: whether provisioned iops are available in AZ subnet
returned: when RDS instance exists
type: boolean
sample: "false"
identifier:
description: the identifier of the subnet
returned: when RDS instance exists
type: string
sample: "subnet-3fdba63e"
status:
description: the status of the subnet
returned: when RDS instance exists
type: string
sample: "active"
'''
import sys
import time
from ansible.module_utils.ec2 import AWSRetry
try:
import boto.rds
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
try:
import boto.rds2
has_rds2 = True
except ImportError:
has_rds2 = False
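# boto.rds2 (boto >= 2.26.0) is preferred when available; as noted in the
# documentation above, features such as tags and publicly_accessible need it.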
DEFAULT_PORTS = {
'aurora': 3306,
'mariadb': 3306,
'mysql': 3306,
'oracle': 1521,
'sqlserver': 1433,
'postgres': 5432,
}
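# Engine-family default ports; per the 'port' option documentation above, these
# are the values applied when no explicit port is supplied.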
class RDSException(Exception):
def __init__(self, exc):
if hasattr(exc, 'error_message') and exc.error_message:
self.message = exc.error_message
self.code = exc.error_code
elif hasattr(exc, 'body') and 'Error' in exc.body:
self.message = exc.body['Error']['Message']
self.code = exc.body['Error']['Code']
else:
self.message = str(exc)
self.code = 'Unknown Error'
class RDSConnection:
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = connect_to_aws(boto.rds, region, **aws_connect_params)
except boto.exception.BotoServerError as e:
module.fail_json(msg=e.error_message)
def get_db_instance(self, instancename):
try:
return RDSDBInstance(self.connection.get_all_dbinstances(instancename)[0])
except boto.exception.BotoServerError as e:
return None
def get_db_snapshot(self, snapshotid):
try:
return RDSSnapshot(self.connection.get_all_dbsnapshots(snapshot_id=snapshotid)[0])
except boto.exception.BotoServerError as e:
return None
def create_db_instance(self, instance_name, size, instance_class, db_engine,
username, password, **params):
params['engine'] = db_engine
try:
result = self.connection.create_dbinstance(instance_name, size, instance_class,
username, password, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def create_db_instance_read_replica(self, instance_name, source_instance, **params):
try:
            result = self.connection.create_dbinstance_read_replica(instance_name, source_instance, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def delete_db_instance(self, instance_name, **params):
try:
result = self.connection.delete_dbinstance(instance_name, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def delete_db_snapshot(self, snapshot):
try:
result = self.connection.delete_dbsnapshot(snapshot)
return RDSSnapshot(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def modify_db_instance(self, instance_name, **params):
try:
result = self.connection.modify_dbinstance(instance_name, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def reboot_db_instance(self, instance_name, **params):
try:
result = self.connection.reboot_dbinstance(instance_name)
return RDSDBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
try:
result = self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def create_db_snapshot(self, snapshot, instance_name, **params):
try:
result = self.connection.create_dbsnapshot(snapshot, instance_name)
return RDSSnapshot(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def promote_read_replica(self, instance_name, **params):
try:
result = self.connection.promote_read_replica(instance_name, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
class RDS2Connection:
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = connect_to_aws(boto.rds2, region, **aws_connect_params)
except boto.exception.BotoServerError as e:
module.fail_json(msg=e.error_message)
def get_db_instance(self, instancename):
try:
dbinstances = self.connection.describe_db_instances(
db_instance_identifier=instancename
)['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']
result = RDS2DBInstance(dbinstances[0])
return result
except boto.rds2.exceptions.DBInstanceNotFound as e:
return None
except Exception as e:
raise e
def get_db_snapshot(self, snapshotid):
try:
snapshots = self.connection.describe_db_snapshots(
db_snapshot_identifier=snapshotid,
snapshot_type='manual'
)['DescribeDBSnapshotsResponse']['DescribeDBSnapshotsResult']['DBSnapshots']
result = RDS2Snapshot(snapshots[0])
return result
except boto.rds2.exceptions.DBSnapshotNotFound as e:
return None
def create_db_instance(self, instance_name, size, instance_class, db_engine,
username, password, **params):
try:
result = self.connection.create_db_instance(instance_name, size, instance_class, db_engine, username, password,
**params)['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def create_db_instance_read_replica(self, instance_name, source_instance, **params):
try:
result = self.connection.create_db_instance_read_replica(
instance_name,
source_instance,
**params
)['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def delete_db_instance(self, instance_name, **params):
try:
result = self.connection.delete_db_instance(instance_name, **params)['DeleteDBInstanceResponse']['DeleteDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def delete_db_snapshot(self, snapshot):
try:
result = self.connection.delete_db_snapshot(snapshot)['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot']
return RDS2Snapshot(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def modify_db_instance(self, instance_name, **params):
try:
result = self.connection.modify_db_instance(instance_name, **params)['ModifyDBInstanceResponse']['ModifyDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def reboot_db_instance(self, instance_name, **params):
try:
result = self.connection.reboot_db_instance(instance_name, **params)['RebootDBInstanceResponse']['RebootDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
try:
result = self.connection.restore_db_instance_from_db_snapshot(
instance_name,
snapshot,
**params
)['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def create_db_snapshot(self, snapshot, instance_name, **params):
try:
result = self.connection.create_db_snapshot(snapshot, instance_name, **params)['CreateDBSnapshotResponse']['CreateDBSnapshotResult']['DBSnapshot']
return RDS2Snapshot(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def promote_read_replica(self, instance_name, **params):
try:
result = self.connection.promote_read_replica(instance_name, **params)['PromoteReadReplicaResponse']['PromoteReadReplicaResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
class RDSDBInstance:
def __init__(self, dbinstance):
self.instance = dbinstance
self.name = dbinstance.id
self.status = dbinstance.status
def get_data(self):
d = {
'id': self.name,
'create_time': self.instance.create_time,
'status': self.status,
'availability_zone': self.instance.availability_zone,
'backup_retention': self.instance.backup_retention_period,
'backup_window': self.instance.preferred_backup_window,
'maintenance_window': self.instance.preferred_maintenance_window,
'multi_zone': self.instance.multi_az,
'instance_type': self.instance.instance_class,
'username': self.instance.master_username,
'iops': self.instance.iops
}
# Only assign an Endpoint if one is available
if hasattr(self.instance, 'endpoint'):
d["endpoint"] = self.instance.endpoint[0]
d["port"] = self.instance.endpoint[1]
if self.instance.vpc_security_groups is not None:
d["vpc_security_groups"] = ','.join(x.vpc_group for x in self.instance.vpc_security_groups)
else:
d["vpc_security_groups"] = None
else:
d["endpoint"] = None
d["port"] = None
d["vpc_security_groups"] = None
if self.instance.DBName:
d['DBName'] = self.instance.DBName
# ReadReplicaSourceDBInstanceIdentifier may or may not exist
try:
d["replication_source"] = self.instance.ReadReplicaSourceDBInstanceIdentifier
except Exception as e:
d["replication_source"] = None
return d
class RDS2DBInstance:
def __init__(self, dbinstance):
self.instance = dbinstance
if 'DBInstanceIdentifier' not in dbinstance:
self.name = None
else:
self.name = self.instance.get('DBInstanceIdentifier')
self.status = self.instance.get('DBInstanceStatus')
def get_data(self):
d = {
'id': self.name,
'create_time': self.instance['InstanceCreateTime'],
'engine': self.instance['Engine'],
'engine_version': self.instance['EngineVersion'],
'license_model': self.instance['LicenseModel'],
'character_set_name': self.instance['CharacterSetName'],
'allocated_storage': self.instance['AllocatedStorage'],
'publicly_accessible': self.instance['PubliclyAccessible'],
'latest_restorable_time': self.instance['LatestRestorableTime'],
'status': self.status,
'availability_zone': self.instance['AvailabilityZone'],
'secondary_avaialbility_zone': self.instance['SecondaryAvailabilityZone'],
'backup_retention': self.instance['BackupRetentionPeriod'],
'backup_window': self.instance['PreferredBackupWindow'],
'maintenance_window': self.instance['PreferredMaintenanceWindow'],
'auto_minor_version_upgrade': self.instance['AutoMinorVersionUpgrade'],
'read_replica_source_dbinstance_identifier': self.instance['ReadReplicaSourceDBInstanceIdentifier'],
'multi_zone': self.instance['MultiAZ'],
'instance_type': self.instance['DBInstanceClass'],
'username': self.instance['MasterUsername'],
'db_name': self.instance['DBName'],
'iops': self.instance['Iops'],
'replication_source': self.instance['ReadReplicaSourceDBInstanceIdentifier']
}
if self.instance['DBParameterGroups'] is not None:
parameter_groups = []
for x in self.instance['DBParameterGroups']:
parameter_groups.append({'parameter_group_name': x['DBParameterGroupName'], 'parameter_apply_status': x['ParameterApplyStatus']})
d['parameter_groups'] = parameter_groups
if self.instance['OptionGroupMemberships'] is not None:
option_groups = []
for x in self.instance['OptionGroupMemberships']:
option_groups.append({'status': x['Status'], 'option_group_name': x['OptionGroupName']})
d['option_groups'] = option_groups
if self.instance['PendingModifiedValues'] is not None:
pdv = self.instance['PendingModifiedValues']
d['pending_modified_values'] = {
'multi_az': pdv['MultiAZ'],
'master_user_password': pdv['MasterUserPassword'],
'port': pdv['Port'],
'iops': pdv['Iops'],
'allocated_storage': pdv['AllocatedStorage'],
'engine_version': pdv['EngineVersion'],
'backup_retention_period': pdv['BackupRetentionPeriod'],
'db_instance_class': pdv['DBInstanceClass'],
'db_instance_identifier': pdv['DBInstanceIdentifier']
}
if self.instance["DBSubnetGroup"] is not None:
dsg = self.instance["DBSubnetGroup"]
db_subnet_groups = {}
db_subnet_groups['vpc_id'] = dsg['VpcId']
db_subnet_groups['name'] = dsg['DBSubnetGroupName']
db_subnet_groups['status'] = dsg['SubnetGroupStatus'].lower()
db_subnet_groups['description'] = dsg['DBSubnetGroupDescription']
db_subnet_groups['subnets'] = []
for x in dsg["Subnets"]:
db_subnet_groups['subnets'].append({
'status': x['SubnetStatus'].lower(),
'identifier': x['SubnetIdentifier'],
'availability_zone': {
'name': x['SubnetAvailabilityZone']['Name'],
'provisioned_iops_capable': x['SubnetAvailabilityZone']['ProvisionedIopsCapable']
}
})
d['db_subnet_groups'] = db_subnet_groups
if self.instance["VpcSecurityGroups"] is not None:
d['vpc_security_groups'] = ','.join(x['VpcSecurityGroupId'] for x in self.instance['VpcSecurityGroups'])
if "Endpoint" in self.instance and self.instance["Endpoint"] is not None:
d['endpoint'] = self.instance["Endpoint"].get('Address', None)
d['port'] = self.instance["Endpoint"].get('Port', None)
else:
d['endpoint'] = None
d['port'] = None
if self.instance["DBName"]:
d['DBName'] = self.instance['DBName']
return d
class RDSSnapshot:
def __init__(self, snapshot):
self.snapshot = snapshot
self.name = snapshot.id
self.status = snapshot.status
def get_data(self):
d = {
'id': self.name,
'create_time': self.snapshot.snapshot_create_time,
'status': self.status,
'availability_zone': self.snapshot.availability_zone,
'instance_id': self.snapshot.instance_id,
'instance_created': self.snapshot.instance_create_time,
}
# needs boto >= 2.21.0
if hasattr(self.snapshot, 'snapshot_type'):
d["snapshot_type"] = self.snapshot.snapshot_type
if hasattr(self.snapshot, 'iops'):
d["iops"] = self.snapshot.iops
return d
class RDS2Snapshot:
def __init__(self, snapshot):
if 'DeleteDBSnapshotResponse' in snapshot:
self.snapshot = snapshot['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot']
else:
self.snapshot = snapshot
self.name = self.snapshot.get('DBSnapshotIdentifier')
self.status = self.snapshot.get('Status')
def get_data(self):
d = {
'id': self.name,
'create_time': self.snapshot['SnapshotCreateTime'],
'status': self.status,
'availability_zone': self.snapshot['AvailabilityZone'],
'instance_id': self.snapshot['DBInstanceIdentifier'],
'instance_created': self.snapshot['InstanceCreateTime'],
'snapshot_type': self.snapshot['SnapshotType'],
'iops': self.snapshot['Iops'],
}
return d
def await_resource(conn, resource, status, module):
start_time = time.time()
wait_timeout = module.params.get('wait_timeout') + start_time
check_interval = 5
while wait_timeout > time.time() and resource.status != status:
time.sleep(check_interval)
if wait_timeout <= time.time():
module.fail_json(msg="Timeout waiting for RDS resource %s" % resource.name)
if module.params.get('command') == 'snapshot':
# Temporary until all the rds2 commands have their responses parsed
if resource.name is None:
module.fail_json(msg="There was a problem waiting for RDS snapshot %s" % resource.snapshot)
# Back off if we're getting throttled, since we're just waiting anyway
resource = AWSRetry.backoff(tries=5, delay=20, backoff=1.5)(conn.get_db_snapshot)(resource.name)
else:
# Temporary until all the rds2 commands have their responses parsed
if resource.name is None:
module.fail_json(msg="There was a problem waiting for RDS instance %s" % resource.instance)
# Back off if we're getting throttled, since we're just waiting anyway
resource = AWSRetry.backoff(tries=5, delay=20, backoff=1.5)(conn.get_db_instance)(resource.name)
if resource is None:
break
# Some RDS resources take much longer than others to be ready. Check
# less aggressively for slow ones to avoid throttling.
if time.time() > start_time + 90:
check_interval = 20
return resource
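# Note: await_resource() hands back the refreshed resource, or None when it can
# no longer be found while waiting (e.g. an instance that finished deleting).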
def create_db_instance(module, conn):
subnet = module.params.get('subnet')
required_vars = ['instance_name', 'db_engine', 'size', 'instance_type', 'username', 'password']
valid_vars = ['backup_retention', 'backup_window',
'character_set_name', 'db_name', 'engine_version',
'instance_type', 'iops', 'license_model', 'maint_window',
'multi_zone', 'option_group', 'parameter_group', 'port',
'subnet', 'upgrade', 'zone']
if module.params.get('subnet'):
valid_vars.append('vpc_security_groups')
else:
valid_vars.append('security_groups')
if has_rds2:
valid_vars.extend(['publicly_accessible', 'tags'])
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
if result:
changed = False
else:
try:
result = conn.create_db_instance(instance_name, module.params.get('size'),
module.params.get('instance_type'), module.params.get('db_engine'),
module.params.get('username'), module.params.get('password'), **params)
changed = True
except RDSException as e:
module.fail_json(msg="Failed to create instance: %s" % e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def replicate_db_instance(module, conn):
required_vars = ['instance_name', 'source_instance']
valid_vars = ['instance_type', 'port', 'upgrade', 'zone']
if has_rds2:
valid_vars.extend(['iops', 'option_group', 'publicly_accessible', 'tags'])
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
source_instance = module.params.get('source_instance')
result = conn.get_db_instance(instance_name)
if result:
changed = False
else:
try:
result = conn.create_db_instance_read_replica(instance_name, source_instance, **params)
changed = True
except RDSException as e:
module.fail_json(msg="Failed to create replica instance: %s " % e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def delete_db_instance_or_snapshot(module, conn):
required_vars = []
valid_vars = ['instance_name', 'snapshot', 'skip_final_snapshot']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
if not instance_name:
result = conn.get_db_snapshot(snapshot)
else:
result = conn.get_db_instance(instance_name)
if not result:
module.exit_json(changed=False)
if result.status == 'deleting':
module.exit_json(changed=False)
try:
if instance_name:
if snapshot:
params["skip_final_snapshot"] = False
if has_rds2:
params["final_db_snapshot_identifier"] = snapshot
else:
params["final_snapshot_id"] = snapshot
else:
params["skip_final_snapshot"] = True
result = conn.delete_db_instance(instance_name, **params)
else:
result = conn.delete_db_snapshot(snapshot)
except RDSException as e:
module.fail_json(msg="Failed to delete instance: %s" % e.message)
# If we're not waiting for a delete to complete then we're all done
# so just return
if not module.params.get('wait'):
module.exit_json(changed=True)
try:
resource = await_resource(conn, result, 'deleted', module)
module.exit_json(changed=True)
except RDSException as e:
if e.code == 'DBInstanceNotFound':
module.exit_json(changed=True)
else:
module.fail_json(msg=e.message)
except Exception as e:
module.fail_json(msg=str(e))
def facts_db_instance_or_snapshot(module, conn):
required_vars = []
valid_vars = ['instance_name', 'snapshot']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
if instance_name and snapshot:
module.fail_json(msg="Facts must be called with either instance_name or snapshot, not both")
if instance_name:
resource = conn.get_db_instance(instance_name)
if not resource:
module.fail_json(msg="DB instance %s does not exist" % instance_name)
if snapshot:
resource = conn.get_db_snapshot(snapshot)
if not resource:
module.fail_json(msg="DB snapshot %s does not exist" % snapshot)
module.exit_json(changed=False, instance=resource.get_data())
def modify_db_instance(module, conn):
required_vars = ['instance_name']
valid_vars = ['apply_immediately', 'backup_retention', 'backup_window',
'db_name', 'engine_version', 'instance_type', 'iops', 'license_model',
'maint_window', 'multi_zone', 'new_instance_name',
'option_group', 'parameter_group', 'password', 'size', 'upgrade']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
new_instance_name = module.params.get('new_instance_name')
try:
result = conn.modify_db_instance(instance_name, **params)
except RDSException as e:
module.fail_json(msg=e.message)
if params.get('apply_immediately'):
if new_instance_name:
# Wait until the new instance name is valid
new_instance = None
while not new_instance:
new_instance = conn.get_db_instance(new_instance_name)
time.sleep(5)
# Found instance but it briefly flicks to available
# before rebooting so let's wait until we see it rebooting
# before we check whether to 'wait'
result = await_resource(conn, new_instance, 'rebooting', module)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
# guess that this changed the DB, need a way to check
module.exit_json(changed=True, instance=resource.get_data())
def promote_db_instance(module, conn):
required_vars = ['instance_name']
valid_vars = ['backup_retention', 'backup_window']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
if not result:
module.fail_json(msg="DB Instance %s does not exist" % instance_name)
if result.get_data().get('replication_source'):
try:
result = conn.promote_read_replica(instance_name, **params)
changed = True
except RDSException as e:
module.fail_json(msg=e.message)
else:
changed = False
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def snapshot_db_instance(module, conn):
required_vars = ['instance_name', 'snapshot']
valid_vars = ['tags']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
changed = False
result = conn.get_db_snapshot(snapshot)
if not result:
try:
result = conn.create_db_snapshot(snapshot, instance_name, **params)
changed = True
except RDSException as e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_snapshot(snapshot)
module.exit_json(changed=changed, snapshot=resource.get_data())
def reboot_db_instance(module, conn):
required_vars = ['instance_name']
valid_vars = []
if has_rds2:
valid_vars.append('force_failover')
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
changed = False
try:
result = conn.reboot_db_instance(instance_name, **params)
changed = True
except RDSException as e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def restore_db_instance(module, conn):
required_vars = ['instance_name', 'snapshot']
valid_vars = ['db_name', 'iops', 'license_model', 'multi_zone',
'option_group', 'port', 'publicly_accessible',
'subnet', 'tags', 'upgrade', 'zone']
if has_rds2:
valid_vars.append('instance_type')
else:
required_vars.append('instance_type')
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
instance_type = module.params.get('instance_type')
snapshot = module.params.get('snapshot')
changed = False
result = conn.get_db_instance(instance_name)
if not result:
try:
result = conn.restore_db_instance_from_db_snapshot(instance_name, snapshot, instance_type, **params)
changed = True
except RDSException as e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def validate_parameters(required_vars, valid_vars, module):
command = module.params.get('command')
for v in required_vars:
if not module.params.get(v):
module.fail_json(msg="Parameter %s required for %s command" % (v, command))
# map to convert rds module options to boto rds and rds2 options
optional_params = {
'port': 'port',
'db_name': 'db_name',
'zone': 'availability_zone',
'maint_window': 'preferred_maintenance_window',
'backup_window': 'preferred_backup_window',
'backup_retention': 'backup_retention_period',
'multi_zone': 'multi_az',
'engine_version': 'engine_version',
'upgrade': 'auto_minor_version_upgrade',
'subnet': 'db_subnet_group_name',
'license_model': 'license_model',
'option_group': 'option_group_name',
'size': 'allocated_storage',
'iops': 'iops',
'new_instance_name': 'new_instance_id',
'apply_immediately': 'apply_immediately',
}
# map to convert rds module options to boto rds options
optional_params_rds = {
'db_engine': 'engine',
'password': 'master_password',
'parameter_group': 'param_group',
'instance_type': 'instance_class',
}
# map to convert rds module options to boto rds2 options
optional_params_rds2 = {
'tags': 'tags',
'publicly_accessible': 'publicly_accessible',
'parameter_group': 'db_parameter_group_name',
'character_set_name': 'character_set_name',
'instance_type': 'db_instance_class',
'password': 'master_user_password',
'new_instance_name': 'new_db_instance_identifier',
'force_failover': 'force_failover',
}
if has_rds2:
optional_params.update(optional_params_rds2)
sec_group = 'db_security_groups'
else:
optional_params.update(optional_params_rds)
sec_group = 'security_groups'
# Check for options only supported with rds2
for k in set(optional_params_rds2.keys()) - set(optional_params_rds.keys()):
if module.params.get(k):
module.fail_json(msg="Parameter %s requires boto.rds (boto >= 2.26.0)" % k)
params = {}
for (k, v) in optional_params.items():
if module.params.get(k) is not None and k not in required_vars:
if k in valid_vars:
params[v] = module.params[k]
else:
if module.params.get(k) is False:
pass
else:
module.fail_json(msg="Parameter %s is not valid for %s command" % (k, command))
if module.params.get('security_groups'):
params[sec_group] = module.params.get('security_groups').split(',')
vpc_groups = module.params.get('vpc_security_groups')
if vpc_groups:
if has_rds2:
params['vpc_security_group_ids'] = vpc_groups
else:
groups_list = []
for x in vpc_groups:
groups_list.append(boto.rds.VPCSecurityGroupMembership(vpc_group=x))
params['vpc_security_groups'] = groups_list
# Convert tags dict to list of tuples that rds2 expects
if 'tags' in params:
params['tags'] = module.params['tags'].items()
return params
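# Illustrative sketch (hypothetical tag values) of the tags conversion above --
# dict.items() turns the module's tags dict into the list of tuples rds2 expects:
#
#     {'Environment': 'testing', 'Owner': 'dba'}.items()
#     # -> [('Environment', 'testing'), ('Owner', 'dba')]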
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
command=dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'reboot', 'restore'], required=True),
instance_name=dict(required=False),
source_instance=dict(required=False),
db_engine=dict(choices=['mariadb', 'MySQL', 'oracle-se1', 'oracle-se2', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex',
'sqlserver-web', 'postgres', 'aurora'], required=False),
size=dict(required=False),
instance_type=dict(aliases=['type'], required=False),
username=dict(required=False),
password=dict(no_log=True, required=False),
db_name=dict(required=False),
engine_version=dict(required=False),
parameter_group=dict(required=False),
license_model=dict(choices=['license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license'], required=False),
multi_zone=dict(type='bool', required=False),
iops=dict(required=False),
security_groups=dict(required=False),
vpc_security_groups=dict(type='list', required=False),
port=dict(required=False, type='int'),
upgrade=dict(type='bool', default=False),
option_group=dict(required=False),
maint_window=dict(required=False),
backup_window=dict(required=False),
backup_retention=dict(required=False),
zone=dict(aliases=['aws_zone', 'ec2_zone'], required=False),
subnet=dict(required=False),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=300),
snapshot=dict(required=False),
apply_immediately=dict(type='bool', default=False),
new_instance_name=dict(required=False),
tags=dict(type='dict', required=False),
publicly_accessible=dict(required=False),
character_set_name=dict(required=False),
force_failover=dict(type='bool', required=False, default=False)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
invocations = {
'create': create_db_instance,
'replicate': replicate_db_instance,
'delete': delete_db_instance_or_snapshot,
'facts': facts_db_instance_or_snapshot,
'modify': modify_db_instance,
'promote': promote_db_instance,
'snapshot': snapshot_db_instance,
'reboot': reboot_db_instance,
'restore': restore_db_instance,
}
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg="Region not specified. Unable to determine region from EC2_REGION.")
# set port to per db defaults if not specified
if module.params['port'] is None and module.params['db_engine'] is not None and module.params['command'] == 'create':
if '-' in module.params['db_engine']:
engine = module.params['db_engine'].split('-')[0]
else:
engine = module.params['db_engine']
module.params['port'] = DEFAULT_PORTS[engine.lower()]
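# Worked example (hypothetical input) of the fallback above: db_engine='oracle-se1'
# is reduced to engine='oracle', so the port defaults to DEFAULT_PORTS['oracle'].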
# connect to the rds endpoint
if has_rds2:
conn = RDS2Connection(module, region, **aws_connect_params)
else:
conn = RDSConnection(module, region, **aws_connect_params)
invocations[module.params.get('command')](module, conn)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
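# A hedged usage sketch (hypothetical names and credentials) of invoking this module
# from a playbook; every parameter below is declared in argument_spec above:
#
#     - rds:
#         command: create
#         instance_name: new-database
#         db_engine: MySQL
#         size: 10
#         instance_type: db.m1.small
#         username: mysql_admin
#         password: 1nsecure
#         tags:
#           Environment: testing
#         wait: yes
#         wait_timeout: 600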
|
e-gob/plataforma-kioscos-autoatencion
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/cloud/amazon/rds.py
|
Python
|
bsd-3-clause
| 56,122 | 0.002441 |
# orm/interfaces.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Contains various base classes used throughout the ORM.
Defines the now deprecated ORM extension classes as well
as ORM internals.
Other than the deprecated extensions, this module and the
classes within should be considered mostly private.
"""
from __future__ import absolute_import
from .. import exc as sa_exc, util, inspect
from ..sql import operators
from collections import deque
orm_util = util.importlater('sqlalchemy.orm', 'util')
collections = util.importlater('sqlalchemy.orm', 'collections')
__all__ = (
'AttributeExtension',
'EXT_CONTINUE',
'EXT_STOP',
'ExtensionOption',
'InstrumentationManager',
'LoaderStrategy',
'MapperExtension',
'MapperOption',
'MapperProperty',
'PropComparator',
'PropertyOption',
'SessionExtension',
'StrategizedOption',
'StrategizedProperty',
)
EXT_CONTINUE = util.symbol('EXT_CONTINUE')
EXT_STOP = util.symbol('EXT_STOP')
ONETOMANY = util.symbol('ONETOMANY')
MANYTOONE = util.symbol('MANYTOONE')
MANYTOMANY = util.symbol('MANYTOMANY')
from .deprecated_interfaces import AttributeExtension, \
SessionExtension, \
MapperExtension
NOT_EXTENSION = util.symbol('NOT_EXTENSION')
"""Symbol indicating an :class:`_InspectionAttr` that's
not part of sqlalchemy.ext.
Is assigned to the :attr:`._InspectionAttr.extension_type`
attribute.
"""
class _InspectionAttr(object):
"""A base class applied to all ORM objects that can be returned
by the :func:`.inspect` function.
The attributes defined here allow the usage of simple boolean
checks to test basic facts about the object returned.
While the boolean checks here are basically the same as using
the Python isinstance() function, the flags here can be used without
the need to import all of these classes, and also such that
the SQLAlchemy class system can change while leaving the flags
here intact for forwards-compatibility.
"""
is_selectable = False
"""Return True if this object is an instance of :class:`.Selectable`."""
is_aliased_class = False
"""True if this object is an instance of :class:`.AliasedClass`."""
is_instance = False
"""True if this object is an instance of :class:`.InstanceState`."""
is_mapper = False
"""True if this object is an instance of :class:`.Mapper`."""
is_property = False
"""True if this object is an instance of :class:`.MapperProperty`."""
is_attribute = False
"""True if this object is a Python :term:`descriptor`.
This can refer to one of many types. Usually a
:class:`.QueryableAttribute` which handles attribute events on behalf
of a :class:`.MapperProperty`. But can also be an extension type
such as :class:`.AssociationProxy` or :class:`.hybrid_property`.
The :attr:`._InspectionAttr.extension_type` will refer to a constant
identifying the specific subtype.
.. seealso::
:attr:`.Mapper.all_orm_descriptors`
"""
is_clause_element = False
"""True if this object is an instance of :class:`.ClauseElement`."""
extension_type = NOT_EXTENSION
"""The extension type, if any.
Defaults to :data:`.interfaces.NOT_EXTENSION`
.. versionadded:: 0.8.0
.. seealso::
:data:`.HYBRID_METHOD`
:data:`.HYBRID_PROPERTY`
:data:`.ASSOCIATION_PROXY`
"""
class _MappedAttribute(object):
"""Mixin for attributes which should be replaced by mapper-assigned
attributes.
"""
class MapperProperty(_MappedAttribute, _InspectionAttr):
"""Manage the relationship of a ``Mapper`` to a single class
attribute, as well as that attribute as it appears on individual
instances of the class, including attribute instrumentation,
attribute access, loading behavior, and dependency calculations.
The most common occurrences of :class:`.MapperProperty` are the
mapped :class:`.Column`, which is represented in a mapping as
an instance of :class:`.ColumnProperty`,
and a reference to another class produced by :func:`.relationship`,
represented in the mapping as an instance of
:class:`.RelationshipProperty`.
"""
cascade = frozenset()
"""The set of 'cascade' attribute names.
This collection is checked before the 'cascade_iterator' method is called.
"""
is_property = True
def setup(self, context, entity, path, adapter, **kwargs):
"""Called by Query for the purposes of constructing a SQL statement.
Each MapperProperty associated with the target mapper processes the
statement referenced by the query context, adding columns and/or
criterion as appropriate.
"""
pass
def create_row_processor(self, context, path,
mapper, row, adapter):
"""Return a 3-tuple consisting of three row processing functions.
"""
return None, None, None
def cascade_iterator(self, type_, state, visited_instances=None,
halt_on=None):
"""Iterate through instances related to the given instance for
a particular 'cascade', starting with this MapperProperty.
Return an iterator of 3-tuples (instance, mapper, state).
Note that the 'cascade' collection on this MapperProperty is
checked first for the given type before cascade_iterator is called.
See PropertyLoader for the related instance implementation.
"""
return iter(())
def set_parent(self, parent, init):
self.parent = parent
def instrument_class(self, mapper): # pragma: no-coverage
raise NotImplementedError()
@util.memoized_property
def info(self):
"""Info dictionary associated with the object, allowing user-defined
data to be associated with this :class:`.MapperProperty`.
The dictionary is generated when first accessed. Alternatively,
it can be specified as a constructor argument to the
:func:`.column_property`, :func:`.relationship`, or :func:`.composite`
functions.
.. versionadded:: 0.8 Added support for .info to all
:class:`.MapperProperty` subclasses.
.. seealso::
:attr:`.QueryableAttribute.info`
:attr:`.SchemaItem.info`
"""
return {}
_configure_started = False
_configure_finished = False
def init(self):
"""Called after all mappers are created to assemble
relationships between mappers and perform other post-mapper-creation
initialization steps.
"""
self._configure_started = True
self.do_init()
self._configure_finished = True
@property
def class_attribute(self):
"""Return the class-bound descriptor corresponding to this
MapperProperty."""
return getattr(self.parent.class_, self.key)
def do_init(self):
"""Perform subclass-specific initialization post-mapper-creation
steps.
This is a template method called by the ``MapperProperty``
object's init() method.
"""
pass
def post_instrument_class(self, mapper):
"""Perform instrumentation adjustments that need to occur
after init() has completed.
"""
pass
def is_primary(self):
"""Return True if this ``MapperProperty``'s mapper is the
primary mapper for its class.
This flag is used to indicate that the ``MapperProperty`` can
define attribute instrumentation for the class at the class
level (as opposed to the individual instance level).
"""
return not self.parent.non_primary
def merge(self, session, source_state, source_dict, dest_state,
dest_dict, load, _recursive):
"""Merge the attribute represented by this ``MapperProperty``
from source to destination object"""
pass
def compare(self, operator, value, **kw):
"""Return a compare operation for the columns represented by
this ``MapperProperty`` to the given value, which may be a
column value or an instance. 'operator' is an operator from
the operators module, or from sql.Comparator.
By default uses the PropComparator attached to this MapperProperty
under the attribute name "comparator".
"""
return operator(self.comparator, value)
def __repr__(self):
return '<%s at 0x%x; %s>' % (
self.__class__.__name__,
id(self), getattr(self, 'key', 'no key'))
class PropComparator(operators.ColumnOperators):
"""Defines boolean, comparison, and other operators for
:class:`.MapperProperty` objects.
SQLAlchemy allows for operators to
be redefined at both the Core and ORM level. :class:`.PropComparator`
is the base class of operator redefinition for ORM-level operations,
including those of :class:`.ColumnProperty`,
:class:`.RelationshipProperty`, and :class:`.CompositeProperty`.
.. note:: With the advent of Hybrid properties introduced in SQLAlchemy
0.7, as well as Core-level operator redefinition in
SQLAlchemy 0.8, the use case for user-defined :class:`.PropComparator`
instances is extremely rare. See :ref:`hybrids_toplevel` as well
as :ref:`types_operators`.
User-defined subclasses of :class:`.PropComparator` may be created. The
built-in Python comparison and math operator methods, such as
:meth:`.operators.ColumnOperators.__eq__`,
:meth:`.operators.ColumnOperators.__lt__`, and
:meth:`.operators.ColumnOperators.__add__`, can be overridden to provide
new operator behavior. The custom :class:`.PropComparator` is passed to
the :class:`.MapperProperty` instance via the ``comparator_factory``
argument. In each case,
the appropriate subclass of :class:`.PropComparator` should be used::
# definition of custom PropComparator subclasses
from sqlalchemy.orm.properties import \\
ColumnProperty,\\
CompositeProperty,\\
RelationshipProperty
class MyColumnComparator(ColumnProperty.Comparator):
def __eq__(self, other):
return self.__clause_element__() == other
class MyRelationshipComparator(RelationshipProperty.Comparator):
def any(self, expression):
"define the 'any' operation"
# ...
class MyCompositeComparator(CompositeProperty.Comparator):
def __gt__(self, other):
"redefine the 'greater than' operation"
return sql.and_(*[a>b for a, b in
zip(self.__clause_element__().clauses,
other.__composite_values__())])
# application of custom PropComparator subclasses
from sqlalchemy.orm import column_property, relationship, composite
from sqlalchemy import Column, String
class SomeMappedClass(Base):
some_column = column_property(Column("some_column", String),
comparator_factory=MyColumnComparator)
some_relationship = relationship(SomeOtherClass,
comparator_factory=MyRelationshipComparator)
some_composite = composite(
Column("a", String), Column("b", String),
comparator_factory=MyCompositeComparator
)
Note that for column-level operator redefinition, it's usually
simpler to define the operators at the Core level, using the
:attr:`.TypeEngine.comparator_factory` attribute. See
:ref:`types_operators` for more detail.
See also:
:class:`.ColumnProperty.Comparator`
:class:`.RelationshipProperty.Comparator`
:class:`.CompositeProperty.Comparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
def __init__(self, prop, parentmapper, adapt_to_entity=None):
self.prop = self.property = prop
self._parentmapper = parentmapper
self._adapt_to_entity = adapt_to_entity
def __clause_element__(self):
raise NotImplementedError("%r" % self)
def adapt_to_entity(self, adapt_to_entity):
"""Return a copy of this PropComparator which will use the given
:class:`.AliasedInsp` to produce corresponding expressions.
"""
return self.__class__(self.prop, self._parentmapper, adapt_to_entity)
@property
def adapter(self):
"""Produce a callable that adapts column expressions
to suit an aliased version of this comparator.
"""
if self._adapt_to_entity is None:
return None
else:
return self._adapt_to_entity._adapt_element
@util.memoized_property
def info(self):
return self.property.info
@staticmethod
def any_op(a, b, **kwargs):
return a.any(b, **kwargs)
@staticmethod
def has_op(a, b, **kwargs):
return a.has(b, **kwargs)
@staticmethod
def of_type_op(a, class_):
return a.of_type(class_)
def of_type(self, class_):
"""Redefine this object in terms of a polymorphic subclass.
Returns a new PropComparator from which further criterion can be
evaluated.
e.g.::
query.join(Company.employees.of_type(Engineer)).\\
filter(Engineer.name=='foo')
:param \class_: a class or mapper indicating that criterion will be
against this specific subclass.
"""
return self.operate(PropComparator.of_type_op, class_)
def any(self, criterion=None, **kwargs):
"""Return true if this collection contains any member that meets the
given criterion.
The usual implementation of ``any()`` is
:meth:`.RelationshipProperty.Comparator.any`.
:param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
:param \**kwargs: key/value pairs corresponding to member class
attribute names which will be compared via equality to the
corresponding values.
"""
return self.operate(PropComparator.any_op, criterion, **kwargs)
def has(self, criterion=None, **kwargs):
"""Return true if this element references a member which meets the
given criterion.
The usual implementation of ``has()`` is
:meth:`.RelationshipProperty.Comparator.has`.
:param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
:param \**kwargs: key/value pairs corresponding to member class
attribute names which will be compared via equality to the
corresponding values.
"""
return self.operate(PropComparator.has_op, criterion, **kwargs)
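# A hedged usage sketch (assuming mapped classes ``Company`` and ``Employee`` with a
# ``Company.employees`` relationship and its reverse ``Employee.company``) of the
# any()/has() contract described above:
#
#     session.query(Company).filter(
#         Company.employees.any(Employee.name == 'wally'))
#     session.query(Employee).filter(
#         Employee.company.has(Company.name == 'ACME'))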
class StrategizedProperty(MapperProperty):
"""A MapperProperty which uses selectable strategies to affect
loading behavior.
There is a single strategy selected by default. Alternate
strategies can be selected at Query time through the usage of
``StrategizedOption`` objects via the Query.options() method.
"""
strategy_wildcard_key = None
@util.memoized_property
def _wildcard_path(self):
if self.strategy_wildcard_key:
return ('loaderstrategy', (self.strategy_wildcard_key,))
else:
return None
def _get_context_strategy(self, context, path):
strategy_cls = path._inlined_get_for(self, context, 'loaderstrategy')
if not strategy_cls:
wc_key = self._wildcard_path
if wc_key and wc_key in context.attributes:
strategy_cls = context.attributes[wc_key]
if strategy_cls:
try:
return self._strategies[strategy_cls]
except KeyError:
return self.__init_strategy(strategy_cls)
return self.strategy
def _get_strategy(self, cls):
try:
return self._strategies[cls]
except KeyError:
return self.__init_strategy(cls)
def __init_strategy(self, cls):
self._strategies[cls] = strategy = cls(self)
return strategy
def setup(self, context, entity, path, adapter, **kwargs):
self._get_context_strategy(context, path).\
setup_query(context, entity, path,
adapter, **kwargs)
def create_row_processor(self, context, path, mapper, row, adapter):
return self._get_context_strategy(context, path).\
create_row_processor(context, path,
mapper, row, adapter)
def do_init(self):
self._strategies = {}
self.strategy = self.__init_strategy(self.strategy_class)
def post_instrument_class(self, mapper):
if self.is_primary() and \
not mapper.class_manager._attr_has_impl(self.key):
self.strategy.init_class_attribute(mapper)
class MapperOption(object):
"""Describe a modification to a Query."""
propagate_to_loaders = False
"""if True, indicate this option should be carried along
Query object generated by scalar or object lazy loaders.
"""
def process_query(self, query):
pass
def process_query_conditionally(self, query):
"""same as process_query(), except that this option may not
apply to the given query.
Used when secondary loaders resend existing options to a new
Query."""
self.process_query(query)
class PropertyOption(MapperOption):
"""A MapperOption that is applied to a property off the mapper or
one of its child mappers, identified by a dot-separated key
or list of class-bound attributes. """
def __init__(self, key, mapper=None):
self.key = key
self.mapper = mapper
def process_query(self, query):
self._process(query, True)
def process_query_conditionally(self, query):
self._process(query, False)
def _process(self, query, raiseerr):
paths = self._process_paths(query, raiseerr)
if paths:
self.process_query_property(query, paths)
def process_query_property(self, query, paths):
pass
def __getstate__(self):
d = self.__dict__.copy()
d['key'] = ret = []
for token in util.to_list(self.key):
if isinstance(token, PropComparator):
ret.append((token._parentmapper.class_, token.key))
else:
ret.append(token)
return d
def __setstate__(self, state):
ret = []
for key in state['key']:
if isinstance(key, tuple):
cls, propkey = key
ret.append(getattr(cls, propkey))
else:
ret.append(key)
state['key'] = tuple(ret)
self.__dict__ = state
def _find_entity_prop_comparator(self, query, token, mapper, raiseerr):
if orm_util._is_aliased_class(mapper):
searchfor = mapper
else:
searchfor = orm_util._class_to_mapper(mapper)
for ent in query._mapper_entities:
if ent.corresponds_to(searchfor):
return ent
else:
if raiseerr:
if not list(query._mapper_entities):
raise sa_exc.ArgumentError(
"Query has only expression-based entities - "
"can't find property named '%s'."
% (token, )
)
else:
raise sa_exc.ArgumentError(
"Can't find property '%s' on any entity "
"specified in this Query. Note the full path "
"from root (%s) to target entity must be specified."
% (token, ",".join(str(x) for
x in query._mapper_entities))
)
else:
return None
def _find_entity_basestring(self, query, token, raiseerr):
for ent in query._mapper_entities:
# return only the first _MapperEntity when searching
# based on string prop name. Ideally object
# attributes are used to specify more exactly.
return ent
else:
if raiseerr:
raise sa_exc.ArgumentError(
"Query has only expression-based entities - "
"can't find property named '%s'."
% (token, )
)
else:
return None
def _process_paths(self, query, raiseerr):
"""reconcile the 'key' for this PropertyOption with
the current path and entities of the query.
Return a list of affected paths.
"""
path = orm_util.PathRegistry.root
entity = None
paths = []
no_result = []
# _current_path implies we're in a
# secondary load with an existing path
current_path = list(query._current_path.path)
tokens = deque(self.key)
while tokens:
token = tokens.popleft()
if isinstance(token, str):
# wildcard token
if token.endswith(':*'):
return [path.token(token)]
sub_tokens = token.split(".", 1)
token = sub_tokens[0]
tokens.extendleft(sub_tokens[1:])
# exhaust current_path before
# matching tokens to entities
if current_path:
if current_path[1].key == token:
current_path = current_path[2:]
continue
else:
return no_result
if not entity:
entity = self._find_entity_basestring(
query,
token,
raiseerr)
if entity is None:
return no_result
path_element = entity.entity_zero
mapper = entity.mapper
if hasattr(mapper.class_, token):
prop = getattr(mapper.class_, token).property
else:
if raiseerr:
raise sa_exc.ArgumentError(
"Can't find property named '%s' on the "
"mapped entity %s in this Query. " % (
token, mapper)
)
else:
return no_result
elif isinstance(token, PropComparator):
prop = token.property
# exhaust current_path before
# matching tokens to entities
if current_path:
if current_path[0:2] == \
[token._parententity, prop]:
current_path = current_path[2:]
continue
else:
return no_result
if not entity:
entity = self._find_entity_prop_comparator(
query,
prop.key,
token._parententity,
raiseerr)
if not entity:
return no_result
path_element = entity.entity_zero
mapper = entity.mapper
else:
raise sa_exc.ArgumentError(
"mapper option expects "
"string key or list of attributes")
assert prop is not None
if raiseerr and not prop.parent.common_parent(mapper):
raise sa_exc.ArgumentError("Attribute '%s' does not "
"link from element '%s'" % (token, path_element))
path = path[path_element][prop]
paths.append(path)
if getattr(token, '_of_type', None):
ac = token._of_type
ext_info = inspect(ac)
path_element = mapper = ext_info.mapper
if not ext_info.is_aliased_class:
ac = orm_util.with_polymorphic(
ext_info.mapper.base_mapper,
ext_info.mapper, aliased=True,
_use_mapper_path=True)
ext_info = inspect(ac)
path.set(query._attributes, "path_with_polymorphic", ext_info)
else:
path_element = mapper = getattr(prop, 'mapper', None)
if mapper is None and tokens:
raise sa_exc.ArgumentError(
"Attribute '%s' of entity '%s' does not "
"refer to a mapped entity" %
(token, entity)
)
if current_path:
# ran out of tokens before
# current_path was exhausted.
assert not tokens
return no_result
return paths
class StrategizedOption(PropertyOption):
"""A MapperOption that affects which LoaderStrategy will be used
for an operation by a StrategizedProperty.
"""
chained = False
def process_query_property(self, query, paths):
strategy = self.get_strategy_class()
if self.chained:
for path in paths:
path.set(
query._attributes,
"loaderstrategy",
strategy
)
else:
paths[-1].set(
query._attributes,
"loaderstrategy",
strategy
)
def get_strategy_class(self):
raise NotImplementedError()
class LoaderStrategy(object):
"""Describe the loading behavior of a StrategizedProperty object.
The ``LoaderStrategy`` interacts with the querying process in three
ways:
* it controls the configuration of the ``InstrumentedAttribute``
placed on a class to handle the behavior of the attribute. this
may involve setting up class-level callable functions to fire
off a select operation when the attribute is first accessed
(i.e. a lazy load)
* it processes the ``QueryContext`` at statement construction time,
where it can modify the SQL statement that is being produced.
Simple column attributes may add their represented column to the
list of selected columns, *eager loading* properties may add
``LEFT OUTER JOIN`` clauses to the statement.
* It produces "row processor" functions at result fetching time.
These "row processor" functions populate a particular attribute
on a particular mapped instance.
"""
def __init__(self, parent):
self.parent_property = parent
self.is_class_level = False
self.parent = self.parent_property.parent
self.key = self.parent_property.key
def init_class_attribute(self, mapper):
pass
def setup_query(self, context, entity, path, adapter, **kwargs):
pass
def create_row_processor(self, context, path, mapper,
row, adapter):
"""Return row processing functions which fulfill the contract
specified by MapperProperty.create_row_processor.
StrategizedProperty delegates its create_row_processor method
directly to this method. """
return None, None, None
def __str__(self):
return str(self.parent_property)
|
sauloal/PiCastPy
|
sqlalchemy/orm/interfaces.py
|
Python
|
mit
| 28,330 | 0.000671 |
#!/usr/bin/env python
import os
import shutil
import logging
from unicode_helper import p
__all__ = ["Renamer"]
def log():
"""Returns the logger for current file
"""
return logging.getLogger(__name__)
def same_partition(f1, f2):
"""Returns True if both files or directories are on the same partition
"""
return os.stat(f1).st_dev == os.stat(f2).st_dev
def delete_file(fpath):
"""On OS X: Trashes a path using the Finder, via OS X's Scripting Bridge.
On other platforms: unlinks file.
"""
try:
from AppKit import NSURL
from ScriptingBridge import SBApplication
except ImportError:
p("Deleting %s" % fpath)
log().debug("Deleting %r" % fpath)
os.unlink(fpath)
else:
p("Trashing %s" % fpath)
log().debug("Trashing %r" % fpath)
targetfile = NSURL.fileURLWithPath_(fpath)
finder = SBApplication.applicationWithBundleIdentifier_("com.apple.Finder")
items = finder.items().objectAtLocation_(targetfile)
items.delete()
def rename_file(old, new):
"""Rename 'old' file to 'new'. Both files must be on the same partition.
Preserves access and modification time.
"""
p("Renaming %s to %s" % (old, new))
log().debug("Renaming %r to %r" % (old, new))
stat = os.stat(old)
os.rename(old, new)
os.utime(new, (stat.st_atime, stat.st_mtime))
def copy_file(old, new):
"""Copy 'old' file to 'new'.
"""
p("Copying %s to %s" % (old, new))
log().debug("Copying %r to %r" % (old, new))
shutil.copyfile(old, new)
shutil.copystat(old, new)
def symlink_file(target, name):
"""Create symbolic link named 'name' pointing to 'target'.
"""
p("Creating symlink %s to %s" % (name, target))
log().debug("Creating symlink %r to %r" % (name, target))
os.symlink(target, name)
class Renamer(object):
"""Deals with renaming of files
"""
def __init__(self, filename):
self.filename = os.path.abspath(filename)
def rename(self, new_fullpath, force=False, always_copy=False, always_move=False, leave_symlink=False, create_dirs=True):
"""Moves the file to a new path.
If it is on the same partition, it will be moved (unless always_copy is True)
If it is on a different partition, it will be copied, and the original
only deleted if always_move is True.
If the target file already exists, it will raise OSError unless force is True.
If it was moved, a symlink will be left behind with the original name
pointing to the file's new destination if leave_symlink is True.
"""
new_dir = os.path.dirname(new_fullpath)
if create_dirs:
p("Creating directory %s" % new_dir)
try:
os.makedirs(new_dir)
except OSError, e:
if e.errno != 17:
raise
if os.path.exists(new_fullpath):
# If the destination exists, raise exception unless force is True
if not force:
raise OSError("File %s already exists, not forcefully moving %s" % (
new_fullpath, self.filename))
if same_partition(self.filename, new_dir):
if always_copy:
# Same partition, but forced to copy
copy_file(self.filename, new_fullpath)
else:
# Same partition, just rename the file to move it
rename_file(self.filename, new_fullpath)
# Leave a symlink behind if configured to do so
if leave_symlink:
symlink_file(new_fullpath, self.filename)
else:
# File is on different partition (different disc), copy it
copy_file(self.filename, new_fullpath)
if always_move:
# Forced to move file, we just trash old file
delete_file(self.filename)
# Leave a symlink behind if configured to do so
if leave_symlink:
symlink_file(new_fullpath, self.filename)
self.filename = new_fullpath
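# Minimal usage sketch (hypothetical paths): moving within a partition renames the
# file; crossing partitions copies it; leave_symlink drops a link at the old path.
if __name__ == "__main__":
    example = Renamer("/tmp/old_name.avi")
    example.rename("/tmp/sorted/new_name.avi", force=True, leave_symlink=True)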
|
lahwaacz/tvnamer
|
tvnamer/renamer.py
|
Python
|
unlicense
| 4,157 | 0.001203 |
#!/usr/bin/python2
#!/usr/bin/env python
#
# Copyright 2010 dan collins <danc@badbytes.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
'''
Requires the following...
srate,timeaxes,data,chanlabels,
'''
import sys,os
from gtk import gdk
from numpy import * #fromstring, arange, int16, float, log10
from matplotlib import rcParams
from meg import nearest
from pylab import xticks,ion
from matplotlib.backends.backend_gtkagg import FigureCanvasGTKAgg as \
FigureCanvas
import matplotlib.cm as cm
from matplotlib.figure import Figure
from matplotlib.lines import Line2D
#from meg import megcontour_gtk
from pdf2py import pdf, readwrite
from gui.gtk import contour as contour_gtk
from gui.gtk import meg_assistant,event_process#,offset_correct
try:
import pygtk
pygtk.require("2.0")
except:
pass
try:
import gtk
import gtk.glade
except:
print("GTK Not Availible")
sys.exit(1)
class setup_gui:
def __init__(self):
self.builder = gtk.Builder()
self.builder.add_from_file(os.path.splitext(__file__)[0]+".glade")
self.window = self.builder.get_object("window")
dic = {
"on_toolbutton_refresh_clicked" : self.generate_testdata,
"on_button1_clicked" : self.generate_testdata,
"on_vboxMain_button_press_event" : self.button_press_event,
"on_vboxMain_button_release_event" : self.button_release_event,
"on_vboxMain_drag" : self.drag_begin,
"on_vboxMain_motion_notify_event" : self.drag_begin,
"on_toolbar_clear_clicked" : self.clear_selections,
"on_toolbar_zoomin_clicked" : self.zoomin_time,
"on_toolbar_zoomout_clicked" : self.zoomout_time,
"on_go_back_clicked" : self.page_back,
"on_go_forward_clicked" : self.page_forward,
"on_toolbutton_setup_toggled" : self.preferences_open,
"on_button_channel_apply_clicked" : self.channel_selection_apply,
"set_channel_groups" : self.set_channel_groups,
"showpopupmenu" : self.showpopupmenu,
"on_toolbar_plot_clicked" : self.plot_contour,
"on_plot_contour_activate" : self.plot_contour,
"on_button_delete_selection_clicked" : self.event_selection_delete,
"gtk_widget_hide" : self.hideinsteadofdelete,
"on_button_display_apply_clicked": self.display_apply,
"on_go_up_clicked" : self.page_up,
"on_go_down_clicked" : self.page_down,
"on_toolbutton_load_clicked" : self.load_data,
"on_menu_offset_correct_clicked" : self.offset_correct,
"on_button_epoch_clicked" : self.add_selections_to_event_process,
"on_store_event_clicked" : self.store_event,
"on_menu_save_noise_activate" : self.store_noise,
"on_menu_save_event_activate" : self.store_event,
"on_key_press_event" : self.key_press_event,
}
self.builder.connect_signals(dic)
try: self.prefs = readwrite.readdata(os.getenv('HOME')+'/.pymeg.pym')
except IOError: self.prefs = {}; readwrite.writedata(self.prefs, os.getenv('HOME')+'/.pymeg')
try:
self.line_r,self.line_g,self.line_b = self.prefs['LineColor'][0],self.prefs['LineColor'][1],self.prefs['LineColor'][2]
self.back_r,self.back_g,self.back_b = self.prefs['BackColor'][0],self.prefs['BackColor'][1],self.prefs['BackColor'][2]
except:
self.line_r,self.line_g,self.line_b = 1.,1.,1.
self.back_r,self.back_g,self.back_b = .9,.9,.9
self.color = (self.line_r,self.line_g,self.line_b)
self.create_draw_frame('none')
self.create_spec_frame('none')
self.create_csd_frame('none')
self.space = 0
#self.generate_testdata(None)
self.preferences_open(None)
def printtest(self,widget):
print 'something'
def store_noise(self,widget):
print widget,'wid',widget.get_parent().get_name()
self.callback(widget)
def store_event(self,widget):
print widget,'wid',widget.get_parent().get_name()
self.callback(widget)
def create_draw_frame(self,widget):
self.fig = Figure(figsize=[100,100], dpi=40)
self.canvas = FigureCanvas(self.fig)
self.canvas.connect("scroll_event", self.scroll_event)
self.canvas.connect("key-press-event", self.key_press_event)
#self.canvas.connect('button_press_event', self.button_press_event)
self.canvas.show()
self.figure = self.canvas.figure
self.axes = self.fig.add_axes([0.045, 0.05, 0.93, 0.925], \
axisbg=(self.back_r,self.back_g,self.back_b))
#axisbg='#FFFFCC')
self.vb = self.builder.get_object("vbox3")
self.vb.pack_start(self.canvas, gtk.TRUE, gtk.TRUE)
self.vb.show()
def create_spec_frame(self,widget):
self.specfig = Figure(figsize=[10,10], dpi=40)
self.specfig.text(0.25,0.5,'Middle Click Channel for Specgram',\
fontsize=20)
self.speccanvas = FigureCanvas(self.specfig)
self.speccanvas.show()
self.specfigure = self.speccanvas.figure
self.specaxes = self.specfig.add_axes([0.045, 0.05, 0.93, 0.925], \
axisbg=(self.back_r,self.back_g,self.back_b))
#self.specaxes.axis('off')
self.vb2 = self.builder.get_object("vbox8")
self.vb2.pack_end(self.speccanvas, gtk.TRUE, gtk.TRUE)
self.vb2.show()
def create_csd_frame(self,widget):
self.csdfig = Figure(figsize=[10,10], dpi=40)
self.csdfig.text(0.25,0.5,'Middle Click Channel for CSD',fontsize=20)
self.csdcanvas = FigureCanvas(self.csdfig)
self.csdcanvas.show()
self.csdfigure = self.csdcanvas.figure
self.csdaxes = self.csdfig.add_axes([0.045, 0.05, 0.93, 0.925], \
axisbg=(self.back_r,self.back_g,self.back_b))
#self.csdaxes.axis('off')
self.vb3 = self.builder.get_object("vbox9")
self.vb3.pack_end(self.csdcanvas, gtk.TRUE, gtk.TRUE)
self.vb3.show()
def data_loaded_setup(self):
self.channel_tree(None)
self.builder.get_object("spinbutton1").set_range(0,self.numchannels)
self.builder.get_object("spinbutton1").set_value(self.numchannels)
self.builder.get_object("spinbutton2").set_range(self.t[0],self.t[-1])
self.builder.get_object("spinbutton2").set_value(self.t[0])
self.builder.get_object("spinbutton3").set_range(self.t[0],self.t[-1])
#if self.t[-1] - self.t[0] > 1: #alot of time, save time in plotting and set low
if len(self.t) > 1000:
self.builder.get_object("spinbutton3").set_value(self.t[1000])
print '.....reducing time var'
else:
print '.....showing all time'
self.builder.get_object("spinbutton3").set_value(self.t[-1])
#self.builder.get_object("spinbutton3").set_value(self.t[-1])
#self.builder.get_object("spinbutton5").set_value(self.scalefact)
self.builder.get_object("entry1").set_text(str(self.space))
self.builder.get_object("entry2").set_text(str(self.scalefact))
def preferences_open(self,widget):
self.win_prefs = self.builder.get_object("window_prefs")
#try: self.prefs = readwrite.readdata(os.getenv('HOME')+'/.pymeg.pym')
#except IOError: self.prefs = {}; readwrite.writedata(self.prefs, os.getenv('HOME')+'/.pymeg')
try:
#r,g,b = self.prefs['LineColor'][0],self.prefs['LineColor'][1],self.prefs['LineColor'][2]
self.builder.get_object("colorbutton1").set_color(color=gtk.gdk.Color(self.line_r,self.line_g,self.line_b))
self.builder.get_object("colorbutton2").set_color(color=gtk.gdk.Color(self.back_r,self.back_g,self.back_b))
except IOError:
pass
#print 'color',self.builder.get_object("colorbutton1").get_color()
#self.builder.get_object("colorbutton1").set_color(color=gtk.gdk.Color(111))
#print 'color',self.builder.get_object("colorbutton1").get_color()
if self.builder.get_object('toolbutton12').get_active() == True:
self.win_prefs.show()
else:
self.win_prefs.hide()
self.selections_tree(None)
def key_press_event(self, widget, event):
print event.keyval
def scroll_event(self, widget, event):
if event.direction == gdk.SCROLL_UP:
direction = 1
self.space = self.space + 2*self.scalefact
print 'sf',self.scalefact
else:
direction = -1
self.space = self.space - 2*self.scalefact
print 'sf',self.scalefact
if self.space < 0:
self.space = 0
print 'space', self.space
#print (arange(0,size(self.data2plot,1))*(self.space))
self.space_data()
self.redraw(None)
def space_data(self,space=None):
self.data2plot = self.data[self.tstart:self.tstop,self.chanind2plot]+\
(arange(0,size(self.data[self.tstart:self.tstop,self.chanind2plot],1))*\
(self.space))
def get_cursor_position(self,event):
ap = self.axes.get_position()
x,y = self.canvas.get_width_height()
posx = ((event.x/x)-ap.x0)*(1/(ap.x1-ap.x0))
posy = ((event.y/y)-(1-ap.y0))*(1/(ap.y0-ap.y1))
self.sx = (posx*(self.time[-1]-self.time[0]))+self.time[0]
self.sy = (posy*(self.data2plot.max()-self.data2plot.min())) + \
self.data2plot.min()
#print self.sx, self.sy
def button_press_event(self,widget,event):
self.get_cursor_position(event)
#print 'button pushed',event.button,event.type
if event.type == gtk.gdk.BUTTON_PRESS:
print "single click"
if event.button == 1:
self.xstart = self.sx
#elif event.type == gtk.gdk._2BUTTON_PRESS:
#print "double click"
#elif event.type == gtk.gdk._3BUTTON_PRESS:
#print "triple click. ouch, you hurt your user."
if event.type == gtk.gdk.BUTTON_PRESS and event.button == 2:
closest_data = nearest.nearest(self.data2plot[0,:],self.sy)
print 'nearest',closest_data
print 'highlighting channel'
self.axes.axhspan(self.data2plot[:,closest_data].min(), \
self.data2plot[:,closest_data].max(), xmin=0, xmax=1, color='g',\
alpha=0.2)
self.canvas.draw()
self.specaxes.cla()
NFFT = 1024
Fs = self.srate #(1/self.srate)
print NFFT,int(Fs),'d'
self.specaxes.specgram(
self.data2plot[:,closest_data[0]], NFFT=NFFT, Fs=Fs,noverlap=900)
#self.specaxes.axis('off')
self.speccanvas.draw()
self.csdaxes.csd(self.time,
self.data2plot[:,closest_data[0]], NFFT=NFFT, Fs=Fs)
#, noverlap=Noverlap,
#cmap=cm.jet)#, xextent=xextent)
#self.csdaxes.axis('off')
self.csdcanvas.draw()
def button_release_event(self,widget,event):
self.get_cursor_position(event)
if event.type == gtk.gdk.BUTTON_RELEASE and event.button == 1:
self.axes.axvspan(ymin=0, ymax=1, xmin=self.xstart, xmax=self.sx, \
color='b',alpha=0.4)
if self.xstart > self.sx: #selection going from later to earlier
tmp = copy(self.sx)
self.sx = copy(self.xstart)
self.xstart = tmp
try: self.selections = vstack((self.selections,\
[self.xstart,self.sx]))
except AttributeError: self.selections = \
array([[self.xstart,self.sx]])
print 'sels',self.selections
self.canvas.draw()
self.selections_tree(None)
def clear_selections(self,widget):
del self.selections
self.redraw(None)
def drag_begin(self,widget,event):
pass
def redraw(self,widget):
print len(self.time),self.data2plot.shape
#self.color = 'black'
self.axes.cla()
self.axes = self.figure.axes[0]
print 'cur color', self.color
self.axes.plot(self.time, self.data2plot,color=self.color)
self.axes.axis('tight')
try:
print 'current selections',self.selections
for i in self.selections:
self.axes.axvspan(ymin=0,ymax=1,xmin=i[0],xmax=i[1],color='b',\
alpha=.4)
except:
pass
self.axes.yaxis.set_ticks((arange(0,size(self.data2plot,1)) * \
(self.space)))
self.axes.yaxis.set_ticklabels(self.chanlabels2plot, fontsize=17)
self.canvas.draw()
ion()
def zoomin_time(self,widget):
startind = self.tstart;
stopind = self.tstop-((self.tstop-self.tstart)/2)
self.check_scale(startind,stopind)
self.redraw(None)
def zoomout_time(self,widget):
startind = self.tstart;
stopind = self.tstop+((self.tstop-self.tstart)*2)
self.check_scale(startind,stopind)
self.redraw(None)
def page_forward(self,widget):
startind = ((self.tstop-self.tstart)/2)+self.tstart;
stopind = ((self.tstop-self.tstart)/2)+self.tstop;
self.check_scale(startind,stopind)
self.redraw(None)
def page_back(self,widget):
startind = self.tstart-((self.tstop-self.tstart)/2);
stopind = self.tstop-((self.tstop-self.tstart)/2);
self.check_scale(startind,stopind)
self.redraw(None)
def page_up(self,widget):
self.curchannel = self.curchannel+self.numofch
if self.curchannel >= len(self.chanind):
self.curchannel = len(self.chanind)-self.numofch
self.chanind2plot = \
self.chanind[self.curchannel:self.curchannel+self.numofch]
self.chanlabels2plot = \
self.chanlabels[self.curchannel:self.curchannel+self.numofch]
self.check_scale(self.tstart,self.tstop)
self.redraw(None)#self.display_apply(None)
def page_down(self,widget):
self.curchannel = self.curchannel-self.numofch
if self.curchannel < 0:
self.curchannel = 0
self.chanind2plot = \
self.chanind[self.curchannel:self.curchannel+self.numofch]
self.chanlabels2plot = \
self.chanlabels[self.curchannel:self.curchannel+self.numofch]
self.check_scale(self.tstart,self.tstop)
self.redraw(None)#self.display_apply(None)
def display_apply(self,widget):
color = self.builder.get_object('colorbutton1')
r = color.get_color().red_float
g = color.get_color().green_float
b = color.get_color().blue_float
self.line_color = self.color = (r,g,b)
self.prefs['LineColor'] = self.line_color
color = self.builder.get_object('colorbutton2')
r = color.get_color().red_float
g = color.get_color().green_float
b = color.get_color().blue_float
self.back_color = (r,g,b)
self.prefs['BackColor'] = self.back_color
readwrite.writedata(self.prefs, os.getenv('HOME')+'/.pymeg')
self.numofch = int(self.builder.get_object("spinbutton1").get_value())
self.chanind2plot = \
self.chanind[self.curchannel:self.curchannel+self.numofch]
self.chanlabels2plot = \
self.chanlabels[self.curchannel:self.curchannel+self.numofch]
st = float(self.builder.get_object("spinbutton2").get_value())
ed = float(self.builder.get_object("spinbutton3").get_value())
self.space = float(self.builder.get_object("entry1").get_text())
self.scalefact = float(self.builder.get_object("entry2").get_text())
#print 'se',st,ed, self.t
startind = nearest.nearest(self.t,st)[0]
stopind = nearest.nearest(self.t,ed)[0]
print 'se',startind,stopind
self.check_scale(startind,stopind)
self.space_data()
self.redraw(None)
def check_scale(self,startind,stopind):
print 'req',startind,stopind, self.tstart,self.tstop
if startind < 0:
startind = 0
stopind = self.tstop
if stopind > len(self.t):
startind = self.tstart
stopind = len(self.t)
if stopind < 0:
stopind = self.tstop
print 'set',startind,stopind,self.tstart,self.tstop
self.tstart = startind
self.tstop = stopind
self.time = self.t[self.tstart:self.tstop]
self.data2plot = self.data[self.tstart:self.tstop,self.chanind2plot]
self.space_data()
#self.redraw(None)
def channel_tree(self,widget):
print('updating list')
self.View = self.builder.get_object("treeview1")
self.dataList = gtk.ListStore(int,str)
self.AddListColumn('Number', 0, self.View)
self.AddListColumn('Label', 1, self.View)
for k in range(0,self.numchannels):
iter = self.dataList.append([k,self.chanlabels[k]])
self.View.set_model(self.dataList)
print 'adding channels'
def AddListColumn(self, title, columnId, viewtype):
column = gtk.TreeViewColumn(title,gtk.CellRendererText(),text=columnId)
column.set_resizable(True)
column.set_sort_column_id(columnId)
viewtype.append_column(column)
viewtype.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
def channel_selection_apply(self, widget):
liststore,iter = self.View.get_selection().get_selected_rows()
self.chanind = [];
self.chanlabels = [];
for i in iter:
self.chanind.append(int(liststore[i][0]))
self.chanlabels.append(liststore[i][1])
print self.chanlabels
self.chanind2plot = self.chanind
self.chanlabels2plot = self.chanlabels
self.space_data()
self.redraw(None)
def set_channel_groups(self,widget):
l = self.View.get_model()
i = l.get_iter_first()
v = []
while ( i != None ):
v.append(l.get_value(i,1))
i = l.iter_next(i)
print widget.get_label(), widget
if widget.get_label() == 'meg' and widget.get_active() == True:
for i in range(0,len(v)):
if v[i].startswith('A'):
self.View.get_selection().select_path(i)
if widget.get_label() == 'De-Select All':
self.View.get_selection().unselect_all()
if widget.get_label() == 'Select All':
self.View.get_selection().select_all()
if widget.get_label() == 'reference' and widget.get_active() == True:
for i in range(0,len(v)):
if v[i].startswith('M') or v[i].startswith('G'):
self.View.get_selection().select_path(i)
if widget.get_label() == 'trigger' and widget.get_active() == True:
for i in range(0,len(self.chanlabels)):
if v[i].startswith('TRIGG'):
self.View.get_selection().select_path(i)
if widget.get_label() == 'response' and widget.get_active() == True:
for i in range(0,len(v)):
if v[i].startswith('RESP'):
self.View.get_selection().select_path(i)
def selections_tree(self,widget):
try:
if self.win_prefs.get_property('visible') == True:
print('updating selections')
self.SelView = self.builder.get_object("treeview2")
self.selectionList = gtk.ListStore(int,str)
if self.SelView.get_columns() == []:
self.AddListColumn('Event Number', 0,self.SelView)
self.AddListColumn('Selection', 1,self.SelView)
for k in range(0,len(self.selections)):
iter=self.selectionList.append([k,str(self.selections[k])])
self.SelView.set_model(self.selectionList)
print 'adding selections'
except AttributeError:
pass #window not initiated yet
def event_selection_delete(self, widget):
liststore,iter = self.SelView.get_selection().get_selected_rows()
#self.selections = delete(self.selections,iter,axis=0)
del_ind = []
for i in iter:
print 'deleting event:',liststore[i][0]
del_ind.append(liststore[i][0])
self.selections = delete(self.selections,del_ind,axis=0)
self.selections_tree(None)
self.redraw(None)
def showpopupmenu(self,widget,event):
print('button ',event.button)
if event.button == 3:
m = self.builder.get_object("menufunctions")
print(widget, event)
m.show_all()
m.popup(None,None,None,3,0)
def get_time_selection(self,widget,current=True):
print 'name',widget.get_parent().get_name()
sel_ind = []
sel_onset_ind = []
def selection_to_ind(sels,sele,inc):
print 'getting sel'
if sele == sels: #only one point selected
sele = sels+inc
sel_ind = nearest.nearest(self.t,arange(sels,sele,inc))
return sel_ind
if widget.get_parent().get_name() == 'GtkMenu' and current == True: #call from editor menu
print 'call from right click menu'
try:
self.sel_ind = selection_to_ind(self.selections[-1][0],\
self.selections[-1][1],self.t[1]-self.t[0])
except AttributeError:
print 'no selections yet'
return -1
else: #call from selector
print 'call from selector window'
liststore,iter = self.SelView.get_selection().get_selected_rows()
for i in iter:
j = int(liststore[i][0])
sel_ind.extend(selection_to_ind(self.selections[j][0],\
self.selections[j][1],self.t[1]-self.t[0]))
sel_onset_ind.extend(selection_to_ind(self.selections[j][0],\
self.selections[j][0],self.t[1]-self.t[0]))
self.sel_ind = sel_ind
self.sel_onset_ind = sel_onset_ind
def plot_contour(self,widget):
if size(self.data,1) < 4:
self.builder.get_object("messagedialog1").format_secondary_text\
('Contour Plot Requires at least 4 Channels')
self.builder.get_object("messagedialog1").show()
return -1
print widget.get_parent().get_name()
if self.get_time_selection(widget) == -1: #no selections
self.builder.get_object("messagedialog1").format_secondary_text\
('No Selection Made Yet')
self.builder.get_object("messagedialog1").show()
return -1
try:
print 'state',self.mc.window.get_property('visible')
if self.mc.window.get_property('visible') == False:
#someone closed the window
self.mc.window.show()
print 'done replotting'
except AttributeError: #first call. setup
print 'first plot'
self.mc = contour_gtk.setup_gui()
self.mc.window.show()
self.mc.fig.clf()
self.mc.display(self.data[self.sel_ind,:],self.channels, subplot='on', labels=self.chanlabels)
def generate_testdata(self,widget):
self.quick_load_pdf_script()
#numpts = 100
#self.numchannels = 10
#self.t = arange(0,numpts, .01)
#self.data = zeros((len(self.t),self.numchannels))
#self.scalefact = 1e-9
#for i in arange(0,self.numchannels):
#r = random.randn()
#self.data[:,i] = float32((sin(2*0.32*pi*self.t*r) * \
#sin(2*2.44*pi*self.t*r)))#+ self.space
#self.data[:,0] = random.randn((len(self.t)))
#self.data = self.data * self.scalefact
#self.tstart = 0; self.tstop = len(self.t)
#self.time = copy(self.t[self.tstart:self.tstop])
#print self.tstart,self.tstop
#self.chanind = arange(0,self.numchannels)
#self.chanlabels = arange(0,self.numchannels)
self.data2plot = self.data
self.display_apply(None)
#self.space_data()
#self.redraw(None)
def quick_load_pdf_script(self):
from pdf2py import pdf
datapath = '/home/danc/programming/python/data/'
p = pdf.read(datapath+'test/e,rfhp1.0Hz,ra')
#p = pdf.read(datapath+'0611/0611piez/e,rfhp1.0Hz')
#p = pdf.read(datapath+'data/0611/drawing3/01%01%01@01:01/2/c,rfDC')
p.data.setchannels('meg')
#p.data.setchannellabels(['A1','A69','A130'])#meg')
#p.data.setchannellabels(['A178'])
p.data.getdata(0,p.data.pnts_in_file)
self.numchannels = size(p.data.data_block,1)
self.t = p.data.wintime #eventtime
self.data = p.data.data_block
self.tstart = 0; self.tstop = len(self.t)
self.time = copy(self.t[self.tstart:self.tstop])
self.chanind = arange(self.numchannels)
self.chanlabels = p.data.channels.labellist
self.scalefact = (p.data.data_block.min()+p.data.data_block.max())/2
self.channels = p.data.channels.chanlocs
self.srate = p.hdr.header_data.sample_period
self.data_loaded_setup()
self.curchannel = 0
def hideinsteadofdelete(self,widget, ev=None):
widget.hide()
return True
def load_data(self,widget):
from gui.gtk import filechooser
fn = filechooser.open()
try: #pdf load method
self.data_assist = meg_assistant.setup(path = fn[0], \
callback=self.load_data_callback)
except:
print 'something wrong with load'
return -1
def load_data_callback(self, widget):
print 'DONE!'
p = self.data_assist.pdfdata #4D MEG file format
input_dict = {'data_block':p.data.data_block,'srate':p.data.srate,'wintime':p.data.wintime,'labellist':p.data.channels.labellist,'chanlocs':p.data.channels.chanlocs}
self.data_handler(widget, input_dict)
def data_handler(self, widget, input_dict, callback=None):
'''
datahandler(data,srate,wintime,chanlabels,chanlocs)
-
data = 2D array
srate = type(float or int)
wintime = type(list or array) of same length as first dimension of data
chanlabels = type(list of strings) of same length as
second dimension of data
chanlocs = array of shape (2, number of channels), e.g. (2, 248), containing page
coordinates for each channel. Position of X and Y is between -.5
and .5
'''
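# Illustrative sketch (hypothetical values) of the expected input_dict layout:
#
#     input_dict = {'data_block': zeros((1000, 4)),          # time x channels
#                   'srate': 508.63,
#                   'wintime': arange(1000) / 508.63,
#                   'labellist': ['A1', 'A2', 'A3', 'A4'],
#                   'chanlocs': zeros((2, 4))}               # page x/y per channel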
####!!!!!!!
'''should rewrite the following as well as the filechooser method to make it simple and compatible with dictionary based load and read'''
data = input_dict['data_block']
srate = input_dict['srate']
wintime = input_dict['wintime']
chanlabels = input_dict['labellist']
chanlocs = input_dict['chanlocs']
print type(data),srate,type(wintime),type(chanlabels),type(chanlocs)
print len(chanlabels),size(data,1),len(wintime),size(data,0),\
size(chanlocs,1)
if len(chanlabels) != size(data,1) or len(wintime) != size(data,0):
#or size(chanlocs,1) != size(data,1):
print 'error matching wintime or chlabels or chanlocs with data'
#self.builder.get_object("messagedialog1").format_secondary_text\
#('error matching wintime or chlabels or chanlocs with data')
#self.builder.get_object("messagedialog1").show()
#raise RuntimeError
self.data = data
self.srate = srate
self.chanlabels = chanlabels
self.t = array(wintime)
self.tstart = 0; self.tstop = len(self.t)
self.time = copy(self.t[self.tstart:self.tstop])
self.numchannels = size(data,1)
self.chanind = arange(self.numchannels)
print 'DEBUG',data
self.scalefact = (data.min()+data.max())/2
print 'scalefact', self.scalefact
self.channels = chanlocs
self.curchannel = 0
self.tstart = 0; self.tstop = len(self.t)
self.data_loaded_setup()
self.data2plot = self.data
self.display_apply(None)
try: callback(widget); self.callback = callback
except (TypeError, NameError): print('no callback')
def offset_correct(self,widget):
print self.get_time_selection(widget)
if self.get_time_selection(widget) == -1: #no selections
###self.builder.get_object("messagedialog1").format_secondary_text\
###('No Selection Made Yet')
###self.builder.get_object("messagedialog1").show()
print('no selections detected')
return -1
self.data = self.data - average(self.data[self.sel_ind,:],axis=0)
print 'Data offset corrected, now trying to replot'
self.display_apply(None)
print widget,'wid:',widget.get_label()
self.callback(widget)
def add_selections_to_event_process(self,widget):
try:
if self.ed.window.get_property('visible') == False:
#self.ed = event_process.setup_gui()
self.ed.window.show()
except AttributeError: #first call. setup
self.ed = event_process.setup_gui()
self.ed.window.show()
if self.get_time_selection(widget) == -1:
print('no selections detected')
return -1
print('passing selection indices',self.sel_onset_ind)
self.ed.set_selected_events_passed(None,self.data,self.sel_onset_ind,self.t)
self.ed.builder.get_object("button1").set_sensitive(False)
if __name__ == "__main__":
mainwindow = setup_gui()
mainwindow.window.show()
print 'testing'
ion()
gtk.main()
|
badbytes/pymeg
|
gui/gtk/data_editor.py
|
Python
|
gpl-3.0
| 30,723 | 0.01494 |