text
stringlengths 6
947k
| repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
|
---|---|---|---|---|---|---|
from sevenbridges.meta.resource import Resource
from sevenbridges.meta.fields import StringField, DictField
class BatchGroup(Resource):
"""
Batch group for a batch task.
Represents the group that is assigned to the child task
from the batching criteria that was used when the task was started.
"""
value = StringField(read_only=True)
fields = DictField(read_only=True)
def __str__(self):
return '<Batch group>'
|
sbg/sevenbridges-python
|
sevenbridges/models/compound/tasks/batch_group.py
|
Python
|
apache-2.0
| 454 | 0 |
# from django.shortcuts import render, get_object_or_404
# from .models import Album, Song
# def index(request):
# all_albums = Album.objects.all()
# context = {
# 'all_albums':all_albums,
# }
# return render(request, 'music/index.html', context)
# def detail(request, album_id):
# album = get_object_or_404(Album, pk=album_id)
# return render(request, 'music/detail.html', {'album': album})
# def favourite(request, album_id):
# album = get_object_or_404(Album, pk=album_id)
# try:
# selected_song = album.song_set.get(pk=request.POST['song'])
# except(KeyError, Song.DoesNotExist):
# return render(request, 'music/detail.html', {
# 'album': album,
# 'error_message':'Did not select a valid song'
# })
# else:
# selected_song.is_favourite = True
# selected_song.save()
# return render(request, 'music/detail.html', {'album': album})
from django.views import generic
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.core.urlresolvers import reverse_lazy
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login
from django.views.generic import View
from .forms import UserForm
from .models import Album
class IndexView(generic.ListView):
template_name = "music/index.html"
def get_queryset(self):
return Album.objects.all()
class DetailView(generic.DetailView):
model = Album
template_name = "music/detail.html"
class AlbumCreate(CreateView):
model = Album
fields = ['artist', 'title', 'genre', 'logo']
class AlbumUpdate(UpdateView):
model = Album
fields = ['artist', 'title', 'genre', 'logo']
class AlbumDelete(DeleteView):
model = Album
success_url = reverse_lazy('music:index')
class UserFormView(View):
form_class = UserForm
template_name = 'music/registration_form.html'
#blank form (POST)
def get(self, request):
form = self.form_class(None)
return render(request, self.template_name, {'form':form})
#process form data (POST)
def post(self, request):
form = self.form_class(request.POST)
if form.is_valid():
user = form.save(commit=False)
#cleaned data
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user.set_password(password)
user.save()
#return user objects if correct credentials
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
#request.user.username
return redirect('music:index')
return render(request, self.template_name, {'form':form})
|
TheCoderNextdoor/DjangoSites
|
django_tut/website/music/views.py
|
Python
|
gpl-3.0
| 3,632 | 0.020099 |
import sys
from logging import warning
from glob import iglob
import json
import os
import shutil
from ..common import chdir, run
from .cache import cache_specs
from .dirs import get_specs_dir
def load_all_specs(*, basedir=get_specs_dir(), skip_update_check=True):
os.makedirs(basedir, exist_ok=True)
if not skip_update_check:
with chdir(basedir):
res, _, _ = run(['git', 'fetch', 'origin'])
if res != 'success':
print("Error fetching specs", file=sys.stderr)
_, res, _ = run(['git', 'log', 'HEAD..origin/master'])
if res != '':
print("Spec updates found - Updating", file=sys.stderr)
with chdir(basedir):
run(['git', 'pull', 'origin', 'master'])
# the repo has a /specs folder
basedir = os.path.join(basedir, 'specs')
cache_specs(basedir)
spec_files = iglob(os.path.join(basedir, '_cache', '*.json'))
# load_spec returns a (name, spec) tuple, so we just let the dict() constructor
# turn that into the {name: spec} pairs of a dictionary for us
return dict([load_spec(filename, basedir) for filename in spec_files])
def load_some_specs(idents, *, basedir=get_specs_dir()):
# the repo has a /specs folder
basedir = os.path.join(basedir, 'specs')
cache_specs(basedir)
wanted_spec_files = [os.path.join(basedir, '_cache', '{}.json'.format(ident)) for ident in idents]
all_spec_files = iglob(os.path.join(basedir, '_cache', '*.json'))
loadable_spec_files = set(all_spec_files).intersection(wanted_spec_files)
# load_spec returns a (name, spec) tuple, so we just let the dict() constructor
# turn that into the {name: spec} pairs of a dictionary for us
return dict([load_spec(filename) for filename in loadable_spec_files])
def load_spec(filename, basedir):
with open(filename, 'r', encoding='utf-8') as specfile:
loaded_spec = json.load(specfile)
name = os.path.splitext(os.path.basename(filename))[0]
assignment = loaded_spec['assignment']
# Ask if user wants to re-cache specs to fix discrepancy
if name != assignment:
warning('assignment "{}" does not match the filename {}'.format(assignment, filename))
recache = input("Re-cache specs? (Y/N)")
if recache and recache.lower()[0] == "y":
shutil.rmtree(os.path.join(basedir, '_cache'))
cache_specs(basedir)
return assignment, loaded_spec
|
StoDevX/cs251-toolkit
|
cs251tk/specs/load.py
|
Python
|
mit
| 2,467 | 0.001621 |
from django.apps import AppConfig
class MainConfig(AppConfig):
name = 'main'
|
edisondotme/motoPi
|
main/apps.py
|
Python
|
mit
| 80 | 0.0125 |
import unittest
from chat.commands.commandlist import CommandList
from chat.command import Command
from tests.structs.dummychat import DummyChat
class TestCommands(unittest.TestCase):
def setUp(self):
self.chat = DummyChat()
def test_get(self):
command = CommandList.get('help', self.chat, 'message')
self.assertTrue(command and isinstance(command, Command), 'Command get failed')
def test_validate(self):
fail_msg = 'Command validate failed'
self.assertTrue(CommandList.validate('help'), fail_msg)
self.assertTrue(CommandList.validate('!help'), fail_msg)
self.assertTrue(CommandList.validate('song'), fail_msg)
self.assertTrue(CommandList.validate('!song'), fail_msg)
self.assertTrue(CommandList.validate('restart'), fail_msg)
self.assertTrue(CommandList.validate('!restart'), fail_msg)
self.assertFalse(CommandList.validate('not a function'), fail_msg)
self.assertFalse(CommandList.validate('!not a function'), fail_msg)
|
jk977/twitch-plays
|
bot/tests/commands.py
|
Python
|
gpl-3.0
| 1,035 | 0.002899 |
class TestRailTestCase:
def __init__(self, title, section, suite, steps):
self.title = title
self.section_name = section
self.suite_name = suite
self.steps = steps
self.type_id = 1
self.priority_id = 4
def to_json_dict(self):
return {
'title': self.title,
'type_id': self.type_id,
'priority_id': self.priority_id,
'custom_steps_separated': self.steps
}
|
2gis/pytestrail
|
testrail/testcase.py
|
Python
|
mit
| 474 | 0 |
"""
Tools for sending email.
"""
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
# Imported for backwards compatibility, and for the sake
# of a cleaner namespace. These symbols used to be in
# django/core/mail.py before the introduction of email
# backends and the subsequent reorganization (See #10355)
from django.core.mail.utils import CachedDnsName, DNS_NAME
from django.core.mail.message import \
EmailMessage, EmailMultiAlternatives, \
SafeMIMEText, SafeMIMEMultipart, \
DEFAULT_ATTACHMENT_MIME_TYPE, make_msgid, \
BadHeaderError, forbid_multi_line_headers
from django.core.mail.backends.smtp import EmailBackend as _SMTPConnection
def get_connection(backend=None, fail_silently=False, **kwds):
"""Load an e-mail backend and return an instance of it.
If backend is None (default) settings.EMAIL_BACKEND is used.
Both fail_silently and other keyword arguments are used in the
constructor of the backend.
"""
path = backend or settings.EMAIL_BACKEND
try:
mod_name, klass_name = path.rsplit('.', 1)
mod = import_module(mod_name)
except ImportError, e:
raise ImproperlyConfigured(('Error importing email backend module %s: "%s"'
% (mod_name, e)))
try:
klass = getattr(mod, klass_name)
except AttributeError:
raise ImproperlyConfigured(('Module "%s" does not define a '
'"%s" class' % (mod_name, klass_name)))
return klass(fail_silently=fail_silently, **kwds)
def send_mail(subject, message, from_email, recipient_list,
fail_silently=False, auth_user=None, auth_password=None,
connection=None):
"""
Easy wrapper for sending a single message to a recipient list. All members
of the recipient list will see the other recipients in the 'To' field.
If auth_user is None, the EMAIL_HOST_USER setting is used.
If auth_password is None, the EMAIL_HOST_PASSWORD setting is used.
Note: The API for this method is frozen. New code wanting to extend the
functionality should use the EmailMessage class directly.
"""
connection = connection or get_connection(username=auth_user,
password=auth_password,
fail_silently=fail_silently)
return EmailMessage(subject, message, from_email, recipient_list,
connection=connection).send()
def send_mass_mail(datatuple, fail_silently=False, auth_user=None,
auth_password=None, connection=None):
"""
Given a datatuple of (subject, message, from_email, recipient_list), sends
each message to each recipient list. Returns the number of e-mails sent.
If from_email is None, the DEFAULT_FROM_EMAIL setting is used.
If auth_user and auth_password are set, they're used to log in.
If auth_user is None, the EMAIL_HOST_USER setting is used.
If auth_password is None, the EMAIL_HOST_PASSWORD setting is used.
Note: The API for this method is frozen. New code wanting to extend the
functionality should use the EmailMessage class directly.
"""
connection = connection or get_connection(username=auth_user,
password=auth_password,
fail_silently=fail_silently)
messages = [EmailMessage(subject, message, sender, recipient)
for subject, message, sender, recipient in datatuple]
return connection.send_messages(messages)
def mail_admins(subject, message, fail_silently=False, connection=None,
html_message=None):
"""Sends a message to the admins, as defined by the ADMINS setting."""
if not settings.ADMINS:
return
mail = EmailMultiAlternatives(u'%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject),
message, settings.SERVER_EMAIL, [a[1] for a in settings.ADMINS],
connection=connection)
if html_message:
mail.attach_alternative(html_message, 'text/html')
mail.send(fail_silently=fail_silently)
def mail_managers(subject, message, fail_silently=False, connection=None,
html_message=None):
"""Sends a message to the managers, as defined by the MANAGERS setting."""
if not settings.MANAGERS:
return
mail = EmailMultiAlternatives(u'%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject),
message, settings.SERVER_EMAIL, [a[1] for a in settings.MANAGERS],
connection=connection)
if html_message:
mail.attach_alternative(html_message, 'text/html')
mail.send(fail_silently=fail_silently)
class SMTPConnection(_SMTPConnection):
def __init__(self, *args, **kwds):
import warnings
warnings.warn(
'mail.SMTPConnection is deprecated; use mail.get_connection() instead.',
DeprecationWarning
)
super(SMTPConnection, self).__init__(*args, **kwds)
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/lib/django-1.3/django/core/mail/__init__.py
|
Python
|
bsd-3-clause
| 5,072 | 0.002957 |
"""Supporting definitions for the Python regression tests."""
if __name__ != 'test_support':
raise ImportError, 'test_support must be imported from the test package'
import sys
class Error(Exception):
"""Base class for regression test exceptions."""
class TestFailed(Error):
"""Test failed."""
class TestSkipped(Error):
"""Test skipped.
This can be raised to indicate that a test was deliberatly
skipped, but not because a feature wasn't available. For
example, if some resource can't be used, such as the network
appears to be unavailable, this should be raised instead of
TestFailed.
"""
class ResourceDenied(TestSkipped):
"""Test skipped because it requested a disallowed resource.
This is raised when a test calls requires() for a resource that
has not be enabled. It is used to distinguish between expected
and unexpected skips.
"""
verbose = 1 # Flag set to 0 by regrtest.py
use_resources = None # Flag set to [] by regrtest.py
max_memuse = 0 # Disable bigmem tests (they will still be run with
# small sizes, to make sure they work.)
# _original_stdout is meant to hold stdout at the time regrtest began.
# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
# The point is to have some flavor of stdout the user can actually see.
_original_stdout = None
def record_original_stdout(stdout):
global _original_stdout
_original_stdout = stdout
def get_original_stdout():
return _original_stdout or sys.stdout
def unload(name):
try:
del sys.modules[name]
except KeyError:
pass
def unlink(filename):
import os
try:
os.unlink(filename)
except OSError:
pass
def forget(modname):
'''"Forget" a module was ever imported by removing it from sys.modules and
deleting any .pyc and .pyo files.'''
unload(modname)
import os
for dirname in sys.path:
unlink(os.path.join(dirname, modname + os.extsep + 'pyc'))
# Deleting the .pyo file cannot be within the 'try' for the .pyc since
# the chance exists that there is no .pyc (and thus the 'try' statement
# is exited) but there is a .pyo file.
unlink(os.path.join(dirname, modname + os.extsep + 'pyo'))
def is_resource_enabled(resource):
"""Test whether a resource is enabled. Known resources are set by
regrtest.py."""
return use_resources is not None and resource in use_resources
def requires(resource, msg=None):
"""Raise ResourceDenied if the specified resource is not available.
If the caller's module is __main__ then automatically return True. The
possibility of False being returned occurs when regrtest.py is executing."""
# see if the caller's module is __main__ - if so, treat as if
# the resource was set
if sys._getframe().f_back.f_globals.get("__name__") == "__main__":
return
if not is_resource_enabled(resource):
if msg is None:
msg = "Use of the `%s' resource not enabled" % resource
raise ResourceDenied(msg)
def bind_port(sock, host='', preferred_port=54321):
"""Try to bind the sock to a port. If we are running multiple
tests and we don't try multiple ports, the test can fails. This
makes the test more robust."""
import socket, errno
# some random ports that hopefully no one is listening on.
for port in [preferred_port, 9907, 10243, 32999]:
try:
sock.bind((host, port))
return port
except socket.error, (err, msg):
if err != errno.EADDRINUSE:
raise
print >>sys.__stderr__, \
' WARNING: failed to listen on port %d, trying another' % port
raise TestFailed, 'unable to find port to listen on'
FUZZ = 1e-6
def fcmp(x, y): # fuzzy comparison function
if type(x) == type(0.0) or type(y) == type(0.0):
try:
x, y = coerce(x, y)
fuzz = (abs(x) + abs(y)) * FUZZ
if abs(x-y) <= fuzz:
return 0
except:
pass
elif type(x) == type(y) and type(x) in (type(()), type([])):
for i in range(min(len(x), len(y))):
outcome = fcmp(x[i], y[i])
if outcome != 0:
return outcome
return cmp(len(x), len(y))
return cmp(x, y)
try:
unicode
have_unicode = 1
except NameError:
have_unicode = 0
is_jython = sys.platform.startswith('java')
import os
# Filename used for testing
if os.name == 'java':
# Jython disallows @ in module names
TESTFN = '$test'
elif os.name == 'riscos':
TESTFN = 'testfile'
else:
TESTFN = '@test'
# Unicode name only used if TEST_FN_ENCODING exists for the platform.
if have_unicode:
# Assuming sys.getfilesystemencoding()!=sys.getdefaultencoding()
# TESTFN_UNICODE is a filename that can be encoded using the
# file system encoding, but *not* with the default (ascii) encoding
if isinstance('', unicode):
# python -U
# XXX perhaps unicode() should accept Unicode strings?
TESTFN_UNICODE = "@test-\xe0\xf2"
else:
# 2 latin characters.
TESTFN_UNICODE = unicode("@test-\xe0\xf2", "latin-1")
TESTFN_ENCODING = sys.getfilesystemencoding()
# TESTFN_UNICODE_UNENCODEABLE is a filename that should *not* be
# able to be encoded by *either* the default or filesystem encoding.
# This test really only makes sense on Windows NT platforms
# which have special Unicode support in posixmodule.
if (not hasattr(sys, "getwindowsversion") or
sys.getwindowsversion()[3] < 2): # 0=win32s or 1=9x/ME
TESTFN_UNICODE_UNENCODEABLE = None
else:
# Japanese characters (I think - from bug 846133)
TESTFN_UNICODE_UNENCODEABLE = eval('u"@test-\u5171\u6709\u3055\u308c\u308b"')
try:
# XXX - Note - should be using TESTFN_ENCODING here - but for
# Windows, "mbcs" currently always operates as if in
# errors=ignore' mode - hence we get '?' characters rather than
# the exception. 'Latin1' operates as we expect - ie, fails.
# See [ 850997 ] mbcs encoding ignores errors
TESTFN_UNICODE_UNENCODEABLE.encode("Latin1")
except UnicodeEncodeError:
pass
else:
print \
'WARNING: The filename %r CAN be encoded by the filesystem. ' \
'Unicode filename tests may not be effective' \
% TESTFN_UNICODE_UNENCODEABLE
# Make sure we can write to TESTFN, try in /tmp if we can't
fp = None
try:
fp = open(TESTFN, 'w+')
except IOError:
TMP_TESTFN = os.path.join('/tmp', TESTFN)
try:
fp = open(TMP_TESTFN, 'w+')
TESTFN = TMP_TESTFN
del TMP_TESTFN
except IOError:
print ('WARNING: tests will fail, unable to write to: %s or %s' %
(TESTFN, TMP_TESTFN))
if fp is not None:
fp.close()
unlink(TESTFN)
del os, fp
def findfile(file, here=__file__):
"""Try to find a file on sys.path and the working directory. If it is not
found the argument passed to the function is returned (this does not
necessarily signal failure; could still be the legitimate path)."""
import os
if os.path.isabs(file):
return file
path = sys.path
path = [os.path.dirname(here)] + path
for dn in path:
fn = os.path.join(dn, file)
if os.path.exists(fn): return fn
return file
def verify(condition, reason='test failed'):
"""Verify that condition is true. If not, raise TestFailed.
The optional argument reason can be given to provide
a better error text.
"""
if not condition:
raise TestFailed(reason)
def vereq(a, b):
"""Raise TestFailed if a == b is false.
This is better than verify(a == b) because, in case of failure, the
error message incorporates repr(a) and repr(b) so you can see the
inputs.
Note that "not (a == b)" isn't necessarily the same as "a != b"; the
former is tested.
"""
if not (a == b):
raise TestFailed, "%r == %r" % (a, b)
def sortdict(dict):
"Like repr(dict), but in sorted order."
items = dict.items()
items.sort()
reprpairs = ["%r: %r" % pair for pair in items]
withcommas = ", ".join(reprpairs)
return "{%s}" % withcommas
def check_syntax(statement):
try:
compile(statement, '<string>', 'exec')
except SyntaxError:
pass
else:
print 'Missing SyntaxError: "%s"' % statement
def open_urlresource(url):
import urllib, urlparse
import os.path
filename = urlparse.urlparse(url)[2].split('/')[-1] # '/': it's URL!
for path in [os.path.curdir, os.path.pardir]:
fn = os.path.join(path, filename)
if os.path.exists(fn):
return open(fn)
requires('urlfetch')
print >> get_original_stdout(), '\tfetching %s ...' % url
fn, _ = urllib.urlretrieve(url, filename)
return open(fn)
#=======================================================================
# Decorator for running a function in a different locale, correctly resetting
# it afterwards.
def run_with_locale(catstr, *locales):
def decorator(func):
def inner(*args, **kwds):
try:
import locale
category = getattr(locale, catstr)
orig_locale = locale.setlocale(category)
except AttributeError:
# if the test author gives us an invalid category string
raise
except:
# cannot retrieve original locale, so do nothing
locale = orig_locale = None
else:
for loc in locales:
try:
locale.setlocale(category, loc)
break
except:
pass
# now run the function, resetting the locale on exceptions
try:
return func(*args, **kwds)
finally:
if locale and orig_locale:
locale.setlocale(category, orig_locale)
inner.func_name = func.func_name
inner.__doc__ = func.__doc__
return inner
return decorator
#=======================================================================
# Big-memory-test support. Separate from 'resources' because memory use should be configurable.
# Some handy shorthands. Note that these are used for byte-limits as well
# as size-limits, in the various bigmem tests
_1M = 1024*1024
_1G = 1024 * _1M
_2G = 2 * _1G
# Hack to get at the maximum value an internal index can take.
class _Dummy:
def __getslice__(self, i, j):
return j
MAX_Py_ssize_t = _Dummy()[:]
def set_memlimit(limit):
import re
global max_memuse
sizes = {
'k': 1024,
'm': _1M,
'g': _1G,
't': 1024*_1G,
}
m = re.match(r'(\d+(\.\d+)?) (K|M|G|T)b?$', limit,
re.IGNORECASE | re.VERBOSE)
if m is None:
raise ValueError('Invalid memory limit %r' % (limit,))
memlimit = int(float(m.group(1)) * sizes[m.group(3).lower()])
if memlimit > MAX_Py_ssize_t:
memlimit = MAX_Py_ssize_t
if memlimit < _2G - 1:
raise ValueError('Memory limit %r too low to be useful' % (limit,))
max_memuse = memlimit
def bigmemtest(minsize, memuse, overhead=5*_1M):
"""Decorator for bigmem tests.
'minsize' is the minimum useful size for the test (in arbitrary,
test-interpreted units.) 'memuse' is the number of 'bytes per size' for
the test, or a good estimate of it. 'overhead' specifies fixed overhead,
independant of the testsize, and defaults to 5Mb.
The decorator tries to guess a good value for 'size' and passes it to
the decorated test function. If minsize * memuse is more than the
allowed memory use (as defined by max_memuse), the test is skipped.
Otherwise, minsize is adjusted upward to use up to max_memuse.
"""
def decorator(f):
def wrapper(self):
if not max_memuse:
# If max_memuse is 0 (the default),
# we still want to run the tests with size set to a few kb,
# to make sure they work. We still want to avoid using
# too much memory, though, but we do that noisily.
maxsize = 5147
self.failIf(maxsize * memuse + overhead > 20 * _1M)
else:
maxsize = int((max_memuse - overhead) / memuse)
if maxsize < minsize:
# Really ought to print 'test skipped' or something
if verbose:
sys.stderr.write("Skipping %s because of memory "
"constraint\n" % (f.__name__,))
return
# Try to keep some breathing room in memory use
maxsize = max(maxsize - 50 * _1M, minsize)
return f(self, maxsize)
wrapper.minsize = minsize
wrapper.memuse = memuse
wrapper.overhead = overhead
return wrapper
return decorator
def bigaddrspacetest(f):
"""Decorator for tests that fill the address space."""
def wrapper(self):
if max_memuse < MAX_Py_ssize_t:
if verbose:
sys.stderr.write("Skipping %s because of memory "
"constraint\n" % (f.__name__,))
else:
return f(self)
return wrapper
#=======================================================================
# Preliminary PyUNIT integration.
import unittest
class BasicTestRunner:
def run(self, test):
result = unittest.TestResult()
test(result)
return result
def run_suite(suite, testclass=None):
"""Run tests from a unittest.TestSuite-derived class."""
if verbose:
runner = unittest.TextTestRunner(sys.stdout, verbosity=2)
else:
runner = BasicTestRunner()
result = runner.run(suite)
if not result.wasSuccessful():
if len(result.errors) == 1 and not result.failures:
err = result.errors[0][1]
elif len(result.failures) == 1 and not result.errors:
err = result.failures[0][1]
else:
if testclass is None:
msg = "errors occurred; run in verbose mode for details"
else:
msg = "errors occurred in %s.%s" \
% (testclass.__module__, testclass.__name__)
raise TestFailed(msg)
raise TestFailed(err)
def run_unittest(*classes):
"""Run tests from unittest.TestCase-derived classes."""
suite = unittest.TestSuite()
for cls in classes:
if isinstance(cls, (unittest.TestSuite, unittest.TestCase)):
suite.addTest(cls)
else:
suite.addTest(unittest.makeSuite(cls))
if len(classes)==1:
testclass = classes[0]
else:
testclass = None
run_suite(suite, testclass)
#=======================================================================
# doctest driver.
def run_doctest(module, verbosity=None):
"""Run doctest on the given module. Return (#failures, #tests).
If optional argument verbosity is not specified (or is None), pass
test_support's belief about verbosity on to doctest. Else doctest's
usual behavior is used (it searches sys.argv for -v).
"""
import doctest
if verbosity is None:
verbosity = verbose
else:
verbosity = None
# Direct doctest output (normally just errors) to real stdout; doctest
# output shouldn't be compared by regrtest.
save_stdout = sys.stdout
sys.stdout = get_original_stdout()
try:
f, t = doctest.testmod(module, verbose=verbosity)
if f:
raise TestFailed("%d of %d doctests failed" % (f, t))
finally:
sys.stdout = save_stdout
if verbose:
print 'doctest (%s) ... %d tests with zero failures' % (module.__name__, t)
return f, t
#=======================================================================
# Threading support to prevent reporting refleaks when running regrtest.py -R
def threading_setup():
import threading
return len(threading._active), len(threading._limbo)
def threading_cleanup(num_active, num_limbo):
import threading
import time
_MAX_COUNT = 10
count = 0
while len(threading._active) != num_active and count < _MAX_COUNT:
count += 1
time.sleep(0.1)
count = 0
while len(threading._limbo) != num_limbo and count < _MAX_COUNT:
count += 1
time.sleep(0.1)
def reap_children():
"""Use this function at the end of test_main() whenever sub-processes
are started. This will help ensure that no extra children (zombies)
stick around to hog resources and create problems when looking
for refleaks.
"""
# Reap all our dead child processes so we don't leave zombies around.
# These hog resources and might be causing some of the buildbots to die.
import os
if hasattr(os, 'waitpid'):
any_process = -1
while True:
try:
# This will raise an exception on Windows. That's ok.
pid, status = os.waitpid(any_process, os.WNOHANG)
if pid == 0:
break
except:
break
|
ruamel/ordereddict
|
test/unit/test_support.py
|
Python
|
mit
| 17,653 | 0.003852 |
import mock
import lxml.etree as ET
from .utils import make_cobertura
def test_parse_path():
from pycobertura import Cobertura
xml_path = 'foo.xml'
with mock.patch('pycobertura.cobertura.os.path.exists', return_value=True):
with mock.patch('pycobertura.cobertura.ET.parse') as mock_parse:
cobertura = Cobertura(xml_path)
assert cobertura.xml is mock_parse.return_value.getroot.return_value
def test_version():
cobertura = make_cobertura()
assert cobertura.version == '1.9'
def test_line_rate():
cobertura = make_cobertura()
assert cobertura.line_rate() == 0.9
def test_line_rate_by_class():
cobertura = make_cobertura()
expected_line_rates = {
'Main': 1.0,
'search.BinarySearch': 0.9166666666666666,
'search.ISortedArraySearch': 1.0,
'search.LinearSearch': 0.7142857142857143,
}
for class_name in cobertura.classes():
assert cobertura.line_rate(class_name) == \
expected_line_rates[class_name]
def test_branch_rate():
cobertura = make_cobertura()
assert cobertura.branch_rate() == 0.75
def test_branch_rate_by_class():
cobertura = make_cobertura()
expected_branch_rates = {
'Main': 1.0,
'search.BinarySearch': 0.8333333333333334,
'search.ISortedArraySearch': 1.0,
'search.LinearSearch': 0.6666666666666666,
}
for class_name in cobertura.classes():
assert cobertura.branch_rate(class_name) == \
expected_branch_rates[class_name]
def test_total_misses():
cobertura = make_cobertura()
assert cobertura.total_misses() == 3
def test_missed_statements_by_class_name():
cobertura = make_cobertura()
expected_missed_statements = {
'Main': [],
'search.BinarySearch': [24],
'search.ISortedArraySearch': [],
'search.LinearSearch': [19, 24],
}
for class_name in cobertura.classes():
assert cobertura.missed_statements(class_name) == \
expected_missed_statements[class_name]
def test_list_packages():
cobertura = make_cobertura()
packages = cobertura.packages()
assert packages == ['', 'search']
def test_list_classes():
cobertura = make_cobertura()
classes = cobertura.classes()
assert classes == [
'Main',
'search.BinarySearch',
'search.ISortedArraySearch',
'search.LinearSearch'
]
def test_hit_lines__by_iterating_over_classes():
cobertura = make_cobertura()
expected_lines = {
'Main': [10, 16, 17, 18, 19, 23, 25, 26, 28, 29, 30],
'search.BinarySearch': [12, 16, 18, 20, 21, 23, 25, 26, 28, 29, 31],
'search.ISortedArraySearch': [],
'search.LinearSearch': [9, 13, 15, 16, 17],
}
for class_name in cobertura.classes():
assert cobertura.hit_statements(class_name) == expected_lines[class_name]
def test_missed_lines():
cobertura = make_cobertura()
expected_lines = {
'Main': [],
'search.BinarySearch': [24],
'search.ISortedArraySearch': [],
'search.LinearSearch': [19, 20, 21, 22, 23, 24],
}
for class_name in cobertura.classes():
assert cobertura.missed_lines(class_name) == expected_lines[class_name]
def test_total_statements():
cobertura = make_cobertura()
assert cobertura.total_statements() == 30
def test_total_statements_by_class():
cobertura = make_cobertura()
expected_total_statements = {
'Main': 11,
'search.BinarySearch': 12,
'search.ISortedArraySearch': 0,
'search.LinearSearch': 7,
}
for class_name in cobertura.classes():
assert cobertura.total_statements(class_name) == \
expected_total_statements[class_name]
def test_total_misses():
cobertura = make_cobertura()
assert cobertura.total_misses() == 3
def test_total_misses_by_class():
cobertura = make_cobertura()
expected_total_misses = {
'Main': 0,
'search.BinarySearch': 1,
'search.ISortedArraySearch': 0,
'search.LinearSearch': 2,
}
for class_name in cobertura.classes():
assert cobertura.total_misses(class_name) == \
expected_total_misses[class_name]
def test_total_hits():
cobertura = make_cobertura()
assert cobertura.total_hits() == 27
def test_total_hits_by_class():
cobertura = make_cobertura()
expected_total_misses = {
'Main': 11,
'search.BinarySearch': 11,
'search.ISortedArraySearch': 0,
'search.LinearSearch': 5,
}
for class_name in cobertura.classes():
assert cobertura.total_hits(class_name) == \
expected_total_misses[class_name]
def test_filename():
cobertura = make_cobertura()
expected_filenames = {
'Main': 'Main.java',
'search.BinarySearch': 'search/BinarySearch.java',
'search.ISortedArraySearch': 'search/ISortedArraySearch.java',
'search.LinearSearch': 'search/LinearSearch.java',
}
for class_name in cobertura.classes():
assert cobertura.filename(class_name) == \
expected_filenames[class_name]
def test_filepath():
base_path = 'foo/bar/baz'
cobertura = make_cobertura(base_path=base_path)
expected_filepaths = {
'Main': 'foo/bar/baz/Main.java',
'search.BinarySearch': 'foo/bar/baz/search/BinarySearch.java',
'search.ISortedArraySearch': 'foo/bar/baz/search/ISortedArraySearch.java',
'search.LinearSearch': 'foo/bar/baz/search/LinearSearch.java',
}
for class_name in cobertura.classes():
assert cobertura.filepath(class_name) == \
expected_filepaths[class_name]
def test_class_source__sources_not_found():
cobertura = make_cobertura('tests/cobertura.xml')
expected_sources = {
'Main': [(0, 'tests/Main.java not found', None)],
'search.BinarySearch': [(0, 'tests/search/BinarySearch.java not found', None)],
'search.ISortedArraySearch': [(0, 'tests/search/ISortedArraySearch.java not found', None)],
'search.LinearSearch': [(0, 'tests/search/LinearSearch.java not found', None)],
}
for class_name in cobertura.classes():
assert cobertura.class_source(class_name) == expected_sources[class_name]
def test_line_statuses():
cobertura = make_cobertura('tests/dummy.source1/coverage.xml')
expected_line_statuses = {
'dummy/__init__': [],
'dummy/dummy': [
(1, True),
(2, True),
(4, True),
(5, False),
(6, False),
],
'dummy/dummy2': [
(1, True),
(2, True),
],
'dummy/dummy4': [
(1, False),
(2, False),
(4, False),
(5, False),
(6, False)
],
}
for class_name in cobertura.classes():
assert cobertura.line_statuses(class_name) == \
expected_line_statuses[class_name]
def test_class_source__sources_found():
cobertura = make_cobertura('tests/dummy.source1/coverage.xml')
expected_sources = {
'dummy/__init__': [],
'dummy/dummy': [
(1, 'def foo():\n', True),
(2, ' pass\n', True),
(3, '\n', None),
(4, 'def bar():\n', True),
(5, " a = 'a'\n", False),
(6, " b = 'b'\n", False),
],
'dummy/dummy2': [
(1, 'def baz():\n', True),
(2, ' pass\n', True)
],
'dummy/dummy4': [
(1, 'def barbaz():\n', False),
(2, ' pass\n', False),
(3, '\n', None),
(4, 'def foobarbaz():\n', False),
(5, ' a = 1 + 3\n', False),
(6, ' pass\n', False)
],
}
for class_name in cobertura.classes():
assert cobertura.class_source(class_name) == \
expected_sources[class_name]
|
msabramo/pycobertura
|
tests/test_cobertura.py
|
Python
|
mit
| 7,957 | 0.000754 |
from optparse import make_option
from django.core.management.base import BaseCommand
from crits.core.mongo_tools import mongo_connector
import pprint
class Command(BaseCommand):
"""
Gets a count of indicator types and object types in CRITs
"""
help = "Gets a count of indicator types and object types in CRITs"
option_list = BaseCommand.option_list + (
make_option('--sort_count',
'-s',
dest='sort_count',
default=False,
action="store_true",
help='Sort by count instead of by the type\'s name.'
),
make_option('--agg_obj_by_collection',
'-a',
dest='agg_obj_by_collection',
default=False,
action="store_true",
help='For object types: Aggregate by collection instead of '
'combining all results.'
),
)
all_object_collections = [
"actors",
"backdoors",
"campaigns",
"certificates",
"domains",
"email",
"events",
"exploits",
"indicators",
"ips",
"pcaps",
"raw_data",
"sample",
"screenshots",
"targets",
"yara_rules"
]
def handle(self, *args, **kwargs):
sort_count = kwargs.get('sort_count')
agg_obj_by_collection = kwargs.get('agg_obj_by_collection')
pp = pprint.PrettyPrinter(indent=4)
self.aggregate_indicator_types(sort_count, pp)
self.aggregate_object_types(sort_count, agg_obj_by_collection, pp)
def aggregate_indicator_types(self, sort_count, pp):
collection = "indicators"
pipe = [ { "$group": {"_id":"$type" , "count":{"$sum": 1}}}, {"$sort": {"_id": 1}} ]
if sort_count is True:
pipe.append({"$sort": {"count": 1}})
else:
pipe.append({"$sort": {"_id": 1}})
db = mongo_connector(collection)
results = db.aggregate(pipeline=pipe)
print "INDICATOR TYPES IN COLLECTION [%s]" % collection
pp.pprint(results)
print
def aggregate_object_for_collection(self, collection, sort_count):
pipe = [
{"$unwind": "$objects"},
{"$group" :
{"_id":
{"obj_type":
{"$cond":
{"if":
{"$and":
[{"$gt":["$objects.name", None] },
{"$ne": ["$objects.type", "$objects.name"]}]
},
"then": {"$concat": [ "$objects.type", " - ", "$objects.name" ]},
"else": "$objects.type"
}
}
},
"count": {"$sum": 1}
}
}
]
if sort_count is True:
pipe.append({"$sort": {"count": 1}})
else:
pipe.append({"$sort": {"_id": 1}})
db = mongo_connector(collection)
results = db.aggregate(pipeline=pipe)
return results
def aggregate_object_types(self, sort_count, is_agg_per_collection, pp):
results = {}
for collection in self.all_object_collections:
object_types = self.aggregate_object_for_collection(collection, sort_count)
results[collection] = object_types
if is_agg_per_collection:
for collection in self.all_object_collections:
print "OBJECT TYPES FOR COLLECTION: [%s]" % collection.upper()
if len(results[collection]['result']) != 0:
pp.pprint(results[collection]['result'])
else:
print "None found."
print
else:
all_obj_types = {}
for collection in self.all_object_collections:
collection_results = results[collection]
for collection_result in collection_results['result']:
obj_type = collection_result['_id']['obj_type']
all_obj_types[obj_type] = collection_result['count'] + all_obj_types.get(obj_type, 0);
print "OBJECT TYPES FOR ALL COLLECTIONS"
if(sort_count):
import operator
sorted_x = sorted(all_obj_types.items(), key=operator.itemgetter(1))
pp.pprint(sorted_x)
else:
pp.pprint(all_obj_types)
print
print
|
thelok/crits_scripts
|
crits/core/managament/commands/get_indicator_types.py
|
Python
|
mit
| 4,701 | 0.005956 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.jvm.tasks.jvm_task import JvmTask
from pants.backend.jvm.tasks.jvm_tool_task_mixin import JvmToolTaskMixin
from pants.base.target import Target
from pants.console.stty_utils import preserve_stty_settings
from pants.java.util import execute_java
class ScalaRepl(JvmToolTaskMixin, JvmTask):
@classmethod
def register_options(cls, register):
super(ScalaRepl, cls).register_options(register)
register('--main', default='scala.tools.nsc.MainGenericRunner',
help='The entry point for running the repl.')
cls.register_jvm_tool(register, 'scala-repl', default=['//:scala-repl'])
@classmethod
def prepare(cls, options, round_manager):
super(ScalaRepl, cls).prepare(options, round_manager)
# TODO(John Sirois): these are fake requirements in order to force compile run before this
# goal. Introduce a RuntimeClasspath product for JvmCompile and PrepareResources to populate
# and depend on that.
# See: https://github.com/pantsbuild/pants/issues/310
round_manager.require_data('resources_by_target')
round_manager.require_data('classes_by_target')
def execute(self):
(accept_predicate, reject_predicate) = Target.lang_discriminator('java')
targets = self.require_homogeneous_targets(accept_predicate, reject_predicate)
if targets:
tools_classpath = self.tool_classpath('scala-repl')
self.context.release_lock()
with preserve_stty_settings():
classpath = self.classpath(targets, cp=tools_classpath)
# The scala repl requires -Dscala.usejavacp=true since Scala 2.8 when launching in the way
# we do here (not passing -classpath as a program arg to scala.tools.nsc.MainGenericRunner).
jvm_options = self.jvm_options
if not any(opt.startswith('-Dscala.usejavacp=') for opt in jvm_options):
jvm_options.append('-Dscala.usejavacp=true')
print('') # Start REPL output on a new line.
try:
# NOTE: We execute with no workunit, as capturing REPL output makes it very sluggish.
execute_java(classpath=classpath,
main=self.get_options().main,
jvm_options=jvm_options,
args=self.args)
except KeyboardInterrupt:
# TODO(John Sirois): Confirm with Steve Gury that finally does not work on mac and an
# explicit catch of KeyboardInterrupt is required.
pass
|
areitz/pants
|
src/python/pants/backend/jvm/tasks/scala_repl.py
|
Python
|
apache-2.0
| 2,743 | 0.008385 |
#!/usr/bin/python3
# @begin:license
#
# Copyright (c) 2015-2019, Benjamin Niemann <pink@odahoda.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @end:license
import fractions
import logging
from typing import Any, List, Tuple
from PyQt5.QtCore import Qt
from PyQt5 import QtCore
from PyQt5 import QtGui
from noisicaa.core.typing_extra import down_cast
from noisicaa import audioproc
from noisicaa import core
from noisicaa import music
from noisicaa.ui.track_list import base_track_editor
from noisicaa.ui.track_list import time_view_mixin
from noisicaa.ui.track_list import tools
from . import model
logger = logging.getLogger(__name__)
class EditControlPointsTool(tools.ToolBase):
track = None # type: ControlTrackEditor
def __init__(self, **kwargs: Any) -> None:
super().__init__(
type=tools.ToolType.EDIT_CONTROL_POINTS,
group=tools.ToolGroup.EDIT,
**kwargs)
self.__moving_point = None # type: ControlPoint
self.__moving_point_original_pos = None # type: QtCore.QPoint
self.__moving_point_offset = None # type: QtCore.QPoint
self.__move_mode = 'any'
self.__move_range = None # type: Tuple[int, int]
def iconName(self) -> str:
return 'edit-control-points'
def mousePressEvent(self, evt: QtGui.QMouseEvent) -> None:
self.track.updateHighlightedPoint()
if (evt.button() == Qt.LeftButton
and evt.modifiers() == Qt.NoModifier
and self.track.highlightedPoint() is not None):
self.__moving_point = self.track.highlightedPoint()
self.__moving_point_original_pos = self.__moving_point.pos()
self.__moving_point_offset = evt.pos() - self.__moving_point.pos()
self.__move_mode = 'any'
point_index = self.__moving_point.index
if point_index > 0:
range_left = self.track.points[point_index - 1].pos().x() + 1
else:
range_left = self.track.timeToX(audioproc.MusicalTime(0, 1))
if point_index < len(self.track.points) - 1:
range_right = self.track.points[point_index + 1].pos().x() - 1
else:
range_right = self.track.timeToX(self.track.projectEndTime())
self.__move_range = (range_left, range_right)
evt.accept()
return
if (evt.button() == Qt.LeftButton
and evt.modifiers() == Qt.ShiftModifier
and self.track.highlightedPoint() is not None):
with self.project.apply_mutations('%s: Remove control point' % self.track.track.name):
self.track.track.delete_control_point(self.track.highlightedPoint().point)
evt.accept()
return
if evt.button() == Qt.RightButton and self.__moving_point is not None:
self.track.setPointPos(self.__moving_point, self.__moving_point_original_pos)
self.__moving_point = None
evt.accept()
return
super().mousePressEvent(evt)
def mouseMoveEvent(self, evt: QtGui.QMouseEvent) -> None:
if self.__moving_point is not None:
new_pos = evt.pos() - self.__moving_point_offset
if evt.modifiers() == Qt.ControlModifier:
delta = new_pos - self.__moving_point_original_pos
if self.__move_mode == 'any' and delta.manhattanLength() > 5:
if abs(delta.x()) > abs(delta.y()):
self.__move_mode = 'horizontal'
else:
self.__move_mode = 'vertical'
else:
self.__move_mode = 'any'
if self.__move_mode == 'horizontal':
new_pos.setY(self.__moving_point_original_pos.y())
elif self.__move_mode == 'vertical':
new_pos.setX(self.__moving_point_original_pos.x())
range_left, range_right = self.__move_range
if new_pos.x() < range_left:
new_pos.setX(range_left)
elif new_pos.x() > range_right:
new_pos.setX(range_right)
if new_pos.y() < 0:
new_pos.setY(0)
elif new_pos.y() > self.track.height() - 1:
new_pos.setY(self.track.height() - 1)
self.track.setPointPos(self.__moving_point, new_pos)
evt.accept()
return
self.track.updateHighlightedPoint()
super().mouseMoveEvent(evt)
def mouseReleaseEvent(self, evt: QtGui.QMouseEvent) -> None:
if evt.button() == Qt.LeftButton and self.__moving_point is not None:
pos = self.__moving_point.pos()
self.__moving_point = None
if self.__move_mode != 'vertical':
new_time = self.track.xToTime(pos.x())
else:
new_time = None
if self.__move_mode != 'horizontal':
new_value = self.track.yToValue(pos.y())
else:
new_value = None
with self.project.apply_mutations('%s: Change control point' % self.track.track.name):
self.track.highlightedPoint().point.time = new_time
self.track.highlightedPoint().point.value = new_value
evt.accept()
return
super().mouseReleaseEvent(evt)
def mouseDoubleClickEvent(self, evt: QtGui.QMouseEvent) -> None:
if evt.button() == Qt.LeftButton and evt.modifiers() == Qt.NoModifier:
# If the first half of the double click initiated a move,
# cancel that move now.
if self.__moving_point is not None:
self.track.setPointPos(self.__moving_point, self.__moving_point_original_pos)
self.__moving_point = None
time = self.track.xToTime(evt.pos().x())
for point in self.track.track.points:
if point.time == time:
with self.project.apply_mutations(
'%s: Change control point' % self.track.track.name):
point.value = self.track.yToValue(evt.pos().y())
break
else:
with self.project.apply_mutations(
'%s: Insert control point' % self.track.track.name):
self.track.track.create_control_point(
self.track.xToTime(evt.pos().x()),
self.track.yToValue(evt.pos().y()))
evt.accept()
return
super().mouseDoubleClickEvent(evt)
class ControlTrackToolBox(tools.ToolBox):
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.addTool(EditControlPointsTool)
class ControlPoint(core.AutoCleanupMixin, object):
def __init__(self, track_editor: 'ControlTrackEditor', point: model.ControlPoint) -> None:
super().__init__()
self.__track_editor = track_editor
self.__point = point
self.__pos = QtCore.QPoint(
self.__track_editor.timeToX(self.__point.time),
self.__track_editor.valueToY(self.__point.value))
self.__listeners = core.ListenerList()
self.add_cleanup_function(self.__listeners.cleanup)
self.__listeners.add(self.__point.time_changed.add(self.onTimeChanged))
self.__listeners.add(self.__point.value_changed.add(self.onValueChanged))
def onTimeChanged(self, change: music.PropertyValueChange[audioproc.MusicalTime]) -> None:
self.__pos = QtCore.QPoint(
self.__track_editor.timeToX(change.new_value),
self.__pos.y())
self.__track_editor.update()
def onValueChanged(self, change: music.PropertyValueChange[float]) -> None:
self.__pos = QtCore.QPoint(
self.__pos.x(),
self.__track_editor.valueToY(change.new_value))
self.__track_editor.update()
@property
def index(self) -> int:
return self.__point.index
@property
def point(self) -> model.ControlPoint:
return self.__point
@property
def point_id(self) -> int:
return self.__point.id
@property
def time(self) -> audioproc.MusicalTime:
return self.__point.time
def pos(self) -> QtCore.QPoint:
return self.__pos
def setPos(self, pos: QtCore.QPoint) -> None:
if pos is None:
self.__pos = QtCore.QPoint(
self.__track_editor.timeToX(self.__point.time),
self.__track_editor.valueToY(self.__point.value))
else:
self.__pos = pos
def recomputePos(self) -> None:
self.__pos = QtCore.QPoint(
self.__track_editor.timeToX(self.__point.time),
self.__track_editor.valueToY(self.__point.value))
class ControlTrackEditor(time_view_mixin.ContinuousTimeMixin, base_track_editor.BaseTrackEditor):
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.__mouse_pos = None # type: QtCore.QPoint
self.__highlighted_point = None # type: ControlPoint
self.__playback_time = None # type: audioproc.MusicalTime
self.__listeners = core.ListenerList()
self.points = [] # type: List[ControlPoint]
for point in self.track.points:
self.addPoint(len(self.points), point)
self.__listeners.add(self.track.points_changed.add(self.onPointsChanged))
self.setDefaultHeight(120)
self.scaleXChanged.connect(self.__onScaleXChanged)
self.playbackPositionChanged.connect(self.__playbackPositionChanged)
def cleanup(self) -> None:
for points in self.points:
points.cleanup()
self.points.clear()
super().cleanup()
def createToolBox(self) -> ControlTrackToolBox:
return ControlTrackToolBox(track=self, context=self.context)
def __onScaleXChanged(self, scale_x: fractions.Fraction) -> None:
for cpoint in self.points:
cpoint.recomputePos()
self.update()
@property
def track(self) -> model.ControlTrack:
return down_cast(model.ControlTrack, super().track)
def setHighlightedPoint(self, cpoint: ControlPoint) -> None:
if cpoint is not self.__highlighted_point:
self.__highlighted_point = cpoint
self.update()
def highlightedPoint(self) -> ControlPoint:
return self.__highlighted_point
def updateHighlightedPoint(self) -> None:
if self.__mouse_pos is None:
self.setHighlightedPoint(None)
return
closest_cpoint = None # type: ControlPoint
closest_dist = None # type: int
for cpoint in self.points:
dist = ((cpoint.pos().x() - self.__mouse_pos.x()) ** 2
+ (cpoint.pos().y() - self.__mouse_pos.y()) ** 2)
if dist < 20**2 and (closest_dist is None or dist < closest_dist):
closest_dist = dist
closest_cpoint = cpoint
self.setHighlightedPoint(closest_cpoint)
def setPointPos(self, cpoint: ControlPoint, pos: QtCore.QPoint) -> None:
cpoint.setPos(pos)
self.update()
def addPoint(self, insert_index: int, point: model.ControlPoint) -> None:
cpoint = ControlPoint(track_editor=self, point=point)
self.points.insert(insert_index, cpoint)
self.update()
def removePoint(self, remove_index: int, point: QtCore.QPoint) -> None:
cpoint = self.points.pop(remove_index)
cpoint.cleanup()
self.update()
def onPointsChanged(self, change: music.PropertyListChange[model.ControlPoint]) -> None:
if isinstance(change, music.PropertyListInsert):
self.addPoint(change.index, change.new_value)
self.updateHighlightedPoint()
elif isinstance(change, music.PropertyListDelete):
self.removePoint(change.index, change.old_value)
self.updateHighlightedPoint()
else:
raise TypeError(type(change))
def __playbackPositionChanged(self, time: audioproc.MusicalTime) -> None:
if self.__playback_time is not None:
x = self.timeToX(self.__playback_time)
self.update(x - self.xOffset(), 0, 2, self.height())
self.__playback_time = time
if self.__playback_time is not None:
x = self.timeToX(self.__playback_time)
self.update(x - self.xOffset(), 0, 2, self.height())
def valueToY(self, value: float) -> int:
return int(self.height() - int(self.height() * value))
def yToValue(self, y: int) -> float:
return float(self.height() - y) / self.height()
def leaveEvent(self, evt: QtCore.QEvent) -> None:
self.__mouse_pos = None
self.setHighlightedPoint(None)
super().leaveEvent(evt)
def mousePressEvent(self, evt: QtGui.QMouseEvent) -> None:
self.__mouse_pos = evt.pos() + self.offset()
super().mousePressEvent(evt)
def mouseMoveEvent(self, evt: QtGui.QMouseEvent) -> None:
self.__mouse_pos = evt.pos() + self.offset()
super().mouseMoveEvent(evt)
def mouseReleaseEvent(self, evt: QtGui.QMouseEvent) -> None:
self.__mouse_pos = evt.pos() + self.offset()
super().mouseReleaseEvent(evt)
def mouseDoubleClickEvent(self, evt: QtGui.QMouseEvent) -> None:
self.__mouse_pos = evt.pos() + self.offset()
super().mouseDoubleClickEvent(evt)
def _paint(self, painter: QtGui.QPainter, paint_rect: QtCore.QRect) -> None:
self.renderTimeGrid(painter, paint_rect)
points = self.points[:]
px, py = None, None # type: int, int
for cpoint in points:
x = cpoint.pos().x()
y = cpoint.pos().y()
if px is not None:
painter.setPen(Qt.black)
painter.drawLine(px, py, x, y)
px, py = x, y
for cpoint in points:
x = cpoint.pos().x()
y = cpoint.pos().y()
if cpoint is self.__highlighted_point:
painter.setPen(Qt.black)
painter.drawLine(x - 4, y - 4, x + 4, y - 4)
painter.drawLine(x + 4, y - 4, x + 4, y + 4)
painter.drawLine(x + 4, y + 4, x - 4, y + 4)
painter.drawLine(x - 4, y + 4, x - 4, y - 4)
painter.fillRect(x - 3, y - 3, 7, 7, QtGui.QColor(160, 160, 255))
else:
painter.setPen(Qt.black)
painter.drawLine(x - 3, y - 3, x + 3, y - 3)
painter.drawLine(x + 3, y - 3, x + 3, y + 3)
painter.drawLine(x + 3, y + 3, x - 3, y + 3)
painter.drawLine(x - 3, y + 3, x - 3, y - 3)
if self.__playback_time is not None:
pos = self.timeToX(self.__playback_time)
painter.fillRect(pos, 0, 2, self.height(), QtGui.QColor(0, 0, 160))
|
odahoda/noisicaa
|
noisicaa/builtin_nodes/control_track/track_ui.py
|
Python
|
gpl-2.0
| 15,641 | 0.000895 |
# -*- coding: utf-8 -*-
# Copyright 2015 AvanzOsc (http://www.avanzosc.es)
# Copyright 2015-2017 - Pedro M. Baeza <pedro.baeza@tecnativa.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
{
"name": "Procurement Purchase No Grouping",
"version": "10.0.1.0.0",
"author": "AvanzOSC,"
"Tecnativa,"
"Odoo Community Association (OCA)",
"website": "https://github.com/OCA/purchase-workflow",
"category": "Procurements",
"depends": [
'purchase',
'procurement',
],
"data": [
'views/product_category_view.xml',
],
'installable': True,
'license': 'AGPL-3',
}
|
Eficent/purchase-workflow
|
procurement_purchase_no_grouping/__manifest__.py
|
Python
|
agpl-3.0
| 662 | 0 |
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Extract from notebook for Serving Optimization on Keras """
from __future__ import print_function
from datetime import datetime
import os
import sh
import sys
import tensorflow as tf
from tensorflow import data
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.tools import freeze_graph
from tensorflow.python import ops
from tensorflow.tools.graph_transforms import TransformGraph
from inference_test import inference_test, load_mnist_keras
from optimize_graph import (run_experiment, get_graph_def_from_saved_model,
describe_graph, get_size, get_metagraph, get_graph_def_from_file,
convert_graph_def_to_saved_model, freeze_model, optimize_graph, TRANSFORMS)
NUM_CLASSES = 10
MODELS_LOCATION = 'models/mnist'
MODEL_NAME = 'keras_classifier'
def keras_model_fn(params):
inputs = tf.keras.layers.Input(shape=(28, 28), name='input_image')
input_layer = tf.keras.layers.Reshape(target_shape=(28, 28, 1), name='reshape')(inputs)
# convolutional layers
conv_inputs = input_layer
for i in range(params.num_conv_layers):
filters = params.init_filters * (2**i)
conv = tf.keras.layers.Conv2D(kernel_size=3, filters=filters, strides=1, padding='SAME', activation='relu')(conv_inputs)
max_pool = tf.keras.layers.MaxPool2D(pool_size=2, strides=2, padding='SAME')(conv)
batch_norm = tf.keras.layers.BatchNormalization()(max_pool)
conv_inputs = batch_norm
flatten = tf.keras.layers.Flatten(name='flatten')(conv_inputs)
# fully-connected layers
dense_inputs = flatten
for i in range(len(params.hidden_units)):
dense = tf.keras.layers.Dense(units=params.hidden_units[i], activation='relu')(dense_inputs)
dropout = tf.keras.layers.Dropout(params.dropout)(dense)
dense_inputs = dropout
# softmax classifier
logits = tf.keras.layers.Dense(units=NUM_CLASSES, name='logits')(dense_inputs)
softmax = tf.keras.layers.Activation('softmax', name='softmax')(logits)
# keras model
model = tf.keras.models.Model(inputs, softmax)
return model
def create_estimator_keras(params, run_config):
keras_model = keras_model_fn(params)
print(keras_model.summary())
optimizer = tf.keras.optimizers.Adam(lr=params.learning_rate)
keras_model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
mnist_classifier = tf.keras.estimator.model_to_estimator(
keras_model=keras_model,
config=run_config
)
return mnist_classifier
#### Train and Export Model
def train_and_export_model(train_data, train_labels):
model_dir = os.path.join(MODELS_LOCATION, MODEL_NAME)
hparams = tf.contrib.training.HParams(
batch_size=100,
hidden_units=[512, 512],
num_conv_layers=3,
init_filters=64,
dropout=0.2,
max_training_steps=50,
eval_throttle_secs=10,
learning_rate=1e-3,
debug=True
)
run_config = tf.estimator.RunConfig(
tf_random_seed=19830610,
save_checkpoints_steps=1000,
keep_checkpoint_max=3,
model_dir=model_dir
)
if tf.gfile.Exists(model_dir):
print('Removing previous artifacts...')
tf.gfile.DeleteRecursively(model_dir)
os.makedirs(model_dir)
estimator = run_experiment(hparams, train_data, train_labels, run_config, create_estimator_keras)
def make_serving_input_receiver_fn():
inputs = {'input_image': tf.placeholder(
shape=[None,28,28], dtype=tf.float32, name='serving_input_image')}
return tf.estimator.export.build_raw_serving_input_receiver_fn(inputs)
export_dir = os.path.join(model_dir, 'export')
if tf.gfile.Exists(export_dir):
tf.gfile.DeleteRecursively(export_dir)
estimator.export_savedmodel(
export_dir_base=export_dir,
serving_input_receiver_fn=make_serving_input_receiver_fn()
)
return export_dir
def setup_model():
train_data, train_labels, eval_data, eval_labels = load_mnist_keras()
export_dir = train_and_export_model(train_data, train_labels)
return export_dir, eval_data
NUM_TRIALS = 10
def main(args):
if len(args) > 1 and args[1] == '--inference':
export_dir = args[2]
_, _, eval_data, _ = load_mnist_keras()
total_load_time = 0.0
total_serve_time = 0.0
saved_model_dir = os.path.join(
export_dir, [f for f in os.listdir(export_dir) if f.isdigit()][0])
for i in range(0, NUM_TRIALS):
load_time, serving_time = inference_test(saved_model_dir, eval_data, repeat=10000)
total_load_time += load_time
total_serve_time += serving_time
print("****************************************")
print("*** Load time on original model: {:.2f}".format(total_load_time / NUM_TRIALS))
print("*** Serve time on original model: {:.2f}".format(total_serve_time / NUM_TRIALS))
print("****************************************")
total_load_time = 0.0
total_serve_time = 0.0
optimized_export_dir = os.path.join(export_dir, 'optimized')
for i in range(0, NUM_TRIALS):
load_time, serving_time = inference_test(optimized_export_dir, eval_data,
signature='serving_default',
repeat=10000)
total_load_time += load_time
total_serve_time += serving_time
print("****************************************")
print("*** Load time on optimized model: {:.2f}".format(total_load_time / NUM_TRIALS))
print("*** Serve time on optimized model: {:.2f}".format(total_serve_time / NUM_TRIALS))
print("****************************************")
else:
# generate and output original model
export_dir, eval_data = setup_model()
saved_model_dir = os.path.join(export_dir, os.listdir(export_dir)[-1])
describe_graph(get_graph_def_from_saved_model(saved_model_dir))
get_size(saved_model_dir, 'saved_model.pb')
get_metagraph(saved_model_dir)
# freeze model and describe it
freeze_model(saved_model_dir, 'softmax/Softmax', 'frozen_model.pb')
frozen_filepath = os.path.join(saved_model_dir, 'frozen_model.pb')
describe_graph(get_graph_def_from_file(frozen_filepath))
get_size(saved_model_dir, 'frozen_model.pb', include_vars=False)
# optimize model and describe it
optimize_graph(saved_model_dir, 'frozen_model.pb', TRANSFORMS, 'softmax/Softmax')
optimized_filepath = os.path.join(saved_model_dir, 'optimized_model.pb')
describe_graph(get_graph_def_from_file(optimized_filepath))
get_size(saved_model_dir, 'optimized_model.pb', include_vars=False)
# convert to saved model and output metagraph again
optimized_export_dir = os.path.join(export_dir, 'optimized')
convert_graph_def_to_saved_model(optimized_export_dir, optimized_filepath,
'softmax', 'softmax/Softmax:0')
get_size(optimized_export_dir, 'saved_model.pb')
get_metagraph(optimized_export_dir)
if __name__ == '__main__':
main(sys.argv)
|
GoogleCloudPlatform/tf-estimator-tutorials
|
00_Miscellaneous/model_optimisation/optimize_graph_keras.py
|
Python
|
apache-2.0
| 7,518 | 0.009178 |
# Copyright 2015 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''Volume driver for Dell Storage Center.'''
from oslo_log import log as logging
from oslo_utils import excutils
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder.volume.drivers.dell import dell_storagecenter_common
from cinder.volume.drivers import san
LOG = logging.getLogger(__name__)
class DellStorageCenterISCSIDriver(san.SanISCSIDriver,
dell_storagecenter_common.DellCommonDriver):
'''Implements commands for Dell StorageCenter ISCSI management.
To enable the driver add the following line to the cinder configuration:
volume_driver=cinder.volume.drivers.dell.DellStorageCenterISCSIDriver
'''
VERSION = '1.0.2'
def __init__(self, *args, **kwargs):
super(DellStorageCenterISCSIDriver, self).__init__(*args, **kwargs)
self.backend_name = (
self.configuration.safe_get('volume_backend_name')
or 'Dell-iSCSI')
def initialize_connection(self, volume, connector):
# Initialize_connection will find or create a server identified by the
# connector on the Dell backend. It will then map the volume to it
# and return the properties as follows..
# {'driver_volume_type': 'iscsi',
# data = {'target_discovered': False,
# 'target_iqn': preferred iqn,
# 'target_iqns': all iqns,
# 'target_portal': preferred portal,
# 'target_portals': all portals,
# 'target_lun': preferred lun,
# 'target_luns': all luns,
# 'access_mode': access_mode
# }
# We use id to name the volume name as it is a
# known unique name.
volume_name = volume.get('id')
initiator_name = connector.get('initiator')
multipath = connector.get('multipath', False)
LOG.info(_LI('initialize_ connection: %(vol)s:%(initiator)s'),
{'vol': volume_name,
'initiator': initiator_name})
with self._client.open_connection() as api:
try:
# Find our server.
server = api.find_server(initiator_name)
# No? Create it.
if server is None:
server = api.create_server(initiator_name)
# Find the volume on the storage center.
scvolume = api.find_volume(volume_name)
# if we have a server and a volume lets bring them together.
if server is not None and scvolume is not None:
mapping = api.map_volume(scvolume,
server)
if mapping is not None:
# Since we just mapped our volume we had best update
# our sc volume object.
scvolume = api.find_volume(volume_name)
# Our return.
iscsiprops = {}
ip = None
port = None
if not multipath:
# We want to make sure we point to the specified
# ip address for our target_portal return. This
# isn't an issue with multipath since it should
# try all the alternate portal.
ip = self.configuration.iscsi_ip_address
port = self.configuration.iscsi_port
# Three cases that should all be satisfied with the
# same return of Target_Portal and Target_Portals.
# 1. Nova is calling us so we need to return the
# Target_Portal stuff. It should ignore the
# Target_Portals stuff.
# 2. OS brick is calling us in multipath mode so we
# want to return Target_Portals. It will ignore
# the Target_Portal stuff.
# 3. OS brick is calling us in single path mode so
# we want to return Target_Portal and
# Target_Portals as alternates.
iscsiprops = (api.find_iscsi_properties(scvolume,
ip,
port))
# Return our iscsi properties.
return {'driver_volume_type': 'iscsi',
'data': iscsiprops}
except Exception:
error = (_('Failed to initialize connection '
'%(initiator)s %(vol)s') %
{'initiator': initiator_name,
'vol': volume_name})
LOG.error(error)
raise exception.VolumeBackendAPIException(error)
# We get here because our mapping is none or we have no valid iqn to
# return so blow up.
raise exception.VolumeBackendAPIException(
_('Unable to map volume'))
def terminate_connection(self, volume, connector, force=False, **kwargs):
# Grab some initial info.
initiator_name = connector.get('initiator')
volume_name = volume.get('id')
LOG.debug('Terminate connection: %(vol)s:%(initiator)s',
{'vol': volume_name,
'initiator': initiator_name})
with self._client.open_connection() as api:
try:
scserver = api.find_server(initiator_name)
# Find the volume on the storage center.
scvolume = api.find_volume(volume_name)
# If we have a server and a volume lets pull them apart.
if (scserver is not None and
scvolume is not None and
api.unmap_volume(scvolume, scserver) is True):
LOG.debug('Connection terminated')
return
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to terminate connection '
'%(initiator)s %(vol)s'),
{'initiator': initiator_name,
'vol': volume_name})
raise exception.VolumeBackendAPIException(
_('Terminate connection failed'))
|
saeki-masaki/cinder
|
cinder/volume/drivers/dell/dell_storagecenter_iscsi.py
|
Python
|
apache-2.0
| 7,182 | 0 |
from pytest import fixture
from itertools import combinations
import msgpack as pymsgpack
values = [
42, 7, 3.14, 2.71, 'lorem', 'ipsum', True, False, None, b'lorem', b'ipsum', [], [
'lorem', 42, 3.14, True, None, ['ipsum']], dict(), {
'lorem': 'ipsum', 'dolor': 42, 'sit': 3.14, 'amet': [
True, None], 'consectetur':{
'adipisicing': 'elit'}}]
pairs = tuple(combinations(values, 2))
@fixture
def cxxjson():
from cxx import json
return json
@fixture
def cxxmsgpack():
from cxx import msgpack
return msgpack
|
attugit/cxxjson
|
test/conftest.py
|
Python
|
mit
| 587 | 0.001704 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=unused-import,wildcard-import,unused-wildcard-import
"""The qutebrowser test suite conftest file."""
import os
import sys
import warnings
import pytest
import hypothesis
from PyQt5.QtCore import PYQT_VERSION
pytest.register_assert_rewrite('helpers')
from helpers import logfail
from helpers.logfail import fail_on_logging
from helpers.messagemock import message_mock
from helpers.fixtures import *
from qutebrowser.utils import qtutils
# Set hypothesis settings
hypothesis.settings.register_profile('default',
hypothesis.settings(strict=True))
hypothesis.settings.load_profile('default')
def _apply_platform_markers(config, item):
"""Apply a skip marker to a given item."""
markers = [
('posix', os.name != 'posix', "Requires a POSIX os"),
('windows', os.name != 'nt', "Requires Windows"),
('linux', not sys.platform.startswith('linux'), "Requires Linux"),
('mac', sys.platform != 'darwin', "Requires macOS"),
('not_mac', sys.platform == 'darwin', "Skipped on macOS"),
('not_frozen', getattr(sys, 'frozen', False),
"Can't be run when frozen"),
('frozen', not getattr(sys, 'frozen', False),
"Can only run when frozen"),
('ci', 'CI' not in os.environ, "Only runs on CI."),
('issue2478', os.name == 'nt' and config.webengine,
"Broken with QtWebEngine on Windows"),
]
for searched_marker, condition, default_reason in markers:
marker = item.get_marker(searched_marker)
if not marker or not condition:
continue
if 'reason' in marker.kwargs:
reason = '{}: {}'.format(default_reason, marker.kwargs['reason'])
del marker.kwargs['reason']
else:
reason = default_reason + '.'
skipif_marker = pytest.mark.skipif(condition, *marker.args,
reason=reason, **marker.kwargs)
item.add_marker(skipif_marker)
def pytest_collection_modifyitems(config, items):
"""Handle custom markers.
pytest hook called after collection has been performed.
Adds a marker named "gui" which can be used to filter gui tests from the
command line.
For example:
pytest -m "not gui" # run all tests except gui tests
pytest -m "gui" # run only gui tests
It also handles the platform specific markers by translating them to skipif
markers.
Args:
items: list of _pytest.main.Node items, where each item represents
a python test that will be executed.
Reference:
http://pytest.org/latest/plugins.html
"""
remaining_items = []
deselected_items = []
for item in items:
deselected = False
if 'qapp' in getattr(item, 'fixturenames', ()):
item.add_marker('gui')
if hasattr(item, 'module'):
module_path = os.path.relpath(
item.module.__file__,
os.path.commonprefix([__file__, item.module.__file__]))
module_root_dir = module_path.split(os.sep)[0]
assert module_root_dir in ['end2end', 'unit', 'helpers',
'test_conftest.py']
if module_root_dir == 'end2end':
item.add_marker(pytest.mark.end2end)
_apply_platform_markers(config, item)
if item.get_marker('xfail_norun'):
item.add_marker(pytest.mark.xfail(run=False))
if item.get_marker('js_prompt'):
if config.webengine:
js_prompt_pyqt_version = 0x050700
else:
js_prompt_pyqt_version = 0x050300
item.add_marker(pytest.mark.skipif(
PYQT_VERSION <= js_prompt_pyqt_version,
reason='JS prompts are not supported with this PyQt version'))
if deselected:
deselected_items.append(item)
else:
remaining_items.append(item)
config.hook.pytest_deselected(items=deselected_items)
items[:] = remaining_items
def pytest_ignore_collect(path):
"""Ignore BDD tests if we're unable to run them."""
skip_bdd = hasattr(sys, 'frozen')
rel_path = path.relto(os.path.dirname(__file__))
return rel_path == os.path.join('end2end', 'features') and skip_bdd
@pytest.fixture(scope='session')
def qapp(qapp):
"""Change the name of the QApplication instance."""
qapp.setApplicationName('qute_test')
return qapp
def pytest_addoption(parser):
parser.addoption('--qute-delay', action='store', default=0, type=int,
help="Delay between qutebrowser commands.")
parser.addoption('--qute-profile-subprocs', action='store_true',
default=False, help="Run cProfile for subprocesses.")
parser.addoption('--qute-bdd-webengine', action='store_true',
help='Use QtWebEngine for BDD tests')
def pytest_configure(config):
webengine_arg = config.getoption('--qute-bdd-webengine')
webengine_env = os.environ.get('QUTE_BDD_WEBENGINE', '')
config.webengine = bool(webengine_arg or webengine_env)
# Fail early if QtWebEngine is not available
# pylint: disable=unused-variable
if config.webengine:
import PyQt5.QtWebEngineWidgets
@pytest.fixture(scope='session', autouse=True)
def check_display(request):
if (not request.config.getoption('--no-xvfb') and
'QUTE_BUILDBOT' in os.environ and
request.config.xvfb is not None):
raise Exception("Xvfb is running on buildbot!")
if sys.platform == 'linux' and not os.environ.get('DISPLAY', ''):
raise Exception("No display and no Xvfb available!")
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
"""Make test information available in fixtures.
See http://pytest.org/latest/example/simple.html#making-test-result-information-available-in-fixtures
"""
outcome = yield
rep = outcome.get_result()
setattr(item, "rep_" + rep.when, rep)
|
pkill-nine/qutebrowser
|
tests/conftest.py
|
Python
|
gpl-3.0
| 6,876 | 0.000873 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from airflow import DAG
from airflow.api_connexion.schemas.event_log_schema import (
EventLogCollection,
event_log_collection_schema,
event_log_schema,
)
from airflow.models import Log, TaskInstance
from airflow.operators.dummy import DummyOperator
from airflow.utils import timezone
from airflow.utils.session import create_session, provide_session
class TestEventLogSchemaBase(unittest.TestCase):
def setUp(self) -> None:
with create_session() as session:
session.query(Log).delete()
self.default_time = "2020-06-09T13:00:00+00:00"
self.default_time2 = '2020-06-11T07:00:00+00:00'
def tearDown(self) -> None:
with create_session() as session:
session.query(Log).delete()
def _create_task_instance(self):
with DAG(
'TEST_DAG_ID',
start_date=timezone.parse(self.default_time),
end_date=timezone.parse(self.default_time),
):
op1 = DummyOperator(task_id="TEST_TASK_ID", owner="airflow")
return TaskInstance(task=op1, execution_date=timezone.parse(self.default_time))
class TestEventLogSchema(TestEventLogSchemaBase):
@provide_session
def test_serialize(self, session):
event_log_model = Log(event="TEST_EVENT", task_instance=self._create_task_instance())
session.add(event_log_model)
session.commit()
event_log_model.dttm = timezone.parse(self.default_time)
log_model = session.query(Log).first()
deserialized_log = event_log_schema.dump(log_model)
self.assertEqual(
deserialized_log,
{
"event_log_id": event_log_model.id,
"event": "TEST_EVENT",
"dag_id": "TEST_DAG_ID",
"task_id": "TEST_TASK_ID",
"execution_date": self.default_time,
"owner": 'airflow',
"when": self.default_time,
"extra": None,
},
)
class TestEventLogCollection(TestEventLogSchemaBase):
@provide_session
def test_serialize(self, session):
event_log_model_1 = Log(event="TEST_EVENT_1", task_instance=self._create_task_instance())
event_log_model_2 = Log(event="TEST_EVENT_2", task_instance=self._create_task_instance())
event_logs = [event_log_model_1, event_log_model_2]
session.add_all(event_logs)
session.commit()
event_log_model_1.dttm = timezone.parse(self.default_time)
event_log_model_2.dttm = timezone.parse(self.default_time2)
instance = EventLogCollection(event_logs=event_logs, total_entries=2)
deserialized_event_logs = event_log_collection_schema.dump(instance)
self.assertEqual(
deserialized_event_logs,
{
"event_logs": [
{
"event_log_id": event_log_model_1.id,
"event": "TEST_EVENT_1",
"dag_id": "TEST_DAG_ID",
"task_id": "TEST_TASK_ID",
"execution_date": self.default_time,
"owner": 'airflow',
"when": self.default_time,
"extra": None,
},
{
"event_log_id": event_log_model_2.id,
"event": "TEST_EVENT_2",
"dag_id": "TEST_DAG_ID",
"task_id": "TEST_TASK_ID",
"execution_date": self.default_time,
"owner": 'airflow',
"when": self.default_time2,
"extra": None,
},
],
"total_entries": 2,
},
)
|
airbnb/airflow
|
tests/api_connexion/schemas/test_event_log_schema.py
|
Python
|
apache-2.0
| 4,612 | 0.000867 |
"""Subclass of NewMember, which is generated by wxFormBuilder."""
import copy
import wx
from beatle import model
from beatle.lib import wxx
from beatle.activity.models.ui import ui as ui
# Implementing NewMember
class MemberDialog(ui.NewMember):
"""
This dialog allows to setup data member of class
or struct. You can set default value for using
in constructors or as initialization of static
members.
"""
@wxx.SetInfo(__doc__)
def __init__(self, parent, container):
"""Dialog initialization"""
import beatle.app.resources as rc
super(MemberDialog, self).__init__(parent)
self._container = container
scoped = lambda x: (hasattr(x, 'scoped') and x.scoped) or x.name
self._types = dict([scoped(x), x] for x in container.types)
self._autoname = '' # proposed name
# add types but not on-the-fly template type
self.m_type.AppendItems([x for x in self._types.keys() if x != '@'])
# we need to add types from template nested classes
classes = container.nested_classes
self._nested_template_types = []
for clase in classes:
for x in clase._template_types:
if x not in self._nested_template_types:
self._nested_template_types.append(scoped(x))
if len(self._nested_template_types) > 0:
self.m_type.AppendItems(self._nested_template_types)
self.choiceStr = ""
self.m_type.SetFocus()
icon = wx.EmptyIcon()
icon.CopyFromBitmap(rc.GetBitmap("member"))
self.SetIcon(icon)
self._register_keybindings()
def AutoName(self):
"""Suggest the argument name, based on type"""
iSel = self.m_type.GetCurrentSelection()
if iSel == wx.NOT_FOUND:
return
s = self.m_name.GetValue()
if self._autoname != s and s:
return
kwargs = {
'const': (self.m_const.IsChecked() and 'c') or '',
'reference': (self.m_reference.IsChecked() and 'r') or '',
'ptr': (self.m_ptr.IsChecked() and 'p') or '',
'pptr': (self.m_pptr.IsChecked() and 'p') or '',
'constptr': (self.m_constptr.IsChecked() and 'c') or '',
'array': (self.m_array.IsChecked() and 'a') or '',
'typename': self.m_type.GetString(iSel).replace('::', '_'),
}
#volatile = (self.m_volatile.IsChecked() and 'v') or ''
self._autoname = '{const}{reference}{ptr}{pptr}{constptr}{array}{typename}'.format(
**kwargs)
self.m_name.SetValue(self._autoname)
def _register_keybindings(self):
"""Register accelerators for static labels that must change the focus"""
newId_t = wx.NewId()
newId_n = wx.NewId()
newId_a = wx.NewId()
newId_d = wx.NewId()
newId_o = wx.NewId()
self.Bind(wx.EVT_MENU, self.OnActivateType, id=newId_t)
self.Bind(wx.EVT_MENU, self.OnActivateName, id=newId_n)
self.Bind(wx.EVT_MENU, self.OnActivateAccess, id=newId_a)
self.Bind(wx.EVT_MENU, self.OnActivateDefault, id=newId_d)
self.Bind(wx.EVT_MENU, self.OnActivateNotes, id=newId_o)
aTable = wx.AcceleratorTable([
wx.AcceleratorEntry(wx.ACCEL_ALT, ord('T'), newId_t),
wx.AcceleratorEntry(wx.ACCEL_ALT, ord('N'), newId_n),
wx.AcceleratorEntry(wx.ACCEL_ALT, ord('A'), newId_a),
wx.AcceleratorEntry(wx.ACCEL_ALT, ord('D'), newId_d),
wx.AcceleratorEntry(wx.ACCEL_ALT, ord('O'), newId_o)
])
self.SetAcceleratorTable(aTable)
def OnActivateType(self, event):
"""activate type combo"""
self.m_type.SetFocus()
def OnActivateName(self, event):
"""activate name entry"""
self.m_name.SetFocus()
def OnActivateAccess(self, event):
"""activate acces combo"""
self.m_choice2.SetFocus()
def OnActivateDefault(self, event):
"""activate default value"""
self.m_textCtrl8.SetFocus()
def OnActivateNotes(self, event):
"""Activate notes"""
self.m_richText1.SetFocus()
def OnEnterName(self, event):
"""This event is generated when the enter is pressed in the
name entry"""
self.m_choice2.SetFocus()
def OnTypeChanged(self, event):
"""This event happens when the return type is changed. The main goal
of this callback is handling template types for argument specification"""
iSel = self.m_type.GetCurrentSelection()
_type = self._types.get(self.m_type.GetString(iSel), None)
template_args = False
if _type is not None:
if _type._template is not None:
template_args = True
if template_args is True:
self.m_staticText67.Enable(True)
self.m_template_args.Enable(True)
self.m_staticText68.Enable(True)
else:
self.m_staticText67.Enable(False)
self.m_template_args.Enable(False)
self.m_staticText68.Enable(False)
self.m_template_args.SetValue('')
self.AutoName()
def CopyAttributes(self, member):
"""Get the atributes"""
member._name = self._name
member._typei = copy.copy(self._typei)
member._access = self._access
member._static = self._static
member._default = self._default
member._volatile = self._volatile
member._mutable = self._mutable
member._bitField = self._bitField
if self._bitField:
member._bitFieldSize = self._bitFieldSize
member._note = self._note
member.inner_class.AutoInit()
def SetAttributes(self, member):
"""Set the attributes"""
self.m_name.SetValue(member._name)
ti = member._typei
iSel = self.m_type.FindString(ti.scoped)
self.m_type.SetSelection(iSel)
iSel = self.m_choice2.FindString(member._access)
self.m_choice2.SetSelection(iSel)
self.m_checkBox105.SetValue(member._static)
self.m_textCtrl8.SetValue(member._default)
self.m_checkBox49.SetValue(member._volatile)
self.m_checkBox48.SetValue(member._mutable)
self.m_const.SetValue(ti._const)
self.m_ptr.SetValue(ti._ptr)
self.m_reference.SetValue(ti._ref)
self.m_pptr.SetValue(ti._ptr_to_ptr)
self.m_constptr.SetValue(ti._const_ptr)
self.m_array.SetValue(ti._array)
if ti._array is True:
self.m_textCtrl7.Show(True)
self.m_textCtrl7.Enable(True)
self.m_textCtrl7.SetValue(str(ti._array_size))
else:
self.m_textCtrl7.SetValue('0')
self.m_checkBox51.SetValue(member._bitField)
if ti._type_args is not None:
self.m_staticText67.Enable(True)
self.m_template_args.Enable(True)
self.m_staticText68.Enable(True)
self.m_template_args.SetValue(ti._type_args)
if member._bitField is True:
self.m_textCtrl39.Show(True)
self.m_textCtrl39.Enable(True)
self.m_textCtrl39.SetValue(str(member._bitFieldSize))
self.m_richText1.SetValue(member._note)
self.SetTitle("Edit member")
def Validate(self):
"""Dialog validation"""
self._name = self.m_name.GetValue()
if len(self._name) == 0:
wx.MessageBox("Member name must not be empty", "Error",
wx.OK | wx.CENTER | wx.ICON_ERROR, self)
return False
iSel = self.m_type.GetCurrentSelection()
if iSel == wx.NOT_FOUND:
wx.MessageBox("Invalid type", "Error",
wx.OK | wx.CENTER | wx.ICON_ERROR, self)
return False
typename = self.m_type.GetString(iSel)
iSel = self.m_choice2.GetCurrentSelection()
if iSel == wx.NOT_FOUND:
wx.MessageBox("Invalid access", "Error",
wx.OK | wx.CENTER | wx.ICON_ERROR, self)
return False
self._static = self.m_checkBox105.IsChecked()
self._access = self.m_choice2.GetString(iSel)
self._default = self.m_textCtrl8.GetValue()
self._volatile = self.m_checkBox49.GetValue()
self._mutable = self.m_checkBox48.GetValue()
if self.m_array.IsChecked():
try:
asize = int(self.m_textCtrl7.GetValue())
except:
asize = ''
else:
asize = None
if typename in self._nested_template_types:
self._typei = model.cc.typeinst(
type=self._types['@'],
type_alias=typename,
const=self.m_const.IsChecked(),
ptr=self.m_ptr.IsChecked(),
ref=self.m_reference.IsChecked(),
ptrptr=self.m_pptr.IsChecked(),
constptr=self.m_constptr.IsChecked(),
array=self.m_array.IsChecked(),
arraysize=asize
)
else:
_type = self._types[typename]
if _type._template is not None:
#we construct type instance with explicit arguments
type_args = self.m_template_args.GetValue()
self._typei = model.cc.typeinst(
type=_type,
type_args=type_args,
const=self.m_const.IsChecked(),
ptr=self.m_ptr.IsChecked(),
ref=self.m_reference.IsChecked(),
ptrptr=self.m_pptr.IsChecked(),
constptr=self.m_constptr.IsChecked(),
array=self.m_array.IsChecked(),
arraysize=asize
)
else:
self._typei = model.cc.typeinst(
type=self._types[typename],
const=self.m_const.IsChecked(),
ptr=self.m_ptr.IsChecked(),
ref=self.m_reference.IsChecked(),
ptrptr=self.m_pptr.IsChecked(),
constptr=self.m_constptr.IsChecked(),
array=self.m_array.IsChecked(),
arraysize=asize
)
self._bitField = self.m_checkBox51.IsChecked()
if self._bitField is True:
self._bitFieldSize = int(self.m_textCtrl39.GetValue())
else:
self._bitFieldSize = 0
self._note = self.m_richText1.GetValue()
return True
def get_kwargs(self):
"""return arguments for object instance"""
return {'parent': self._container, 'name': self._name,
'type': self._typei, 'access': self._access, 'static': self._static,
'volatile': self._volatile, 'mutable': self._mutable,
'bitfield': self._bitField, 'bitfieldsize': self._bitFieldSize,
'default': self._default}
# Handlers for NewMember events.
def OnKeyDown(self, event):
"""Listbox selection"""
keycode = event.GetKeyCode()
if keycode == wx.WXK_UP or keycode == wx.WXK_NUMPAD_UP:
i = self.m_type.GetSelection()
if i is not wx.NOT_FOUND and i > 0:
self.m_type.SetSelection(i - 1)
elif keycode == wx.WXK_DOWN or keycode == wx.WXK_NUMPAD_DOWN:
i = self.m_type.GetSelection() + 1
if i > wx.NOT_FOUND and i < len(self._types):
self.m_type.SetSelection(i)
elif keycode < 256:
keychar = chr(keycode)
if keychar.isalnum() or keycode is wx.WXK_SPACE:
self.choiceStr += keychar.lower()
for t in self._types:
tl = t.lower()
if tl.find(self.choiceStr) == 0:
sel = self.m_type.FindString(t)
if sel is not wx.NOT_FOUND:
self.m_type.SetSelection(sel)
if keycode is not wx.WXK_SPACE:
event.Skip()
return
self.choiceStr = ""
event.Skip()
def OnPointerToggle(self, event):
"""ptr toggle gui"""
if self.m_ptr.IsChecked():
self.m_constptr.Enable(True)
self.m_checkBox50.Enable(True)
self.m_pptr.Enable(True)
else:
self.m_constptr.Enable(False)
self.m_checkBox50.Enable(False)
self.m_pptr.Enable(False)
self.m_constptr.SetValue(False)
self.m_checkBox50.SetValue(False)
self.m_pptr.SetValue(False)
self.AutoName()
def OnToggleArray(self, event):
"toggle array event"
if self.m_array.IsChecked():
self.m_checkBox51.SetValue(False)
self.m_textCtrl39.Show(False)
self.m_textCtrl39.Enable(False)
self.m_textCtrl7.Show(True)
self.m_textCtrl7.Enable(True)
else:
self.m_textCtrl7.Show(False)
self.m_textCtrl7.Enable(False)
self.AutoName()
def OnToggleStatic(self, event):
"""toggle static event"""
if self.m_checkBox105.IsChecked():
#disable bit field
self.m_checkBox51.SetValue(False)
self.m_checkBox51.Enable(False)
self.m_textCtrl39.Show(False)
self.m_textCtrl39.Enable(False)
#disable mutable
self.m_checkBox48.SetValue(False)
self.m_checkBox48.Enable(False)
else:
self.m_checkBox51.Enable(True)
self.m_checkBox48.Enable(True)
event.Skip()
def OnToggleBitFiled(self, event):
"toggle array event"
if self.m_checkBox51.IsChecked():
self.m_array.SetValue(False)
self.m_textCtrl7.Show(False)
self.m_textCtrl7.Enable(False)
self.m_textCtrl39.Show(True)
self.m_textCtrl39.Enable(True)
else:
self.m_textCtrl39.Show(False)
self.m_textCtrl39.Enable(False)
event.Skip()
def OnCancel(self, event):
"""cancel event handler"""
self.EndModal(wx.ID_CANCEL)
def OnOK(self, event):
"""ok event handler"""
if self.Validate():
self.EndModal(wx.ID_OK)
|
melviso/phycpp
|
beatle/activity/models/ui/dlg/cc/Member.py
|
Python
|
gpl-2.0
| 14,333 | 0.001535 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines a top-level glue class that operates the Transport and Flasher classes."""
import logging
import time
from .._ffi import get_global_func
from ..contrib import graph_runtime
from ..rpc import RPCSession
from .transport import TransportLogger
try:
from .base import _rpc_connect
except ImportError:
raise ImportError("micro tvm is not enabled. Set USE_MICRO to ON in config.cmake")
class Session:
"""MicroTVM Device Session
Parameters
----------
config : dict
configuration for this session (as generated by
`tvm.micro.device.host.default_config()`, for example)
Example
--------
.. code-block:: python
c_mod = ... # some module generated with "c" as the target
dev_config = micro.device.arm.stm32f746xx.default_config('127.0.0.1', 6666)
with tvm.micro.Session(dev_config) as sess:
micro_mod = sess.create_micro_mod(c_mod)
"""
def __init__(
self, binary=None, flasher=None, transport_context_manager=None, session_name="micro-rpc"
):
"""Configure a new session.
Parameters
----------
binary : MicroBinary
If given, `flasher` must also be given. During session initialization, this binary will
be flashed to the device before the transport is created.
flasher : Flasher
If given, `binary` must also be given. Used to flash `binary` during session
initialization.
transport_context_manager : ContextManager[transport.Transport]
If given, `flasher` and `binary` should not be given. On entry, this context manager
should establish a tarnsport between this TVM instance and the device.
session_name : str
Name of the session, used for debugging.
"""
self.binary = binary
self.flasher = flasher
self.transport_context_manager = transport_context_manager
self.session_name = session_name
self._rpc = None
self._graph_runtime = None
def get_system_lib(self):
return self._rpc.get_function("runtime.SystemLib")()
def __enter__(self):
"""Initialize this session and establish an RPC session with the on-device RPC server.
Returns
-------
Session :
Returns self.
"""
if self.flasher is not None:
self.transport_context_manager = self.flasher.flash(self.binary)
time.sleep(3.0)
self.transport = TransportLogger(
self.session_name, self.transport_context_manager, level=logging.INFO
).__enter__()
self._rpc = RPCSession(
_rpc_connect(self.session_name, self.transport.write, self.transport.read)
)
self.context = self._rpc.cpu(0)
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
"""Tear down this session and associated RPC session resources."""
self.transport.__exit__(exc_type, exc_value, exc_traceback)
def create_local_graph_runtime(graph_json_str, mod, ctx):
"""Create a local graph runtime driving execution on the remote CPU context given.
Parameters
----------
graph_json_str : str
A string containing the graph representation.
mod : tvm.runtime.Module
The remote module containing functions in graph_json_str.
ctx : tvm.Context
The remote CPU execution context.
Returns
-------
tvm.contrib.GraphRuntime :
A local graph runtime instance that executes on the remote device.
"""
device_type_id = [ctx.device_type, ctx.device_id]
fcreate = get_global_func("tvm.graph_runtime.create")
return graph_runtime.GraphModule(fcreate(graph_json_str, mod, *device_type_id))
|
sxjscience/tvm
|
python/tvm/micro/session.py
|
Python
|
apache-2.0
| 4,567 | 0.002847 |
"""
This tutorial introduces the multilayer perceptron using Theano.
A multilayer perceptron is a logistic regressor where
instead of feeding the input to the logistic regression you insert a
intermediate layer, called the hidden layer, that has a nonlinear
activation function (usually tanh or sigmoid) . One can use many such
hidden layers making the architecture deep. The tutorial will also tackle
the problem of MNIST digit classification.
.. math::
f(x) = G( b^{(2)} + W^{(2)}( s( b^{(1)} + W^{(1)} x))),
References:
- textbooks: "Pattern Recognition and Machine Learning" -
Christopher M. Bishop, section 5
"""
from __future__ import print_function
__docformat__ = 'restructedtext en'
import os
import sys
import timeit
import numpy
import theano
import theano.tensor as T
from logistic_sgd import LogisticRegression, load_data
# start-snippet-1
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None,
activation=T.tanh):
"""
Typical hidden layer of a MLP: units are fully-connected and have
sigmoidal activation function. Weight matrix W is of shape (n_in,n_out)
and the bias vector b is of shape (n_out,).
NOTE : The nonlinearity used here is tanh
Hidden unit activation is given by: tanh(dot(input,W) + b)
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dmatrix
:param input: a symbolic tensor of shape (n_examples, n_in)
:type n_in: int
:param n_in: dimensionality of input
:type n_out: int
:param n_out: number of hidden units
:type activation: theano.Op or function
:param activation: Non linearity to be applied in the hidden
layer
"""
self.input = input
# end-snippet-1
# `W` is initialized with `W_values` which is uniformely sampled
# from sqrt(-6./(n_in+n_hidden)) and sqrt(6./(n_in+n_hidden))
# for tanh activation function
# the output of uniform if converted using asarray to dtype
# theano.config.floatX so that the code is runable on GPU
# Note : optimal initialization of weights is dependent on the
# activation function used (among other things).
# For example, results presented in [Xavier10] suggest that you
# should use 4 times larger initial weights for sigmoid
# compared to tanh
# We have no info for other function, so we use the same as
# tanh.
if W is None:
W_values = numpy.asarray(
rng.uniform(
low=-numpy.sqrt(6. / (n_in + n_out)),
high=numpy.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)
),
dtype=theano.config.floatX
)
if activation == theano.tensor.nnet.sigmoid:
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
lin_output = T.dot(input, self.W) + self.b
self.output = (
lin_output if activation is None
else activation(lin_output)
)
# parameters of the model
self.params = [self.W, self.b]
# start-snippet-2
class MLP(object):
"""Multi-Layer Perceptron Class
A multilayer perceptron is a feedforward artificial neural network model
that has one layer or more of hidden units and nonlinear activations.
Intermediate layers usually have as activation function tanh or the
sigmoid function (defined here by a ``HiddenLayer`` class) while the
top layer is a softmax layer (defined here by a ``LogisticRegression``
class).
"""
def __init__(self, rng, input, n_in, n_hidden, n_out):
"""Initialize the parameters for the multilayer perceptron
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_hidden: int
:param n_hidden: number of hidden units
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
"""
# Since we are dealing with a one hidden layer MLP, this will translate
# into a HiddenLayer with a tanh activation function connected to the
# LogisticRegression layer; the activation function can be replaced by
# sigmoid or any other nonlinear function
self.hiddenLayer = HiddenLayer(
rng=rng,
input=input,
n_in=n_in,
n_out=n_hidden,
activation=T.tanh
)
# The logistic regression layer gets as input the hidden units
# of the hidden layer
self.logRegressionLayer = LogisticRegression(
input=self.hiddenLayer.output,
n_in=n_hidden,
n_out=n_out
)
# end-snippet-2 start-snippet-3
# L1 norm ; one regularization option is to enforce L1 norm to
# be small
self.L1 = (
abs(self.hiddenLayer.W).sum()
+ abs(self.logRegressionLayer.W).sum()
)
# square of L2 norm ; one regularization option is to enforce
# square of L2 norm to be small
self.L2_sqr = (
(self.hiddenLayer.W ** 2).sum()
+ (self.logRegressionLayer.W ** 2).sum()
)
# negative log likelihood of the MLP is given by the negative
# log likelihood of the output of the model, computed in the
# logistic regression layer
self.negative_log_likelihood = (
self.logRegressionLayer.negative_log_likelihood
)
# same holds for the function computing the number of errors
self.errors = self.logRegressionLayer.errors
# the parameters of the model are the parameters of the two layer it is
# made out of
self.params = self.hiddenLayer.params + self.logRegressionLayer.params
# end-snippet-3
# keep track of model input
self.input = input
def test_mlp(learning_rate=0.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=1000,
dataset='mnist.pkl.gz', batch_size=200, n_hidden=100):
"""
Demonstrate stochastic gradient descent optimization for a multilayer
perceptron
This is demonstrated on MNIST.
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic
gradient
:type L1_reg: float
:param L1_reg: L1-norm's weight when added to the cost (see
regularization)
:type L2_reg: float
:param L2_reg: L2-norm's weight when added to the cost (see
regularization)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
:type dataset: string
:param dataset: the path of the MNIST dataset file from
http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz
"""
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size
######################
# BUILD ACTUAL MODEL #
######################
print('... building the model')
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
x = T.matrix('x') # the data is presented as rasterized images
y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
rng = numpy.random.RandomState(1234)
# construct the MLP class
classifier = MLP(
rng=rng,
input=x,
n_in=28 * 28,
n_hidden=n_hidden,
n_out=10
)
# start-snippet-4
# the cost we minimize during training is the negative log likelihood of
# the model plus the regularization terms (L1 and L2); cost is expressed
# here symbolically
cost = (
classifier.negative_log_likelihood(y)
+ L1_reg * classifier.L1
+ L2_reg * classifier.L2_sqr
)
# end-snippet-4
# compiling a Theano function that computes the mistakes that are made
# by the model on a minibatch
test_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: test_set_x[index * batch_size:(index + 1) * batch_size],
y: test_set_y[index * batch_size:(index + 1) * batch_size]
}
)
validate_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size:(index + 1) * batch_size],
y: valid_set_y[index * batch_size:(index + 1) * batch_size]
}
)
# start-snippet-5
# compute the gradient of cost with respect to theta (sorted in params)
# the resulting gradients will be stored in a list gparams
gparams = [T.grad(cost, param) for param in classifier.params]
# specify how to update the parameters of the model as a list of
# (variable, update expression) pairs
# given two lists of the same length, A = [a1, a2, a3, a4] and
# B = [b1, b2, b3, b4], zip generates a list C of same size, where each
# element is a pair formed from the two lists :
# C = [(a1, b1), (a2, b2), (a3, b3), (a4, b4)]
updates = [
(param, param - learning_rate * gparam)
for param, gparam in zip(classifier.params, gparams)
]
# compiling a Theano function `train_model` that returns the cost, but
# in the same time updates the parameter of the model based on the rules
# defined in `updates`
train_model = theano.function(
inputs=[index],
outputs=cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# end-snippet-5
###############
# TRAIN MODEL #
###############
print('... training')
# early-stopping parameters
patience = 10000 # look as this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience // 2)
# go through this many
# minibatche before checking the network
# on the validation set; in this case we
# check every epoch
best_validation_loss = numpy.inf
best_iter = 0
test_score = 0.
start_time = timeit.default_timer()
epoch = 0
done_looping = False
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in range(n_train_batches):
minibatch_avg_cost = train_model(minibatch_index)
# iteration number
iter = (epoch - 1) * n_train_batches + minibatch_index
# print(iter)
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i) for i
in range(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
print(
'epoch %i, minibatch %i/%i, validation error %f %%' %
(
epoch,
minibatch_index + 1,
n_train_batches,
this_validation_loss * 100.
)
)
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if (
this_validation_loss < best_validation_loss *
improvement_threshold
):
patience = max(patience, iter * patience_increase)
best_validation_loss = this_validation_loss
best_iter = iter
# test it on the test set
test_losses = [test_model(i) for i
in range(n_test_batches)]
test_score = numpy.mean(test_losses)
print((' epoch %i, minibatch %i/%i, test error of '
'best model %f %%') %
(epoch, minibatch_index + 1, n_train_batches,
test_score * 100.))
if patience <= iter:
done_looping = True
break
end_time = timeit.default_timer()
print(('Optimization complete. Best validation score of %f %% '
'obtained at iteration %i, with test performance %f %%') %
(best_validation_loss * 100., best_iter + 1, test_score * 100.))
print(('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.)), file=sys.stderr)
if __name__ == '__main__':
test_mlp()
|
vmayoral/basic_reinforcement_learning
|
tutorial5/tests/theano_mnist_mlp.py
|
Python
|
gpl-3.0
| 14,310 | 0.001048 |
import pytest
import requests
import time
from threading import Thread
from bottle import default_app, WSGIRefServer
from tomviz.acquisition import server
class Server(Thread):
def __init__(self, dev=False, port=9999):
super(Server, self).__init__()
self.host = 'localhost'
self.port = port
self.base_url = 'http://%s:%d' % (self.host, self.port)
self.url = '%s/acquisition' % self.base_url
self.dev = dev
self._server = WSGIRefServer(host=self.host, port=self.port)
def run(self):
self.setup()
self._server.run(app=default_app())
def start(self):
super(Server, self).start()
# Wait for bottle to start
while True:
try:
requests.get(self.base_url)
break
except requests.ConnectionError:
time.sleep(0.1)
def setup(self, adapter=None):
server.setup(dev=self.dev, adapter=adapter)
def stop(self):
self._server.srv.shutdown()
# Force the socket to close so we can reuse the same port
self._server.srv.socket.close()
@pytest.fixture(scope="module")
def acquisition_server():
srv = Server()
srv.start()
yield srv
srv.stop()
srv.join()
@pytest.fixture(scope="module")
def acquisition_dev_server():
srv = Server(dev=True, port=9998)
srv.start()
yield srv
srv.stop()
srv.join()
|
cjh1/tomviz
|
acquisition/tests/conftest.py
|
Python
|
bsd-3-clause
| 1,440 | 0 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compare a txt file of predictions with gold targets from a TSV file."""
from absl import app
from absl import flags
from language.compgen.nqg.tasks import tsv_utils
from tensorflow.io import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string("gold", "", "tsv file containing gold targets.")
flags.DEFINE_string("predictions", "", "txt file with predicted targets.")
def main(unused_argv):
gold_examples = tsv_utils.read_tsv(FLAGS.gold)
preds = []
with gfile.GFile(FLAGS.predictions, "r") as f:
for line in f:
preds.append(line.rstrip())
correct = 0
incorrect = 0
for pred, gold_example in zip(preds, gold_examples):
if pred == gold_example[1]:
correct += 1
else:
incorrect += 1
print("Incorrect for example %s.\nTarget: %s\nPrediction: %s" %
(gold_example[0], gold_example[1], pred))
print("correct: %s" % correct)
print("incorrect: %s" % incorrect)
print("pct: %s" % str(float(correct) / float(correct + incorrect)))
if __name__ == "__main__":
app.run(main)
|
google-research/language
|
language/compgen/nqg/tasks/compare_predictions.py
|
Python
|
apache-2.0
| 1,653 | 0.008469 |
# -*- encoding: utf-8 -*-
"""Utility functions for computing combinations of dimensions and hierarchy
levels"""
from __future__ import absolute_import
import itertools
import sys
import re
import os.path
import decimal
import datetime
import json
from collections import OrderedDict
from .errors import *
from . import compat
__all__ = [
"IgnoringDictionary",
"MissingPackage",
"localize_common",
"localize_attributes",
"get_localizable_attributes",
"decamelize",
"to_identifier",
"assert_instance",
"assert_all_instances",
"read_json_file",
"sorted_dependencies",
]
class IgnoringDictionary(OrderedDict):
"""Simple dictionary extension that will ignore any keys of which values
are empty (None/False)"""
def __setitem__(self, key, value):
if value is not None:
super(IgnoringDictionary, self).__setitem__(key, value)
def set(self, key, value):
"""Sets `value` for `key` even if value is null."""
super(IgnoringDictionary, self).__setitem__(key, value)
def __repr__(self):
items = []
for key, value in self.items():
item = '%s: %s' % (repr(key), repr(value))
items.append(item)
return "{%s}" % ", ".join(items)
def assert_instance(obj, class_, label):
"""Raises ArgumentError when `obj` is not instance of `cls`"""
if not isinstance(obj, class_):
raise ModelInconsistencyError("%s should be sublcass of %s, "
"provided: %s" % (label,
class_.__name__,
type(obj).__name__))
def assert_all_instances(list_, class_, label="object"):
"""Raises ArgumentError when objects in `list_` are not instances of
`cls`"""
for obj in list_ or []:
assert_instance(obj, class_, label="object")
class MissingPackageError(Exception):
"""Exception raised when encountered a missing package."""
pass
class MissingPackage(object):
"""Bogus class to handle missing optional packages - packages that are not
necessarily required for Cubes, but are needed for certain features."""
def __init__(self, package, feature = None, source = None, comment = None):
self.package = package
self.feature = feature
self.source = source
self.comment = comment
def __call__(self, *args, **kwargs):
self._fail()
def __getattr__(self, name):
self._fail()
def _fail(self):
if self.feature:
use = " to be able to use: %s" % self.feature
else:
use = ""
if self.source:
source = " from %s" % self.source
else:
source = ""
if self.comment:
comment = ". %s" % self.comment
else:
comment = ""
raise MissingPackageError("Optional package '%s' is not installed. "
"Please install the package%s%s%s" %
(self.package, source, use, comment))
def optional_import(name, feature=None, source=None, comment=None):
"""Optionally import package `name`. If package does not exist, import a
placeholder object, that raises an exception with more detailed
description about the missing package."""
try:
return __import__(name)
except ImportError:
return MissingPackage(name, feature, source, comment)
def expand_dictionary(record, separator = '.'):
"""Return expanded dictionary: treat keys are paths separated by
`separator`, create sub-dictionaries as necessary"""
result = {}
for key, value in record.items():
current = result
path = key.split(separator)
for part in path[:-1]:
if part not in current:
current[part] = {}
current = current[part]
current[path[-1]] = value
return result
def localize_common(obj, trans):
"""Localize common attributes: label and description"""
if "label" in trans:
obj.label = trans["label"]
if "description" in trans:
obj.description = trans["description"]
def localize_attributes(attribs, translations):
"""Localize list of attributes. `translations` should be a dictionary with
keys as attribute names, values are dictionaries with localizable
attribute metadata, such as ``label`` or ``description``."""
for (name, atrans) in translations.items():
attrib = attribs[name]
localize_common(attrib, atrans)
def get_localizable_attributes(obj):
"""Returns a dictionary with localizable attributes of `obj`."""
# FIXME: use some kind of class attribute to get list of localizable attributes
locale = {}
try:
if obj.label:
locale["label"] = obj.label
except:
pass
try:
if obj.description:
locale["description"] = obj.description
except:
pass
return locale
def decamelize(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1 \2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1 \2', s1)
def to_identifier(name):
return re.sub(r' ', r'_', name).lower()
def to_label(name, capitalize=True):
"""Converts `name` into label by replacing underscores by spaces. If
`capitalize` is ``True`` (default) then the first letter of the label is
capitalized."""
label = name.replace("_", " ")
if capitalize:
label = label.capitalize()
return label
def coalesce_option_value(value, value_type, label=None):
"""Convert string into an object value of `value_type`. The type might be:
`string` (no conversion), `integer`, `float`, `list` – comma separated
list of strings.
"""
value_type = value_type.lower()
try:
if value_type in ('string', 'str'):
return_value = str(value)
elif value_type == 'list':
if isinstance(value, compat.string_type):
return_value = value.split(",")
else:
return_value = list(value)
elif value_type == "float":
return_value = float(value)
elif value_type in ["integer", "int"]:
return_value = int(value)
elif value_type in ["bool", "boolean"]:
if not value:
return_value = False
elif isinstance(value, compat.string_type):
return_value = value.lower() in ["1", "true", "yes", "on"]
else:
return_value = bool(value)
else:
raise ArgumentError("Unknown option value type %s" % value_type)
except ValueError:
if label:
label = "parameter %s " % label
else:
label = ""
raise ArgumentError("Unable to convert %svalue '%s' into type %s" %
(label, astring, value_type))
return return_value
def coalesce_options(options, types):
"""Coalesce `options` dictionary according to types dictionary. Keys in
`types` refer to keys in `options`, values of `types` are value types:
string, list, float, integer or bool."""
out = {}
for key, value in options.items():
if key in types:
out[key] = coalesce_option_value(value, types[key], key)
else:
out[key] = value
return out
def read_json_file(path, kind=None):
"""Read a JSON from `path`. This is convenience function that provides
more descriptive exception handling."""
kind = "%s " % str(kind) if kind else ""
if not os.path.exists(path):
raise ConfigurationError("Can not find %sfile '%s'"
% (kind, path))
try:
f = compat.open_unicode(path)
except IOError:
raise ConfigurationError("Can not open %sfile '%s'"
% (kind, path))
try:
content = json.load(f)
except ValueError as e:
raise SyntaxError("Syntax error in %sfile %s: %s"
% (kind, path, str(e)))
finally:
f.close()
return content
def sorted_dependencies(graph):
"""Return keys from `deps` ordered by dependency (topological sort).
`deps` is a dictionary where keys are strings and values are list of
strings where keys is assumed to be dependant on values.
Example::
A ---> B -+--> C
|
+--> D --> E
Will be: ``{"A": ["B"], "B": ["C", "D"], "D": ["E"],"E": []}``
"""
graph = dict((key, set(value)) for key, value in graph.items())
# L ← Empty list that will contain the sorted elements
L = []
# S ← Set of all nodes with no dependencies (incoming edges)
S = set(parent for parent, req in graph.items() if not req)
while S:
# remove a node n from S
n = S.pop()
# insert n into L
L.append(n)
# for each node m with an edge e from n to m do
# (n that depends on m)
parents = [parent for parent, req in graph.items() if n in req]
for parent in parents:
graph[parent].remove(n)
# remove edge e from the graph
# if m has no other incoming edges then insert m into S
if not graph[parent]:
S.add(parent)
# if graph has edges then -> error
nonempty = [k for k, v in graph.items() if v]
if nonempty:
raise ArgumentError("Cyclic dependency of: %s"
% ", ".join(nonempty))
return L
|
noyeitan/cubes
|
cubes/common.py
|
Python
|
mit
| 9,653 | 0.002695 |
import math
class P4(object):
def p4(self):
'''4-momentum, px, py, pz, E'''
return self._tlv
def p3(self):
'''3-momentum px, py, pz'''
return self._tlv.Vect()
def e(self):
'''energy'''
return self._tlv.E()
def pt(self):
'''transverse momentum (magnitude of p3 in transverse plane)'''
return self._tlv.Pt()
def theta(self):
'''angle w/r to transverse plane'''
return math.pi/2 - self._tlv.Theta()
def eta(self):
'''pseudo-rapidity (-ln(tan self._tlv.Theta()/2)).
theta = 0 -> eta = +inf
theta = pi/2 -> 0
theta = pi -> eta = -inf
'''
return self._tlv.Eta()
def phi(self):
'''azymuthal angle (from x axis, in the transverse plane)'''
return self._tlv.Phi()
def m(self):
'''mass'''
return self._tlv.M()
def __str__(self):
return 'pt = {e:5.1f}, e = {e:5.1f}, eta = {eta:5.2f}, theta = {theta:5.2f}, phi = {phi:5.2f}, mass = {m:5.2f}'.format(
pt = self.pt(),
e = self.e(),
eta = self.eta(),
theta = self.theta(),
phi = self.phi(),
m = self.m()
)
|
semkiv/heppy_fcc
|
particles/p4.py
|
Python
|
gpl-3.0
| 1,250 | 0.0152 |
from django.conf.urls import url
from .viewsets import BookmarkViewSet
bookmark_list = BookmarkViewSet.as_view({
'get': 'list',
'post': 'create'
})
bookmark_detail = BookmarkViewSet.as_view({
'get': 'retrieve',
'patch': 'update',
'delete': 'destroy'
})
urlpatterns = [
url(r'^bookmarks/$', bookmark_list, name='bookmarks'),
url(r'^bookmarks/(?P<pk>[0-9]+)/$', bookmark_detail, name='bookmark'),
]
|
hnakamur/django-bootstrap-table-example
|
project/apiv2/urls.py
|
Python
|
mit
| 430 | 0.002326 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Dell EMC Inc.
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: idrac_redfish_command
version_added: "2.8"
short_description: Manages Out-Of-Band controllers using iDRAC OEM Redfish APIs
description:
- Builds Redfish URIs locally and sends them to remote OOB controllers to
perform an action.
- For use with Dell iDRAC operations that require Redfish OEM extensions
options:
category:
required: true
description:
- Category to execute on OOB controller
command:
required: true
description:
- List of commands to execute on OOB controller
baseuri:
required: true
description:
- Base URI of OOB controller
username:
required: true
description:
- User for authentication with OOB controller
password:
required: true
description:
- Password for authentication with OOB controller
timeout:
description:
- Timeout in seconds for URL requests to OOB controller
default: 10
type: int
version_added: '2.8'
author: "Jose Delarosa (@jose-delarosa)"
'''
EXAMPLES = '''
- name: Create BIOS configuration job (schedule BIOS setting update)
idrac_redfish_command:
category: Systems
command: CreateBiosConfigJob
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
'''
RETURN = '''
msg:
description: Message with action result or error description
returned: always
type: str
sample: "Action was successful"
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.redfish_utils import RedfishUtils, HEADERS
from ansible.module_utils._text import to_native
class IdracRedfishUtils(RedfishUtils):
def create_bios_config_job(self):
result = {}
key = "Bios"
jobs = "Jobs"
# Search for 'key' entry and extract URI from it
response = self.get_request(self.root_uri + self.systems_uris[0])
if response['ret'] is False:
return response
result['ret'] = True
data = response['data']
if key not in data:
return {'ret': False, 'msg': "Key %s not found" % key}
bios_uri = data[key]["@odata.id"]
# Extract proper URI
response = self.get_request(self.root_uri + bios_uri)
if response['ret'] is False:
return response
result['ret'] = True
data = response['data']
set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"][
"@odata.id"]
payload = {"TargetSettingsURI": set_bios_attr_uri}
response = self.post_request(
self.root_uri + self.manager_uri + "/" + jobs,
payload, HEADERS)
if response['ret'] is False:
return response
response_output = response['resp'].__dict__
job_id = response_output["headers"]["Location"]
job_id = re.search("JID_.+", job_id).group()
# Currently not passing job_id back to user but patch is coming
return {'ret': True, 'msg': "Config job %s created" % job_id}
CATEGORY_COMMANDS_ALL = {
"Systems": ["CreateBiosConfigJob"],
"Accounts": [],
"Manager": []
}
def main():
result = {}
module = AnsibleModule(
argument_spec=dict(
category=dict(required=True),
command=dict(required=True, type='list'),
baseuri=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
timeout=dict(type='int', default=10)
),
supports_check_mode=False
)
category = module.params['category']
command_list = module.params['command']
# admin credentials used for authentication
creds = {'user': module.params['username'],
'pswd': module.params['password']}
# timeout
timeout = module.params['timeout']
# Build root URI
root_uri = "https://" + module.params['baseuri']
rf_uri = "/redfish/v1/"
rf_utils = IdracRedfishUtils(creds, root_uri, timeout)
# Check that Category is valid
if category not in CATEGORY_COMMANDS_ALL:
module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
# Check that all commands are valid
for cmd in command_list:
# Fail if even one command given is invalid
if cmd not in CATEGORY_COMMANDS_ALL[category]:
module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
# Organize by Categories / Commands
if category == "Systems":
# execute only if we find a System resource
result = rf_utils._find_systems_resource(rf_uri)
if result['ret'] is False:
module.fail_json(msg=to_native(result['msg']))
for command in command_list:
if command == "CreateBiosConfigJob":
# execute only if we find a Managers resource
result = rf_utils._find_managers_resource(rf_uri)
if result['ret'] is False:
module.fail_json(msg=to_native(result['msg']))
result = rf_utils.create_bios_config_job()
# Return data back or fail with proper message
if result['ret'] is True:
del result['ret']
module.exit_json(changed=True, msg='Action was successful')
else:
module.fail_json(msg=to_native(result['msg']))
if __name__ == '__main__':
main()
|
dagwieers/ansible
|
lib/ansible/modules/remote_management/redfish/idrac_redfish_command.py
|
Python
|
gpl-3.0
| 5,884 | 0.00119 |
# -*- coding: utf-8 -*-
# Copyright 2015 Eficent - Jordi Ballester Alomar
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import api, fields, models
class AnalyticAccountOpen(models.TransientModel):
_name = 'analytic.account.open'
_description = 'Open single analytic account'
analytic_account_id = fields.Many2one(
'account.analytic.account',
'Analytic Account',
required=True
)
include_child = fields.Boolean(
'Include child accounts',
default=True
)
@api.model
def _get_child_analytic_accounts(self, curr_id):
result = {}
result[curr_id] = True
# Now add the children
self.env.cr.execute('''
WITH RECURSIVE children AS (
SELECT parent_id, id
FROM account_analytic_account
WHERE parent_id = %s
UNION ALL
SELECT a.parent_id, a.id
FROM account_analytic_account a
JOIN children b ON(a.parent_id = b.id)
)
SELECT * FROM children order by parent_id
''', (curr_id,))
res = self.env.cr.fetchall()
for x, y in res:
result[y] = True
return result
@api.multi
def analytic_account_open_window(self):
self.ensure_one()
act_window_id = self.env.ref(
'analytic.action_account_analytic_account_form')
result = act_window_id.read()[0]
acc_id = self.analytic_account_id.id
acc_ids = []
if self.include_child:
acc_ids = self._get_child_analytic_accounts(acc_id)
else:
acc_ids.append(acc_id)
result['domain'] = "[('id','in', ["+','.join(map(str, acc_ids))+"])]"
return result
|
sysadminmatmoz/pmis
|
analytic_account_open/wizards/analytic_account_open.py
|
Python
|
agpl-3.0
| 1,741 | 0 |
# -*- coding: utf-8 -*-
### BEGIN LICENSE
# Copyright (C) 2009 Philip Peitsch <philip.peitsch@gmail.com>
#This program is free software: you can redistribute it and/or modify it
#under the terms of the GNU General Public License version 3, as published
#by the Free Software Foundation.
#
#This program is distributed in the hope that it will be useful, but
#WITHOUT ANY WARRANTY; without even the implied warranties of
#MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
#PURPOSE. See the GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License along
#with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
import sys
import os
import gtk
from hudsonnotifier.hudsonnotifierconfig import getdatapath
class AboutHudsonnotifierDialog(gtk.AboutDialog):
__gtype_name__ = "AboutHudsonnotifierDialog"
def __init__(self):
"""__init__ - This function is typically not called directly.
Creation of a AboutHudsonnotifierDialog requires redeading the associated ui
file and parsing the ui definition extrenally,
and then calling AboutHudsonnotifierDialog.finish_initializing().
Use the convenience function NewAboutHudsonnotifierDialog to create
NewAboutHudsonnotifierDialog objects.
"""
pass
def finish_initializing(self, builder):
"""finish_initalizing should be called after parsing the ui definition
and creating a AboutHudsonnotifierDialog object with it in order to finish
initializing the start of the new AboutHudsonnotifierDialog instance.
"""
#get a reference to the builder and set up the signals
self.builder = builder
self.builder.connect_signals(self)
#code for other initialization actions should be added here
def NewAboutHudsonnotifierDialog():
"""NewAboutHudsonnotifierDialog - returns a fully instantiated
AboutHudsonnotifierDialog object. Use this function rather than
creating a AboutHudsonnotifierDialog instance directly.
"""
#look for the ui file that describes the ui
ui_filename = os.path.join(getdatapath(), 'ui', 'AboutHudsonnotifierDialog.ui')
if not os.path.exists(ui_filename):
ui_filename = None
builder = gtk.Builder()
builder.add_from_file(ui_filename)
dialog = builder.get_object("about_hudsonnotifier_dialog")
dialog.finish_initializing(builder)
return dialog
if __name__ == "__main__":
dialog = NewAboutHudsonnotifierDialog()
dialog.show()
gtk.main()
|
necolt/hudson-notifier
|
hudsonnotifier/AboutHudsonnotifierDialog.py
|
Python
|
gpl-3.0
| 2,628 | 0.012938 |
"""Utilities to support packages."""
# NOTE: This module must remain compatible with Python 2.3, as it is shared
# by setuptools for distribution with Python 2.3 and up.
import os
import sys
import imp
import os.path
from types import ModuleType
__all__ = [
'get_importer', 'iter_importers', 'get_loader', 'find_loader',
'walk_packages', 'iter_modules', 'get_data',
'ImpImporter', 'ImpLoader', 'read_code', 'extend_path',
]
def read_code(stream):
# This helper is needed in order for the PEP 302 emulation to
# correctly handle compiled files
import marshal
magic = stream.read(4)
if magic != imp.get_magic():
return None
stream.read(4) # Skip timestamp
return marshal.load(stream)
def simplegeneric(func):
"""Make a trivial single-dispatch generic function"""
registry = {}
def wrapper(*args, **kw):
ob = args[0]
try:
cls = ob.__class__
except AttributeError:
cls = type(ob)
try:
mro = cls.__mro__
except AttributeError:
try:
class cls(cls, object):
pass
mro = cls.__mro__[1:]
except TypeError:
mro = object, # must be an ExtensionClass or some such :(
for t in mro:
if t in registry:
return registry[t](*args, **kw)
else:
return func(*args, **kw)
try:
wrapper.__name__ = func.__name__
except (TypeError, AttributeError):
pass # Python 2.3 doesn't allow functions to be renamed
def register(typ, func=None):
if func is None:
return lambda f: register(typ, f)
registry[typ] = func
return func
wrapper.__dict__ = func.__dict__
wrapper.__doc__ = func.__doc__
wrapper.register = register
return wrapper
def walk_packages(path=None, prefix='', onerror=None):
"""Yields (module_loader, name, ispkg) for all modules recursively
on path, or, if path is None, all accessible modules.
'path' should be either None or a list of paths to look for
modules in.
'prefix' is a string to output on the front of every module name
on output.
Note that this function must import all *packages* (NOT all
modules!) on the given path, in order to access the __path__
attribute to find submodules.
'onerror' is a function which gets called with one argument (the
name of the package which was being imported) if any exception
occurs while trying to import a package. If no onerror function is
supplied, ImportErrors are caught and ignored, while all other
exceptions are propagated, terminating the search.
Examples:
# list all modules python can access
walk_packages()
# list all submodules of ctypes
walk_packages(ctypes.__path__, ctypes.__name__+'.')
"""
def seen(p, m={}):
if p in m:
return True
m[p] = True
for importer, name, ispkg in iter_modules(path, prefix):
yield importer, name, ispkg
if ispkg:
try:
__import__(name)
except ImportError:
if onerror is not None:
onerror(name)
except Exception:
if onerror is not None:
onerror(name)
else:
raise
else:
path = getattr(sys.modules[name], '__path__', None) or []
# don't traverse path items we've seen before
path = [p for p in path if not seen(p)]
for item in walk_packages(path, name+'.', onerror):
yield item
def iter_modules(path=None, prefix=''):
"""Yields (module_loader, name, ispkg) for all submodules on path,
or, if path is None, all top-level modules on sys.path.
'path' should be either None or a list of paths to look for
modules in.
'prefix' is a string to output on the front of every module name
on output.
"""
if path is None:
importers = iter_importers()
else:
importers = map(get_importer, path)
yielded = {}
for i in importers:
for name, ispkg in iter_importer_modules(i, prefix):
if name not in yielded:
yielded[name] = 1
yield i, name, ispkg
#@simplegeneric
def iter_importer_modules(importer, prefix=''):
if not hasattr(importer, 'iter_modules'):
return []
return importer.iter_modules(prefix)
iter_importer_modules = simplegeneric(iter_importer_modules)
class ImpImporter:
"""PEP 302 Importer that wraps Python's "classic" import algorithm
ImpImporter(dirname) produces a PEP 302 importer that searches that
directory. ImpImporter(None) produces a PEP 302 importer that searches
the current sys.path, plus any modules that are frozen or built-in.
Note that ImpImporter does not currently support being used by placement
on sys.meta_path.
"""
def __init__(self, path=None):
self.path = path
def find_module(self, fullname, path=None):
# Note: we ignore 'path' argument since it is only used via meta_path
subname = fullname.split(".")[-1]
if subname != fullname and self.path is None:
return None
if self.path is None:
path = None
else:
path = [os.path.realpath(self.path)]
try:
file, filename, etc = imp.find_module(subname, path)
except ImportError:
return None
return ImpLoader(fullname, file, filename, etc)
def iter_modules(self, prefix=''):
if self.path is None or not os.path.isdir(self.path):
return
yielded = {}
import inspect
try:
filenames = os.listdir(self.path)
except OSError:
# ignore unreadable directories like import does
filenames = []
filenames.sort() # handle packages before same-named modules
for fn in filenames:
modname = inspect.getmodulename(fn)
if modname=='__init__' or modname in yielded:
continue
path = os.path.join(self.path, fn)
ispkg = False
if not modname and os.path.isdir(path) and '.' not in fn:
modname = fn
try:
dircontents = os.listdir(path)
except OSError:
# ignore unreadable directories like import does
dircontents = []
for fn in dircontents:
subname = inspect.getmodulename(fn)
if subname=='__init__':
ispkg = True
break
else:
continue # not a package
if modname and '.' not in modname:
yielded[modname] = 1
yield prefix + modname, ispkg
class ImpLoader:
"""PEP 302 Loader that wraps Python's "classic" import algorithm
"""
code = source = None
def __init__(self, fullname, file, filename, etc):
self.file = file
self.filename = filename
self.fullname = fullname
self.etc = etc
def load_module(self, fullname):
self._reopen()
try:
mod = imp.load_module(fullname, self.file, self.filename, self.etc)
finally:
if self.file:
self.file.close()
# Note: we don't set __loader__ because we want the module to look
# normal; i.e. this is just a wrapper for standard import machinery
return mod
@staticmethod
def get_data(pathname):
return open(pathname, "rb").read()
def _reopen(self):
if self.file and self.file.closed:
mod_type = self.etc[2]
if mod_type==imp.PY_SOURCE:
self.file = open(self.filename, 'rU')
elif mod_type in (imp.PY_COMPILED, imp.C_EXTENSION):
self.file = open(self.filename, 'rb')
def _fix_name(self, fullname):
if fullname is None:
fullname = self.fullname
elif fullname != self.fullname:
raise ImportError("Loader for module %s cannot handle "
"module %s" % (self.fullname, fullname))
return fullname
def is_package(self, fullname):
return self.etc[2]==imp.PKG_DIRECTORY
def get_code(self, fullname=None):
fullname = self._fix_name(fullname)
if self.code is None:
mod_type = self.etc[2]
if mod_type==imp.PY_SOURCE:
source = self.get_source(fullname)
self.code = compile(source, self.filename, 'exec')
elif mod_type==imp.PY_COMPILED:
self._reopen()
try:
self.code = read_code(self.file)
finally:
self.file.close()
elif mod_type==imp.PKG_DIRECTORY:
self.code = self._get_delegate().get_code()
return self.code
def get_source(self, fullname=None):
if self.source is None:
mod_type = self.etc[2]
if mod_type==imp.PY_SOURCE:
self._reopen()
try:
self.source = self.file.read()
finally:
self.file.close()
elif mod_type==imp.PY_COMPILED:
if os.path.exists(self.filename[:-1]):
f = open(self.filename[:-1], 'rU')
self.source = f.read()
f.close()
elif mod_type==imp.PKG_DIRECTORY:
self.source = self._get_delegate().get_source()
return self.source
def _get_delegate(self):
return ImpImporter(self.filename).find_module('__init__')
def get_filename(self, fullname=None):
if self.etc[2]==imp.PKG_DIRECTORY:
return self._get_delegate().get_filename()
elif self.etc[2] in (imp.PY_SOURCE, imp.PY_COMPILED, imp.C_EXTENSION):
return self.filename
return None
try:
import zipimport
from zipimport import zipimporter
def iter_zipimport_modules(importer, prefix=''):
dirlist = zipimport._zip_directory_cache[importer.archive].keys()
dirlist.sort()
_prefix = importer.prefix
plen = len(_prefix)
yielded = {}
import inspect
for fn in dirlist:
if not fn.startswith(_prefix):
continue
fn = fn[plen:].split(os.sep)
if len(fn)==2 and fn[1].startswith('__init__.py'):
if fn[0] not in yielded:
yielded[fn[0]] = 1
yield fn[0], True
if len(fn)!=1:
continue
modname = inspect.getmodulename(fn[0])
if modname=='__init__':
continue
if modname and '.' not in modname and modname not in yielded:
yielded[modname] = 1
yield prefix + modname, False
iter_importer_modules.register(zipimporter, iter_zipimport_modules)
except ImportError:
pass
def get_importer(path_item):
"""Retrieve a PEP 302 importer for the given path item
The returned importer is cached in sys.path_importer_cache
if it was newly created by a path hook.
If there is no importer, a wrapper around the basic import
machinery is returned. This wrapper is never inserted into
the importer cache (None is inserted instead).
The cache (or part of it) can be cleared manually if a
rescan of sys.path_hooks is necessary.
"""
if type(path_item) == unicode:
path_item = path_item.encode(sys.getfilesystemencoding())
try:
importer = sys.path_importer_cache[path_item]
except KeyError:
for path_hook in sys.path_hooks:
try:
importer = path_hook(path_item)
break
except ImportError:
pass
else:
importer = None
sys.path_importer_cache.setdefault(path_item, importer)
if importer is None:
try:
importer = ImpImporter(path_item)
except ImportError:
importer = None
return importer
def iter_importers(fullname=""):
"""Yield PEP 302 importers for the given module name
If fullname contains a '.', the importers will be for the package
containing fullname, otherwise they will be importers for sys.meta_path,
sys.path, and Python's "classic" import machinery, in that order. If
the named module is in a package, that package is imported as a side
effect of invoking this function.
Non PEP 302 mechanisms (e.g. the Windows registry) used by the
standard import machinery to find files in alternative locations
are partially supported, but are searched AFTER sys.path. Normally,
these locations are searched BEFORE sys.path, preventing sys.path
entries from shadowing them.
For this to cause a visible difference in behaviour, there must
be a module or package name that is accessible via both sys.path
and one of the non PEP 302 file system mechanisms. In this case,
the emulation will find the former version, while the builtin
import mechanism will find the latter.
Items of the following types can be affected by this discrepancy:
imp.C_EXTENSION, imp.PY_SOURCE, imp.PY_COMPILED, imp.PKG_DIRECTORY
"""
if fullname.startswith('.'):
raise ImportError("Relative module names not supported")
if '.' in fullname:
# Get the containing package's __path__
pkg = '.'.join(fullname.split('.')[:-1])
if pkg not in sys.modules:
__import__(pkg)
path = getattr(sys.modules[pkg], '__path__', None) or []
else:
for importer in sys.meta_path:
yield importer
path = sys.path
for item in path:
yield get_importer(item)
if '.' not in fullname:
yield ImpImporter()
def get_loader(module_or_name):
"""Get a PEP 302 "loader" object for module_or_name
If the module or package is accessible via the normal import
mechanism, a wrapper around the relevant part of that machinery
is returned. Returns None if the module cannot be found or imported.
If the named module is not already imported, its containing package
(if any) is imported, in order to establish the package __path__.
This function uses iter_importers(), and is thus subject to the same
limitations regarding platform-specific special import locations such
as the Windows registry.
"""
if module_or_name in sys.modules:
module_or_name = sys.modules[module_or_name]
if isinstance(module_or_name, ModuleType):
module = module_or_name
loader = getattr(module, '__loader__', None)
if loader is not None:
return loader
fullname = module.__name__
else:
fullname = module_or_name
return find_loader(fullname)
def find_loader(fullname):
"""Find a PEP 302 "loader" object for fullname
If fullname contains dots, path must be the containing package's __path__.
Returns None if the module cannot be found or imported. This function uses
iter_importers(), and is thus subject to the same limitations regarding
platform-specific special import locations such as the Windows registry.
"""
for importer in iter_importers(fullname):
loader = importer.find_module(fullname)
if loader is not None:
return loader
return None
def extend_path(path, name):
"""Extend a package's path.
Intended use is to place the following code in a package's __init__.py:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
This will add to the package's __path__ all subdirectories of
directories on sys.path named after the package. This is useful
if one wants to distribute different parts of a single logical
package as multiple directories.
It also looks for *.pkg files beginning where * matches the name
argument. This feature is similar to *.pth files (see site.py),
except that it doesn't special-case lines starting with 'import'.
A *.pkg file is trusted at face value: apart from checking for
duplicates, all entries found in a *.pkg file are added to the
path, regardless of whether they are exist the filesystem. (This
is a feature.)
If the input path is not a list (as is the case for frozen
packages) it is returned unchanged. The input path is not
modified; an extended copy is returned. Items are only appended
to the copy at the end.
It is assumed that sys.path is a sequence. Items of sys.path that
are not (unicode or 8-bit) strings referring to existing
directories are ignored. Unicode items of sys.path that cause
errors when used as filenames may cause this function to raise an
exception (in line with os.path.isdir() behavior).
"""
if not isinstance(path, list):
# This could happen e.g. when this is called from inside a
# frozen package. Return the path unchanged in that case.
return path
pname = os.path.join(*name.split('.')) # Reconstitute as relative path
# Just in case os.extsep != '.'
sname = os.extsep.join(name.split('.'))
sname_pkg = sname + os.extsep + "pkg"
init_py = "__init__" + os.extsep + "py"
path = path[:] # Start with a copy of the existing path
for dir in sys.path:
if not isinstance(dir, basestring) or not os.path.isdir(dir):
continue
subdir = os.path.join(dir, pname)
# XXX This may still add duplicate entries to path on
# case-insensitive filesystems
initfile = os.path.join(subdir, init_py)
if subdir not in path and os.path.isfile(initfile):
path.append(subdir)
# XXX Is this the right thing for subpackages like zope.app?
# It looks for a file named "zope.app.pkg"
pkgfile = os.path.join(dir, sname_pkg)
if os.path.isfile(pkgfile):
try:
f = open(pkgfile)
except IOError, msg:
sys.stderr.write("Can't open %s: %s\n" %
(pkgfile, msg))
else:
for line in f:
line = line.rstrip('\n')
if not line or line.startswith('#'):
continue
path.append(line) # Don't check for existence!
f.close()
return path
def get_data(package, resource):
"""Get a resource from a package.
This is a wrapper round the PEP 302 loader get_data API. The package
argument should be the name of a package, in standard module format
(foo.bar). The resource argument should be in the form of a relative
filename, using '/' as the path separator. The parent directory name '..'
is not allowed, and nor is a rooted name (starting with a '/').
The function returns a binary string, which is the contents of the
specified resource.
For packages located in the filesystem, which have already been imported,
this is the rough equivalent of
d = os.path.dirname(sys.modules[package].__file__)
data = open(os.path.join(d, resource), 'rb').read()
If the package cannot be located or loaded, or it uses a PEP 302 loader
which does not support get_data(), then None is returned.
"""
loader = get_loader(package)
if loader is None or not hasattr(loader, 'get_data'):
return None
mod = sys.modules.get(package) or loader.load_module(package)
if mod is None or not hasattr(mod, '__file__'):
return None
# Modify the resource name to be compatible with the loader.get_data
# signature - an os.path format "filename" starting with the dirname of
# the package's __file__
parts = resource.split('/')
parts.insert(0, os.path.dirname(mod.__file__))
resource_name = os.path.join(*parts)
return loader.get_data(resource_name)
|
Khroki/MCEdit-Unified
|
pkgutil.py
|
Python
|
isc
| 20,304 | 0.000788 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compile Darknet Models
=====================
This article is a test script to test darknet models with NNVM.
All the required models and libraries will be downloaded from the internet
by the script.
"""
import numpy as np
import tvm
from tvm.contrib import graph_runtime
from tvm.contrib.download import download_testdata
download_testdata.__test__ = False
from nnvm import frontend
from tvm.relay.testing.darknet import LAYERTYPE
from tvm.relay.testing.darknet import __darknetffi__
import nnvm.compiler
DARKNET_LIB = 'libdarknet2.0.so'
DARKNETLIB_URL = 'https://github.com/siju-samuel/darknet/blob/master/lib/' \
+ DARKNET_LIB + '?raw=true'
LIB = __darknetffi__.dlopen(download_testdata(DARKNETLIB_URL, DARKNET_LIB, module='darknet'))
DARKNET_TEST_IMAGE_NAME = 'dog.jpg'
DARKNET_TEST_IMAGE_URL = 'https://github.com/siju-samuel/darknet/blob/master/data/' + DARKNET_TEST_IMAGE_NAME +'?raw=true'
DARKNET_TEST_IMAGE_PATH = download_testdata(DARKNET_TEST_IMAGE_URL, DARKNET_TEST_IMAGE_NAME, module='data')
def _read_memory_buffer(shape, data, dtype='float32'):
length = 1
for x in shape:
length *= x
data_np = np.zeros(length, dtype=dtype)
for i in range(length):
data_np[i] = data[i]
return data_np.reshape(shape)
def _get_tvm_output(net, data, build_dtype='float32'):
'''Compute TVM output'''
dtype = 'float32'
sym, params = frontend.darknet.from_darknet(net, dtype)
target = 'llvm'
shape_dict = {'data': data.shape}
graph, library, params = nnvm.compiler.build(sym, target, shape_dict,
build_dtype, params=params)
# Execute on TVM
ctx = tvm.cpu(0)
m = graph_runtime.create(graph, library, ctx)
# set inputs
m.set_input('data', tvm.nd.array(data.astype(dtype)))
m.set_input(**params)
m.run()
# get outputs
tvm_out = []
for i in range(m.get_num_outputs()):
tvm_out.append(m.get_output(i).asnumpy())
return tvm_out
def _load_net(cfg_url, cfg_name, weights_url, weights_name):
cfg_path = download_testdata(cfg_url, cfg_name, module='darknet')
weights_path = download_testdata(weights_url, weights_name, module='darknet')
net = LIB.load_network(cfg_path.encode('utf-8'), weights_path.encode('utf-8'), 0)
return net
def verify_darknet_frontend(net, build_dtype='float32'):
'''Test network with given input image on both darknet and tvm'''
def get_darknet_output(net, img):
LIB.network_predict_image(net, img)
out = []
for i in range(net.n):
layer = net.layers[i]
if layer.type == LAYERTYPE.REGION:
attributes = np.array([layer.n, layer.out_c, layer.out_h,
layer.out_w, layer.classes,
layer.coords, layer.background],
dtype=np.int32)
out.insert(0, attributes)
out.insert(0, _read_memory_buffer((layer.n*2, ), layer.biases))
layer_outshape = (layer.batch, layer.out_c,
layer.out_h, layer.out_w)
out.insert(0, _read_memory_buffer(layer_outshape, layer.output))
elif layer.type == LAYERTYPE.YOLO:
attributes = np.array([layer.n, layer.out_c, layer.out_h,
layer.out_w, layer.classes,
layer.total],
dtype=np.int32)
out.insert(0, attributes)
out.insert(0, _read_memory_buffer((layer.total*2, ), layer.biases))
out.insert(0, _read_memory_buffer((layer.n, ), layer.mask, dtype='int32'))
layer_outshape = (layer.batch, layer.out_c,
layer.out_h, layer.out_w)
out.insert(0, _read_memory_buffer(layer_outshape, layer.output))
elif i == net.n-1:
if layer.type == LAYERTYPE.CONNECTED:
darknet_outshape = (layer.batch, layer.out_c)
elif layer.type in [LAYERTYPE.SOFTMAX]:
darknet_outshape = (layer.batch, layer.outputs)
else:
darknet_outshape = (layer.batch, layer.out_c,
layer.out_h, layer.out_w)
out.insert(0, _read_memory_buffer(darknet_outshape, layer.output))
return out
dtype = 'float32'
img = LIB.letterbox_image(LIB.load_image_color(DARKNET_TEST_IMAGE_PATH.encode('utf-8'), 0, 0), net.w, net.h)
darknet_output = get_darknet_output(net, img)
batch_size = 1
data = np.empty([batch_size, img.c, img.h, img.w], dtype)
i = 0
for c in range(img.c):
for h in range(img.h):
for k in range(img.w):
data[0][c][h][k] = img.data[i]
i = i + 1
tvm_out = _get_tvm_output(net, data, build_dtype)
for tvm_outs, darknet_out in zip(tvm_out, darknet_output):
tvm.testing.assert_allclose(darknet_out, tvm_outs, rtol=1e-3, atol=1e-3)
def verify_rnn_forward(net):
'''Test network with given input data on both darknet and tvm'''
def get_darknet_network_predict(net, data):
return LIB.network_predict(net, data)
from cffi import FFI
ffi = FFI()
np_arr = np.zeros([1, net.inputs], dtype='float32')
np_arr[0, 84] = 1
cffi_arr = ffi.cast('float*', np_arr.ctypes.data)
tvm_out = _get_tvm_output(net, np_arr)[0]
darknet_output = get_darknet_network_predict(net, cffi_arr)
darknet_out = np.zeros(net.outputs, dtype='float32')
for i in range(net.outputs):
darknet_out[i] = darknet_output[i]
last_layer = net.layers[net.n-1]
darknet_outshape = (last_layer.batch, last_layer.outputs)
darknet_out = darknet_out.reshape(darknet_outshape)
tvm.testing.assert_allclose(darknet_out, tvm_out, rtol=1e-4, atol=1e-4)
def test_forward_extraction():
'''test extraction model'''
model_name = 'extraction'
cfg_name = model_name + '.cfg'
weights_name = model_name + '.weights'
cfg_url = 'https://github.com/pjreddie/darknet/blob/master/cfg/' + cfg_name + '?raw=true'
weights_url = 'http://pjreddie.com/media/files/' + weights_name + '?raw=true'
net = _load_net(cfg_url, cfg_name, weights_url, weights_name)
verify_darknet_frontend(net)
LIB.free_network(net)
def test_forward_alexnet():
'''test alexnet model'''
model_name = 'alexnet'
cfg_name = model_name + '.cfg'
weights_name = model_name + '.weights'
cfg_url = 'https://github.com/pjreddie/darknet/blob/master/cfg/' + cfg_name + '?raw=true'
weights_url = 'http://pjreddie.com/media/files/' + weights_name + '?raw=true'
net = _load_net(cfg_url, cfg_name, weights_url, weights_name)
verify_darknet_frontend(net)
LIB.free_network(net)
def test_forward_resnet50():
'''test resnet50 model'''
model_name = 'resnet50'
cfg_name = model_name + '.cfg'
weights_name = model_name + '.weights'
cfg_url = 'https://github.com/pjreddie/darknet/blob/master/cfg/' + cfg_name + '?raw=true'
weights_url = 'http://pjreddie.com/media/files/' + weights_name + '?raw=true'
net = _load_net(cfg_url, cfg_name, weights_url, weights_name)
verify_darknet_frontend(net)
LIB.free_network(net)
def test_forward_yolov2():
'''test yolov2 model'''
model_name = 'yolov2'
cfg_name = model_name + '.cfg'
weights_name = model_name + '.weights'
cfg_url = 'https://github.com/pjreddie/darknet/blob/master/cfg/' + cfg_name + '?raw=true'
weights_url = 'http://pjreddie.com/media/files/' + weights_name + '?raw=true'
net = _load_net(cfg_url, cfg_name, weights_url, weights_name)
build_dtype = {}
verify_darknet_frontend(net, build_dtype)
LIB.free_network(net)
def test_forward_yolov3():
'''test yolov3 model'''
model_name = 'yolov3'
cfg_name = model_name + '.cfg'
weights_name = model_name + '.weights'
cfg_url = 'https://github.com/pjreddie/darknet/blob/master/cfg/' + cfg_name + '?raw=true'
weights_url = 'http://pjreddie.com/media/files/' + weights_name + '?raw=true'
net = _load_net(cfg_url, cfg_name, weights_url, weights_name)
build_dtype = {}
verify_darknet_frontend(net, build_dtype)
LIB.free_network(net)
def test_forward_convolutional():
'''test convolutional layer'''
net = LIB.make_network(1)
layer = LIB.make_convolutional_layer(1, 224, 224, 3, 32, 1, 3, 2, 0, 1, 0, 0, 0, 0)
net.layers[0] = layer
net.w = net.h = 224
LIB.resize_network(net, 224, 224)
verify_darknet_frontend(net)
LIB.free_network(net)
def test_forward_dense():
'''test fully connected layer'''
net = LIB.make_network(1)
layer = LIB.make_connected_layer(1, 75, 20, 1, 0, 0)
net.layers[0] = layer
net.w = net.h = 5
LIB.resize_network(net, 5, 5)
verify_darknet_frontend(net)
LIB.free_network(net)
def test_forward_dense_batchnorm():
'''test fully connected layer with batchnorm'''
net = LIB.make_network(1)
layer = LIB.make_connected_layer(1, 12, 2, 1, 1, 0)
for i in range(5):
layer.rolling_mean[i] = np.random.rand(1)
layer.rolling_variance[i] = np.random.rand(1)
layer.scales[i] = np.random.rand(1)
net.layers[0] = layer
net.w = net.h = 2
LIB.resize_network(net, 2, 2)
verify_darknet_frontend(net)
LIB.free_network(net)
def test_forward_maxpooling():
'''test maxpooling layer'''
net = LIB.make_network(1)
layer = LIB.make_maxpool_layer(1, 224, 224, 3, 2, 2, 0)
net.layers[0] = layer
net.w = net.h = 224
LIB.resize_network(net, 224, 224)
verify_darknet_frontend(net)
LIB.free_network(net)
def test_forward_avgpooling():
'''test avgerage pooling layer'''
net = LIB.make_network(1)
layer = LIB.make_avgpool_layer(1, 224, 224, 3)
net.layers[0] = layer
net.w = net.h = 224
LIB.resize_network(net, 224, 224)
verify_darknet_frontend(net)
LIB.free_network(net)
def test_forward_batch_norm():
'''test batch normalization layer'''
net = LIB.make_network(1)
layer = LIB.make_convolutional_layer(1, 224, 224, 3, 32, 1, 3, 2, 0, 1, 1, 0, 0, 0)
for i in range(32):
layer.rolling_mean[i] = np.random.rand(1)
layer.rolling_variance[i] = np.random.rand(1)
net.layers[0] = layer
net.w = net.h = 224
LIB.resize_network(net, 224, 224)
verify_darknet_frontend(net)
LIB.free_network(net)
def test_forward_shortcut():
'''test shortcut layer'''
net = LIB.make_network(3)
layer_1 = LIB.make_convolutional_layer(1, 224, 224, 3, 32, 1, 3, 2, 0, 1, 0, 0, 0, 0)
layer_2 = LIB.make_convolutional_layer(1, 111, 111, 32, 32, 1, 1, 1, 0, 1, 0, 0, 0, 0)
layer_3 = LIB.make_shortcut_layer(1, 0, 111, 111, 32, 111, 111, 32)
layer_3.activation = 1
layer_3.alpha = 1
layer_3.beta = 1
net.layers[0] = layer_1
net.layers[1] = layer_2
net.layers[2] = layer_3
net.w = net.h = 224
LIB.resize_network(net, 224, 224)
verify_darknet_frontend(net)
LIB.free_network(net)
def test_forward_reorg():
'''test reorg layer'''
net = LIB.make_network(2)
layer_1 = LIB.make_convolutional_layer(1, 222, 222, 3, 32, 1, 3, 2, 0, 1, 0, 0, 0, 0)
layer_2 = LIB.make_reorg_layer(1, 110, 110, 32, 2, 0, 0, 0)
net.layers[0] = layer_1
net.layers[1] = layer_2
net.w = net.h = 222
LIB.resize_network(net, 222, 222)
verify_darknet_frontend(net)
LIB.free_network(net)
def test_forward_region():
'''test region layer'''
net = LIB.make_network(2)
layer_1 = LIB.make_convolutional_layer(1, 19, 19, 3, 425, 1, 1, 1, 0, 1, 0, 0, 0, 0)
layer_2 = LIB.make_region_layer(1, 19, 19, 5, 80, 4)
layer_2.softmax = 1
net.layers[0] = layer_1
net.layers[1] = layer_2
net.w = net.h = 19
LIB.resize_network(net, 19, 19)
build_dtype = {}
verify_darknet_frontend(net, build_dtype)
LIB.free_network(net)
def test_forward_yolo_op():
'''test yolo layer'''
net = LIB.make_network(2)
layer_1 = LIB.make_convolutional_layer(1, 224, 224, 3, 14, 1, 3, 2, 0, 1, 0, 0, 0, 0)
layer_2 = LIB.make_yolo_layer(1, 111, 111, 2, 9, __darknetffi__.NULL, 2)
net.layers[0] = layer_1
net.layers[1] = layer_2
net.w = net.h = 224
LIB.resize_network(net, 224, 224)
build_dtype = {}
verify_darknet_frontend(net, build_dtype)
LIB.free_network(net)
def test_forward_upsample():
'''test upsample layer'''
net = LIB.make_network(1)
layer = LIB.make_upsample_layer(1, 19, 19, 3, 3)
layer.scale = 1
net.layers[0] = layer
net.w = net.h = 19
LIB.resize_network(net, 19, 19)
verify_darknet_frontend(net)
LIB.free_network(net)
def test_forward_l2normalize():
'''test l2 normalization layer'''
net = LIB.make_network(1)
layer = LIB.make_l2norm_layer(1, 224*224*3)
layer.c = layer.out_c = 3
layer.h = layer.out_h = 224
layer.w = layer.out_w = 224
net.layers[0] = layer
net.w = net.h = 224
LIB.resize_network(net, 224, 224)
verify_darknet_frontend(net)
LIB.free_network(net)
def test_forward_elu():
'''test elu activation layer'''
net = LIB.make_network(1)
layer_1 = LIB.make_convolutional_layer(1, 224, 224, 3, 32, 1, 3, 2, 0, 1, 0, 0, 0, 0)
layer_1.activation = 8
net.layers[0] = layer_1
net.w = net.h = 224
LIB.resize_network(net, 224, 224)
verify_darknet_frontend(net)
LIB.free_network(net)
def test_forward_softmax():
'''test softmax layer'''
net = LIB.make_network(1)
layer_1 = LIB.make_softmax_layer(1, 75, 1)
layer_1.temperature = 1
net.layers[0] = layer_1
net.w = net.h = 5
LIB.resize_network(net, net.w, net.h)
verify_darknet_frontend(net)
LIB.free_network(net)
def test_forward_softmax_temperature():
'''test softmax layer'''
net = LIB.make_network(1)
layer_1 = LIB.make_softmax_layer(1, 75, 1)
layer_1.temperature = 0.8
net.layers[0] = layer_1
net.w = net.h = 5
LIB.resize_network(net, net.w, net.h)
verify_darknet_frontend(net)
LIB.free_network(net)
def test_forward_rnn():
'''test RNN layer'''
net = LIB.make_network(1)
batch = 1
inputs = 256
outputs = 256
steps = 1
activation = 1
batch_normalize = 0
adam = 0
layer_1 = LIB.make_rnn_layer(batch, inputs, outputs, steps, activation, batch_normalize, adam)
net.layers[0] = layer_1
net.inputs = inputs
net.outputs = outputs
net.w = net.h = 0
LIB.resize_network(net, net.w, net.h)
verify_rnn_forward(net)
LIB.free_network(net)
def _test_forward_crnn():
'''test CRNN layer'''
net = LIB.make_network(1)
batch = 1
c = 3
h = 224
w = 224
hidden_filters = c
output_filters = c
steps = 1
activation = 0
batch_normalize = 0
inputs = 256
outputs = 256
layer_1 = LIB.make_crnn_layer(batch, h, w, c, hidden_filters, output_filters,
steps, activation, batch_normalize)
net.layers[0] = layer_1
net.inputs = inputs
net.outputs = output_filters * h * w
net.w = w
net.h = h
LIB.resize_network(net, net.w, net.h)
verify_darknet_frontend(net)
LIB.free_network(net)
def test_forward_lstm():
'''test LSTM layer'''
net = LIB.make_network(1)
batch = 1
inputs = 256
outputs = 256
steps = 1
batch_normalize = 0
adam = 0
layer_1 = LIB.make_lstm_layer(batch, inputs, outputs, steps, batch_normalize, adam)
net.layers[0] = layer_1
net.inputs = inputs
net.outputs = outputs
net.w = net.h = 0
LIB.resize_network(net, net.w, net.h)
verify_rnn_forward(net)
LIB.free_network(net)
def test_forward_gru():
'''test GRU layer'''
net = LIB.make_network(1)
batch = 1
inputs = 256
outputs = 256
steps = 1
batch_normalize = 0
adam = 0
layer_1 = LIB.make_gru_layer(batch, inputs, outputs, steps, batch_normalize, adam)
net.layers[0] = layer_1
net.inputs = inputs
net.outputs = outputs
net.w = net.h = 0
LIB.resize_network(net, net.w, net.h)
verify_rnn_forward(net)
LIB.free_network(net)
def test_forward_activation_logistic():
'''test logistic activation layer'''
net = LIB.make_network(1)
batch = 1
h = 224
w = 224
c = 3
n = 32
groups = 1
size = 3
stride = 2
padding = 0
activation = 0
batch_normalize = 0
binary = 0
xnor = 0
adam = 0
layer_1 = LIB.make_convolutional_layer(batch, h, w, c, n, groups, size, stride, padding,
activation, batch_normalize, binary, xnor, adam)
net.layers[0] = layer_1
net.w = w
net.h = h
LIB.resize_network(net, net.w, net.h)
verify_darknet_frontend(net)
LIB.free_network(net)
if __name__ == '__main__':
test_forward_resnet50()
test_forward_alexnet()
test_forward_extraction()
test_forward_yolov2()
test_forward_yolov3()
test_forward_convolutional()
test_forward_maxpooling()
test_forward_avgpooling()
test_forward_batch_norm()
test_forward_shortcut()
test_forward_dense()
test_forward_dense_batchnorm()
test_forward_softmax()
test_forward_softmax_temperature()
test_forward_rnn()
test_forward_reorg()
test_forward_region()
test_forward_yolo_op()
test_forward_upsample()
test_forward_l2normalize()
test_forward_elu()
test_forward_rnn()
# FIXME: Skip CRNN test since it causes segfault in libdarknet2.0.so
# _test_forward_crnn()
test_forward_lstm()
test_forward_gru()
test_forward_activation_logistic()
|
Huyuwei/tvm
|
nnvm/tests/python/frontend/darknet/test_forward.py
|
Python
|
apache-2.0
| 18,555 | 0.00388 |
# -*- coding: utf-8 -*-
"""
Layer.py - base layer for gabbs maps
======================================================================
AUTHOR: Wei Wan, Purdue University
EMAIL: rcac-help@purdue.edu
Copyright (c) 2016 Purdue University
See the file "license.terms" for information on usage and
redistribution of this file, and for a DISCLAIMER OF ALL WARRANTIES.
======================================================================
"""
from os.path import isfile
from PyQt4.QtGui import QAction, QIcon
from qgis.gui import *
from gabbs.layers.LayerProperty import *
from gabbs.MapUtils import iface, debug_trace
import math
class Layer(object):
"""Base class for layers"""
layerName = None
"""Layer type name in menu"""
layerIcon = None
"""Group icon in menu"""
layerTypeName = None
"""Layer type identificator used to store in project"""
layerTypeId = None
"""Numerical ID used in versions < 2.3"""
layerId = None
"""Store 2 qgis objects"""
layer = None
layerAction = None
layerAttribution = None
def __init__(self):
object.__init__(self)
def getLayer(self):
return self.layer
def getLayerId(self):
return self.layerId
def setAddLayerCallback(self, addLayerCallback):
"""Set post processing in add layer method in canvas class
"""
self.addLayerCallback = addLayerCallback
def loadStyleFile(self, symPath):
if isfile(symPath):
res = self.layer.loadNamedStyle(symPath)
if res[1]:
return True
else:
return False
else:
return False
def getScale(self, zoomlevel):
dpi = iface.mainWindow.physicalDpiX()
inchesPerMeter = 39.37
maxScalePerPixel = 156543.04
try:
zoomlevel = int(zoomlevel)
scale = (dpi * inchesPerMeter * maxScalePerPixel) / (math.pow(2, zoomlevel))
scale = int(scale)
return scale
except TypeError:
raise
#pass
except Exception as e:
raise e
|
waneric/PyMapLib
|
src/gabbs/layers/Layer.py
|
Python
|
mit
| 2,213 | 0.001808 |
cost, zeros = map(int, input().split())
print(int(round(cost, -zeros)))
|
JonSteinn/Kattis-Solutions
|
src/Slatkisi/Python 3/main.py
|
Python
|
gpl-3.0
| 71 | 0.014085 |
# Copyright 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from networking_vsphere._i18n import _
from neutron.agent.common import config
DEFAULT_BRIDGE_MAPPINGS = []
DEFAULT_UPLINK_MAPPINGS = []
DEFAULT_VLAN_RANGES = []
DEFAULT_TUNNEL_RANGES = []
DEFAULT_TUNNEL_TYPES = []
agent_opts = [
cfg.IntOpt('polling_interval', default=2,
help=_("The number of seconds the agent will wait between "
"polling for local device changes.")),
cfg.IntOpt('quitting_rpc_timeout', default=10,
help=_("Set new timeout in seconds for new rpc calls after "
"agent receives SIGTERM. If value is set to 0, rpc "
"timeout won't be changed")),
cfg.BoolOpt('log_agent_heartbeats', default=False,
help=_("Log agent heartbeats")),
cfg.IntOpt('report_interval',
default=30,
help='Seconds between nodes reporting state to server.'),
]
vmware_opts = [
cfg.FloatOpt(
'task_poll_interval',
default=2,
help=_('The interval of task polling in seconds.')),
cfg.IntOpt(
'api_retry_count',
default=10,
help=_('number of times an API must be retried upon '
'session/connection related errors')),
cfg.IntOpt(
'connections_pool_size',
default=100,
help=_('number of vsphere connections pool '
'must be higher for intensive operations')),
cfg.StrOpt('vsphere_login', default='administrator',
help=_("Vsphere login.")),
cfg.ListOpt('network_maps',
default=DEFAULT_BRIDGE_MAPPINGS,
help=_("List of <physical_network>:<bridge>.")),
cfg.ListOpt('uplink_maps',
default=DEFAULT_UPLINK_MAPPINGS,
help=_("List of <physical_network>:<active uplinks>:"
"<failover uplinks>."
"Use semicolon between uplink names")),
cfg.StrOpt('vsphere_hostname', default='vsphere',
help=_("Vsphere host name or IP.")),
cfg.StrOpt('vsphere_password', default='',
help=_("Vsphere password.")),
]
dvs_opts = [
cfg.BoolOpt('clean_on_restart',
default=True,
help=_("Run DVS cleaning procedure on agent restart.")),
cfg.BoolOpt('precreate_networks',
default=False,
help=_("Precreate networks on DVS")),
]
cfg.CONF.register_opts(dvs_opts, "DVS")
cfg.CONF.register_opts(agent_opts, "DVS_AGENT")
cfg.CONF.register_opts(vmware_opts, "ML2_VMWARE")
config.register_agent_state_opts_helper(cfg.CONF)
CONF = cfg.CONF
|
VTabolin/networking-vsphere
|
networking_vsphere/common/vmware_conf.py
|
Python
|
apache-2.0
| 3,247 | 0.002772 |
#!/usr/bin/env python
#
# Copyright (C) 2012 Jay Sigbrandt <jsigbrandt@slb.com>
# Martin Owens <doctormo@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
#
"""
Test crontab usage.
"""
import os
import sys
import unittest
import crontab
from datetime import date, time, datetime, timedelta
try:
from test import test_support
except ImportError:
from test import support as test_support
crontab.LOG.setLevel(crontab.logging.ERROR)
TEST_DIR = os.path.dirname(__file__)
class DummyStdout(object):
def write(self, text):
pass
BASIC = '@hourly firstcommand\n\n'
USER = '\n*/4 * * * * user_command # user_comment\n\n\n'
crontab.CRONCMD = "%s %s" % (sys.executable, os.path.join(TEST_DIR, 'data', 'crontest'))
def flush():
pass
class Attribute(object):
def __init__(self, obj, attr, value):
self.obj = obj
self.attr = attr
self.value = value
def __enter__(self, *args, **kw):
if hasattr(self.obj, self.attr):
self.previous = getattr(self.obj, self.attr)
setattr(self.obj, self.attr, self.value)
def __exit__(self, *args, **kw):
if hasattr(self, 'previous'):
setattr(self.obj, self.attr, self.previous)
else:
delattr(self.obj, self.attr)
class UseTestCase(unittest.TestCase):
"""Test use documentation in crontab."""
def setUp(self):
self.filenames = []
def test_01_empty(self):
"""Open system crontab"""
cron = crontab.CronTab()
self.assertEqual(cron.render(), "")
self.assertEqual(cron.__unicode__(), "")
self.assertEqual(repr(cron), "<Unattached CronTab>")
def test_02_user(self):
"""Open a user's crontab"""
cron = crontab.CronTab(user='basic')
self.assertEqual(cron.render(), BASIC)
self.assertEqual(repr(cron), "<User CronTab 'basic'>")
def test_03_usage(self):
"""Dont modify crontab"""
cron = crontab.CronTab(tab='')
sys.stdout = DummyStdout()
sys.stdout.flush = flush
try:
exec(crontab.__doc__)
except ImportError:
pass
sys.stdout = sys.__stdout__
self.assertEqual(cron.render(), '')
def test_04_username(self):
"""Username is True"""
cron = crontab.CronTab(user=True)
self.assertNotEqual(cron.user, True)
self.assertEqual(cron.render(), USER)
self.assertEqual(repr(cron), "<My CronTab>")
def test_05_nouser(self):
"""Username doesn't exist"""
cron = crontab.CronTab(user='nouser')
self.assertEqual(cron.render(), '')
def test_06_touser(self):
"""Write to use API"""
cron = crontab.CronTab(tab=USER)
self.assertEqual(repr(cron), "<Unattached CronTab>")
cron.write_to_user('bob')
filename = os.path.join(TEST_DIR, 'data', 'spool', 'bob')
self.filenames.append(filename)
self.assertTrue(os.path.exists(filename))
self.assertEqual(repr(cron), "<User CronTab 'bob'>")
def test_07_ioerror_read(self):
"""No filename ioerror"""
with self.assertRaises(IOError):
cron = crontab.CronTab(user='error')
cron.read()
def test_07_ioerror_write(self):
"""User not specified, nowhere to write to"""
cron = crontab.CronTab()
with self.assertRaises(IOError):
cron.write()
def test_08_cronitem(self):
"""CronItem Standalone"""
item = crontab.CronItem(line='noline')
self.assertTrue(item.is_enabled())
with self.assertRaises(UnboundLocalError):
item.delete()
item.command = str('nothing')
self.assertEqual(item.render(), '* * * * * nothing')
def test_10_time_object(self):
"""Set slices using time object"""
item = crontab.CronItem(command='cmd')
self.assertEqual(str(item.slices), '* * * * *')
item.setall(time(1, 2))
self.assertEqual(str(item.slices), '2 1 * * *')
self.assertTrue(item.is_valid())
item.setall(time(0, 30, 0, 0))
self.assertEqual(str(item.slices), '30 0 * * *')
self.assertTrue(item.is_valid())
self.assertEqual(str(item), '30 0 * * * cmd')
def test_11_date_object(self):
"""Set slices using date object"""
item = crontab.CronItem(command='cmd')
self.assertEqual(str(item.slices), '* * * * *')
item.setall(date(2010, 6, 7))
self.assertEqual(str(item.slices), '0 0 7 6 *')
self.assertTrue(item.is_valid())
def test_12_datetime_object(self):
"""Set slices using datetime object"""
item = crontab.CronItem(command='cmd')
self.assertEqual(str(item.slices), '* * * * *')
item.setall(datetime(2009, 8, 9, 3, 4))
self.assertTrue(item.is_valid())
self.assertEqual(str(item.slices), '4 3 9 8 *')
def test_20_slice_validation(self):
"""CronSlices class and objects can validate"""
CronSlices = crontab.CronSlices
self.assertTrue(CronSlices('* * * * *').is_valid())
self.assertTrue(CronSlices.is_valid('* * * * *'))
self.assertTrue(CronSlices.is_valid('*/2 * * * *'))
self.assertTrue(CronSlices.is_valid('* 1,2 * * *'))
self.assertTrue(CronSlices.is_valid('* * 1-5 * *'))
self.assertTrue(CronSlices.is_valid('* * * * MON-WED'))
self.assertTrue(CronSlices.is_valid('@reboot'))
sliced = CronSlices('* * * * *')
sliced[0].parts = [300]
self.assertEqual(str(sliced), '300 * * * *')
self.assertFalse(sliced.is_valid())
self.assertFalse(CronSlices.is_valid('P'))
self.assertFalse(CronSlices.is_valid('*/61 * * * *'))
self.assertFalse(CronSlices.is_valid('* 1,300 * * *'))
self.assertFalse(CronSlices.is_valid('* * 50-1 * *'))
self.assertFalse(CronSlices.is_valid('* * * * FRO-TOO'))
self.assertFalse(CronSlices.is_valid('@retool'))
def test_25_open_pipe(self):
"""Test opening pipes"""
from crontab import open_pipe, CRONCMD
pipe = open_pipe(CRONCMD, h=None, a='one', abc='two')
(out, err) = pipe.communicate()
self.assertEqual(err, b'')
self.assertEqual(out, b'--abc=two|-a|-h|one\n')
def test_07_zero_padding(self):
"""Can we get zero padded output"""
cron = crontab.CronTab(tab="02 3-5 2,4 */2 01 cmd")
self.assertEqual(str(cron), '2 3-5 2,4 */2 1 cmd\n')
with Attribute(crontab, 'ZERO_PAD', True):
self.assertEqual(str(cron), '02 03-05 02,04 */2 01 cmd\n')
def tearDown(self):
for filename in self.filenames:
if os.path.exists(filename):
os.unlink(filename)
if __name__ == '__main__':
test_support.run_unittest(
UseTestCase,
)
|
doctormo/python-crontab
|
tests/test_usage.py
|
Python
|
lgpl-3.0
| 7,441 | 0.000672 |
from .. import BaseForm
from wtforms import StringField, TextAreaField
from wtforms.validators import DataRequired
class CategoryForm(BaseForm):
name = StringField('name',
validators=[
DataRequired()
])
description = TextAreaField('description',
validators=[
DataRequired()
])
|
friendly-of-python/flask-online-store
|
flask_online_store/forms/admin/category.py
|
Python
|
mit
| 461 | 0 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains the Property List (Plist) Parser.
Plaso's engine calls PlistParser when it encounters Plist files to be processed.
"""
import binascii
import logging
from binplist import binplist
from plaso.lib import errors
from plaso.lib import utils
from plaso.parsers import interface
from plaso.parsers import manager
class PlistParser(interface.BasePluginsParser):
"""De-serializes and parses plists the event objects are generated by plist.
The Plaso engine calls parsers by their Parse() method. This parser's
Parse() has GetTopLevel() which deserializes plist files using the binplist
library and calls plugins (PlistPlugin) registered through the
interface by their Process() to produce event objects.
Plugins are how this parser understands the content inside a plist file,
each plugin holds logic specific to a particular plist file. See the
interface and plist_plugins/ directory for examples of how plist plugins are
implemented.
"""
NAME = 'plist'
DESCRIPTION = u'Parser for binary and text plist files.'
_plugin_classes = {}
def __init__(self):
"""Initializes a parser object."""
super(PlistParser, self).__init__()
self._plugins = PlistParser.GetPluginObjects()
def GetTopLevel(self, file_object, file_name=''):
"""Returns the deserialized content of a plist as a dictionary object.
Args:
file_object: A file-like object to parse.
file_name: The name of the file-like object.
Returns:
A dictionary object representing the contents of the plist.
"""
try:
top_level_object = binplist.readPlist(file_object)
except binplist.FormatError as exception:
raise errors.UnableToParseFile(
u'[{0:s}] File is not a plist file: {1:s}'.format(
self.NAME, utils.GetUnicodeString(exception)))
except (
LookupError, binascii.Error, ValueError, AttributeError) as exception:
raise errors.UnableToParseFile(
u'[{0:s}] Unable to parse XML file, reason: {1:s}'.format(
self.NAME, exception))
except OverflowError as exception:
raise errors.UnableToParseFile(
u'[{0:s}] Unable to parse: {1:s} with error: {2:s}'.format(
self.NAME, file_name, exception))
if not top_level_object:
raise errors.UnableToParseFile(
u'[{0:s}] File is not a plist: {1:s}'.format(
self.NAME, utils.GetUnicodeString(file_name)))
# Since we are using readPlist from binplist now instead of manually
# opening up the BinarPlist file we loose this option. Keep it commented
# out for now but this needs to be tested a bit more.
# TODO: Re-evaluate if we can delete this or still require it.
#if bpl.is_corrupt:
# logging.warning(
# u'[{0:s}] corruption detected in binary plist: {1:s}'.format(
# self.NAME, file_name))
return top_level_object
def Parse(self, parser_context, file_entry):
"""Parse and extract values from a plist file.
Args:
parser_context: A parser context object (instance of ParserContext).
file_entry: A file entry object (instance of dfvfs.FileEntry).
"""
# TODO: Should we rather query the stats object to get the size here?
file_object = file_entry.GetFileObject()
file_size = file_object.get_size()
if file_size <= 0:
file_object.close()
raise errors.UnableToParseFile(
u'[{0:s}] file size: {1:d} bytes is less equal 0.'.format(
self.NAME, file_size))
# 50MB is 10x larger than any plist seen to date.
if file_size > 50000000:
file_object.close()
raise errors.UnableToParseFile(
u'[{0:s}] file size: {1:d} bytes is larger than 50 MB.'.format(
self.NAME, file_size))
top_level_object = None
try:
top_level_object = self.GetTopLevel(file_object, file_entry.name)
except errors.UnableToParseFile:
file_object.close()
raise
if not top_level_object:
file_object.close()
raise errors.UnableToParseFile(
u'[{0:s}] unable to parse: {1:s} skipping.'.format(
self.NAME, file_entry.name))
file_system = file_entry.GetFileSystem()
plist_name = file_system.BasenamePath(file_entry.name)
for plugin_object in self._plugins:
try:
plugin_object.Process(
parser_context, plist_name=plist_name, top_level=top_level_object)
except errors.WrongPlistPlugin as exception:
logging.debug(u'[{0:s}] Wrong plugin: {1:s} for: {2:s}'.format(
self.NAME, exception[0], exception[1]))
file_object.close()
manager.ParsersManager.RegisterParser(PlistParser)
|
cvandeplas/plaso
|
plaso/parsers/plist.py
|
Python
|
apache-2.0
| 5,390 | 0.004824 |
# -*- coding: utf-8 -*-
# @author: vuolter
from __future__ import absolute_import, unicode_literals
import os
import re
import sys
from future import standard_library
standard_library.install_aliases()
def char(text, chars, repl=''):
return re.sub(r'[{0}]+'.format(chars), repl, text)
_UNIXBADCHARS = ('\0', '/', '\\')
_MACBADCHARS = _UNIXBADCHARS + (':',)
_WINBADCHARS = _MACBADCHARS + ('<', '>', '"', '|', '?', '*')
_WINBADWORDS = (
'com1', 'com2', 'com3', 'com4', 'com5', 'com6', 'com7', 'com8', 'com9',
'lpt1', 'lpt2', 'lpt3', 'lpt4', 'lpt5', 'lpt6', 'lpt7', 'lpt8', 'lpt9',
'con', 'prn')
def name(text, sep='_', allow_whitespaces=False):
"""Remove invalid characters."""
if os.name == 'nt':
bc = _WINBADCHARS
elif sys.platform == 'darwin':
bc = _MACBADCHARS
else:
bc = _UNIXBADCHARS
repl = r''.join(bc)
if not allow_whitespaces:
repl += ' '
res = char(text, repl, sep).strip()
if os.name == 'nt' and res.lower() in _WINBADWORDS:
res = sep + res
return res
def pattern(text, rules):
for rule in rules:
try:
pattr, repl, flags = rule
except ValueError:
pattr, repl = rule
flags = 0
text = re.sub(pattr, repl, text, flags)
return text
def truncate(text, offset):
maxtrunc = len(text) // 2
if offset > maxtrunc:
raise ValueError('String too short to truncate')
trunc = (len(text) - offset) // 3
return '{0}~{1}'.format(text[:trunc * 2], text[-trunc:])
def uniquify(seq):
"""Remove duplicates from list preserving order."""
seen = set()
seen_add = seen.add
return type(seq)(x for x in seq if x not in seen and not seen_add(x))
|
pyblub/pyload
|
pyload/utils/purge.py
|
Python
|
agpl-3.0
| 1,743 | 0 |
from .killableprocess import Popen, mswindows
if mswindows:
from .winprocess import STARTUPINFO, STARTF_USESHOWWINDOW
|
eukaryote/dotfiles
|
sublime3/.config/sublime-text-3/Packages/SublimeREPL/repls/killableprocess/__init__.py
|
Python
|
mit
| 118 | 0.016949 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
import os
import argparse
import tensorflow as tf
from gym import wrappers
from yarll.environment.registration import make
class ModelRunner(object):
"""
Run an already learned model.
Currently only supports one variation of an environment.
"""
def __init__(self, env, model_directory: str, save_directory: str, **usercfg) -> None:
super(ModelRunner, self).__init__()
self.env = env
self.model_directory = model_directory
self.save_directory = save_directory
self.config = dict(
episode_max_length=self.env.spec.tags.get('wrapper_config.TimeLimit.max_episode_steps'),
repeat_n_actions=1
)
self.config.update(usercfg)
self.session = tf.Session()
self.saver = tf.train.import_meta_graph(os.path.join(self.model_directory, "model.meta"))
self.saver.restore(self.session, os.path.join(self.model_directory, "model"))
self.action = tf.get_collection("action")[0]
self.states = tf.get_collection("states")[0]
def choose_action(self, state):
"""Choose an action."""
return self.session.run([self.action], feed_dict={self.states: [state]})[0]
def get_trajectory(self, render: bool = False):
"""
Run agent-environment loop for one whole episode (trajectory)
Return dictionary of results
"""
state = self.env.reset()
for _ in range(self.config["episode_max_length"]):
action = self.choose_action(state)
for _ in range(self.config["repeat_n_actions"]):
_, _, done, _ = self.env.step(action)
if done: # Don't continue if episode has already ended
break
if done:
break
if render:
self.env.render()
return
def run(self):
for _ in range(self.config["n_iter"]):
self.get_trajectory()
parser = argparse.ArgumentParser()
parser.add_argument("environment", metavar="env", type=str, help="Gym environment to execute the model on.")
parser.add_argument("model_directory", type=str, help="Directory from where model files are loaded.")
parser.add_argument("save_directory", type=str, help="Directory where results of running the model are saved")
parser.add_argument("--iterations", default=100, type=int, help="Number of iterations to run the algorithm.")
def main():
args = parser.parse_args()
env = make(args.environment)
runner = ModelRunner(env, args.model_directory, args.save_directory, n_iter=args.iterations)
try:
runner.env = wrappers.Monitor(runner.env, args.save_directory, video_callable=False, force=True)
runner.run()
except KeyboardInterrupt:
pass
if __name__ == "__main__":
main()
|
arnomoonens/DeepRL
|
yarll/scripts/run_model.py
|
Python
|
mit
| 2,858 | 0.004899 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the LAPAX linear algebra module."""
from functools import partial
import unittest
import numpy as np
import scipy
import scipy as osp
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import jit, grad, jvp, vmap
from jax import lax
from jax import numpy as jnp
from jax import scipy as jsp
from jax._src import test_util as jtu
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
T = lambda x: np.swapaxes(x, -1, -2)
float_types = jtu.dtypes.floating
complex_types = jtu.dtypes.complex
class NumpyLinalgTest(jtu.JaxTestCase):
def testNotImplemented(self):
for name in jnp.linalg._NOT_IMPLEMENTED:
func = getattr(jnp.linalg, name)
with self.assertRaises(NotImplementedError):
func()
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(1, 1), (4, 4), (2, 5, 5), (200, 200), (1000, 0, 0)]
for dtype in float_types + complex_types))
def testCholesky(self, shape, dtype):
rng = jtu.rand_default(self.rng())
def args_maker():
factor_shape = shape[:-1] + (2 * shape[-1],)
a = rng(factor_shape, dtype)
return [np.matmul(a, jnp.conj(T(a)))]
self._CheckAgainstNumpy(np.linalg.cholesky, jnp.linalg.cholesky, args_maker,
tol=1e-3)
self._CompileAndCheck(jnp.linalg.cholesky, args_maker)
if jnp.finfo(dtype).bits == 64:
jtu.check_grads(jnp.linalg.cholesky, args_maker(), order=2)
def testCholeskyGradPrecision(self):
rng = jtu.rand_default(self.rng())
a = rng((3, 3), np.float32)
a = np.dot(a, a.T)
jtu.assert_dot_precision(
lax.Precision.HIGHEST, partial(jvp, jnp.linalg.cholesky), (a,), (a,))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_n={}".format(jtu.format_shape_dtype_string((n,n), dtype)),
"n": n, "dtype": dtype}
for n in [0, 2, 3, 4, 5, 25] # TODO(mattjj): complex64 unstable on large sizes?
for dtype in float_types + complex_types))
def testDet(self, n, dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng((n, n), dtype)]
self._CheckAgainstNumpy(np.linalg.det, jnp.linalg.det, args_maker, tol=1e-3)
self._CompileAndCheck(jnp.linalg.det, args_maker,
rtol={np.float64: 1e-13, np.complex128: 1e-13})
def testDetOfSingularMatrix(self):
x = jnp.array([[-1., 3./2], [2./3, -1.]], dtype=np.float32)
self.assertAllClose(np.float32(0), jsp.linalg.det(x))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(1, 1), (3, 3), (2, 4, 4)]
for dtype in float_types))
@jtu.skip_on_devices("tpu")
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def testDetGrad(self, shape, dtype):
rng = jtu.rand_default(self.rng())
a = rng(shape, dtype)
jtu.check_grads(jnp.linalg.det, (a,), 2, atol=1e-1, rtol=1e-1)
# make sure there are no NaNs when a matrix is zero
if len(shape) == 2:
pass
jtu.check_grads(
jnp.linalg.det, (jnp.zeros_like(a),), 1, atol=1e-1, rtol=1e-1)
else:
a[0] = 0
jtu.check_grads(jnp.linalg.det, (a,), 1, atol=1e-1, rtol=1e-1)
def testDetGradIssue6121(self):
f = lambda x: jnp.linalg.det(x).sum()
x = jnp.ones((16, 1, 1))
jax.grad(f)(x)
jtu.check_grads(f, (x,), 2, atol=1e-1, rtol=1e-1)
def testDetGradOfSingularMatrixCorank1(self):
# Rank 2 matrix with nonzero gradient
a = jnp.array([[ 50, -30, 45],
[-30, 90, -81],
[ 45, -81, 81]], dtype=jnp.float32)
jtu.check_grads(jnp.linalg.det, (a,), 1, atol=1e-1, rtol=1e-1)
def testDetGradOfSingularMatrixCorank2(self):
# Rank 1 matrix with zero gradient
b = jnp.array([[ 36, -42, 18],
[-42, 49, -21],
[ 18, -21, 9]], dtype=jnp.float32)
jtu.check_grads(jnp.linalg.det, (b,), 1, atol=1e-1, rtol=1e-1, eps=1e-1)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_m={}_n={}_q={}".format(
jtu.format_shape_dtype_string((m,), dtype),
jtu.format_shape_dtype_string((nq[0],), dtype),
jtu.format_shape_dtype_string(nq[1], dtype)),
"m": m, "nq": nq, "dtype": dtype}
for m in [1, 5, 7, 23]
for nq in zip([2, 4, 6, 36], [(1, 2), (2, 2), (1, 2, 3), (3, 3, 1, 4)])
for dtype in float_types))
def testTensorsolve(self, m, nq, dtype):
rng = jtu.rand_default(self.rng())
# According to numpy docs the shapes are as follows:
# Coefficient tensor (a), of shape b.shape + Q.
# And prod(Q) == prod(b.shape)
# Therefore, n = prod(q)
n, q = nq
b_shape = (n, m)
# To accomplish prod(Q) == prod(b.shape) we append the m extra dim
# to Q shape
Q = q + (m,)
args_maker = lambda: [
rng(b_shape + Q, dtype), # = a
rng(b_shape, dtype)] # = b
a, b = args_maker()
result = jnp.linalg.tensorsolve(*args_maker())
self.assertEqual(result.shape, Q)
self._CheckAgainstNumpy(np.linalg.tensorsolve,
jnp.linalg.tensorsolve, args_maker,
tol={np.float32: 1e-2, np.float64: 1e-3})
self._CompileAndCheck(jnp.linalg.tensorsolve,
args_maker,
rtol={np.float64: 1e-13})
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(0, 0), (1, 1), (3, 3), (4, 4), (10, 10), (200, 200),
(2, 2, 2), (2, 3, 3), (3, 2, 2)]
for dtype in float_types + complex_types))
def testSlogdet(self, shape, dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np.linalg.slogdet, jnp.linalg.slogdet, args_maker,
tol=1e-3)
self._CompileAndCheck(jnp.linalg.slogdet, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(1, 1), (4, 4), (5, 5), (2, 7, 7)]
for dtype in float_types + complex_types))
@jtu.skip_on_devices("tpu")
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def testSlogdetGrad(self, shape, dtype):
rng = jtu.rand_default(self.rng())
a = rng(shape, dtype)
jtu.check_grads(jnp.linalg.slogdet, (a,), 2, atol=1e-1, rtol=2e-1)
def testIssue1213(self):
for n in range(5):
mat = jnp.array([np.diag(np.ones([5], dtype=np.float32))*(-.01)] * 2)
args_maker = lambda: [mat]
self._CheckAgainstNumpy(np.linalg.slogdet, jnp.linalg.slogdet, args_maker,
tol=1e-3)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_leftvectors={}_rightvectors={}".format(
jtu.format_shape_dtype_string(shape, dtype),
compute_left_eigenvectors, compute_right_eigenvectors),
"shape": shape, "dtype": dtype,
"compute_left_eigenvectors": compute_left_eigenvectors,
"compute_right_eigenvectors": compute_right_eigenvectors}
for shape in [(0, 0), (4, 4), (5, 5), (50, 50), (2, 6, 6)]
for dtype in float_types + complex_types
for compute_left_eigenvectors, compute_right_eigenvectors in [
(False, False),
(True, False),
(False, True),
(True, True)
]))
# TODO(phawkins): enable when there is an eigendecomposition implementation
# for GPU/TPU.
@jtu.skip_on_devices("gpu", "tpu")
def testEig(self, shape, dtype, compute_left_eigenvectors,
compute_right_eigenvectors):
rng = jtu.rand_default(self.rng())
n = shape[-1]
args_maker = lambda: [rng(shape, dtype)]
# Norm, adjusted for dimension and type.
def norm(x):
norm = np.linalg.norm(x, axis=(-2, -1))
return norm / ((n + 1) * jnp.finfo(dtype).eps)
def check_right_eigenvectors(a, w, vr):
self.assertTrue(
np.all(norm(np.matmul(a, vr) - w[..., None, :] * vr) < 100))
def check_left_eigenvectors(a, w, vl):
rank = len(a.shape)
aH = jnp.conj(a.transpose(list(range(rank - 2)) + [rank - 1, rank - 2]))
wC = jnp.conj(w)
check_right_eigenvectors(aH, wC, vl)
a, = args_maker()
results = lax.linalg.eig(a, compute_left_eigenvectors,
compute_right_eigenvectors)
w = results[0]
if compute_left_eigenvectors:
check_left_eigenvectors(a, w, results[1])
if compute_right_eigenvectors:
check_right_eigenvectors(a, w, results[1 + compute_left_eigenvectors])
self._CompileAndCheck(partial(jnp.linalg.eig), args_maker,
rtol=1e-3)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(4, 4), (5, 5), (8, 8), (7, 6, 6)]
for dtype in float_types + complex_types))
# TODO(phawkins): enable when there is an eigendecomposition implementation
# for GPU/TPU.
@jtu.skip_on_devices("gpu", "tpu")
def testEigvalsGrad(self, shape, dtype):
# This test sometimes fails for large matrices. I (@j-towns) suspect, but
# haven't checked, that might be because of perturbations causing the
# ordering of eigenvalues to change, which will trip up check_grads. So we
# just test on small-ish matrices.
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
a, = args_maker()
tol = 1e-4 if dtype in (np.float64, np.complex128) else 1e-1
jtu.check_grads(lambda x: jnp.linalg.eigvals(x), (a,), order=1,
modes=['fwd', 'rev'], rtol=tol, atol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(4, 4), (5, 5), (50, 50)]
for dtype in float_types + complex_types))
# TODO: enable when there is an eigendecomposition implementation
# for GPU/TPU.
@jtu.skip_on_devices("gpu", "tpu")
def testEigvals(self, shape, dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
a, = args_maker()
w1, _ = jnp.linalg.eig(a)
w2 = jnp.linalg.eigvals(a)
self.assertAllClose(w1, w2, rtol={np.complex128: 1e-14})
@jtu.skip_on_devices("gpu", "tpu")
def testEigvalsInf(self):
# https://github.com/google/jax/issues/2661
x = jnp.array([[jnp.inf]])
self.assertTrue(jnp.all(jnp.isnan(jnp.linalg.eigvals(x))))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(1, 1), (4, 4), (5, 5)]
for dtype in float_types + complex_types))
@jtu.skip_on_devices("gpu", "tpu")
def testEigBatching(self, shape, dtype):
rng = jtu.rand_default(self.rng())
shape = (10,) + shape
args = rng(shape, dtype)
ws, vs = vmap(jnp.linalg.eig)(args)
self.assertTrue(np.all(np.linalg.norm(
np.matmul(args, vs) - ws[..., None, :] * vs) < 1e-3))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_n={}_lower={}".format(
jtu.format_shape_dtype_string((n,n), dtype), lower),
"n": n, "dtype": dtype, "lower": lower}
for n in [0, 4, 5, 50]
for dtype in float_types + complex_types
for lower in [True, False]))
def testEigh(self, n, dtype, lower):
rng = jtu.rand_default(self.rng())
tol = 1e-3
args_maker = lambda: [rng((n, n), dtype)]
uplo = "L" if lower else "U"
a, = args_maker()
a = (a + np.conj(a.T)) / 2
w, v = jnp.linalg.eigh(np.tril(a) if lower else np.triu(a),
UPLO=uplo, symmetrize_input=False)
self.assertLessEqual(
np.linalg.norm(np.eye(n) - np.matmul(np.conj(T(v)), v)), 1e-3)
with jax.numpy_rank_promotion('allow'):
self.assertLessEqual(np.linalg.norm(np.matmul(a, v) - w * v),
tol * np.linalg.norm(a))
self._CompileAndCheck(partial(jnp.linalg.eigh, UPLO=uplo), args_maker,
rtol=1e-3)
def testEighZeroDiagonal(self):
a = np.array([[0., -1., -1., 1.],
[-1., 0., 1., -1.],
[-1., 1., 0., -1.],
[1., -1., -1., 0.]], dtype=np.float32)
w, v = jnp.linalg.eigh(a)
with jax.numpy_rank_promotion('allow'):
self.assertLessEqual(np.linalg.norm(np.matmul(a, v) - w * v),
1e-3 * np.linalg.norm(a))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(4, 4), (5, 5), (50, 50)]
for dtype in float_types + complex_types))
def testEigvalsh(self, shape, dtype):
rng = jtu.rand_default(self.rng())
n = shape[-1]
def args_maker():
a = rng((n, n), dtype)
a = (a + np.conj(a.T)) / 2
return [a]
self._CheckAgainstNumpy(np.linalg.eigvalsh, jnp.linalg.eigvalsh, args_maker,
tol=1e-3)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}_lower={}".format(jtu.format_shape_dtype_string(shape, dtype),
lower),
"shape": shape, "dtype": dtype, "lower":lower}
for shape in [(1, 1), (4, 4), (5, 5), (50, 50), (2, 10, 10)]
for dtype in float_types + complex_types
for lower in [True, False]))
def testEighGrad(self, shape, dtype, lower):
rng = jtu.rand_default(self.rng())
self.skipTest("Test fails with numeric errors.")
uplo = "L" if lower else "U"
a = rng(shape, dtype)
a = (a + np.conj(T(a))) / 2
ones = np.ones((a.shape[-1], a.shape[-1]), dtype=dtype)
a *= np.tril(ones) if lower else np.triu(ones)
# Gradient checks will fail without symmetrization as the eigh jvp rule
# is only correct for tangents in the symmetric subspace, whereas the
# checker checks against unconstrained (co)tangents.
if dtype not in complex_types:
f = partial(jnp.linalg.eigh, UPLO=uplo, symmetrize_input=True)
else: # only check eigenvalue grads for complex matrices
f = lambda a: partial(jnp.linalg.eigh, UPLO=uplo, symmetrize_input=True)(a)[0]
jtu.check_grads(f, (a,), 2, rtol=1e-1)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}_lower={}".format(jtu.format_shape_dtype_string(shape, dtype),
lower),
"shape": shape, "dtype": dtype, "lower":lower, "eps":eps}
for shape in [(1, 1), (4, 4), (5, 5), (50, 50)]
for dtype in complex_types
for lower in [True, False]
for eps in [1e-4]))
def testEighGradVectorComplex(self, shape, dtype, lower, eps):
rng = jtu.rand_default(self.rng())
# Special case to test for complex eigenvector grad correctness.
# Exact eigenvector coordinate gradients are hard to test numerically for complex
# eigensystem solvers given the extra degrees of per-eigenvector phase freedom.
# Instead, we numerically verify the eigensystem properties on the perturbed
# eigenvectors. You only ever want to optimize eigenvector directions, not coordinates!
uplo = "L" if lower else "U"
a = rng(shape, dtype)
a = (a + np.conj(a.T)) / 2
a = np.tril(a) if lower else np.triu(a)
a_dot = eps * rng(shape, dtype)
a_dot = (a_dot + np.conj(a_dot.T)) / 2
a_dot = np.tril(a_dot) if lower else np.triu(a_dot)
# evaluate eigenvector gradient and groundtruth eigensystem for perturbed input matrix
f = partial(jnp.linalg.eigh, UPLO=uplo)
(w, v), (dw, dv) = jvp(f, primals=(a,), tangents=(a_dot,))
self.assertTrue(jnp.issubdtype(w.dtype, jnp.floating))
self.assertTrue(jnp.issubdtype(dw.dtype, jnp.floating))
new_a = a + a_dot
new_w, new_v = f(new_a)
new_a = (new_a + np.conj(new_a.T)) / 2
# Assert rtol eigenvalue delta between perturbed eigenvectors vs new true eigenvalues.
RTOL = 1e-2
with jax.numpy_rank_promotion('allow'):
assert np.max(
np.abs((np.diag(np.dot(np.conj((v+dv).T), np.dot(new_a,(v+dv)))) - new_w) / new_w)) < RTOL
# Redundant to above, but also assert rtol for eigenvector property with new true eigenvalues.
assert np.max(
np.linalg.norm(np.abs(new_w*(v+dv) - np.dot(new_a, (v+dv))), axis=0) /
np.linalg.norm(np.abs(new_w*(v+dv)), axis=0)
) < RTOL
def testEighGradPrecision(self):
rng = jtu.rand_default(self.rng())
a = rng((3, 3), np.float32)
jtu.assert_dot_precision(
lax.Precision.HIGHEST, partial(jvp, jnp.linalg.eigh), (a,), (a,))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(1, 1), (4, 4), (5, 5)]
for dtype in float_types + complex_types))
def testEighBatching(self, shape, dtype):
rng = jtu.rand_default(self.rng())
shape = (10,) + shape
args = rng(shape, dtype)
args = (args + np.conj(T(args))) / 2
ws, vs = vmap(jsp.linalg.eigh)(args)
self.assertTrue(np.all(np.linalg.norm(
np.matmul(args, vs) - ws[..., None, :] * vs) < 1e-3))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(1,), (4,), (5,)]
for dtype in (np.int32,)))
def testLuPivotsToPermutation(self, shape, dtype):
pivots_size = shape[-1]
permutation_size = 2 * pivots_size
pivots = jnp.arange(permutation_size - 1, pivots_size - 1, -1, dtype=dtype)
pivots = jnp.broadcast_to(pivots, shape)
actual = lax.linalg.lu_pivots_to_permutation(pivots, permutation_size)
expected = jnp.arange(permutation_size - 1, -1, -1, dtype=dtype)
expected = jnp.broadcast_to(expected, actual.shape)
self.assertArraysEqual(actual, expected)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(1,), (4,), (5,)]
for dtype in (np.int32,)))
def testLuPivotsToPermutationBatching(self, shape, dtype):
shape = (10,) + shape
pivots_size = shape[-1]
permutation_size = 2 * pivots_size
pivots = jnp.arange(permutation_size - 1, pivots_size - 1, -1, dtype=dtype)
pivots = jnp.broadcast_to(pivots, shape)
batched_fn = vmap(
lambda x: lax.linalg.lu_pivots_to_permutation(x, permutation_size))
actual = batched_fn(pivots)
expected = jnp.arange(permutation_size - 1, -1, -1, dtype=dtype)
expected = jnp.broadcast_to(expected, actual.shape)
self.assertArraysEqual(actual, expected)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_ord={}_axis={}_keepdims={}".format(
jtu.format_shape_dtype_string(shape, dtype), ord, axis, keepdims),
"shape": shape, "dtype": dtype, "axis": axis, "keepdims": keepdims,
"ord": ord}
for axis, shape in [
(None, (1,)), (None, (7,)), (None, (5, 8)),
(0, (9,)), (0, (4, 5)), ((1,), (10, 7, 3)), ((-2,), (4, 8)),
(-1, (6, 3)), ((0, 2), (3, 4, 5)), ((2, 0), (7, 8, 9)),
(None, (7, 8, 11))]
for keepdims in [False, True]
for ord in (
[None] if axis is None and len(shape) > 2
else [None, 0, 1, 2, 3, -1, -2, -3, jnp.inf, -jnp.inf]
if (axis is None and len(shape) == 1) or
isinstance(axis, int) or
(isinstance(axis, tuple) and len(axis) == 1)
else [None, 'fro', 1, 2, -1, -2, jnp.inf, -jnp.inf, 'nuc'])
for dtype in float_types + complex_types)) # type: ignore
def testNorm(self, shape, dtype, ord, axis, keepdims):
rng = jtu.rand_default(self.rng())
if (ord in ('nuc', 2, -2) and (
jtu.device_under_test() != "cpu" or
(isinstance(axis, tuple) and len(axis) == 2))):
raise unittest.SkipTest("No adequate SVD implementation available")
args_maker = lambda: [rng(shape, dtype)]
np_fn = partial(np.linalg.norm, ord=ord, axis=axis, keepdims=keepdims)
jnp_fn = partial(jnp.linalg.norm, ord=ord, axis=axis, keepdims=keepdims)
self._CheckAgainstNumpy(np_fn, jnp_fn, args_maker, check_dtypes=False,
tol=1e-3)
self._CompileAndCheck(jnp_fn, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_n={}_full_matrices={}_compute_uv={}_hermitian={}".format(
jtu.format_shape_dtype_string(b + (m, n), dtype), full_matrices,
compute_uv, hermitian),
"b": b, "m": m, "n": n, "dtype": dtype, "full_matrices": full_matrices,
"compute_uv": compute_uv, "hermitian": hermitian}
for b in [(), (3,), (2, 3)]
for m in [0, 2, 7, 29, 53]
for n in [0, 2, 7, 29, 53]
for dtype in float_types + complex_types
for full_matrices in [False, True]
for compute_uv in [False, True]
for hermitian in ([False, True] if m == n else [False])))
@jtu.skip_on_devices("rocm") # will be fixed in ROCm-5.1
def testSVD(self, b, m, n, dtype, full_matrices, compute_uv, hermitian):
if (jnp.issubdtype(dtype, np.complexfloating) and
jtu.device_under_test() == "tpu"):
raise unittest.SkipTest("No complex SVD implementation")
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(b + (m, n), dtype)]
# Norm, adjusted for dimension and type.
def norm(x):
norm = np.linalg.norm(x, axis=(-2, -1))
return norm / (max(1, m, n) * jnp.finfo(dtype).eps)
a, = args_maker()
if hermitian:
a = a + np.conj(T(a))
out = jnp.linalg.svd(a, full_matrices=full_matrices, compute_uv=compute_uv,
hermitian=hermitian)
if compute_uv:
# Check the reconstructed matrices
if full_matrices:
k = min(m, n)
if m < n:
self.assertTrue(np.all(
norm(a - np.matmul(out[1][..., None, :] * out[0], out[2][..., :k, :])) < 50))
else:
self.assertTrue(np.all(
norm(a - np.matmul(out[1][..., None, :] * out[0][..., :, :k], out[2])) < 350))
else:
self.assertTrue(np.all(
norm(a - np.matmul(out[1][..., None, :] * out[0], out[2])) < 350))
# Check the unitary properties of the singular vector matrices.
self.assertTrue(np.all(norm(np.eye(out[0].shape[-1]) - np.matmul(np.conj(T(out[0])), out[0])) < 15))
if m >= n:
self.assertTrue(np.all(norm(np.eye(out[2].shape[-1]) - np.matmul(np.conj(T(out[2])), out[2])) < 10))
else:
self.assertTrue(np.all(norm(np.eye(out[2].shape[-2]) - np.matmul(out[2], np.conj(T(out[2])))) < 20))
else:
self.assertTrue(np.allclose(np.linalg.svd(a, compute_uv=False), np.asarray(out), atol=1e-4, rtol=1e-4))
self._CompileAndCheck(partial(jnp.linalg.svd, full_matrices=full_matrices, compute_uv=compute_uv),
args_maker)
if not compute_uv:
svd = partial(jnp.linalg.svd, full_matrices=full_matrices,
compute_uv=compute_uv)
# TODO(phawkins): these tolerances seem very loose.
if dtype == np.complex128:
jtu.check_jvp(svd, partial(jvp, svd), (a,), rtol=1e-4, atol=1e-4, eps=1e-8)
else:
jtu.check_jvp(svd, partial(jvp, svd), (a,), rtol=5e-2, atol=2e-1)
if jtu.device_under_test() == "tpu":
raise unittest.SkipTest("TPU matmul does not have enough precision")
# TODO(frederikwilde): Find the appropriate precision to use for this test on TPUs.
if compute_uv and (not full_matrices):
b, = args_maker()
def f(x):
u, s, v = jnp.linalg.svd(
a + x * b,
full_matrices=full_matrices,
compute_uv=compute_uv)
vdiag = jnp.vectorize(jnp.diag, signature='(k)->(k,k)')
return jnp.matmul(jnp.matmul(u, vdiag(s)), v).real
_, t_out = jvp(f, (1.,), (1.,))
if dtype == np.complex128:
atol = 1e-13
else:
atol = 5e-4
self.assertArraysAllClose(t_out, b.real, atol=atol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_fullmatrices={}".format(
jtu.format_shape_dtype_string(shape, dtype), full_matrices),
"shape": shape, "dtype": dtype, "full_matrices": full_matrices}
for shape in [(1, 1), (3, 3), (3, 4), (2, 10, 5), (2, 200, 100)]
for dtype in float_types + complex_types
for full_matrices in [False, True]))
def testQr(self, shape, dtype, full_matrices):
rng = jtu.rand_default(self.rng())
m, n = shape[-2:]
if full_matrices:
mode, k = "complete", m
else:
mode, k = "reduced", min(m, n)
a = rng(shape, dtype)
lq, lr = jnp.linalg.qr(a, mode=mode)
# np.linalg.qr doesn't support batch dimensions. But it seems like an
# inevitable extension so we support it in our version.
nq = np.zeros(shape[:-2] + (m, k), dtype)
nr = np.zeros(shape[:-2] + (k, n), dtype)
for index in np.ndindex(*shape[:-2]):
nq[index], nr[index] = np.linalg.qr(a[index], mode=mode)
max_rank = max(m, n)
# Norm, adjusted for dimension and type.
def norm(x):
n = np.linalg.norm(x, axis=(-2, -1))
return n / (max_rank * jnp.finfo(dtype).eps)
def compare_orthogonal(q1, q2):
# Q is unique up to sign, so normalize the sign first.
sum_of_ratios = np.sum(np.divide(q1, q2), axis=-2, keepdims=True)
phases = np.divide(sum_of_ratios, np.abs(sum_of_ratios))
q1 *= phases
self.assertTrue(np.all(norm(q1 - q2) < 30))
# Check a ~= qr
self.assertTrue(np.all(norm(a - np.matmul(lq, lr)) < 30))
# Compare the first 'k' vectors of Q; the remainder form an arbitrary
# orthonormal basis for the null space.
compare_orthogonal(nq[..., :k], lq[..., :k])
# Check that q is close to unitary.
self.assertTrue(np.all(
norm(np.eye(k) - np.matmul(np.conj(T(lq)), lq)) < 5))
if not full_matrices and m >= n:
jtu.check_jvp(jnp.linalg.qr, partial(jvp, jnp.linalg.qr), (a,), atol=3e-3)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(10, 4, 5), (5, 3, 3), (7, 6, 4)]
for dtype in float_types + complex_types))
def testQrBatching(self, shape, dtype):
rng = jtu.rand_default(self.rng())
args = rng(shape, jnp.float32)
qs, rs = vmap(jsp.linalg.qr)(args)
self.assertTrue(np.all(np.linalg.norm(args - np.matmul(qs, rs)) < 1e-3))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}_pnorm={}".format(jtu.format_shape_dtype_string(shape, dtype), pnorm),
"shape": shape, "pnorm": pnorm, "dtype": dtype}
for shape in [(1, 1), (4, 4), (2, 3, 5), (5, 5, 5), (20, 20), (5, 10)]
for pnorm in [jnp.inf, -jnp.inf, 1, -1, 2, -2, 'fro']
for dtype in float_types + complex_types))
@jtu.skip_on_devices("gpu") # TODO(#2203): numerical errors
def testCond(self, shape, pnorm, dtype):
if (jnp.issubdtype(dtype, np.complexfloating) and
jtu.device_under_test() == "tpu"):
raise unittest.SkipTest("No complex SVD implementation")
def gen_mat():
# arr_gen = jtu.rand_some_nan(self.rng())
arr_gen = jtu.rand_default(self.rng())
res = arr_gen(shape, dtype)
return res
def args_gen(p):
def _args_gen():
return [gen_mat(), p]
return _args_gen
args_maker = args_gen(pnorm)
if pnorm not in [2, -2] and len(set(shape[-2:])) != 1:
with self.assertRaises(np.linalg.LinAlgError):
jnp.linalg.cond(*args_maker())
else:
self._CheckAgainstNumpy(np.linalg.cond, jnp.linalg.cond, args_maker,
check_dtypes=False, tol=1e-3)
partial_norm = partial(jnp.linalg.cond, p=pnorm)
self._CompileAndCheck(partial_norm, lambda: [gen_mat()],
check_dtypes=False, rtol=1e-03, atol=1e-03)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(1, 1), (4, 4), (200, 200), (7, 7, 7, 7)]
for dtype in float_types))
def testTensorinv(self, shape, dtype):
rng = jtu.rand_default(self.rng())
def tensor_maker():
invertible = False
while not invertible:
a = rng(shape, dtype)
try:
np.linalg.inv(a)
invertible = True
except np.linalg.LinAlgError:
pass
return a
args_maker = lambda: [tensor_maker(), int(np.floor(len(shape) / 2))]
self._CheckAgainstNumpy(np.linalg.tensorinv, jnp.linalg.tensorinv, args_maker,
check_dtypes=False, tol=1e-3)
partial_inv = partial(jnp.linalg.tensorinv, ind=int(np.floor(len(shape) / 2)))
self._CompileAndCheck(partial_inv, lambda: [tensor_maker()], check_dtypes=False, rtol=1e-03, atol=1e-03)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs={}_rhs={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype)),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype}
for lhs_shape, rhs_shape in [
((1, 1), (1, 1)),
((4, 4), (4,)),
((8, 8), (8, 4)),
((1, 2, 2), (3, 2)),
((2, 1, 3, 3), (1, 4, 3, 4)),
((1, 0, 0), (1, 0, 2)),
]
for dtype in float_types + complex_types))
def testSolve(self, lhs_shape, rhs_shape, dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
self._CheckAgainstNumpy(np.linalg.solve, jnp.linalg.solve, args_maker,
tol=1e-3)
self._CompileAndCheck(jnp.linalg.solve, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(1, 1), (4, 4), (2, 5, 5), (200, 200), (5, 5, 5), (0, 0)]
for dtype in float_types))
def testInv(self, shape, dtype):
rng = jtu.rand_default(self.rng())
if jtu.device_under_test() == "gpu" and shape == (200, 200):
raise unittest.SkipTest("Test is flaky on GPU")
def args_maker():
invertible = False
while not invertible:
a = rng(shape, dtype)
try:
np.linalg.inv(a)
invertible = True
except np.linalg.LinAlgError:
pass
return [a]
self._CheckAgainstNumpy(np.linalg.inv, jnp.linalg.inv, args_maker,
tol=1e-3)
self._CompileAndCheck(jnp.linalg.inv, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(1, 1), (4, 4), (2, 70, 7), (2000, 7), (7, 1000), (70, 7, 2),
(2, 0, 0), (3, 0, 2), (1, 0)]
for dtype in float_types + complex_types))
@jtu.skip_on_devices("rocm") # will be fixed in ROCm-5.1
def testPinv(self, shape, dtype):
if (jnp.issubdtype(dtype, np.complexfloating) and
jtu.device_under_test() == "tpu"):
raise unittest.SkipTest("No complex SVD implementation")
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np.linalg.pinv, jnp.linalg.pinv, args_maker,
tol=1e-2)
self._CompileAndCheck(jnp.linalg.pinv, args_maker)
if jtu.device_under_test() != "tpu":
# TODO(phawkins): 1e-1 seems like a very loose tolerance.
jtu.check_grads(jnp.linalg.pinv, args_maker(), 2, rtol=1e-1, atol=2e-1)
@jtu.skip_on_devices("rocm") # will be fixed in ROCm-5.1
def testPinvGradIssue2792(self):
def f(p):
a = jnp.array([[0., 0.],[-p, 1.]], jnp.float32) * 1 / (1 + p**2)
return jnp.linalg.pinv(a)
j = jax.jacobian(f)(jnp.float32(2.))
self.assertAllClose(jnp.array([[0., -1.], [ 0., 0.]], jnp.float32), j)
expected = jnp.array([[[[-1., 0.], [ 0., 0.]], [[0., -1.], [0., 0.]]],
[[[0., 0.], [-1., 0.]], [[0., 0.], [0., -1.]]]],
dtype=jnp.float32)
self.assertAllClose(
expected, jax.jacobian(jnp.linalg.pinv)(jnp.eye(2, dtype=jnp.float32)))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_n={}".format(
jtu.format_shape_dtype_string(shape, dtype), n),
"shape": shape, "dtype": dtype, "n": n}
for shape in [(1, 1), (2, 2), (4, 4), (5, 5),
(1, 2, 2), (2, 3, 3), (2, 5, 5)]
for dtype in float_types + complex_types
for n in [-5, -2, -1, 0, 1, 2, 3, 4, 5, 10]))
@jtu.skip_on_devices("tpu") # TODO(b/149870255): Bug in XLA:TPU?.
def testMatrixPower(self, shape, dtype, n):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
tol = 1e-1 if jtu.device_under_test() == "tpu" else 1e-3
self._CheckAgainstNumpy(partial(np.linalg.matrix_power, n=n),
partial(jnp.linalg.matrix_power, n=n),
args_maker, tol=tol)
self._CompileAndCheck(partial(jnp.linalg.matrix_power, n=n), args_maker,
rtol=1e-3)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(3, ), (1, 2), (8, 5), (4, 4), (5, 5), (50, 50)]
for dtype in float_types + complex_types))
@jtu.skip_on_devices("rocm") # will be fixed in ROCm-5.1
def testMatrixRank(self, shape, dtype):
if (jnp.issubdtype(dtype, np.complexfloating) and
jtu.device_under_test() == "tpu"):
raise unittest.SkipTest("No complex SVD implementation")
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
a, = args_maker()
self._CheckAgainstNumpy(np.linalg.matrix_rank, jnp.linalg.matrix_rank,
args_maker, check_dtypes=False, tol=1e-3)
self._CompileAndCheck(jnp.linalg.matrix_rank, args_maker,
check_dtypes=False, rtol=1e-3)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shapes={}".format(
','.join(jtu.format_shape_dtype_string(s, dtype) for s in shapes)),
"shapes": shapes, "dtype": dtype}
for shapes in [
[(3, ), (3, 1)], # quick-out codepath
[(1, 3), (3, 5), (5, 2)], # multi_dot_three codepath
[(1, 3), (3, 5), (5, 2), (2, 7), (7, )] # dynamic programming codepath
]
for dtype in float_types + complex_types))
def testMultiDot(self, shapes, dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [[rng(shape, dtype) for shape in shapes]]
np_fun = np.linalg.multi_dot
jnp_fun = partial(jnp.linalg.multi_dot, precision=lax.Precision.HIGHEST)
tol = {np.float32: 1e-4, np.float64: 1e-10,
np.complex64: 1e-4, np.complex128: 1e-10}
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, tol=tol)
self._CompileAndCheck(jnp_fun, args_maker,
atol=tol, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs={}_rhs={}__rcond={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
rcond),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype, "rcond": rcond}
for lhs_shape, rhs_shape in [
((1, 1), (1, 1)),
((4, 6), (4,)),
((6, 6), (6, 1)),
((8, 6), (8, 4)),
]
for rcond in [-1, None, 0.5]
for dtype in float_types + complex_types))
@jtu.skip_on_devices("tpu","rocm") # SVD not implemented on TPU. will be fixed in ROCm-5.1
def testLstsq(self, lhs_shape, rhs_shape, dtype, rcond):
rng = jtu.rand_default(self.rng())
np_fun = partial(np.linalg.lstsq, rcond=rcond)
jnp_fun = partial(jnp.linalg.lstsq, rcond=rcond)
jnp_fun_numpy_resid = partial(jnp.linalg.lstsq, rcond=rcond, numpy_resid=True)
tol = {np.float32: 1e-5, np.float64: 1e-12,
np.complex64: 1e-5, np.complex128: 1e-12}
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun_numpy_resid, args_maker, check_dtypes=False, tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, atol=tol, rtol=tol)
# Disabled because grad is flaky for low-rank inputs.
# TODO:
# jtu.check_grads(lambda *args: jnp_fun(*args)[0], args_maker(), order=2, atol=1e-2, rtol=1e-2)
# Regression test for incorrect type for eigenvalues of a complex matrix.
def testIssue669(self):
def test(x):
val, vec = jnp.linalg.eigh(x)
return jnp.real(jnp.sum(val))
grad_test_jc = jit(grad(jit(test)))
xc = np.eye(3, dtype=np.complex64)
self.assertAllClose(xc, grad_test_jc(xc))
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def testIssue1151(self):
rng = self.rng()
A = jnp.array(rng.randn(100, 3, 3), dtype=jnp.float32)
b = jnp.array(rng.randn(100, 3), dtype=jnp.float32)
x = jnp.linalg.solve(A, b)
self.assertAllClose(vmap(jnp.dot)(A, x), b, atol=2e-3, rtol=1e-2)
_ = jax.jacobian(jnp.linalg.solve, argnums=0)(A, b)
_ = jax.jacobian(jnp.linalg.solve, argnums=1)(A, b)
_ = jax.jacobian(jnp.linalg.solve, argnums=0)(A[0], b[0])
_ = jax.jacobian(jnp.linalg.solve, argnums=1)(A[0], b[0])
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def testIssue1383(self):
seed = jax.random.PRNGKey(0)
tmp = jax.random.uniform(seed, (2,2))
a = jnp.dot(tmp, tmp.T)
def f(inp):
val, vec = jnp.linalg.eigh(inp)
return jnp.dot(jnp.dot(vec, inp), vec.T)
grad_func = jax.jacfwd(f)
hess_func = jax.jacfwd(grad_func)
cube_func = jax.jacfwd(hess_func)
self.assertFalse(np.any(np.isnan(cube_func(a))))
class ScipyLinalgTest(jtu.JaxTestCase):
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_i={}".format(i), "args": args}
for i, args in enumerate([
(),
(1,),
(7, -2),
(3, 4, 5),
(np.ones((3, 4), dtype=jnp.float_), 5,
np.random.randn(5, 2).astype(jnp.float_)),
])))
def testBlockDiag(self, args):
args_maker = lambda: args
self._CheckAgainstNumpy(osp.linalg.block_diag, jsp.linalg.block_diag,
args_maker)
self._CompileAndCheck(jsp.linalg.block_diag, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(1, 1), (4, 5), (10, 5), (50, 50)]
for dtype in float_types + complex_types))
def testLu(self, shape, dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
x, = args_maker()
p, l, u = jsp.linalg.lu(x)
self.assertAllClose(x, np.matmul(p, np.matmul(l, u)),
rtol={np.float32: 1e-3, np.float64: 1e-12,
np.complex64: 1e-3, np.complex128: 1e-12})
self._CompileAndCheck(jsp.linalg.lu, args_maker)
def testLuOfSingularMatrix(self):
x = jnp.array([[-1., 3./2], [2./3, -1.]], dtype=np.float32)
p, l, u = jsp.linalg.lu(x)
self.assertAllClose(x, np.matmul(p, np.matmul(l, u)))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(1, 1), (4, 5), (10, 5), (10, 10), (6, 7, 7)]
for dtype in float_types + complex_types))
@jtu.skip_on_devices("tpu") # TODO(phawkins): precision problems on TPU.
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def testLuGrad(self, shape, dtype):
rng = jtu.rand_default(self.rng())
a = rng(shape, dtype)
lu = vmap(jsp.linalg.lu) if len(shape) > 2 else jsp.linalg.lu
jtu.check_grads(lu, (a,), 2, atol=5e-2, rtol=3e-1)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(4, 5), (6, 5)]
for dtype in [jnp.float32]))
def testLuBatching(self, shape, dtype):
rng = jtu.rand_default(self.rng())
args = [rng(shape, jnp.float32) for _ in range(10)]
expected = list(osp.linalg.lu(x) for x in args)
ps = np.stack([out[0] for out in expected])
ls = np.stack([out[1] for out in expected])
us = np.stack([out[2] for out in expected])
actual_ps, actual_ls, actual_us = vmap(jsp.linalg.lu)(jnp.stack(args))
self.assertAllClose(ps, actual_ps)
self.assertAllClose(ls, actual_ls, rtol=5e-6)
self.assertAllClose(us, actual_us)
@jtu.skip_on_devices("cpu", "tpu")
def testLuCPUBackendOnGPU(self):
# tests running `lu` on cpu when a gpu is present.
jit(jsp.linalg.lu, backend="cpu")(np.ones((2, 2))) # does not crash
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_n={}".format(jtu.format_shape_dtype_string((n,n), dtype)),
"n": n, "dtype": dtype}
for n in [1, 4, 5, 200]
for dtype in float_types + complex_types))
def testLuFactor(self, n, dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng((n, n), dtype)]
x, = args_maker()
lu, piv = jsp.linalg.lu_factor(x)
l = np.tril(lu, -1) + np.eye(n, dtype=dtype)
u = np.triu(lu)
for i in range(n):
x[[i, piv[i]],] = x[[piv[i], i],]
self.assertAllClose(x, np.matmul(l, u), rtol=1e-3,
atol=1e-3)
self._CompileAndCheck(jsp.linalg.lu_factor, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs={}_rhs={}_trans={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
trans),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"trans": trans}
for lhs_shape, rhs_shape in [
((1, 1), (1, 1)),
((4, 4), (4,)),
((8, 8), (8, 4)),
]
for trans in [0, 1, 2]
for dtype in float_types + complex_types))
@jtu.skip_on_devices("cpu") # TODO(frostig): Test fails on CPU sometimes
def testLuSolve(self, lhs_shape, rhs_shape, dtype, trans):
rng = jtu.rand_default(self.rng())
osp_fun = lambda lu, piv, rhs: osp.linalg.lu_solve((lu, piv), rhs, trans=trans)
jsp_fun = lambda lu, piv, rhs: jsp.linalg.lu_solve((lu, piv), rhs, trans=trans)
def args_maker():
a = rng(lhs_shape, dtype)
lu, piv = osp.linalg.lu_factor(a)
return [lu, piv, rng(rhs_shape, dtype)]
self._CheckAgainstNumpy(osp_fun, jsp_fun, args_maker, tol=1e-3)
self._CompileAndCheck(jsp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs={}_rhs={}_sym_pos={}_lower={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
sym_pos, lower),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"sym_pos": sym_pos, "lower": lower}
for lhs_shape, rhs_shape in [
((1, 1), (1, 1)),
((4, 4), (4,)),
((8, 8), (8, 4)),
]
for sym_pos, lower in [
(False, False),
(True, False),
(True, True),
]
for dtype in float_types + complex_types))
def testSolve(self, lhs_shape, rhs_shape, dtype, sym_pos, lower):
rng = jtu.rand_default(self.rng())
osp_fun = lambda lhs, rhs: osp.linalg.solve(lhs, rhs, sym_pos=sym_pos, lower=lower)
jsp_fun = lambda lhs, rhs: jsp.linalg.solve(lhs, rhs, sym_pos=sym_pos, lower=lower)
def args_maker():
a = rng(lhs_shape, dtype)
if sym_pos:
a = np.matmul(a, np.conj(T(a)))
a = np.tril(a) if lower else np.triu(a)
return [a, rng(rhs_shape, dtype)]
self._CheckAgainstNumpy(osp_fun, jsp_fun, args_maker, tol=1e-3)
self._CompileAndCheck(jsp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs={}_rhs={}_lower={}_transposea={}_unit_diagonal={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
lower, transpose_a, unit_diagonal),
"lower": lower, "transpose_a": transpose_a,
"unit_diagonal": unit_diagonal, "lhs_shape": lhs_shape,
"rhs_shape": rhs_shape, "dtype": dtype}
for lower in [False, True]
for transpose_a in [False, True]
for unit_diagonal in [False, True]
for lhs_shape, rhs_shape in [
((4, 4), (4,)),
((4, 4), (4, 3)),
((2, 8, 8), (2, 8, 10)),
]
for dtype in float_types))
def testSolveTriangular(self, lower, transpose_a, unit_diagonal, lhs_shape,
rhs_shape, dtype):
rng = jtu.rand_default(self.rng())
k = rng(lhs_shape, dtype)
l = np.linalg.cholesky(np.matmul(k, T(k))
+ lhs_shape[-1] * np.eye(lhs_shape[-1]))
l = l.astype(k.dtype)
b = rng(rhs_shape, dtype)
if unit_diagonal:
a = np.tril(l, -1) + np.eye(lhs_shape[-1], dtype=dtype)
else:
a = l
a = a if lower else T(a)
inv = np.linalg.inv(T(a) if transpose_a else a).astype(a.dtype)
if len(lhs_shape) == len(rhs_shape):
np_ans = np.matmul(inv, b)
else:
np_ans = np.einsum("...ij,...j->...i", inv, b)
# The standard scipy.linalg.solve_triangular doesn't support broadcasting.
# But it seems like an inevitable extension so we support it.
ans = jsp.linalg.solve_triangular(
l if lower else T(l), b, trans=1 if transpose_a else 0, lower=lower,
unit_diagonal=unit_diagonal)
self.assertAllClose(np_ans, ans,
rtol={np.float32: 1e-4, np.float64: 1e-11})
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_A={}_B={}_lower={}_transposea={}_conja={}_unitdiag={}_leftside={}".format(
jtu.format_shape_dtype_string(a_shape, dtype),
jtu.format_shape_dtype_string(b_shape, dtype),
lower, transpose_a, conjugate_a, unit_diagonal, left_side),
"lower": lower, "transpose_a": transpose_a, "conjugate_a": conjugate_a,
"unit_diagonal": unit_diagonal, "left_side": left_side,
"a_shape": a_shape, "b_shape": b_shape, "dtype": dtype}
for lower in [False, True]
for unit_diagonal in [False, True]
for dtype in float_types + complex_types
for transpose_a in [False, True]
for conjugate_a in (
[False] if jnp.issubdtype(dtype, jnp.floating) else [False, True])
for left_side, a_shape, b_shape in [
(False, (4, 4), (4,)),
(False, (4, 4), (1, 4,)),
(False, (3, 3), (4, 3)),
(True, (4, 4), (4,)),
(True, (4, 4), (4, 1)),
(True, (4, 4), (4, 3)),
(True, (2, 8, 8), (2, 8, 10)),
]))
def testTriangularSolveGrad(
self, lower, transpose_a, conjugate_a, unit_diagonal, left_side, a_shape,
b_shape, dtype):
rng = jtu.rand_default(self.rng())
# Test lax.linalg.triangular_solve instead of scipy.linalg.solve_triangular
# because it exposes more options.
A = jnp.tril(rng(a_shape, dtype) + 5 * np.eye(a_shape[-1], dtype=dtype))
A = A if lower else T(A)
B = rng(b_shape, dtype)
f = partial(lax.linalg.triangular_solve, lower=lower, transpose_a=transpose_a,
conjugate_a=conjugate_a, unit_diagonal=unit_diagonal,
left_side=left_side)
jtu.check_grads(f, (A, B), 2, rtol=4e-2, eps=1e-3)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_A={}_B={}_bdim={}_leftside={}".format(
a_shape, b_shape, bdims, left_side),
"left_side": left_side, "a_shape": a_shape, "b_shape": b_shape,
"bdims": bdims}
for left_side, a_shape, b_shape, bdims in [
(False, (4, 4), (2, 3, 4,), (None, 0)),
(False, (2, 4, 4), (2, 2, 3, 4,), (None, 0)),
(False, (2, 4, 4), (3, 4,), (0, None)),
(False, (2, 4, 4), (2, 3, 4,), (0, 0)),
(True, (2, 4, 4), (2, 4, 3), (0, 0)),
(True, (2, 4, 4), (2, 2, 4, 3), (None, 0)),
]))
def testTriangularSolveBatching(self, left_side, a_shape, b_shape, bdims):
rng = jtu.rand_default(self.rng())
A = jnp.tril(rng(a_shape, np.float32)
+ 5 * np.eye(a_shape[-1], dtype=np.float32))
B = rng(b_shape, np.float32)
solve = partial(lax.linalg.triangular_solve, lower=True, transpose_a=False,
conjugate_a=False, unit_diagonal=False, left_side=left_side)
X = vmap(solve, bdims)(A, B)
matmul = partial(jnp.matmul, precision=lax.Precision.HIGHEST)
Y = matmul(A, X) if left_side else matmul(X, A)
self.assertArraysAllClose(Y, jnp.broadcast_to(B, Y.shape), atol=1e-4)
def testTriangularSolveGradPrecision(self):
rng = jtu.rand_default(self.rng())
a = jnp.tril(rng((3, 3), np.float32))
b = rng((1, 3), np.float32)
jtu.assert_dot_precision(
lax.Precision.HIGHEST,
partial(jvp, lax.linalg.triangular_solve),
(a, b),
(a, b))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_n={}".format(jtu.format_shape_dtype_string((n,n), dtype)),
"n": n, "dtype": dtype}
for n in [1, 4, 5, 20, 50, 100]
for dtype in float_types + complex_types))
def testExpm(self, n, dtype):
rng = jtu.rand_small(self.rng())
args_maker = lambda: [rng((n, n), dtype)]
osp_fun = lambda a: osp.linalg.expm(a)
jsp_fun = lambda a: jsp.linalg.expm(a)
self._CheckAgainstNumpy(osp_fun, jsp_fun, args_maker)
self._CompileAndCheck(jsp_fun, args_maker)
args_maker_triu = lambda: [np.triu(rng((n, n), dtype))]
jsp_fun_triu = lambda a: jsp.linalg.expm(a, upper_triangular=True)
self._CheckAgainstNumpy(osp_fun, jsp_fun_triu, args_maker_triu)
self._CompileAndCheck(jsp_fun_triu, args_maker_triu)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_n={}".format(jtu.format_shape_dtype_string((n,n), dtype)),
"n": n, "dtype": dtype}
for n in [1, 4, 5, 20, 50, 100]
for dtype in float_types + complex_types
))
def testIssue2131(self, n, dtype):
args_maker_zeros = lambda: [np.zeros((n, n), dtype)]
osp_fun = lambda a: osp.linalg.expm(a)
jsp_fun = lambda a: jsp.linalg.expm(a)
self._CheckAgainstNumpy(osp_fun, jsp_fun, args_maker_zeros)
self._CompileAndCheck(jsp_fun, args_maker_zeros)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_lhs={}_rhs={}_lower={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
lower),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"lower": lower}
for lhs_shape, rhs_shape in [
[(1, 1), (1,)],
[(4, 4), (4,)],
[(4, 4), (4, 4)],
]
for dtype in float_types
for lower in [True, False]))
def testChoSolve(self, lhs_shape, rhs_shape, dtype, lower):
rng = jtu.rand_default(self.rng())
def args_maker():
b = rng(rhs_shape, dtype)
if lower:
L = np.tril(rng(lhs_shape, dtype))
return [(L, lower), b]
else:
U = np.triu(rng(lhs_shape, dtype))
return [(U, lower), b]
self._CheckAgainstNumpy(osp.linalg.cho_solve, jsp.linalg.cho_solve,
args_maker, tol=1e-3)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_n={}".format(jtu.format_shape_dtype_string((n,n), dtype)),
"n": n, "dtype": dtype}
for n in [1, 4, 5, 20, 50, 100]
for dtype in float_types + complex_types))
def testExpmFrechet(self, n, dtype):
rng = jtu.rand_small(self.rng())
if dtype == np.float64 or dtype == np.complex128:
target_norms = [1.0e-2, 2.0e-1, 9.0e-01, 2.0, 3.0]
# TODO(zhangqiaorjc): Reduce tol to default 1e-15.
tol = {
np.dtype(np.float64): 1e-14,
np.dtype(np.complex128): 1e-14,
}
elif dtype == np.float32 or dtype == np.complex64:
target_norms = [4.0e-1, 1.0, 3.0]
tol = None
else:
raise TypeError("dtype={} is not supported.".format(dtype))
for norm in target_norms:
def args_maker():
a = rng((n, n), dtype)
a = a / np.linalg.norm(a, 1) * norm
e = rng((n, n), dtype)
return [a, e, ]
#compute_expm is True
osp_fun = lambda a,e: osp.linalg.expm_frechet(a,e,compute_expm=True)
jsp_fun = lambda a,e: jsp.linalg.expm_frechet(a,e,compute_expm=True)
self._CheckAgainstNumpy(osp_fun, jsp_fun, args_maker,
check_dtypes=False, tol=tol)
self._CompileAndCheck(jsp_fun, args_maker, check_dtypes=False)
#compute_expm is False
osp_fun = lambda a,e: osp.linalg.expm_frechet(a,e,compute_expm=False)
jsp_fun = lambda a,e: jsp.linalg.expm_frechet(a,e,compute_expm=False)
self._CheckAgainstNumpy(osp_fun, jsp_fun, args_maker,
check_dtypes=False, tol=tol)
self._CompileAndCheck(jsp_fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_n={}".format(jtu.format_shape_dtype_string((n,n), dtype)),
"n": n, "dtype": dtype}
for n in [1, 4, 5, 20, 50]
for dtype in float_types + complex_types))
def testExpmGrad(self, n, dtype):
rng = jtu.rand_small(self.rng())
a = rng((n, n), dtype)
if dtype == np.float64 or dtype == np.complex128:
target_norms = [1.0e-2, 2.0e-1, 9.0e-01, 2.0, 3.0]
elif dtype == np.float32 or dtype == np.complex64:
target_norms = [4.0e-1, 1.0, 3.0]
else:
raise TypeError("dtype={} is not supported.".format(dtype))
# TODO(zhangqiaorjc): Reduce tol to default 1e-5.
# Lower tolerance is due to 2nd order derivative.
tol = {
# Note that due to inner_product, float and complex tol are coupled.
np.dtype(np.float32): 0.02,
np.dtype(np.complex64): 0.02,
np.dtype(np.float64): 1e-4,
np.dtype(np.complex128): 1e-4,
}
for norm in target_norms:
a = a / np.linalg.norm(a, 1) * norm
def expm(x):
return jsp.linalg.expm(x, upper_triangular=False, max_squarings=16)
jtu.check_grads(expm, (a,), modes=["fwd", "rev"], order=1, atol=tol,
rtol=tol)
@parameterized.named_parameters(
jtu.cases_from_list({
"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype
} for shape in [(4, 4), (15, 15), (50, 50), (100, 100)]
for dtype in float_types + complex_types))
@jtu.skip_on_devices("gpu", "tpu")
def testSchur(self, shape, dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(osp.linalg.schur, jsp.linalg.schur, args_maker)
self._CompileAndCheck(jsp.linalg.schur, args_maker)
@parameterized.named_parameters(
jtu.cases_from_list({
"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape" : shape, "dtype" : dtype
} for shape in [(4, 4), (15, 15), (50, 50), (100, 100)]
for dtype in float_types + complex_types))
@jtu.skip_on_devices("gpu", "tpu")
def testSqrtmPSDMatrix(self, shape, dtype):
# Checks against scipy.linalg.sqrtm when the principal square root
# is guaranteed to be unique (i.e no negative real eigenvalue)
rng = jtu.rand_default(self.rng())
arg = rng(shape, dtype)
mat = arg @ arg.T
args_maker = lambda : [mat]
if dtype == np.float32 or dtype == np.complex64:
tol = 1e-4
else:
tol = 1e-8
self._CheckAgainstNumpy(osp.linalg.sqrtm,
jsp.linalg.sqrtm,
args_maker,
tol=tol,
check_dtypes=False)
self._CompileAndCheck(jsp.linalg.sqrtm, args_maker)
@parameterized.named_parameters(
jtu.cases_from_list({
"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape" : shape, "dtype" : dtype
} for shape in [(4, 4), (15, 15), (50, 50), (100, 100)]
for dtype in float_types + complex_types))
@jtu.skip_on_devices("gpu", "tpu")
def testSqrtmGenMatrix(self, shape, dtype):
rng = jtu.rand_default(self.rng())
arg = rng(shape, dtype)
if dtype == np.float32 or dtype == np.complex64:
tol = 1e-3
else:
tol = 1e-8
R = jsp.linalg.sqrtm(arg)
self.assertAllClose(R @ R, arg, atol=tol, check_dtypes=False)
@parameterized.named_parameters(
jtu.cases_from_list({
"testcase_name":
"_diag={}".format((diag, dtype)),
"diag" : diag, "expected": expected, "dtype" : dtype
} for diag, expected in [([1, 0, 0], [1, 0, 0]), ([0, 4, 0], [0, 2, 0]),
([0, 0, 0, 9],[0, 0, 0, 3]),
([0, 0, 9, 0, 0, 4], [0, 0, 3, 0, 0, 2])]
for dtype in float_types + complex_types))
@jtu.skip_on_devices("gpu", "tpu")
def testSqrtmEdgeCase(self, diag, expected, dtype):
"""
Tests the zero numerator condition
"""
mat = jnp.diag(jnp.array(diag)).astype(dtype)
expected = jnp.diag(jnp.array(expected))
root = jsp.linalg.sqrtm(mat)
self.assertAllClose(root, expected, check_dtypes=False)
class LaxLinalgTest(jtu.JaxTestCase):
def run_test(self, alpha, beta):
n = alpha.shape[-1]
# scipy.linalg.eigh_tridiagonal doesn't support complex inputs, so for
# this we call the slower numpy.linalg.eigh.
if np.issubdtype(alpha.dtype, np.complexfloating):
tridiagonal = np.diag(alpha) + np.diag(beta, 1) + np.diag(
np.conj(beta), -1)
eigvals_expected, _ = np.linalg.eigh(tridiagonal)
else:
eigvals_expected = scipy.linalg.eigh_tridiagonal(
alpha, beta, eigvals_only=True)
eigvals = jax.scipy.linalg.eigh_tridiagonal(
alpha, beta, eigvals_only=True)
finfo = np.finfo(alpha.dtype)
atol = 4 * np.sqrt(n) * finfo.eps * np.amax(np.abs(eigvals_expected))
self.assertAllClose(eigvals_expected, eigvals, atol=atol, rtol=1e-4)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": f"_n={n}_dtype={dtype.__name__}",
"n": n, "dtype": dtype}
for n in [1, 2, 3, 7, 8, 100]
for dtype in float_types + complex_types))
def testToeplitz(self, n, dtype):
for a, b in [[2, -1], [1, 0], [0, 1], [-1e10, 1e10], [-1e-10, 1e-10]]:
alpha = a * np.ones([n], dtype=dtype)
beta = b * np.ones([n - 1], dtype=dtype)
self.run_test(alpha, beta)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": f"_n={n}_dtype={dtype.__name__}",
"n": n, "dtype": dtype}
for n in [1, 2, 3, 7, 8, 100]
for dtype in float_types + complex_types))
def testRandomUniform(self, n, dtype):
alpha = jtu.rand_uniform(self.rng())((n,), dtype)
beta = jtu.rand_uniform(self.rng())((n - 1,), dtype)
self.run_test(alpha, beta)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": f"_dtype={dtype.__name__}",
"dtype": dtype}
for dtype in float_types + complex_types))
def testSelect(self, dtype):
n = 5
alpha = jtu.rand_uniform(self.rng())((n,), dtype)
beta = jtu.rand_uniform(self.rng())((n - 1,), dtype)
eigvals_all = jax.scipy.linalg.eigh_tridiagonal(alpha, beta, select="a",
eigvals_only=True)
eps = np.finfo(alpha.dtype).eps
atol = 2 * n * eps
for first in range(n - 1):
for last in range(first + 1, n - 1):
# Check that we get the expected eigenvalues by selecting by
# index range.
eigvals_index = jax.scipy.linalg.eigh_tridiagonal(
alpha, beta, select="i", select_range=(first, last),
eigvals_only=True)
self.assertAllClose(
eigvals_all[first:(last + 1)], eigvals_index, atol=atol)
@parameterized.parameters(np.float32, np.float64)
@jtu.skip_on_devices("rocm") # will be fixed in ROCm-5.1
def test_tridiagonal_solve(self, dtype):
dl = np.array([0.0, 2.0, 3.0], dtype=dtype)
d = np.ones(3, dtype=dtype)
du = np.array([1.0, 2.0, 0.0], dtype=dtype)
m = 3
B = np.ones([m, 1], dtype=dtype)
X = lax.linalg.tridiagonal_solve(dl, d, du, B)
A = np.eye(3, dtype=dtype)
A[[1, 2], [0, 1]] = dl[1:]
A[[0, 1], [1, 2]] = du[:-1]
np.testing.assert_allclose(A @ X, B, rtol=1e-6, atol=1e-6)
@parameterized.named_parameters(
jtu.cases_from_list({
"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype
} for shape in [(4, 4), (15, 15), (50, 50), (100, 100)]
for dtype in float_types + complex_types))
@jtu.skip_on_devices("gpu", "tpu")
def testSchur(self, shape, dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(osp.linalg.schur, lax.linalg.schur, args_maker)
self._CompileAndCheck(lax.linalg.schur, args_maker)
@parameterized.named_parameters(
jtu.cases_from_list({
"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype
} for shape in [(2, 2), (4, 4), (15, 15), (50, 50), (100, 100)]
for dtype in float_types + complex_types))
@jtu.skip_on_devices("gpu", "tpu")
def testSchurBatching(self, shape, dtype):
rng = jtu.rand_default(self.rng())
batch_size = 10
shape = (batch_size, ) + shape
args = rng(shape, dtype)
reconstruct = vmap(lambda S, T: S @ T @ jnp.conj(S.T))
Ts, Ss = vmap(lax.linalg.schur)(args)
self.assertAllClose(reconstruct(Ss, Ts), args, atol=1e-4)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
google/jax
|
tests/linalg_test.py
|
Python
|
apache-2.0
| 64,375 | 0.007518 |
"""
Kodi resolveurl plugin
Copyright (C) 2014 smokdpi
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib
import urllib2
from lib import jsunpack
from urlparse import urlparse
from resolveurl import common
from resolveurl.resolver import ResolveUrl, ResolverError
from resolveurl.hmf import HostedMediaFile
class VideoZooResolver(ResolveUrl):
name = "videozoo"
domains = ["byzoo.org", "playpanda.net", "videozoo.me", "videowing.me", "easyvideo.me", "play44.net", "playbb.me", "video44.net"]
pattern = 'http://((?:www\.)*(?:play44|playbb|video44|byzoo|playpanda|videozoo|videowing|easyvideo)\.(?:me|org|net|eu)/(?:embed[/0-9a-zA-Z]*?|gplus|picasa|gogo/)(?:\.php)*)\?.*?((?:vid|video|id|file)=[%0-9a-zA-Z_\-\./]+|.*)[\?&]*.*'
def __init__(self):
self.net = common.Net()
def get_url(self, host, media_id):
return self._default_get_url(host, media_id, 'http://{host}?vid={media_id}')
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
headers = {
'User-Agent': common.IOS_USER_AGENT,
'Referer': web_url
}
stream_url = ''
new_host = urlparse(web_url).netloc
html = self.net.http_GET(web_url, headers=headers).content
if 'videozoo' not in new_host:
r = re.search('(?:playlist:|timer\s*=\s*null;).+?url\s*[:=]+\s*[\'"]+(.+?)[\'"]+', html, re.DOTALL)
else:
r = re.search('\*/\s+?(eval\(function\(p,a,c,k,e,d\).+)\s+?/\*', html)
if r:
try:
r = jsunpack.unpack(r.group(1))
if r:
r = re.search('\[{"url":"(.+?)"', r.replace('\\', ''))
except:
if r:
re_src = re.search('urlResolvers\|2F(.+?)\|', r.group(1))
re_url = re.search('php\|3D(.+?)\|', r.group(1))
if re_src and re_url:
stream_url = 'http://%s/%s.php?url=%s' % (new_host, re_src.group(1), re_url.group(1))
stream_url = self._redirect_test(stream_url)
else:
raise ResolverError('File not found')
if r:
stream_url = urllib.unquote_plus(r.group(1))
if 'http' not in stream_url:
stream_url = 'http://' + host + '/' + stream_url.replace('/gplus.php', 'gplus.php').replace('/picasa.php', 'picasa.php')
stream_url = self._redirect_test(stream_url)
if stream_url:
if 'google' in stream_url:
return HostedMediaFile(url=stream_url).resolve()
else:
return stream_url
else:
raise ResolverError('File not found')
def _redirect_test(self, url):
opener = urllib2.build_opener()
opener.addheaders = [('User-agent', common.IOS_USER_AGENT)]
opener.addheaders = [('Referer', urlparse(url).netloc)]
try:
resp = opener.open(url)
if url != resp.geturl():
return resp.geturl()
else:
return url
except urllib2.HTTPError, e:
if e.code == 403:
if url != e.geturl():
return e.geturl()
raise ResolverError('File not found')
|
repotvsupertuga/tvsupertuga.repository
|
script.module.resolveurl/lib/resolveurl/plugins/videozoo.py
|
Python
|
gpl-2.0
| 3,993 | 0.008264 |
#!/usr/bin/env python
# Standard packages
import os
import sys
import argparse
# Third-party packages
from toil.job import Job
# Package methods
from ddb import configuration
from ddb_ngsflow import gatk
from ddb_ngsflow import annotation
from ddb_ngsflow import pipeline
from ddb_ngsflow.align import bwa
from ddb_ngsflow.utils import utilities
from ddb_ngsflow.qc import qc
from ddb_ngsflow.coverage import sambamba
from ddb_ngsflow.variation import variation
from ddb_ngsflow.variation import freebayes
from ddb_ngsflow.variation import mutect
from ddb_ngsflow.variation import platypus
from ddb_ngsflow.variation import vardict
from ddb_ngsflow.variation import scalpel
from ddb_ngsflow.variation.sv import pindel
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--samples_file', help="Input configuration file for samples")
parser.add_argument('-c', '--configuration', help="Configuration file for various settings")
Job.Runner.addToilOptions(parser)
args = parser.parse_args()
args.logLevel = "INFO"
sys.stdout.write("Setting up analysis directory\n")
if not os.path.exists("Logs"):
os.makedirs("Logs")
if not os.path.exists("FinalVCFs"):
os.makedirs("FinalVCFs")
if not os.path.exists("FinalBAMs"):
os.makedirs("FinalBAMs")
if not os.path.exists("Intermediates"):
os.makedirs("Intermediates")
if not os.path.exists("Coverage"):
os.makedirs("Coverage")
if not os.path.exists("Reports"):
os.makedirs("Reports")
sys.stdout.write("Parsing configuration data\n")
config = configuration.configure_runtime(args.configuration)
sys.stdout.write("Parsing sample data\n")
samples = configuration.configure_samples(args.samples_file, config)
# Workflow Graph definition. The following workflow definition should create a valid Directed Acyclic Graph (DAG)
root_job = Job.wrapJobFn(pipeline.spawn_batch_jobs, cores=1)
# Per sample jobs
for sample in samples:
vcfanno_job = Job.wrapJobFn(annotation.vcfanno, config, sample, samples,
"{}.snpEff.{}.vcf".format(sample, config['snpeff']['reference']),
cores=int(config['vcfanno']['num_cores']),
memory="{}G".format(config['vcfanno']['max_mem']))
# Create workflow from created jobs
root_job.addChild(vcfanno_job)
# Start workflow execution
Job.Runner.startToil(root_job, args)
|
dgaston/ddb-ngsflow-scripts
|
workflow-vcfanno_somatic_amplicon.py
|
Python
|
mit
| 2,541 | 0.002361 |
import importlib
from .base import BaseTransport
from ..service import Service
class LocalTransport(BaseTransport):
def __init__(self):
super(LocalTransport, self).__init__()
self.__service = None
def __repr__(self):
return self.__class__.__name__
def configure(self, service_name='', service_version='', service_meta=None, **kwargs):
instance = self._import_service_and_instantiate_service(service_name, service_version)
self.service = instance
@property
def service(self):
raise AttributeError("Cannot access service property directly")
@service.setter
def service(self, service_instance):
self.__service = service_instance
def _import_service_and_instantiate_service(self, service_name, service_version):
if not service_name and service_version:
raise Exception(
'service_name and service_version are required '
'arguments for local transport')
module = importlib.import_module('%s.service' % (service_name,))
for name in dir(module):
if name.startswith('_'):
continue
obj = getattr(module, name)
if not self._looks_like_service_class(obj, service_name,
service_version):
continue
instance = obj()
# uber-safe final check to make sure we have the correct service class
if not isinstance(instance, Service):
continue
return instance
raise Exception(
'Could not find appropriate Service class. Services '
'must subclass servant.Service and define an action_map, '
'name and version.'
)
def _looks_like_service_class(self, obj, service_name, service_version):
return (
getattr(obj, 'name', '') == service_name and
getattr(obj, 'version', -1) == service_version and
isinstance(getattr(obj, 'action_map', None), dict) and
hasattr(obj, 'run_actions')
)
def is_connected(self):
return True
def send(self, request):
return self.__service.handle_request(request)
|
brianz/servant
|
servant/transport/local.py
|
Python
|
lgpl-3.0
| 2,255 | 0.002661 |
import squeakspace.common.util as ut
import squeakspace.common.util_http as ht
import squeakspace.proxy.server.db_sqlite3 as db
import squeakspace.common.squeak_ex as ex
import config
def post_handler(environ):
query = ht.parse_post_request(environ)
cookies = ht.parse_cookies(environ)
user_id = ht.get_required_cookie(cookies, 'user_id')
session_id = ht.get_required_cookie(cookies, 'session_id')
node_name = ht.get_required(query, 'node_name')
url = ht.get_required(query, 'url')
real_node_name = ht.get_required(query, 'real_node_name')
fingerprint = ht.get_optional(query, 'fingerprint')
conn = db.connect(config.db_path)
try:
c = db.cursor(conn)
db.set_node_addr(c, user_id, session_id, node_name, url, real_node_name, fingerprint)
db.commit(conn)
raise ht.ok_json({'status' : 'ok'})
except ex.SqueakException as e:
raise ht.convert_squeak_exception(e)
finally:
db.close(conn)
def get_handler(environ):
query = ht.parse_get_request(environ)
cookies = ht.parse_cookies(environ)
user_id = ht.get_required_cookie(cookies, 'user_id')
session_id = ht.get_required_cookie(cookies, 'session_id')
node_name = ht.get_required(query, 'node_name')
conn = db.connect(config.db_path)
try:
c = db.cursor(conn)
addr = db.read_node_addr(c, user_id, session_id, node_name)
raise ht.ok_json({'status' : 'ok', 'addr' : addr})
except ex.SqueakException as e:
raise ht.convert_squeak_exception(e)
finally:
db.close(conn)
def delete_handler(environ):
query = ht.parse_post_request(environ)
cookies = ht.parse_cookies(environ)
user_id = ht.get_required_cookie(cookies, 'user_id')
session_id = ht.get_required_cookie(cookies, 'session_id')
node_name = ht.get_required(query, 'node_name')
conn = db.connect(config.db_path)
try:
c = db.cursor(conn)
db.delete_node_addr(c, user_id, session_id, node_name)
db.commit(conn)
raise ht.ok_json({'status' : 'ok'})
except ex.SqueakException as e:
raise ht.convert_squeak_exception(e)
finally:
db.close(conn)
def main_handler(environ):
ht.dispatch_on_method(environ, {
'POST' : post_handler,
'GET' : get_handler,
'DELETE' : delete_handler})
def application(environ, start_response):
return ht.respond_with_handler(environ, start_response, main_handler)
|
eek6/squeakspace
|
www/proxy/scripts/local/node_addr.py
|
Python
|
gpl-3.0
| 2,510 | 0.004382 |
from .pathutils import grep_r
from . import project
import os
import re
def is_partial(path):
'''Check if file is a Sass partial'''
return os.path.basename(path).startswith('_')
def partial_import_regex(partial):
'''Get name of Sass partial file as would be used for @import'''
def from_curdir(cwd):
relpath = os.path.relpath(partial, cwd)
dirname, basename = os.path.split(relpath)
name = os.path.splitext(basename)[0][1:]
partial_import = os.path.join(dirname, name).replace("\\","/")
import_stmt = re.compile('''@import\s+['"]{0}['"]'''.format(partial_import))
return import_stmt
return from_curdir
def get_rec(file_path, start, files=None, partials=None):
'''
Recursively find files importing `partial` in `start` and if any are partials
themselves, find those importing them.
'''
if files is None:
files = []
if partials is None:
partials = []
if not is_partial(file_path):
files.append(file_path)
return (files, partials)
else:
partials.append(file_path)
partial_fn = partial_import_regex(os.path.join(start, file_path))
for f in grep_r(partial_fn, start, exts=['.sass','.scss']):
if f not in files and f not in partials:
files, partials = get_rec(f, start, files, partials)
return (files, partials)
def get(path):
'''Get files affected by change in contents of `path`'''
rel, root = project.splitpath(path)
deps, _ = get_rec(rel, root)
return (deps, root)
|
blitzrk/sublime_libsass
|
lib/deps.py
|
Python
|
mit
| 1,567 | 0.003191 |
#
# Copyright (C) 2014 National Institute For Space Research (INPE) - Brazil.
#
# This file is part of Python Client API for Web Time Series Service.
#
# Web Time Series Service for Python is free software: you can
# redistribute it and/or modify it under the terms of the
# GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# Web Time Series Service for Python is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Web Time Series Service for Python. See LICENSE. If not, write to
# e-sensing team at <esensing-team@dpi.inpe.br>.
#
"""Python Client API for Web Time Series Services (WTSS)."""
from .wtss import wtss
from .wtss import time_series
|
e-sensing/wtss.py
|
src/wtss/__init__.py
|
Python
|
lgpl-3.0
| 1,040 | 0.000962 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Subscription.frequency'
db.add_column('billing_subscription', 'frequency',
self.gf('django.db.models.fields.CharField')(default='MONTHLY', max_length=10),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Subscription.frequency'
db.delete_column('billing_subscription', 'frequency')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'billing.subscription': {
'Meta': {'object_name': 'Subscription'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'MONTHLY'", 'max_length': '10'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'billing.usersubscription': {
'Meta': {'object_name': 'UserSubscription'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['billing.Subscription']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['billing']
|
artminster/artminster
|
contrib/billing/migrations/0002_auto__add_field_subscription_frequency.py
|
Python
|
mit
| 5,583 | 0.008239 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import gi
gi.require_version('Gtk', '3.0')
import sys
import pygame
from gi.repository import Gtk
from sugar3.activity.activity import Activity
from sugar3.graphics.toolbarbox import ToolbarBox
from sugar3.activity.widgets import ActivityToolbarButton
from sugar3.graphics.toolbutton import ToolButton
from sugar3.activity.widgets import StopButton
from sugar3.graphics.objectchooser import ObjectChooser
from gettext import gettext as _
import sugargame.canvas
import conozco
from points_list import Data
from save_util import save, fixValues
class IknowEditor(Activity):
def __init__(self, handle):
Activity.__init__(self, handle)
self.init_vars()
self.build_toolbar()
self.actividad = conozco.Conozco(self)
self.build_canvas()
self.run_canvas()
self.show_all()
def init_vars(self):
self._image = None
def build_toolbar(self):
self.max_participants = 1
toolbar_box = ToolbarBox()
self.set_toolbar_box(toolbar_box)
toolbar_box.show()
activity_button = ActivityToolbarButton(self)
toolbar_box.toolbar.insert(activity_button, -1)
activity_button.show()
# new pic button
new_pic = ToolButton('new-pic')
new_pic.connect('clicked', self._new_picture)
new_pic.set_tooltip(_('New picture'))
toolbar_box.toolbar.insert(new_pic, -1)
# add / remove point buttons
add_point = ToolButton("row-insert")
add_point.connect("clicked", self._add_point)
add_point.set_tooltip(_("Add a point"))
toolbar_box.toolbar.insert(add_point, -1)
rem_point = ToolButton("row-remove")
rem_point.connect("clicked", self._remove_point)
rem_point.set_tooltip(_("Remove the selected point"))
toolbar_box.toolbar.insert(rem_point, -1)
# save list button
save = ToolButton('filesave')
save.connect('clicked', self._save)
save.set_tooltip(_('Save data'))
toolbar_box.toolbar.insert(save, -1)
# separator and stop button
separator = Gtk.SeparatorToolItem()
separator.props.draw = False
separator.set_expand(True)
toolbar_box.toolbar.insert(separator, -1)
separator.show()
stop_button = StopButton(self)
toolbar_box.toolbar.insert(stop_button, -1)
stop_button.show()
def build_canvas(self):
self.table = Gtk.Table(1, 2, False)
self.box1 = Gtk.HBox()
self.box1.set_size_request(350, 350)
self.box1.show()
self.box2 = Gtk.HBox()
self.box2.set_size_request(50, 200)
self.box2.show()
self.table.attach(self.box1, 0, 1, 0, 1)
self.table.attach(self.box2, 1, 2, 0, 1)
self.labels_and_values = Data(self)
self.labels_and_values.connect("some-changed", self._some_changed)
self.box2.add(self.labels_and_values)
self.set_canvas(self.table)
def run_canvas(self):
self.actividad.canvas = sugargame.canvas.PygameCanvas(self,
main=self.actividad.run,
modules=[pygame.display, pygame.font])
self.box1.add(self.actividad.canvas)
self.actividad.canvas.grab_focus()
def _save(self, widget):
l = self.labels_and_values.get_info()
scale = self.actividad.getScale()
shiftx = self.actividad.getShiftX()
shifty = self.actividad.getShiftY()
ready = fixValues(l, scale, shiftx, shifty)
save(ready)
def _new_picture(self, widget):
try:
chooser = ObjectChooser(parent=self)
except:
chooser = None
f = None
if chooser is not None:
result = chooser.run()
if result == Gtk.ResponseType.ACCEPT:
dsobject = chooser.get_selected_object()
f = dsobject.file_path
if f is not None:
self._image = pygame.image.load(f)
self.actividad.set_background(self._image)
def _add_point(self, widget, label="", value="City", dx='0', dy='-14'):
pos = self.labels_and_values.add_value(label, value, dx, dy)
def _remove_point(self, widget):
path = self.labels_and_values.remove_selected_value()
self._update_points()
def _add_coor(self, pos):
if self._image is not None:
self.labels_and_values.update_selected_value(pos)
def _some_changed(self, treeview, path, new_label):
self._update_points()
def _update_points(self):
l = self.labels_and_values.get_info()
self.actividad.update_points(l)
|
AlanJAS/iknowEditor
|
activity.py
|
Python
|
gpl-3.0
| 4,743 | 0.004217 |
import unittest
import os
from PIL import Image
from SUASSystem.utils import crop_target
class SUASSystemUtilsDataFunctionsTestCase(unittest.TestCase):
def test_crop_image(self):
"""
Test the crop image method.
"""
input_image_path = "tests/images/image2_test_image_bounder.jpg"
output_crop_image_path = "tests/images/test_crop.jpg"
top_left_coords = [250.0, 200.0]
bottom_right_coords = [350.0, 300.0]
crop_target(input_image_path, output_crop_image_path, top_left_coords, bottom_right_coords)
saved_crop = Image.open(output_crop_image_path).load()
input_image = Image.open(input_image_path).load()
self.assertEqual(saved_crop[0, 0], input_image[250, 200])
self.assertEqual(saved_crop[1, 1], input_image[251, 201])
self.assertEqual(saved_crop[50, 50], input_image[300, 250])
self.assertEqual(saved_crop[99, 99], input_image[349, 299])
os.remove("tests/images/test_crop.jpg")
|
FlintHill/SUAS-Competition
|
tests/unit_tests/test_suassystem_utils_data_functions.py
|
Python
|
mit
| 1,006 | 0.001988 |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Hook scripts handling"""
import os.path
import subprocess
from weblate.trans.util import get_clean_env
def get_script_name(name):
'''
Returns script name from string possibly containing full path and
parameters.
'''
return os.path.basename(name).split()[0]
def run_post_push_script(component):
"""Run post push hook"""
run_hook(component, component.post_push_script)
def run_post_update_script(component):
"""Run post update hook"""
run_hook(component, component.post_update_script)
def run_pre_commit_script(component, filename):
"""
Pre commit hook
"""
run_hook(component, component.pre_commit_script, filename)
def run_post_commit_script(component, filename):
"""
Post commit hook
"""
run_hook(component, component.post_commit_script, filename)
def run_hook(component, script, *args):
"""
Generic script hook executor.
"""
if script:
command = [script]
if args:
command.extend(args)
environment = get_clean_env()
if component.is_repo_link:
target = component.linked_subproject
else:
target = component
environment['WL_VCS'] = target.vcs
environment['WL_REPO'] = target.repo
environment['WL_PATH'] = target.get_path()
environment['WL_FILEMASK'] = component.filemask
environment['WL_FILE_FORMAT'] = component.file_format
try:
subprocess.check_call(
command,
env=environment,
cwd=component.get_path(),
)
return True
except (OSError, subprocess.CalledProcessError) as err:
component.log_error(
'failed to run hook script %s: %s',
script,
err
)
return False
|
leohmoraes/weblate
|
weblate/trans/scripts.py
|
Python
|
gpl-3.0
| 2,637 | 0 |
import sys
#line = sys.stdin.read()
#print line
datas = []
for line in sys.stdin:
datas.append(line)
print datas
|
BizShuk/code_sandbox
|
python/raw_input_test.py
|
Python
|
mit
| 120 | 0.016667 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
def add(x, y):
a=1
while a>0:
a = x & y
b = x ^ y
x = b
y = a << 1
return b
def vowel_count(word):
vowels_counter = 0
for letter in word:
if letter.isalpha():
if letter.upper() in 'AEIOUY':
vowels_counter += 1
return vowels_counter
if __name__ == '__main__':
# Assignment N 1
text="Proin eget tortor risus. Cras ultricies ligula sed magna dictum porta. Proin eget tortor risus. Curabitur non nulla sit amet nisl tempus convallis quis ac lectus. Donec rutrum congue leo eget malesuada."
list=text.split()
max_vowel_number=0
for i in range(0,len(list)-1):
print "word=",list[i]," number of vowels",vowel_count(list[i])
if vowel_count(list[i])>max_vowel_number:
max_vowel_number=vowel_count(list[i])
print "Maximum number of vowels is",max_vowel_number
# Assignment N 2
text="Proin eget tortor risus. Cras ultricies ligula sed magna dictum porta. Proin eget tortor risus. Curabitur non nulla sit amet nisl tempus convallis quis ac lectus. Donec rutrum congue leo eget malesuada."
list=text.split()
length=len(list[0])
words=[]
words.append(list[0])
for i in range(1,len(list)-1):
if length<len(list[i]):
length=len(list[i])
words[:] = []
words.append(list[i])
elif length==len(list[i]):
words.append(list[i])
print "maximum length=",length,"words are",words
# Assignment N 3
text="Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla quis lorem ut libero malesuada feugiat. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec rutrum congue leo eget malesuada. Cras ultricies ligula sed magna dictum porta."
list=text.split()
i=len(text)-1
mirrored_text=''
while i>=0:
mirrored_text=mirrored_text+(text[i])
i-=1
print mirrored_text
# Assignment N 4
import os
content=dir(os)
content_len=len(content)
for k in range(0,content_len-1):
s="os"+"."+content[k]+".__doc__"
print(eval(s))
import sys
content=dir(sys)
content_len=len(content)
for k in range(0,content_len-1):
s="sys"+"."+content[k]+".__doc__"
print(eval(s))
# Assignment N 5
input=12345
a=str(input)
str_len=len(a)
i=0
total=int(a[i])
while i<str_len-1:
total=add(total,int(a[add(i,1)]))
i=add(i,1)
print total
|
pybursa/homeworks
|
a_lusher/hw3/Lusher_Alexander_home_work_3_.py
|
Python
|
gpl-2.0
| 2,380 | 0.059714 |
# Copyright 22011 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""All the interfaces that are exposed through the webservice.
There is a declaration in ZCML somewhere that looks like:
<webservice:register module="lp.patchwebservice" />
which tells `lazr.restful` that it should look for webservice exports here.
"""
__metaclass__ = type
__all__ = [
'ITemporaryBlobStorage',
'ITemporaryStorageManager',
]
from lp.services.temporaryblobstorage.interfaces import (
ITemporaryBlobStorage,
ITemporaryStorageManager,
)
from lp.services.webservice.apihelpers import (
patch_operations_explicit_version,
)
# ITemporaryBlobStorage
patch_operations_explicit_version(
ITemporaryBlobStorage, 'beta', "getProcessedData", "hasBeenProcessed")
# ITemporaryStorageManager
patch_operations_explicit_version(
ITemporaryStorageManager, 'beta', "fetch")
|
abramhindle/UnnaturalCodeFork
|
python/testdata/launchpad/lib/lp/services/temporaryblobstorage/webservice.py
|
Python
|
agpl-3.0
| 959 | 0 |
# Copyright: Ankitects Pty Ltd and contributors
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
import anki.lang
import aqt
from aqt import AnkiQt
from aqt.profiles import RecordingDriver, VideoDriver
from aqt.qt import *
from aqt.utils import (
TR,
HelpPage,
disable_help_button,
openHelp,
showInfo,
showWarning,
tr,
)
def video_driver_name_for_platform(driver: VideoDriver) -> str:
if driver == VideoDriver.ANGLE:
return tr(TR.PREFERENCES_VIDEO_DRIVER_ANGLE)
elif driver == VideoDriver.Software:
if isMac:
return tr(TR.PREFERENCES_VIDEO_DRIVER_SOFTWARE_MAC)
else:
return tr(TR.PREFERENCES_VIDEO_DRIVER_SOFTWARE_OTHER)
else:
if isMac:
return tr(TR.PREFERENCES_VIDEO_DRIVER_OPENGL_MAC)
else:
return tr(TR.PREFERENCES_VIDEO_DRIVER_OPENGL_OTHER)
class Preferences(QDialog):
def __init__(self, mw: AnkiQt) -> None:
QDialog.__init__(self, mw, Qt.Window)
self.mw = mw
self.prof = self.mw.pm.profile
self.form = aqt.forms.preferences.Ui_Preferences()
self.form.setupUi(self)
disable_help_button(self)
self.form.buttonBox.button(QDialogButtonBox.Help).setAutoDefault(False)
self.form.buttonBox.button(QDialogButtonBox.Close).setAutoDefault(False)
qconnect(
self.form.buttonBox.helpRequested, lambda: openHelp(HelpPage.PREFERENCES)
)
self.silentlyClose = True
self.prefs = self.mw.col.get_preferences()
self.setupLang()
self.setupCollection()
self.setupNetwork()
self.setupBackup()
self.setupOptions()
self.show()
def accept(self) -> None:
# avoid exception if main window is already closed
if not self.mw.col:
return
self.updateCollection()
self.updateNetwork()
self.updateBackup()
self.updateOptions()
self.mw.pm.save()
self.mw.reset()
self.done(0)
aqt.dialogs.markClosed("Preferences")
def reject(self) -> None:
self.accept()
# Language
######################################################################
def setupLang(self) -> None:
f = self.form
f.lang.addItems([x[0] for x in anki.lang.langs])
f.lang.setCurrentIndex(self.langIdx())
qconnect(f.lang.currentIndexChanged, self.onLangIdxChanged)
def langIdx(self) -> int:
codes = [x[1] for x in anki.lang.langs]
lang = anki.lang.currentLang
if lang in anki.lang.compatMap:
lang = anki.lang.compatMap[lang]
else:
lang = lang.replace("-", "_")
try:
return codes.index(lang)
except:
return codes.index("en_US")
def onLangIdxChanged(self, idx: int) -> None:
code = anki.lang.langs[idx][1]
self.mw.pm.setLang(code)
showInfo(
tr(TR.PREFERENCES_PLEASE_RESTART_ANKI_TO_COMPLETE_LANGUAGE), parent=self
)
# Collection options
######################################################################
def setupCollection(self) -> None:
import anki.consts as c
f = self.form
qc = self.mw.col.conf
self.setup_video_driver()
f.newSpread.addItems(list(c.newCardSchedulingLabels(self.mw.col).values()))
f.useCurrent.setCurrentIndex(int(not qc.get("addToCur", True)))
s = self.prefs.sched
f.lrnCutoff.setValue(int(s.learn_ahead_secs / 60.0))
f.timeLimit.setValue(int(s.time_limit_secs / 60.0))
f.showEstimates.setChecked(s.show_intervals_on_buttons)
f.showProgress.setChecked(s.show_remaining_due_counts)
f.newSpread.setCurrentIndex(s.new_review_mix)
f.dayLearnFirst.setChecked(s.day_learn_first)
f.dayOffset.setValue(s.rollover)
if s.scheduler_version < 2:
f.dayLearnFirst.setVisible(False)
f.legacy_timezone.setVisible(False)
else:
f.legacy_timezone.setChecked(not s.new_timezone)
def setup_video_driver(self) -> None:
self.video_drivers = VideoDriver.all_for_platform()
names = [
tr(TR.PREFERENCES_VIDEO_DRIVER, driver=video_driver_name_for_platform(d))
for d in self.video_drivers
]
self.form.video_driver.addItems(names)
self.form.video_driver.setCurrentIndex(
self.video_drivers.index(self.mw.pm.video_driver())
)
def update_video_driver(self) -> None:
new_driver = self.video_drivers[self.form.video_driver.currentIndex()]
if new_driver != self.mw.pm.video_driver():
self.mw.pm.set_video_driver(new_driver)
showInfo(tr(TR.PREFERENCES_CHANGES_WILL_TAKE_EFFECT_WHEN_YOU))
def updateCollection(self) -> None:
f = self.form
d = self.mw.col
self.update_video_driver()
qc = d.conf
qc["addToCur"] = not f.useCurrent.currentIndex()
s = self.prefs.sched
s.show_remaining_due_counts = f.showProgress.isChecked()
s.show_intervals_on_buttons = f.showEstimates.isChecked()
s.new_review_mix = f.newSpread.currentIndex()
s.time_limit_secs = f.timeLimit.value() * 60
s.learn_ahead_secs = f.lrnCutoff.value() * 60
s.day_learn_first = f.dayLearnFirst.isChecked()
s.rollover = f.dayOffset.value()
s.new_timezone = not f.legacy_timezone.isChecked()
self.mw.col.set_preferences(self.prefs)
d.setMod()
# Network
######################################################################
def setupNetwork(self) -> None:
self.form.media_log.setText(tr(TR.SYNC_MEDIA_LOG_BUTTON))
qconnect(self.form.media_log.clicked, self.on_media_log)
self.form.syncOnProgramOpen.setChecked(self.prof["autoSync"])
self.form.syncMedia.setChecked(self.prof["syncMedia"])
self.form.autoSyncMedia.setChecked(self.mw.pm.auto_sync_media_minutes() != 0)
if not self.prof["syncKey"]:
self._hideAuth()
else:
self.form.syncUser.setText(self.prof.get("syncUser", ""))
qconnect(self.form.syncDeauth.clicked, self.onSyncDeauth)
self.form.syncDeauth.setText(tr(TR.SYNC_LOG_OUT_BUTTON))
def on_media_log(self) -> None:
self.mw.media_syncer.show_sync_log()
def _hideAuth(self) -> None:
self.form.syncDeauth.setVisible(False)
self.form.syncUser.setText("")
self.form.syncLabel.setText(
tr(TR.PREFERENCES_SYNCHRONIZATIONNOT_CURRENTLY_ENABLED_CLICK_THE_SYNC)
)
def onSyncDeauth(self) -> None:
if self.mw.media_syncer.is_syncing():
showWarning("Can't log out while sync in progress.")
return
self.prof["syncKey"] = None
self.mw.col.media.force_resync()
self._hideAuth()
def updateNetwork(self) -> None:
self.prof["autoSync"] = self.form.syncOnProgramOpen.isChecked()
self.prof["syncMedia"] = self.form.syncMedia.isChecked()
self.mw.pm.set_auto_sync_media_minutes(
self.form.autoSyncMedia.isChecked() and 15 or 0
)
if self.form.fullSync.isChecked():
self.mw.col.modSchema(check=False)
self.mw.col.setMod()
# Backup
######################################################################
def setupBackup(self) -> None:
self.form.numBackups.setValue(self.prof["numBackups"])
def updateBackup(self) -> None:
self.prof["numBackups"] = self.form.numBackups.value()
# Basic & Advanced Options
######################################################################
def setupOptions(self) -> None:
self.form.pastePNG.setChecked(self.prof.get("pastePNG", False))
self.form.uiScale.setValue(int(self.mw.pm.uiScale() * 100))
self.form.pasteInvert.setChecked(self.prof.get("pasteInvert", False))
self.form.showPlayButtons.setChecked(self.prof.get("showPlayButtons", True))
self.form.nightMode.setChecked(self.mw.pm.night_mode())
self.form.interrupt_audio.setChecked(self.mw.pm.interrupt_audio())
self._recording_drivers = [
RecordingDriver.QtAudioInput,
RecordingDriver.PyAudio,
]
# The plan is to phase out PyAudio soon, so will hold off on
# making this string translatable for now.
self.form.recording_driver.addItems(
[
f"Voice recording driver: {driver.value}"
for driver in self._recording_drivers
]
)
self.form.recording_driver.setCurrentIndex(
self._recording_drivers.index(self.mw.pm.recording_driver())
)
def updateOptions(self) -> None:
restart_required = False
self.prof["pastePNG"] = self.form.pastePNG.isChecked()
self.prof["pasteInvert"] = self.form.pasteInvert.isChecked()
newScale = self.form.uiScale.value() / 100
if newScale != self.mw.pm.uiScale():
self.mw.pm.setUiScale(newScale)
restart_required = True
self.prof["showPlayButtons"] = self.form.showPlayButtons.isChecked()
if self.mw.pm.night_mode() != self.form.nightMode.isChecked():
self.mw.pm.set_night_mode(not self.mw.pm.night_mode())
restart_required = True
self.mw.pm.set_interrupt_audio(self.form.interrupt_audio.isChecked())
new_audio_driver = self._recording_drivers[
self.form.recording_driver.currentIndex()
]
if self.mw.pm.recording_driver() != new_audio_driver:
self.mw.pm.set_recording_driver(new_audio_driver)
if new_audio_driver == RecordingDriver.PyAudio:
showInfo(
"""\
The PyAudio driver will likely be removed in a future update. If you find it works better \
for you than the default driver, please let us know on the Anki forums."""
)
if restart_required:
showInfo(tr(TR.PREFERENCES_CHANGES_WILL_TAKE_EFFECT_WHEN_YOU))
|
simgunz/anki
|
qt/aqt/preferences.py
|
Python
|
agpl-3.0
| 10,181 | 0.000982 |
"""Custom urls.py for django-registration."""
from django.conf import settings
from django.conf.urls import include, url
from django.views.generic import TemplateView
from registration.backends.default.views import (
ActivationView,
RegistrationView,
)
from registration_email.forms import EmailRegistrationForm
urlpatterns = [
# django-registration views
url(r'^activate/complete/$',
TemplateView.as_view(
template_name='registration/activation_complete.html'),
name='registration_activation_complete'),
url(r'^activate/(?P<activation_key>\w+)/$',
ActivationView.as_view(
template_name='registration/activate.html',
get_success_url=getattr(
settings, 'REGISTRATION_EMAIL_ACTIVATE_SUCCESS_URL',
lambda request, user: '/'),
),
name='registration_activate'),
url(r'^register/$',
RegistrationView.as_view(
form_class=EmailRegistrationForm,
get_success_url=getattr(
settings, 'REGISTRATION_EMAIL_REGISTER_SUCCESS_URL',
lambda request, user: '/'),
),
name='registration_register'),
url(r'^register/complete/$',
TemplateView.as_view(
template_name='registration/registration_complete.html'),
name='registration_complete'),
url(r'^register/closed/$',
TemplateView.as_view(
template_name='registration/registration_closed.html'),
name='registration_disallowed'),
# django auth urls
url(r'', include('registration_email.auth_urls')),
]
|
bitmazk/django-registration-email
|
registration_email/backends/default/urls.py
|
Python
|
unlicense
| 1,615 | 0 |
#!/usr/bin/env python
import json
DEBUG = False
import sys
import tweepy
import time
#consumer_key = 'HcMP89vDDumRhHeQBYbE3Asnp'
#consumer_secret = 'kcXfsNyBl7tan1u2DgV7E10MpsVxhbwTjmbjp3YL9XfDdMJiYt'
#access_key = '67882386-IXbLKaQEtTbZF9yotuLTjgitqjwBkouIstmlW4ecG'
#access_secret = 'SyVrXlIDkidYr3JlNiTQ8tjZ973gIKy5mfpEwFpQWN3Gy'
consumer_key = 'Mcof8aJtJVDqQwz4OMDn2AyZu'
consumer_secret = 'mjsHber2Gj79uc2unbzSRdwGyNyZGjEPBEn4ZHXQZW8FeGeSkv'
access_key = '833745600743079936-hK2K3umAtnfYYuLGLDwD7uzj9ssPCDU'
access_secret = '2Odz7Cky2gb3dZJsO1E65zNL8i84ZnoxLrM9uihSEDb6M'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth)
class CustomStreamListener(tweepy.StreamListener):
def __init__(self, data_dir):
# query_fname = format_filename(query)
time_now = time.strftime("%Y-%m-%d_%H.%M.%S")
self.outfile = "%s/stream_%s.json" % (data_dir, time_now)
def on_data(self, data):
try:
with open(self.outfile, 'a') as f:
f.write(data)
print(data)
return True
except BaseException as e:
print("Error on_data: %s" % str(e))
time.sleep(5)
return True
def on_error(self, status_code):
print >> sys.stderr, 'Encountered error with status code:', status_code
return True # Don't kill the stream
def on_timeout(self):
print >> sys.stderr, 'Timeout...'
return True # Don't kill the stream
# run the code with try to handle the exception
try:
sapi = tweepy.streaming.Stream(auth, CustomStreamListener('twitter-data'))
sapi.filter(track=["transjakarta", "trans jakarta", "bus way", "busway"], languages=["in"])
except:
pass
|
gtrdp/twitter-clustering
|
crawling/crawl.py
|
Python
|
mit
| 1,795 | 0.007242 |
# coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2016
"""
Publish and subscribe to MQTT messages.
Additional information at http://mqtt.org and
http://ibmstreams.github.io/streamsx.messaging
"""
from future.builtins import *
from streamsx.topology.topology import *
from streamsx.topology import schema
class MqttStreams(object):
"""
A simple connector to a MQTT broker for publishing
string tuples to MQTT topics, and
subscribing to MQTT topics and creating streams.
A connector is for a specific MQTT Broker as specified in
the configuration object config. Any number of publish()and subscribe()
connections may be created from a single mqtt_streams connector.
Sample use:
::
topo = Topology("An MQTT application")
# define configuration information
config = {}
config['clientID'] = "test_MQTTpublishClient"
config['qos'] = int("1") #(needs to be int vs long)
config['keepAliveInterval'] = int(20) (needs to be int vs long)
config['commandTimeout'] = 30000 (needs to be int vs long)
config['period'] = 5000 (needs to be int vs long)
config['messageQueueSize'] = 10 (needs to be int vs long)
config['reconnectionBound'] = int(20)
config['retain'] = True
config['password'] = "foobar"
config['trustStore'] = "/tmp/no-such-trustStore"
config['trustStorePassword'] = "woohoo"
config['keyStore'] = "/tmp/no-such-keyStore"
config['keyStorePassword'] = "woohoo"
# create the connector's configuration property map
config['serverURI'] = "tcp://localhost:1883"
config['userID'] = "user1id"
config[' password'] = "user1passwrd"
# create the connector
mqstream = MqttStreams(topo, config)
# publish a python source stream to the topic "python.topic1"
topic = "python.topic1"
src = topo.source(test_functions.mqtt_publish)
mqs = mqstream.publish(src, topic)
# subscribe to the topic "python.topic1"
topic = ["python.topic1", ]
mqs = mqstream.subscribe(topic)
mqs.print()
Configuration properties apply to publish and
subscribe unless stated otherwise.
serverURI
Required String. URI to the MQTT server, either
tcp://<hostid>[:<port>]}
or ssl://<hostid>[:<port>]}.
The port defaults to 1883 for "tcp:" and 8883 for "ssl:" URIs.
clientID
Optional String. A unique identifier for a connection
to the MQTT server.
he MQTT broker only allows a single
onnection for a particular clientID.
By default a unique client ID is automatically
generated for each use of publish() and subscribe().
The specified clientID is used for the first
publish() or subscribe() use and
suffix is added for each subsequent uses.
keepAliveInterval
Optional Integer. Automatically generate a MQTT
ping message to the server if a message or ping hasn't been
sent or received in the last keelAliveInterval seconds.
Enables the client to detect if the server is no longer available
without having to wait for the TCP/IP timeout.
A value of 0 disables keepalive processing.
The default is 60.
commandTimeout
Optional Long. The maximum time in milliseconds
to wait for a MQTT connect or publish action to complete.
A value of 0 causes the client to wait indefinitely.
The default is 0.
period
Optional Long. The time in milliseconds before
attempting to reconnect to the server following a connection failure.
The default is 60000.
userID
Optional String. The identifier to use when authenticating
with a server configured to require that form of authentication.
password
Optional String. The identifier to use when authenticating
with server configured to require that form of authentication.
trustStore
Optional String. The pathname to a file containing the
public certificate of trusted MQTT servers. If a relative path
is specified, the path is relative to the application directory.
Required when connecting to a MQTT server with an
ssl:/... serverURI.
trustStorePassword
Required String when trustStore is used.
The password needed to access the encrypted trustStore file.
keyStore
Optional String. The pathname to a file containing the
MQTT client's public private key certificates.
If a relative path is specified, the path is relative to the
application directory.
Required when an MQTT server is configured to use SSL client authentication.
keyStorePassword
Required String when keyStore is used.
The password needed to access the encrypted keyStore file.
messageQueueSize
[subscribe] Optional Integer. The size, in number
of messages, of the subscriber's internal receive buffer. Received
messages are added to the buffer prior to being converted to a
stream tuple. The receiver blocks when the buffer is full.
The default is 50.
retain
[publish] Optional Boolean. Indicates if messages should be
retained on the MQTT server. Default is false.
qos
Optional Integer. The default
MQTT quality of service used for message handling.
The default is 0.
"""
def __init__(self, topology, config):
self.topology = topology
self.config = config.copy()
self.opCnt = 0
def publish(self, pub_stream, topic):
parms = self.config.copy()
parms['topic'] = topic
parms['dataAttributeName'] = "string"
if (++self.opCnt > 1):
# each op requires its own clientID
clientId = parms['clientID']
if (clientId is not None and len(clientId) > 0):
parms['clientID'] = clientId + "-" + str(id(self)) + "-" + str(self.opCnt)
# convert pub_stream outputport schema from spl po to spl rstring type
forOp = pub_stream._map(streamsx.topology.functions.identity, schema.CommonSchema.String)
op = self.topology.graph.addOperator(kind="com.ibm.streamsx.messaging.mqtt::MQTTSink")
op.addInputPort(outputPort=forOp.oport)
op.setParameters(parms)
return None
def subscribe(self, topic):
parms = self.config.copy()
if (parms['retain'] is not None):
del parms['retain']
parms['topics'] = topic
parms['topicOutAttrName'] = "topic"
parms['dataAttributeName'] = "string"
if (++self.opCnt > 1):
# each op requires its own clientID
clientId = parms['clientID']
if (clientId is not None and len(clientId) > 0):
parms['clientID'] = clientId + "-" + str(id(self)) + "-" + str(self.opCnt)
op = self.topology.graph.addOperator(kind="com.ibm.streamsx.messaging.mqtt::MQTTSource")
oport = op.addOutputPort(schema=schema.StreamSchema("tuple<rstring topic, rstring string>"))
op.setParameters(parms)
pop = self.topology.graph.addPassThruOperator()
pop.addInputPort(outputPort=oport)
pOport = pop.addOutputPort(schema=schema.CommonSchema.String)
return Stream(self.topology, pOport)
|
wmarshall484/streamsx.topology
|
com.ibm.streamsx.topology/opt/python/packages/streamsx/topology/mqtt.py
|
Python
|
apache-2.0
| 7,558 | 0.003705 |
def fat(n):
result = 1
while n > 0:
result = result * n
n = n - 1
return result
# testes
print("Fatorial de 3: ", fat(3));
|
Gigers/data-struct
|
algoritimos/Python/fatorial-while.py
|
Python
|
bsd-2-clause
| 158 | 0.012658 |
from datetime import datetime
def days_diff(date1, date2):
"""
Find absolute diff in days between dates
"""
days = datetime(*date1) - datetime(*date2)
print abs(days)
return abs(days.days)
if __name__ == '__main__':
# These "asserts" using only for self-checking and not necessary for auto-testing
assert days_diff((1982, 4, 19), (1982, 4, 22)) == 3
assert days_diff((2014, 1, 1), (2014, 8, 27)) == 238
assert days_diff((2014, 8, 27), (2014, 1, 1)) == 238
|
Dani4kor/Checkio
|
days-diff.py
|
Python
|
mit
| 504 | 0.003968 |
import sys
import requests
try:
from .helper import *
except SystemError:
from helper import *
def compareRequestsAndSelenium(url):
html1 = str(requests.get(url).text)
try:
driver = webdriver.Firefox()
driver.maximize_window()
driver.get(url)
html2 = str(driver.page_source)
finally:
driver.close()
view_diff(url, html1, html2)
# url = 'http://www.healthgrades.com/physician/dr-jeannine-villella-y4jts'
# compareRequestsAndSelenium(url)
# url = 'https://www.betterdoctor.com/wendy-tcheng'
# compareRequestsAndSelenium(url)
if __name__ == '__main__':
compareRequestsAndSelenium(sys.argv[1])
|
bgarrels/sky
|
sky/legacy/comparison.py
|
Python
|
bsd-3-clause
| 685 | 0.014599 |
"""
This module contains several handy functions primarily meant for internal use.
"""
from datetime import date, datetime, timedelta
from time import mktime
import re
import sys
from types import MethodType
__all__ = ('asint', 'asbool', 'convert_to_datetime', 'timedelta_seconds',
'time_difference', 'datetime_ceil', 'combine_opts',
'get_callable_name', 'obj_to_ref', 'ref_to_obj', 'maybe_ref',
'to_unicode', 'iteritems', 'itervalues', 'xrange')
def asint(text):
"""
Safely converts a string to an integer, returning None if the string
is None.
:type text: str
:rtype: int
"""
if text is not None:
return int(text)
def asbool(obj):
"""
Interprets an object as a boolean value.
:rtype: bool
"""
if isinstance(obj, str):
obj = obj.strip().lower()
if obj in ('true', 'yes', 'on', 'y', 't', '1'):
return True
if obj in ('false', 'no', 'off', 'n', 'f', '0'):
return False
raise ValueError('Unable to interpret value "%s" as boolean' % obj)
return bool(obj)
_DATE_REGEX = re.compile(
r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
r'(?: (?P<hour>\d{1,2}):(?P<minute>\d{1,2}):(?P<second>\d{1,2})'
r'(?:\.(?P<microsecond>\d{1,6}))?)?')
def convert_to_datetime(input):
"""
Converts the given object to a datetime object, if possible.
If an actual datetime object is passed, it is returned unmodified.
If the input is a string, it is parsed as a datetime.
Date strings are accepted in three different forms: date only (Y-m-d),
date with time (Y-m-d H:M:S) or with date+time with microseconds
(Y-m-d H:M:S.micro).
:rtype: datetime
"""
if isinstance(input, datetime):
return input
elif isinstance(input, date):
return datetime.fromordinal(input.toordinal())
elif isinstance(input, str):
m = _DATE_REGEX.match(input)
if not m:
raise ValueError('Invalid date string')
values = [(k, int(v or 0)) for k, v in m.groupdict().items()]
values = dict(values)
return datetime(**values)
raise TypeError('Unsupported input type: %s' % type(input))
def timedelta_seconds(delta):
"""
Converts the given timedelta to seconds.
:type delta: timedelta
:rtype: float
"""
return delta.days * 24 * 60 * 60 + delta.seconds + \
delta.microseconds / 1000000.0
def time_difference(date1, date2):
"""
Returns the time difference in seconds between the given two
datetime objects. The difference is calculated as: date1 - date2.
:param date1: the later datetime
:type date1: datetime
:param date2: the earlier datetime
:type date2: datetime
:rtype: float
"""
later = mktime(date1.timetuple()) + date1.microsecond / 1000000.0
earlier = mktime(date2.timetuple()) + date2.microsecond / 1000000.0
return later - earlier
def datetime_ceil(dateval):
"""
Rounds the given datetime object upwards.
:type dateval: datetime
"""
if dateval.microsecond > 0:
return dateval + timedelta(seconds=1,
microseconds= -dateval.microsecond)
return dateval
def combine_opts(global_config, prefix, local_config={}):
"""
Returns a subdictionary from keys and values of ``global_config`` where
the key starts with the given prefix, combined with options from
local_config. The keys in the subdictionary have the prefix removed.
:type global_config: dict
:type prefix: str
:type local_config: dict
:rtype: dict
"""
prefixlen = len(prefix)
subconf = {}
for key, value in global_config.items():
if key.startswith(prefix):
key = key[prefixlen:]
subconf[key] = value
subconf.update(local_config)
return subconf
def get_callable_name(func):
"""
Returns the best available display name for the given function/callable.
"""
f_self = getattr(func, '__self__', None) or getattr(func, 'im_self', None)
if f_self and hasattr(func, '__name__'):
if isinstance(f_self, type):
# class method
return '%s.%s' % (f_self.__name__, func.__name__)
# bound method
return '%s.%s' % (f_self.__class__.__name__, func.__name__)
if hasattr(func, '__call__'):
if hasattr(func, '__name__'):
# function, unbound method or a class with a __call__ method
return func.__name__
# instance of a class with a __call__ method
return func.__class__.__name__
raise TypeError('Unable to determine a name for %s -- '
'maybe it is not a callable?' % repr(func))
def obj_to_ref(obj):
"""
Returns the path to the given object.
"""
ref = '%s:%s' % (obj.__module__, get_callable_name(obj))
try:
obj2 = ref_to_obj(ref)
if obj != obj2:
raise ValueError
except Exception:
raise ValueError('Cannot determine the reference to %s' % repr(obj))
return ref
def ref_to_obj(ref):
"""
Returns the object pointed to by ``ref``.
"""
if not isinstance(ref, basestring):
raise TypeError('References must be strings')
if not ':' in ref:
raise ValueError('Invalid reference')
modulename, rest = ref.split(':', 1)
try:
obj = __import__(modulename)
except ImportError:
raise LookupError('Error resolving reference %s: '
'could not import module' % ref)
try:
for name in modulename.split('.')[1:] + rest.split('.'):
obj = getattr(obj, name)
return obj
except Exception:
raise LookupError('Error resolving reference %s: '
'error looking up object' % ref)
def maybe_ref(ref):
"""
Returns the object that the given reference points to, if it is indeed
a reference. If it is not a reference, the object is returned as-is.
"""
if not isinstance(ref, str):
return ref
return ref_to_obj(ref)
def to_unicode(string, encoding='ascii'):
"""
Safely converts a string to a unicode representation on any
Python version.
"""
if hasattr(string, 'decode'):
return string.decode(encoding, 'ignore')
return string # pragma: nocover
if sys.version_info < (3, 0): # pragma: nocover
iteritems = lambda d: d.iteritems()
itervalues = lambda d: d.itervalues()
xrange = xrange
basestring = basestring
else: # pragma: nocover
iteritems = lambda d: d.items()
itervalues = lambda d: d.values()
xrange = range
basestring = str
|
ecdpalma/napscheduler
|
napscheduler/util.py
|
Python
|
mit
| 6,708 | 0.001044 |
# -*- coding: utf-8 -*-
# © <YEAR(S)> ClearCorp
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import account_move_line
|
ClearCorp/odoo-clearcorp
|
exchange_rate_calculated/models/__init__.py
|
Python
|
agpl-3.0
| 144 | 0 |
#! /usr/bin/env python
"""Read infofiles.
"""
import glob
import os, os.path
import sys
import threading
import time
import skytools
import cc.util
from cc import json
from cc.daemon import CCDaemon
from cc.message import is_msg_req_valid
from cc.reqs import InfofileMessage
class InfoStamp:
def __init__(self, fn, st):
self.filename = fn
self.filestat = st
self.modified = 1
def check_send(self, st):
if (st.st_mtime != self.filestat.st_mtime
or st.st_size != self.filestat.st_size):
# st changed, new mod
self.modified = 1
self.filestat = st
return 0
elif self.modified:
return 1
else:
return 0
class InfofileCollector(CCDaemon):
log = skytools.getLogger('d:InfofileCollector')
def reload(self):
super(InfofileCollector, self).reload()
self.infodir = self.cf.getfile('infodir')
self.infomask = self.cf.get('infomask')
self.compression = self.cf.get ('compression', 'none')
if self.compression not in (None, '', 'none', 'gzip', 'bzip2'):
self.log.error ("unknown compression: %s", self.compression)
self.compression_level = self.cf.getint ('compression-level', '')
self.maint_period = self.cf.getint ('maint-period', 60 * 60)
self.stats_period = self.cf.getint ('stats-period', 30)
self.msg_suffix = self.cf.get ('msg-suffix', '')
if self.msg_suffix and not is_msg_req_valid (self.msg_suffix):
self.log.error ("invalid msg-suffix: %s", self.msg_suffix)
self.msg_suffix = None
self.use_blob = self.cf.getbool ('use-blob', True)
def startup(self):
super(InfofileCollector, self).startup()
# fn -> stamp
self.infomap = {}
# activate periodic maintenance
self.do_maint()
def process_file(self, fs):
f = open(fs.filename, 'rb')
st = os.fstat(f.fileno())
if fs.check_send(st):
body = f.read()
if len(body) != st.st_size:
return
fs.modified = 0
self.log.debug('Sending: %s', fs.filename)
self.send_file(fs, body)
self.stat_inc('count')
f.close()
def send_file(self, fs, body):
cfb = cc.util.compress (body, self.compression, {'level': self.compression_level})
self.log.debug ("file compressed from %i to %i", len(body), len(cfb))
if self.use_blob:
data = ''
blob = cfb
else:
data = cfb.encode('base64')
blob = None
msg = InfofileMessage(
filename = fs.filename.replace('\\', '/'),
mtime = fs.filestat.st_mtime,
comp = self.compression,
data = data)
if self.msg_suffix:
msg.req += '.' + self.msg_suffix
self.ccpublish (msg, blob)
self.stat_inc ('infosender.bytes.read', len(body))
self.stat_inc ('infosender.bytes.sent', len(cfb))
def find_new(self):
fnlist = glob.glob (os.path.join (self.infodir, self.infomask))
newlist = []
for fn in fnlist:
try:
st = os.stat(fn)
except OSError, e:
self.log.info('%s: %s', fn, e)
continue
if fn not in self.infomap:
fstamp = InfoStamp(fn, st)
self.infomap[fn] = fstamp
else:
old = self.infomap[fn]
if old.check_send(st):
newlist.append(old)
self.log.debug ("files found - all: %i, new: %i", len(fnlist), len(newlist))
return newlist
def _work (self):
self.connect_cc()
newlist = self.find_new()
for fs in newlist:
try:
self.process_file(fs)
except (OSError, IOError), e:
self.log.info('%s: %s', fs.filename, e)
self.stat_inc('changes', len(newlist))
def work (self):
t = time.time()
while self.looping and self.stats_period > time.time() - t:
self._work()
self.sleep(1)
return 1
def stop (self):
""" Called from signal handler """
super(InfofileCollector, self).stop()
self.log.info ("stopping")
self.maint_timer.cancel()
def do_maint (self):
""" Drop removed files from our cache """
self.log.info ("cleanup")
current = glob.glob (os.path.join (self.infodir, self.infomask))
removed = set(self.infomap) - set(current)
for fn in removed:
self.log.debug ("forgetting file %s", fn)
del self.infomap[fn]
self.log.info ("current: %i, removed: %i", len(current), len(removed))
self.maint_timer = threading.Timer (self.maint_period, self.do_maint)
self.maint_timer.start()
if __name__ == '__main__':
s = InfofileCollector('infofile_collector', sys.argv[1:])
s.start()
|
markokr/cc
|
cc/daemon/infosender.py
|
Python
|
bsd-2-clause
| 5,065 | 0.0077 |
"""empty message
Revision ID: ded3fd1d7f9d
Revises: b70e85abec53
Create Date: 2020-12-30 22:46:59.418950
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'ded3fd1d7f9d'
down_revision = 'b70e85abec53'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('hashfiles', sa.Column('checksum', sa.String(length=256), nullable=False))
op.drop_column('hashfiles', 'hash_str')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('hashfiles', sa.Column('hash_str', mysql.VARCHAR(length=256), nullable=False))
op.drop_column('hashfiles', 'checksum')
# ### end Alembic commands ###
|
hashview/hashview
|
migrations/versions/ded3fd1d7f9d_.py
|
Python
|
gpl-3.0
| 850 | 0.002353 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# This is a PyRegion-based python test regions for exploring/testing CLA Network
# mechanisms
from abc import ABCMeta, abstractmethod
from nupic.bindings.regions.PyRegion import PyRegion
from nupic.data.dictutils import DictObj
class RegionIdentityPolicyBase(object):
""" A base class that must be subclassed by users in order to define the
TestRegion instance's specialization. See also setIdentityPolicyInstance().
"""
__metaclass__ = ABCMeta
@abstractmethod
def initialize(self, testRegionObj):
""" Called from the scope of the region's PyRegion.initialize() method.
testRegionObj: TestRegion instance with which this policy is
associated.
"""
@abstractmethod
def compute(self, inputs, outputs):
"""Perform the main computation
This method is called in each iteration for each phase the node supports.
Called from the scope of the region's PyRegion.compute() method.
inputs: dict of numpy arrays (one per input)
outputs: dict of numpy arrays (one per output)
"""
@abstractmethod
def getOutputElementCount(self, name):
"""Return the number of elements in the given output of the region
Called from the scope of the region's PyRegion.getOutputElementCount() method.
name: the name of the output
"""
@abstractmethod
def getName(self):
""" Return the name of the region
"""
class TestRegion(PyRegion):
"""
TestRegion is designed for testing and exploration of CLA Network
mechanisms. Each TestRegion instance takes on a specific role via
the associated TestRegionRole policy (TBD).
"""
def __init__(self,
**kwargs):
super(PyRegion, self).__init__(**kwargs)
# Learning, inference, and other parameters.
# By default we start out in stage learn with inference disabled
# The specialization policy is what gives this region instance its identity.
# Users set this via setIdentityPolicyInstance() before running the network
self.identityPolicy = None
# Debugging support, used in _conditionalBreak
self.breakPdb = False
self.breakKomodo = False
# Construct ephemeral variables (those that aren't serialized)
self.__constructEphemeralInstanceVars()
# Variables set up in initialize()
#self._sfdr = None # FDRCSpatial instance
return
def __constructEphemeralInstanceVars(self):
""" Initialize ephemeral instance variables (those that aren't serialized)
"""
assert not hasattr(self, 'ephemeral')
self.ephemeral = DictObj()
self.ephemeral.logPathInput = ''
self.ephemeral.logPathOutput = ''
self.ephemeral.logPathOutputDense = ''
self.ephemeral._fpLogInput = None
self.ephemeral._fpLogOutput = None
self.ephemeral._fpLogOutputDense = None
return
#############################################################################
#
# Initialization code
#
#############################################################################
def initialize(self, dims, splitterMaps):
""" Called by network after all links have been set up
dims, splitterMaps: Unused legacy args
"""
self.identityPolicy.initialize(self)
_debugOut(self.identityPolicy.getName())
return
#############################################################################
#
# Core compute methods: learning, inference, and prediction
#
#############################################################################
def compute(self, inputs, outputs):
"""
Run one iteration of the region's compute.
The guts of the compute are contained in the _compute() call so that
we can profile it if requested.
"""
# Uncomment this to find out who is generating divide by 0, or other numpy warnings
# numpy.seterr(divide='raise', invalid='raise', over='raise')
self.identityPolicy.compute(inputs, outputs)
_debugOut(("%s: inputs=%s; outputs=%s") % \
(self.identityPolicy.getName(),inputs, outputs))
return
#############################################################################
#
# NuPIC 2 Support
# These methods are required by NuPIC 2
#
#############################################################################
def getOutputElementCount(self, name):
nOutputElements = self.identityPolicy.getOutputElementCount(name)
return nOutputElements
# TODO: as a temporary hack, getParameterArrayCount checks to see if there's a
# variable, private or not, with that name. If so, it attempts to return the
# length of that variable.
def getParameterArrayCount(self, name, index):
p = self.getParameter(name)
if (not hasattr(p, '__len__')):
raise Exception("Attempt to access parameter '{0!s}' as an array but it is not an array".format(name))
return len(p)
# TODO: as a temporary hack, getParameterArray checks to see if there's a
# variable, private or not, with that name. If so, it returns the value of the
# variable.
def getParameterArray(self, name, index, a):
p = self.getParameter(name)
if (not hasattr(p, '__len__')):
raise Exception("Attempt to access parameter '{0!s}' as an array but it is not an array".format(name))
if len(p) > 0:
a[:] = p[:]
return
#############################################################################
#
# Region API support methods: getSpec, getParameter, and setParameter
#
#############################################################################
@classmethod
def getSpec(cls):
"""Return the base Spec for TestRegion.
"""
spec = dict(
description="TestRegion",
singleNodeOnly=True,
inputs=dict(
bottomUpIn=dict(
description="""The input vector.""",
dataType='Real32',
count=0,
required=False,
regionLevel=True,
isDefaultInput=True,
requireSplitterMap=False),
topDownIn=dict(
description="""The top-down input signal, generated from
feedback from upper levels""",
dataType='Real32',
count=0,
required = False,
regionLevel=True,
isDefaultInput=False,
requireSplitterMap=False),
),
outputs=dict(
bottomUpOut=dict(
description="""The output signal generated from the bottom-up inputs
from lower levels.""",
dataType='Real32',
count=0,
regionLevel=True,
isDefaultOutput=True),
topDownOut=dict(
description="""The top-down output signal, generated from
feedback from upper levels""",
dataType='Real32',
count=0,
regionLevel=True,
isDefaultOutput=False),
),
parameters=dict(
logPathInput=dict(
description='Optional name of input log file. If set, every input vector'
' will be logged to this file.',
accessMode='ReadWrite',
dataType='Byte',
count=0,
constraints=''),
logPathOutput=dict(
description='Optional name of output log file. If set, every output vector'
' will be logged to this file.',
accessMode='ReadWrite',
dataType='Byte',
count=0,
constraints=''),
logPathOutputDense=dict(
description='Optional name of output log file. If set, every output vector'
' will be logged to this file as a dense vector.',
accessMode='ReadWrite',
dataType='Byte',
count=0,
constraints=''),
breakPdb=dict(
description='Set to 1 to stop in the pdb debugger on the next compute',
dataType='UInt32',
count=1,
constraints='bool',
defaultValue=0,
accessMode='ReadWrite'),
breakKomodo=dict(
description='Set to 1 to stop in the Komodo debugger on the next compute',
dataType='UInt32',
count=1,
constraints='bool',
defaultValue=0,
accessMode='ReadWrite'),
),
commands=dict(
setIdentityPolicyInstance=dict(description=
"Set identity policy instance BERORE running the network. " + \
"The instance MUST be derived from TestRegion's " + \
"RegionIdentityPolicyBase class."),
getIdentityPolicyInstance=dict(description=
"Returns identity policy instance that was associated with " + \
"the TestRegion instance via the setIdentityPolicyInstance " + \
"command."),
)
)
return spec
def getParameter(self, parameterName, index=-1):
"""
Get the value of a NodeSpec parameter. Most parameters are handled
automatically by PyRegion's parameter get mechanism. The ones that need
special treatment are explicitly handled here.
"""
assert not (parameterName in self.__dict__ and parameterName in self.ephemeral)
if parameterName in self.ephemeral:
assert parameterName not in self.__dict__
return self.ephemeral[parameterName]
else:
return super(PyRegion, self).getParameter(parameterName, index)
def setParameter(self, parameterName, index, parameterValue):
"""
Set the value of a Spec parameter. Most parameters are handled
automatically by PyRegion's parameter set mechanism. The ones that need
special treatment are explicitly handled here.
"""
assert not (parameterName in self.__dict__ and parameterName in self.ephemeral)
if parameterName in self.ephemeral:
if parameterName == "logPathInput":
self.ephemeral.logPathInput = parameterValue
# Close any existing log file
if self.ephemeral._fpLogInput:
self.ephemeral._fpLogInput.close()
self.ephemeral._fpLogInput = None
# Open a new log file
if parameterValue:
self.ephemeral._fpLogInput = open(self.ephemeral.logPathInput, 'w')
elif parameterName == "logPathOutput":
self.ephemeral.logPathOutput = parameterValue
# Close any existing log file
if self.ephemeral._fpLogOutput:
self.ephemeral._fpLogOutput.close()
self.ephemeral._fpLogOutput = None
# Open a new log file
if parameterValue:
self.ephemeral._fpLogOutput = open(self.ephemeral.logPathOutput, 'w')
elif parameterName == "logPathOutputDense":
self.ephemeral.logPathOutputDense = parameterValue
# Close any existing log file
if self.ephemeral._fpLogOutputDense:
self.ephemeral._fpLogOutputDense.close()
self.ephemeral._fpLogOutputDense = None
# Open a new log file
if parameterValue:
self.ephemeral._fpLogOutputDense = open(self.ephemeral.logPathOutputDense, 'w')
else:
raise Exception('Unknown parameter: ' + parameterName)
return
#############################################################################
#
# Commands
#
#############################################################################
def setIdentityPolicyInstance(self, identityPolicyObj):
"""TestRegion command that sets identity policy instance. The instance
MUST be derived from TestRegion's RegionIdentityPolicyBase class.
Users MUST set the identity instance BEFORE running the network
Exception: AssertionError if identity policy instance has already been set
or if the passed-in instance is not derived from
RegionIdentityPolicyBase.
"""
assert not self.identityPolicy
assert isinstance(identityPolicyObj, RegionIdentityPolicyBase)
self.identityPolicy = identityPolicyObj
return
def getIdentityPolicyInstance(self):
"""TestRegion command that returns the identity policy instance that was
associated with this TestRegion instance via setIdentityPolicyInstance().
Returns: a RegionIdentityPolicyBase-based instance that was associated with
this TestRegion intstance.
Exception: AssertionError if no identity policy instance has been set.
"""
assert self.identityPolicy
return self.identityPolicy
#############################################################################
#
# Methods to support serialization
#
#############################################################################
def __getstate__(self):
"""
Return serializable state. This function will return a version of the
__dict__ with all "ephemeral" members stripped out. "Ephemeral" members
are defined as those that do not need to be (nor should be) stored
in any kind of persistent file (e.g., NuPIC network XML file.)
"""
state = self.__dict__.copy()
# Don't serialize ephemeral data
state.pop('ephemeral')
return state
def __setstate__(self, state):
"""
Set the state of ourself from a serialized state.
"""
assert 'ephemeral' not in state
self.__dict__.update(state)
# Initialize our ephemeral member variables
self.__constructEphemeralInstanceVars()
return
#############################################################################
#
# Debugging support code
#
#############################################################################
def _conditionalBreak(self):
if self.breakKomodo:
import dbgp.client; dbgp.client.brk()
if self.breakPdb:
import pdb; pdb.set_trace()
return
g_debug = True
def _debugOut(msg):
import sys
global g_debug
if g_debug:
callerTraceback = whois_callers_caller()
print "TEST_REGION (f={0!s};line={1!s}): {2!s}".format(callerTraceback.function, callerTraceback.lineno, msg)
sys.stdout.flush()
return
def whois_callers_caller():
"""
Returns: Traceback namedtuple for our caller's caller
"""
import inspect
frameObj = inspect.stack()[2][0]
return inspect.getframeinfo(frameObj)
|
runt18/nupic
|
src/nupic/regions/TestRegion.py
|
Python
|
agpl-3.0
| 15,152 | 0.011022 |
import unittest
import os, sys, imp
from qgis import utils
from qgis.core import QgsVectorLayer, QgsField, QgsProject, QGis
from qgis.PyQt.QtCore import QVariant
from .qgis_models import set_up_interface
from mole3.qgisinteraction import layer_interaction as li
from mole3.qgisinteraction import plugin_interaction as pi
from mole3.tests.qgis_models import HybridLayer
class PstPluginInteractionTest(unittest.TestCase):
def create_layer_with_features(self, name, type='Polygon'):
v_layer_name = li.biuniquify_layer_name(name)
if type == 'Point':
v_layer = QgsVectorLayer('{}?crs=EPSG:3857'.format(type), v_layer_name, 'memory', False)
else:
v_layer = HybridLayer(type, v_layer_name)
provider = v_layer.dataProvider()
v_layer.startEditing()
attributes = [QgsField('COLOR_RED', QVariant.String),
QgsField('COLOR_GRE', QVariant.String),
QgsField('COLOR_BLU', QVariant.String),
QgsField('COLOR_ALP', QVariant.String)]
provider.addAttributes(attributes)
v_layer.commitChanges()
return v_layer
def add_pointsamplingtool_to_plugins(self):
plugin_folder = os.path.join(utils.plugin_paths[0], 'pointsamplingtool', '__init__.py')
self.assertTrue(os.path.exists(str(plugin_folder)), 'Path to plugin not found. ({})'.format(str(plugin_folder)))
sys.modules['pointsamplingtool'] = imp.load_source('pointsamplingtool', plugin_folder)
def setUp(self):
self.qgis_app, self.canvas, self.iface = set_up_interface()
utils.plugin_paths = [os.path.expanduser('~/.qgis2/python/plugins')]
utils.updateAvailablePlugins()
utils.loadPlugin('pointsamplingtool')
utils.iface = self.iface
utils.startPlugin('pointsamplingtool')
def tearDown(self):
if self.qgis_app is not None:
del(self.qgis_app)
def test_if_plugin_is_available(self):
self.assertNotEqual(utils.available_plugins, [], 'No plugins were loaded.')
self.assertIn('pointsamplingtool', utils.available_plugins)
def test_if_plugin_is_accessible(self):
self.add_pointsamplingtool_to_plugins()
psti = pi.PstInteraction(utils.iface)
self.assertIsNotNone(psti)
def test_if_all_fields_are_selected(self):
self.add_pointsamplingtool_to_plugins()
registry = QgsProject.instance()
point_layer = self.create_layer_with_features('point', 'Point')
poly_layer1 = self.create_layer_with_features('poly1')
poly_layer2 = self.create_layer_with_features('poly2')
registry.addMapLayer(point_layer)
registry.addMapLayer(poly_layer1)
registry.addMapLayer(poly_layer2)
psti = pi.PstInteraction(utils.iface)
psti.set_input_layer(point_layer.name())
selected_fields = psti.pst_dialog.fieldsTable
psti.select_and_rename_files_for_sampling()
fields_point = point_layer.dataProvider().fields()
fields_poly1 = poly_layer1.dataProvider().fields()
fields_poly2 = poly_layer2.dataProvider().fields()
rows_expected = fields_point.count() + fields_poly1.count() + fields_poly2.count()
self.assertEqual(selected_fields.rowCount(), rows_expected)
def test_if_field_names_are_unique(self):
self.add_pointsamplingtool_to_plugins()
registry = QgsProject.instance()
point_layer = self.create_layer_with_features('test_pointlayer', 'Point')
poly_layer1 = self.create_layer_with_features('test_polygonlayer1')
poly_layer2 = self.create_layer_with_features('test_polygonlayer2')
registry.addMapLayer(point_layer)
registry.addMapLayer(poly_layer1)
registry.addMapLayer(poly_layer2)
psti = pi.PstInteraction(utils.iface)
psti.set_input_layer(point_layer.name())
map = psti.select_and_rename_files_for_sampling()
appendix = ['R', 'G', 'B', 'a']
poly_fields = psti.pst_dialog.rastItems[poly_layer1.name()]
for i in range(1, len(poly_fields)):
self.assertEqual(poly_fields[i][1], '01{}_{}'.format(poly_layer1.name()[:6], appendix[i-1]))
poly_fields = psti.pst_dialog.rastItems[poly_layer2.name()]
for i in range(1, len(poly_fields)):
self.assertEqual(poly_fields[i][1], '02{}_{}'.format(poly_layer1.name()[:6], appendix[i-1]))
self.assertEqual(map[poly_layer1.name()], '01{}'.format(poly_layer1.name()[:6]))
self.assertEqual(map[poly_layer2.name()], '02{}'.format(poly_layer2.name()[:6]))
if __name__ == '__main__':
unittest.main()
|
UdK-VPT/Open_eQuarter
|
mole3/tests/plugin_interaction_test.py
|
Python
|
gpl-2.0
| 4,712 | 0.002971 |
# the problem described below was fixed in 9758!
# keep_htpsit=False fails since 9473,
# on some installations (?) with:
# case A (see below in the code):
# RuntimeError: Could not locate the Fermi level!
# or the energies from the 2nd one behave strange, no convergence:
# iter: 1 18:21:49 +1.7 -3608.512512 0 19
# iter: 2 18:22:31 +1.9 -3148.936317 0
# iter: 3 18:23:13 +2.1 -2375.137532 0
# iter: 4 18:23:58 +2.4 -0.9 -1040.851545 216 11
# iter: 5 18:24:43 +2.6 -1.0 822.569589 597 14
# case B (see below in the code):
# No convergence when starting from a converged (keep_htpsit=True) run!
# WFS error grows to positive values!
# Is it an extreme case of https://trac.fysik.dtu.dk/projects/gpaw/ticket/51 ?
import os
import sys
from ase import Atoms
from gpaw import GPAW
from gpaw import ConvergenceError
from gpaw.mpi import rank
from gpaw.eigensolvers.rmm_diis_old import RMM_DIIS
from gpaw import setup_paths
if len(sys.argv) == 1:
run = 'A'
else:
run = sys.argv[1]
assert run in ['A', 'B']
# Use setups from the $PWD and $PWD/.. first
setup_paths.insert(0, '.')
setup_paths.insert(0, '../')
positions=[
(-0.069, 0.824,-1.295), ( 0.786, 0.943,-0.752), (-0.414,-0.001,-0.865),
(-0.282,-0.674,-3.822), ( 0.018,-0.147,-4.624), (-0.113,-0.080,-3.034),
( 2.253, 1.261, 0.151), ( 2.606, 0.638,-0.539), ( 2.455, 0.790, 1.019),
( 3.106,-0.276,-1.795), ( 2.914, 0.459,-2.386), ( 2.447,-1.053,-1.919),
( 6.257,-0.625,-0.626), ( 7.107,-1.002,-0.317), ( 5.526,-1.129,-0.131),
( 5.451,-1.261,-2.937), ( 4.585,-0.957,-2.503), ( 6.079,-0.919,-2.200),
(-0.515, 3.689, 0.482), (-0.218, 3.020,-0.189), ( 0.046, 3.568, 1.382),
(-0.205, 2.640,-3.337), (-1.083, 2.576,-3.771), (-0.213, 1.885,-2.680),
( 0.132, 6.301,-0.278), ( 1.104, 6.366,-0.068), (-0.148, 5.363,-0.112),
(-0.505, 6.680,-3.285), (-0.674, 7.677,-3.447), (-0.965, 6.278,-2.517),
( 4.063, 3.342,-0.474), ( 4.950, 2.912,-0.663), ( 3.484, 2.619,-0.125),
( 2.575, 2.404,-3.170), ( 1.694, 2.841,-3.296), ( 3.049, 2.956,-2.503),
( 6.666, 2.030,-0.815), ( 7.476, 2.277,-0.316), ( 6.473, 1.064,-0.651),
( 6.860, 2.591,-3.584), ( 6.928, 3.530,-3.176), ( 6.978, 2.097,-2.754),
( 2.931, 6.022,-0.243), ( 3.732, 6.562,-0.004), ( 3.226, 5.115,-0.404),
( 2.291, 7.140,-2.455), ( 1.317, 6.937,-2.532), ( 2.586, 6.574,-1.669),
( 6.843, 5.460, 1.065), ( 7.803, 5.290, 0.852), ( 6.727, 5.424, 2.062),
( 6.896, 4.784,-2.130), ( 6.191, 5.238,-2.702), ( 6.463, 4.665,-1.259),
( 0.398, 0.691, 4.098), ( 0.047, 1.567, 3.807), ( 1.268, 0.490, 3.632),
( 2.687, 0.272, 2.641), ( 3.078, 1.126, 3.027), ( 3.376,-0.501, 2.793),
( 6.002,-0.525, 4.002), ( 6.152, 0.405, 3.660), ( 5.987,-0.447, 4.980),
( 0.649, 3.541, 2.897), ( 0.245, 4.301, 3.459), ( 1.638, 3.457, 3.084),
(-0.075, 5.662, 4.233), (-0.182, 6.512, 3.776), (-0.241, 5.961, 5.212),
( 3.243, 2.585, 3.878), ( 3.110, 2.343, 4.817), ( 4.262, 2.718, 3.780),
( 5.942, 2.582, 3.712), ( 6.250, 3.500, 3.566), ( 6.379, 2.564, 4.636),
( 2.686, 5.638, 5.164), ( 1.781, 5.472, 4.698), ( 2.454, 6.286, 5.887),
( 6.744, 5.276, 3.826), ( 6.238, 5.608, 4.632), ( 7.707, 5.258, 4.110),
( 8.573, 8.472, 0.407), ( 9.069, 7.656, 0.067), ( 8.472, 8.425, 1.397),
( 8.758, 8.245, 2.989), ( 9.294, 9.091, 3.172), ( 7.906, 8.527, 3.373),
( 4.006, 7.734, 3.021), ( 4.685, 8.238, 3.547), ( 3.468, 7.158, 3.624),
( 5.281, 6.089, 6.035), ( 5.131, 7.033, 6.378), ( 4.428, 5.704, 5.720),
( 5.067, 7.323, 0.662), ( 5.785, 6.667, 0.703), ( 4.718, 7.252, 1.585)]
prefix = 'b256H2O'
L = 9.8553729
atoms = Atoms('32(OH2)',
positions=positions)
atoms.set_cell((L,L,L),scale_atoms=False)
atoms.set_pbc(1)
r = [1, 1, 2]
atoms = atoms.repeat(r)
n = [56 * ri for ri in r]
# nbands (>=128) is the number of bands per 32 water molecules
nbands = 2*6*11 # 132
for ri in r: nbands = nbands*ri
# the next line decreases memory usage
es = RMM_DIIS(keep_htpsit=False)
calc = GPAW(nbands=nbands,
# uncomment next two lines to use lcao/sz
#mode='lcao',
#basis='sz',
gpts=tuple(n),
#maxiter=5,
width = 0.01,
eigensolver = es,
txt=prefix + '.txt',
)
if run == 'A':
atoms.set_calculator(calc)
pot = atoms.get_potential_energy()
elif run == 'B':
# converge first with keep_htpsit=True
calc.set(eigensolver='rmm-diis')
calc.set(txt=prefix + '_True.txt')
atoms.set_calculator(calc)
pot = atoms.get_potential_energy()
# fails to converge with keep_htpsit=False
calc.set(eigensolver=es)
calc.set(maxiter=200)
calc.set(txt=prefix + '_False.txt')
atoms.set_calculator(calc)
pot = atoms.get_potential_energy()
|
robwarm/gpaw-symm
|
gpaw/test/big/scf/b256H2O/b256H2O.py
|
Python
|
gpl-3.0
| 4,905 | 0.031804 |
from flask import Flask
from flask.ext.bootstrap import Bootstrap
from flask.ext.mail import Mail
from flask.ext.moment import Moment
from flask.ext.sqlalchemy import SQLAlchemy
from config import config
from flask.ext.redis import Redis
bootstrap = Bootstrap()
mail = Mail()
moment = Moment()
db = SQLAlchemy()
redis1 = Redis()
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
app.config['REDIS_HOST'] = 'localhost'
app.config['REDIS_PORT'] = 6379
app.config['REDIS_DB'] = 0
bootstrap.init_app(app)
mail.init_app(app)
moment.init_app(app)
db.init_app(app)
redis1.init_app(app)
from .main import main as main_blueprint
# from .main.common import common
app.register_blueprint(main_blueprint)
# app.register_blueprint(common)
return app
|
simonqiang/gftest
|
app/__init__.py
|
Python
|
mit
| 888 | 0.003378 |
# -*- coding: utf-8 -*-
from .base import WatershedBEM
|
carolFrohlich/nipype
|
nipype/interfaces/mne/__init__.py
|
Python
|
bsd-3-clause
| 55 | 0 |
"""
A tool for converting kv6 models into pmf.
GreaseMonkey, 2013 - Public Domain
WARNING: I haven't checked to ensure that X,Y are around the right way.
If you find your models have been flipped inadvertently, let me know! --GM
"""
from __future__ import print_function
import sys, struct
# Backwards compatibility - make new code work on old version, not vice-versa
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY2:
# This script didn't use range() anyway, so no problem overwriting it in Py2
import __builtin__
range = getattr(__builtin__, "xrange")
_ord = ord
else:
_ord = lambda x: x
USAGE_MSG = """
usage:
python2 kv62pmf.py in.kv6 out.pmf ptsize ptspacing bonename
"""
if len(sys.argv) <= 4:
print(USAGE_MSG)
exit()
if not sys.argv[3].isdigit():
raise Exception("expected a number for the 3rd argument")
if not sys.argv[4].isdigit():
raise Exception("expected a number for the 4th argument")
ptsize = int(sys.argv[3])
ptspacing = int(sys.argv[4])
if ptsize < 1 or ptsize > 65535:
raise Exception("point size out of range (1..65535)")
bonename = sys.argv[4]
if PY3:
bonename = bonename.encode()
if len(bonename) > 15:
raise Exception("bone name too large")
infp = open(sys.argv[1],"rb")
if infp.read(4) != b"Kvxl":
raise Exception("not a KV6 file")
xsiz, ysiz, zsiz, xpivot, ypivot, zpivot, blklen = struct.unpack("<IIIfffI", infp.read(28))
print(xsiz, ysiz, zsiz, xpivot, ypivot, zpivot)
xpivot = int(xpivot*ptspacing+0.5)
ypivot = int(ypivot*ptspacing+0.5)
zpivot = int(zpivot*ptspacing+0.5)
# yeah i know this is basically worst case assuming x,y,z pivot is within the model bounds
if max(max(xsiz,ysiz),zsiz)*ptspacing > 65535:
raise Exception("point size a bit TOO large to fit into a pmf")
if blklen > 4096:
raise Exception("kv6 has too many blocks to fit into a pmf")
def parseblk(s):
return struct.unpack("<BBBBHBB",s)
blkdata = [parseblk(infp.read(8)) for i in range(blklen)]
xoffset = [struct.unpack("<I", infp.read(4))[0] for i in range(xsiz)]
xyoffset = [struct.unpack("<H", infp.read(2))[0] for i in range(xsiz*ysiz)]
assert blklen == sum(xoffset)
assert blklen == sum(xyoffset)
# Corollary: sum(xoffset) == sum(xyoffset)
# Proof: Left as an exercise to the reader.
magic_spal = infp.read(4)
palette = None
if magic_spal == b"":
pass # no palette
elif magic_spal == b"SPal":
palette = [[_ord(v) for v in infp.read(3)] for i in range(256)]
else:
raise Exception("expected palette at end of file")
infp.close()
#
#
#
# pretty simple really
outfp = open(sys.argv[2], "wb")
# start with the header of "PMF",0x1A,1,0,0,0
outfp.write(b"PMF\x1A\x01\x00\x00\x00")
# then there's a uint32_t denoting how many body parts there are
outfp.write(struct.pack("<I",1))
# then, for each body part,
# there's a null-terminated 16-byte string (max 15 chars) denoting the part
outfp.write(bonename + b"\x00"*(16-len(bonename)))
# then there's a uint32_t denoting how many points there are in this body part
outfp.write(struct.pack("<I",blklen))
# then there's a whole bunch of this:
# uint16_t radius;
# int16_t x,y,z;
# uint8_t b,g,r,reserved;
bi = 0
oi = 0
for cx in range(xsiz):
for cy in range(ysiz):
for i in range(xyoffset[oi]):
b,g,r,l,ypos,vis,unk1 = blkdata[bi]
outfp.write(struct.pack("<HhhhBBBB"
,ptsize
,cx*ptspacing-xpivot
,ypos*ptspacing-zpivot
,cy*ptspacing-ypivot
,b,g,r,0))
bi += 1
oi += 1
# rinse, lather, repeat
outfp.close()
|
fkaa/iceball
|
tools/kv62pmf.py
|
Python
|
gpl-3.0
| 3,453 | 0.019983 |
from mrjob.job import MRJob
from mrjob.step import MRStep
def get_id_from_line(line):
if line.find('.","Message-ID: <') > 0:
start = line.find("Message-ID")+13
i=0
for char in line[start:]:
i=i+1
if (not (char.isdigit() or (char == '.'))):
stop = i+start-2
break
return line[start:stop]
class MRMultilineInput(MRJob):
def steps(self):
return [
MRStep(mapper_init=self.mapper_init_count,
mapper=self.mapper_count),
MRStep(mapper=self.mapper_child)
# STEP 1
def mapper_init_count(self):
self.message_id = ''
self.in_body = False
self.body = []
self.after_key = False
self.beginning = False
self.key = False
def mapper_count(self, _, line):
line = line.strip()
if (line.find('.","Message-ID: <') > 0) and self.in_body and not self.beginning:
yield self.message_id, self.body
self.message_id = ''
self.body = []
self.in_body = False
self.after_key = False
self.beginning = False
self.key = False
if self.in_body and not self.after_key:
self.beginning = False
self.body.append(line)
if line.find('.","Message-ID: <') > 0 and not self.key:
if not self.in_body:
self.in_body = True
self.beginning = True
self.after_key = True
self.key = True
start = line.find("Message-ID")+13
i=0
for char in line[start:]:
i=i+1
if (not (char.isdigit() or (char == '.'))):
stop = i+start-2
break
self.message_id = line[start:stop]
self.after_key = False
# STEP 2
def mapper_child(self, message_id, values):
clean_body = ''
clean_date = ''
clean_from = ''
clean_to = ''
clean_values = []
start = 0
for idx, line in enumerate(values):
if "Date:" in line:
clean_date = line[5:].strip()
if line.find("From:") == 0:
clean_from = line[5:].strip()
if line.find("To:") == 0:
clean_to = line[3:].strip()
if "X-FileName:" in line:
start = idx+1
break
for i in range(start,len(values)):
if "-Original Message-" in values[i]:
break
clean_body=clean_body + values[i] + " "
clean_values.append(clean_date)
clean_values.append(clean_from)
#clean_values.append(clean_to)
#clean_values.append(clean_body.strip())
clean_values.append("TEST BODY")
newval = values
for element in values:
if "subject:" in element.lower():
subject = element
break
if "re:" in subject.lower():
newval.append("child")
elif "fw:" not in subject.lower():
newval.append("parent")
for element in newval:
if "Subject:" in element:
subject = element
break
relation = values[-1]
i = 0
colon = 0
if "<" not in subject:
for char in subject:
i=i+1
if char == ":":
colon = i
sub = subject[colon+1:].strip()
sub_relation = []
sub_relation.append(sub)
sub_relation.append(relation)
yield sub_relation, (message_id,clean_values)
if __name__ == '__main__':
MRMultilineInput.run()
|
tokamstud/enron-analysis
|
src/complex/hive_prep.py
|
Python
|
gpl-3.0
| 2,895 | 0.071157 |
import json
import os
import re
import shutil
import subprocess
import sys
import tempfile
import textwrap
import time
import venv # type: ignore
import zipfile
from typing import Dict
from argparse import ArgumentParser
from dataclasses import dataclass
from pathlib import Path
from urllib.request import urlopen
from typing import Optional, Iterator, Tuple, List, Iterable
HOMEBREW_PYTHON = (3, 8)
# This should match the pattern in .bumpversion.cfg
VERSION_PATTERN = re.compile(
r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)'
r'((?P<prerelease>[a-z]+)(?P<num>\d+))?'
)
class Version:
def __init__(self, raw: str) -> None:
self.raw = raw
match = VERSION_PATTERN.match(self.raw)
assert match is not None, f'Invalid version: {self.raw}'
groups = match.groupdict()
self.major: int = int(groups['major'])
self.minor: int = int(groups['minor'])
self.patch: int = int(groups['patch'])
self.prerelease: Optional[str] = None
self.num: Optional[int] = None
if groups['num'] is not None:
self.prerelease = groups['prerelease']
self.num = int(groups['num'])
def __str__(self):
return self.raw
def homebrew_class_name(self) -> str:
name = f'DbtAT{self.major}{self.minor}{self.patch}'
if self.prerelease is not None and self.num is not None:
name = f'{name}{self.prerelease.title()}{self.num}'
return name
def homebrew_filename(self):
version_str = f'{self.major}.{self.minor}.{self.patch}'
if self.prerelease is not None and self.num is not None:
version_str = f'{version_str}-{self.prerelease}{self.num}'
return f'dbt@{version_str}.rb'
@dataclass
class Arguments:
version: Version
part: str
path: Path
homebrew_path: Path
homebrew_set_default: bool
set_version: bool
build_pypi: bool
upload_pypi: bool
test_upload: bool
build_homebrew: bool
build_docker: bool
upload_docker: bool
write_requirements: bool
write_dockerfile: bool
@classmethod
def parse(cls) -> 'Arguments':
parser = ArgumentParser(
prog="Bump dbt's version, build packages"
)
parser.add_argument(
'version',
type=Version,
help="The version to set",
)
parser.add_argument(
'part',
type=str,
help="The part of the version to update",
)
parser.add_argument(
'--path',
type=Path,
help='The path to the dbt repository',
default=Path.cwd(),
)
parser.add_argument(
'--homebrew-path',
type=Path,
help='The path to the dbt homebrew install',
default=(Path.cwd() / '../homebrew-dbt'),
)
parser.add_argument(
'--homebrew-set-default',
action='store_true',
help='If set, make this homebrew version the default',
)
parser.add_argument(
'--no-set-version',
dest='set_version',
action='store_false',
help='Skip bumping the version',
)
parser.add_argument(
'--no-build-pypi',
dest='build_pypi',
action='store_false',
help='skip building pypi',
)
parser.add_argument(
'--no-build-docker',
dest='build_docker',
action='store_false',
help='skip building docker images',
)
parser.add_argument(
'--no-upload-docker',
dest='upload_docker',
action='store_false',
help='skip uploading docker images',
)
uploading = parser.add_mutually_exclusive_group()
uploading.add_argument(
'--upload-pypi',
dest='force_upload_pypi',
action='store_true',
help='upload to pypi even if building is disabled'
)
uploading.add_argument(
'--no-upload-pypi',
dest='no_upload_pypi',
action='store_true',
help='skip uploading to pypi',
)
parser.add_argument(
'--no-upload',
dest='test_upload',
action='store_false',
help='Skip uploading to pypitest',
)
parser.add_argument(
'--no-build-homebrew',
dest='build_homebrew',
action='store_false',
help='Skip building homebrew packages',
)
parser.add_argument(
'--no-write-requirements',
dest='write_requirements',
action='store_false',
help='Skip writing the requirements file. It must exist.'
)
parser.add_argument(
'--no-write-dockerfile',
dest='write_dockerfile',
action='store_false',
help='Skip writing the dockerfile. It must exist.'
)
parsed = parser.parse_args()
upload_pypi = parsed.build_pypi
if parsed.force_upload_pypi:
upload_pypi = True
elif parsed.no_upload_pypi:
upload_pypi = False
return cls(
version=parsed.version,
part=parsed.part,
path=parsed.path,
homebrew_path=parsed.homebrew_path,
homebrew_set_default=parsed.homebrew_set_default,
set_version=parsed.set_version,
build_pypi=parsed.build_pypi,
upload_pypi=upload_pypi,
test_upload=parsed.test_upload,
build_homebrew=parsed.build_homebrew,
build_docker=parsed.build_docker,
upload_docker=parsed.upload_docker,
write_requirements=parsed.write_requirements,
write_dockerfile=parsed.write_dockerfile,
)
def collect_output(cmd, cwd=None, stderr=subprocess.PIPE) -> str:
try:
result = subprocess.run(
cmd, cwd=cwd, check=True, stdout=subprocess.PIPE, stderr=stderr
)
except subprocess.CalledProcessError as exc:
print(f'Command {exc.cmd} failed')
if exc.output:
print(exc.output.decode('utf-8'))
if exc.stderr:
print(exc.stderr.decode('utf-8'), file=sys.stderr)
raise
return result.stdout.decode('utf-8')
def run_command(cmd, cwd=None) -> None:
result = collect_output(cmd, stderr=subprocess.STDOUT, cwd=cwd)
print(result)
def set_version(path: Path, version: Version, part: str):
# bumpversion --commit --no-tag --new-version "${version}" "${port}"
cmd = [
'bumpversion', '--commit', '--no-tag', '--new-version',
str(version), part
]
print(f'bumping version to {version}')
run_command(cmd, cwd=path)
print(f'bumped version to {version}')
class PypiBuilder:
_SUBPACKAGES = (
'core',
'plugins/postgres',
'plugins/redshift',
'plugins/bigquery',
'plugins/snowflake',
)
def __init__(self, dbt_path: Path):
self.dbt_path = dbt_path
@staticmethod
def _dist_for(path: Path, make=False) -> Path:
dist_path = path / 'dist'
if dist_path.exists():
shutil.rmtree(dist_path)
if make:
os.makedirs(dist_path)
build_path = path / 'build'
if build_path.exists():
shutil.rmtree(build_path)
return dist_path
@staticmethod
def _build_pypi_package(path: Path):
print(f'building package in {path}')
cmd = ['python', 'setup.py', 'sdist', 'bdist_wheel']
run_command(cmd, cwd=path)
print(f'finished building package in {path}')
@staticmethod
def _all_packages_in(path: Path) -> Iterator[Path]:
path = path / 'dist'
for pattern in ('*.tar.gz', '*.whl'):
yield from path.glob(pattern)
def _build_subpackage(self, name: str) -> Iterator[Path]:
subpath = self.dbt_path / name
self._dist_for(subpath)
self._build_pypi_package(subpath)
return self._all_packages_in(subpath)
def build(self):
print('building pypi packages')
dist_path = self._dist_for(self.dbt_path)
sub_pkgs: List[Path] = []
for path in self._SUBPACKAGES:
sub_pkgs.extend(self._build_subpackage(path))
# now build the main package
self._build_pypi_package(self.dbt_path)
# now copy everything from the subpackages in
for package in sub_pkgs:
shutil.copy(str(package), dist_path)
print('built pypi packages')
def upload(self, *, test=True):
cmd = ['twine', 'check']
cmd.extend(str(p) for p in self._all_packages_in(self.dbt_path))
run_command(cmd)
cmd = ['twine', 'upload']
if test:
cmd.extend(['--repository', 'pypitest'])
cmd.extend(str(p) for p in self._all_packages_in(self.dbt_path))
print('uploading packages: {}'.format(' '.join(cmd)))
run_command(cmd)
print('uploaded packages')
class PipInstaller(venv.EnvBuilder):
def __init__(self, packages: List[str]) -> None:
super().__init__(with_pip=True)
self.packages = packages
def post_setup(self, context):
# we can't run from the dbt directory or this gets all weird, so
# install from an empty temp directory and then remove it.
tmp = tempfile.mkdtemp()
cmd = [context.env_exe, '-m', 'pip', 'install', '--upgrade']
cmd.extend(self.packages)
print(f'installing {self.packages}')
try:
run_command(cmd, cwd=tmp)
finally:
os.rmdir(tmp)
print(f'finished installing {self.packages}')
def create(self, venv_path):
os.makedirs(venv_path.parent, exist_ok=True)
if venv_path.exists():
shutil.rmtree(venv_path)
return super().create(venv_path)
def _require_wheels(dbt_path: Path) -> List[Path]:
dist_path = dbt_path / 'dist'
wheels = list(dist_path.glob('*.whl'))
if not wheels:
raise ValueError(
f'No wheels found in {dist_path} - run scripts/build-wheels.sh'
)
return wheels
class DistFolderEnv(PipInstaller):
def __init__(self, dbt_path: Path) -> None:
self.wheels = _require_wheels(dbt_path)
super().__init__(packages=self.wheels)
class HomebrewVirtualenv(PipInstaller):
def __init__(self, dbt_version: Version) -> None:
super().__init__([f'dbt=={dbt_version}'])
@dataclass
class HomebrewDependency:
name: str
url: str
sha256: str
version: str
def render(self, indent: int = 2) -> str:
result = textwrap.dedent(f'''\
resource "{self.name}" do # {self.name}=={self.version}
url "{self.url}"
sha256 "{self.sha256}"
end
''')
return textwrap.indent(result, ' '*indent)
def __str__(self) -> str:
return self.render(indent=0)
@dataclass
class HomebrewTemplate:
url_data: str
hash_data: str
dependencies: List[HomebrewDependency]
def _make_venv_at(root: Path, name: str, builder: venv.EnvBuilder):
venv_path = root / name
os.makedirs(root, exist_ok=True)
if venv_path.exists():
shutil.rmtree(venv_path)
builder.create(venv_path)
return venv_path
class HomebrewBuilder:
def __init__(
self,
dbt_path: Path,
version: Version,
homebrew_path: Path,
set_default: bool,
) -> None:
self.dbt_path = dbt_path
self.version = version
self.homebrew_path = homebrew_path
self.set_default = set_default
self._template: Optional[HomebrewTemplate] = None
def make_venv(self) -> HomebrewVirtualenv:
env = HomebrewVirtualenv(self.version)
max_attempts = 10
for attempt in range(1, max_attempts+1):
# after uploading to pypi, it can take a few minutes for installing
# to work. Retry a few times...
try:
env.create(self.homebrew_venv_path)
return
except subprocess.CalledProcessError:
if attempt == max_attempts:
raise
else:
print(
f'installation failed - waiting 60s for pypi to see '
f'the new version (attempt {attempt}/{max_attempts})'
)
time.sleep(60)
return env
@property
def versioned_formula_path(self) -> Path:
return (
self.homebrew_path / 'Formula' / self.version.homebrew_filename()
)
@property
def default_formula_path(self) -> Path:
return (
self.homebrew_path / 'Formula/dbt.rb'
)
@property
def homebrew_venv_path(self) -> Path:
return self.dbt_path / 'build' / 'homebrew-venv'
@staticmethod
def _dbt_homebrew_formula_fmt() -> str:
return textwrap.dedent('''\
class {formula_name} < Formula
include Language::Python::Virtualenv
desc "Data build tool"
homepage "https://github.com/fishtown-analytics/dbt"
url "{url_data}"
sha256 "{hash_data}"
revision 1
bottle do
root_url "http://bottles.getdbt.com"
# bottle hashes + versions go here
end
depends_on "openssl@1.1"
depends_on "postgresql"
depends_on "python"
{dependencies}
{trailer}
end
''')
@staticmethod
def _dbt_homebrew_trailer() -> str:
dedented = textwrap.dedent('''\
def install
venv = virtualenv_create(libexec, "python3")
res = resources.map(&:name).to_set
res.each do |r|
venv.pip_install resource(r)
end
venv.pip_install_and_link buildpath
bin.install_symlink "#{libexec}/bin/dbt" => "dbt"
end
test do
(testpath/"dbt_project.yml").write(
"{name: 'test', version: '0.0.1', profile: 'default'}",
)
(testpath/".dbt/profiles.yml").write(
"{default: {outputs: {default: {type: 'postgres', threads: 1,
host: 'localhost', port: 5432, user: 'root', pass: 'password',
dbname: 'test', schema: 'test'}}, target: 'default'}}",
)
(testpath/"models/test.sql").write("select * from test")
system "#{bin}/dbt", "test"
end''')
return textwrap.indent(dedented, ' ')
def get_formula_data(
self, versioned: bool = True
) -> str:
fmt = self._dbt_homebrew_formula_fmt()
trailer = self._dbt_homebrew_trailer()
if versioned:
formula_name = self.version.homebrew_class_name()
else:
formula_name = 'Dbt'
dependencies_str = '\n'.join(
d.render() for d in self.template.dependencies
)
return fmt.format(
formula_name=formula_name,
version=self.version,
url_data=self.template.url_data,
hash_data=self.template.hash_data,
dependencies=dependencies_str,
trailer=trailer,
)
@property
def template(self) -> HomebrewTemplate:
if self._template is None:
self.make_venv()
print('done setting up virtualenv')
dependencies = []
dbt_package = None
for pkg in self._get_packages():
if pkg.name == 'dbt':
if pkg.version != str(self.version):
raise ValueError(
f'Found an invalid dbt=={pkg.version}, '
f'expected dbt=={self.version}'
)
dbt_package = pkg
else:
# we can assume that anything starting with dbt- in a fresh
# venv is a dbt package, I hope
if pkg.name.startswith('dbt-'):
if pkg.version != str(self.version):
raise ValueError(
f'Found an invalid {pkg.name}=={pkg.version}, '
f'expected {pkg.name}=={self.version}'
)
dependencies.append(pkg)
if dbt_package is None:
raise RuntimeError(
'never found dbt in "pip freeze -l" output'
)
template = HomebrewTemplate(
url_data=dbt_package.url,
hash_data=dbt_package.sha256,
dependencies=dependencies,
)
self._template = template
else:
template = self._template
return template
def _get_pypi_info(self, pkg: str, version: str) -> Tuple[str, str]:
fp = urlopen(f'https://pypi.org/pypi/{pkg}/{version}/json')
try:
data = json.load(fp)
finally:
fp.close()
assert 'urls' in data
for pkginfo in data['urls']:
assert 'packagetype' in pkginfo
if pkginfo['packagetype'] == 'sdist':
assert 'url' in pkginfo
assert 'digests' in pkginfo
assert 'sha256' in pkginfo['digests']
url = pkginfo['url']
sha256 = pkginfo['digests']['sha256']
return url, sha256
raise ValueError(f'Never got a valid sdist for {pkg}=={version}')
def _get_packages(self) -> Iterator[HomebrewDependency]:
pip = self.homebrew_venv_path / 'bin/pip'
cmd = [pip, 'freeze', '-l']
raw = collect_output(cmd).split('\n')
for line in raw:
if not line:
continue
parts = line.split('==')
if len(parts) != 2:
raise ValueError(
f'Could not parse pip freeze output line: {line}'
)
name, version = parts
url, sha256 = self._get_pypi_info(name, version)
dep = HomebrewDependency(
name=name, url=url, sha256=sha256, version=version
)
yield dep
def _remove_dbt_resource(self, lines: List[str]) -> Iterator[str]:
# TODO: fork poet or extract the good bits to avoid this
line_iter = iter(lines)
# don't do a double-newline or "brew audit" gets mad
for line in line_iter:
# skip the contents of the "dbt" resource block.
if line.strip() == 'resource "dbt" do':
for skip in line_iter:
if skip.strip() == 'end':
# skip the newline after 'end'
next(line_iter)
break
else:
yield line
def create_versioned_formula_file(self):
formula_contents = self.get_formula_data(versioned=True)
if self.versioned_formula_path.exists():
print('Homebrew formula path already exists, overwriting')
self.versioned_formula_path.write_text(formula_contents)
def commit_versioned_formula(self):
# add a commit for the new formula
run_command(
['git', 'add', self.versioned_formula_path],
cwd=self.homebrew_path
)
run_command(
['git', 'commit', '-m', f'add dbt@{self.version}'],
cwd=self.homebrew_path
)
def commit_default_formula(self):
run_command(
['git', 'add', self.default_formula_path],
cwd=self.homebrew_path
)
run_command(
['git', 'commit', '-m', f'upgrade dbt to {self.version}'],
cwd=self.homebrew_path
)
@staticmethod
def run_tests(formula_path: Path, audit: bool = True):
path = os.path.normpath(formula_path)
run_command(['brew', 'uninstall', '--force', path])
versions = [
l.strip() for l in
collect_output(['brew', 'list']).split('\n')
if l.strip().startswith('dbt@') or l.strip() == 'dbt'
]
if versions:
run_command(['brew', 'unlink'] + versions)
run_command(['brew', 'install', path])
run_command(['brew', 'test', path])
if audit:
run_command(['brew', 'audit', '--strict', path])
def create_default_package(self):
os.remove(self.default_formula_path)
formula_contents = self.get_formula_data(versioned=False)
self.default_formula_path.write_text(formula_contents)
def build(self):
self.create_versioned_formula_file()
# self.run_tests(formula_path=self.versioned_formula_path)
self.commit_versioned_formula()
if self.set_default:
self.create_default_package()
# self.run_tests(formula_path=self.default_formula_path, audit=False)
self.commit_default_formula()
class WheelInfo:
def __init__(self, path):
self.path = path
@staticmethod
def _extract_distinfo_path(wfile: zipfile.ZipFile) -> zipfile.Path:
zpath = zipfile.Path(root=wfile)
for path in zpath.iterdir():
if path.name.endswith('.dist-info'):
return path
raise ValueError('Wheel with no dist-info?')
def get_metadata(self) -> Dict[str, str]:
with zipfile.ZipFile(self.path) as wf:
distinfo = self._extract_distinfo_path(wf)
metadata = distinfo / 'METADATA'
metadata_dict: Dict[str, str] = {}
for line in metadata.read_text().split('\n'):
parts = line.split(': ', 1)
if len(parts) == 2:
metadata_dict[parts[0]] = parts[1]
return metadata_dict
def package_name(self) -> str:
metadata = self.get_metadata()
if 'Name' not in metadata:
raise ValueError('Wheel with no name?')
return metadata['Name']
class DockerBuilder:
"""The docker builder requires the existence of a dbt package"""
def __init__(self, dbt_path: Path, version: Version) -> None:
self.dbt_path = dbt_path
self.version = version
@property
def docker_path(self) -> Path:
return self.dbt_path / 'docker'
@property
def dockerfile_name(self) -> str:
return f'Dockerfile.{self.version}'
@property
def dockerfile_path(self) -> Path:
return self.docker_path / self.dockerfile_name
@property
def requirements_path(self) -> Path:
return self.docker_path / 'requirements'
@property
def requirements_file_name(self) -> str:
return f'requirements.{self.version}.txt'
@property
def dockerfile_venv_path(self) -> Path:
return self.dbt_path / 'build' / 'docker-venv'
@property
def requirements_txt_path(self) -> Path:
return self.requirements_path / self.requirements_file_name
def make_venv(self) -> DistFolderEnv:
env = DistFolderEnv(self.dbt_path)
env.create(self.dockerfile_venv_path)
return env
def get_frozen(self) -> str:
env = self.make_venv()
pip_path = self.dockerfile_venv_path / 'bin/pip'
cmd = [pip_path, 'freeze']
wheel_names = {
WheelInfo(wheel_path).package_name() for wheel_path in env.wheels
}
# remove the dependencies in dbt itself
return '\n'.join([
dep for dep in collect_output(cmd).split('\n')
if dep.split('==')[0] not in wheel_names
])
def write_lockfile(self):
freeze = self.get_frozen()
path = self.requirements_txt_path
if path.exists():
raise ValueError(f'Found existing requirements file at {path}!')
os.makedirs(path.parent, exist_ok=True)
path.write_text(freeze)
def get_dockerfile_contents(self):
dist_path = (self.dbt_path / 'dist').relative_to(Path.cwd())
wheel_paths = ' '.join(
os.path.join('.', 'dist', p.name)
for p in _require_wheels(self.dbt_path)
)
requirements_path = self.requirements_txt_path.relative_to(Path.cwd())
return textwrap.dedent(
f'''\
FROM python:3.8.1-slim-buster
RUN apt-get update && \
apt-get dist-upgrade -y && \
apt-get install -y --no-install-recommends \
git software-properties-common make build-essential \
ca-certificates libpq-dev && \
apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
COPY {requirements_path} ./{self.requirements_file_name}
COPY {dist_path} ./dist
RUN pip install --upgrade pip setuptools
RUN pip install --requirement ./{self.requirements_file_name}
RUN pip install {wheel_paths}
RUN useradd -mU dbt_user
ENV PYTHONIOENCODING=utf-8
ENV LANG C.UTF-8
WORKDIR /usr/app
VOLUME /usr/app
USER dbt_user
ENTRYPOINT dbt
'''
)
def write_dockerfile(self):
dockerfile = self.get_dockerfile_contents()
path = self.dockerfile_path
if path.exists():
raise ValueError(f'Found existing docker file at {path}!')
os.makedirs(path.parent, exist_ok=True)
path.write_text(dockerfile)
@property
def image_tag(self):
return f'dbt:{self.version}'
@property
def remote_tag(self):
return f'fishtownanalytics/{self.image_tag}'
def create_docker_image(self):
run_command(
[
'docker', 'build',
'-f', self.dockerfile_path,
'--tag', self.image_tag,
# '--no-cache',
self.dbt_path,
],
cwd=self.dbt_path
)
def set_remote_tag(self):
# tag it
run_command(
['docker', 'tag', self.image_tag, self.remote_tag],
cwd=self.dbt_path,
)
def commit_docker_folder(self):
# commit the contents of docker/
run_command(
['git', 'add', 'docker'],
cwd=self.dbt_path
)
commit_msg = f'Add {self.image_tag} dockerfiles and requirements'
run_command(['git', 'commit', '-m', commit_msg], cwd=self.dbt_path)
def build(
self,
write_requirements: bool = True,
write_dockerfile: bool = True
):
if write_requirements:
self.write_lockfile()
if write_dockerfile:
self.write_dockerfile()
self.commit_docker_folder()
self.create_docker_image()
self.set_remote_tag()
def push(self):
run_command(
['docker', 'push', self.remote_tag]
)
def sanity_check():
if sys.version_info[:len(HOMEBREW_PYTHON)] != HOMEBREW_PYTHON:
python_version_str = '.'.join(str(i) for i in HOMEBREW_PYTHON)
print(f'This script must be run with python {python_version_str}')
sys.exit(1)
# avoid "what's a bdist_wheel" errors
try:
import wheel # type: ignore # noqa
except ImportError:
print(
'The wheel package is required to build. Please run:\n'
'pip install -r dev_requirements.txt'
)
sys.exit(1)
def upgrade_to(args: Arguments):
if args.set_version:
set_version(args.path, args.version, args.part)
builder = PypiBuilder(args.path)
if args.build_pypi:
builder.build()
if args.upload_pypi:
if args.test_upload:
builder.upload()
input(
f'Ensure https://test.pypi.org/project/dbt/{args.version}/ '
'exists and looks reasonable'
)
builder.upload(test=False)
if args.build_homebrew:
if args.upload_pypi:
print('waiting a minute for pypi before trying to pip install')
# if we uploaded to pypi, wait a minute before we bother trying to
# pip install
time.sleep(60)
HomebrewBuilder(
dbt_path=args.path,
version=args.version,
homebrew_path=args.homebrew_path,
set_default=args.homebrew_set_default,
).build()
if args.build_docker:
builder = DockerBuilder(
dbt_path=args.path,
version=args.version,
)
builder.build(
write_requirements=args.write_requirements,
write_dockerfile=args.write_dockerfile,
)
if args.upload_docker:
builder.push()
def main():
sanity_check()
args = Arguments.parse()
upgrade_to(args)
if __name__ == '__main__':
main()
|
fishtown-analytics/dbt
|
scripts/build-dbt.py
|
Python
|
apache-2.0
| 29,183 | 0.000069 |
#!/usr/bin/python
# Generate .js files defining Blockly core and language messages.
#
# Copyright 2013 Google Inc.
# https://developers.google.com/blockly/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import codecs
import os
import re
import sys
from common import read_json_file
_NEWLINE_PATTERN = re.compile('[\n\r]')
def string_is_ascii(s):
try:
s.decode('ascii')
return True
except UnicodeEncodeError:
return False
def load_constants(filename):
"""Read in constants file, which must be output in every language."""
constant_defs = read_json_file(filename);
constants_text = '\n'
for key in constant_defs:
value = constant_defs[key]
value = value.replace('"', '\\"')
constants_text += u'\nBlockly.Msg["{0}"] = \"{1}\";'.format(
key, value)
return constants_text
def main():
"""Generate .js files defining Blockly core and language messages."""
# Process command-line arguments.
parser = argparse.ArgumentParser(description='Convert JSON files to JS.')
parser.add_argument('--source_lang', default='en',
help='ISO 639-1 source language code')
parser.add_argument('--source_lang_file',
default=os.path.join('json', 'en.json'),
help='Path to .json file for source language')
parser.add_argument('--source_synonym_file',
default=os.path.join('json', 'synonyms.json'),
help='Path to .json file with synonym definitions')
parser.add_argument('--source_constants_file',
default=os.path.join('json', 'constants.json'),
help='Path to .json file with constant definitions')
parser.add_argument('--output_dir', default='js/',
help='relative directory for output files')
parser.add_argument('--key_file', default='keys.json',
help='relative path to input keys file')
parser.add_argument('--quiet', action='store_true', default=False,
help='do not write anything to standard output')
parser.add_argument('files', nargs='+', help='input files')
args = parser.parse_args()
if not args.output_dir.endswith(os.path.sep):
args.output_dir += os.path.sep
# Read in source language .json file, which provides any values missing
# in target languages' .json files.
source_defs = read_json_file(os.path.join(os.curdir, args.source_lang_file))
# Make sure the source file doesn't contain a newline or carriage return.
for key, value in source_defs.items():
if _NEWLINE_PATTERN.search(value):
print('ERROR: definition of {0} in {1} contained a newline character.'.
format(key, args.source_lang_file))
sys.exit(1)
sorted_keys = source_defs.keys()
sorted_keys.sort()
# Read in synonyms file, which must be output in every language.
synonym_defs = read_json_file(os.path.join(
os.curdir, args.source_synonym_file))
synonym_text = '\n'.join([u'Blockly.Msg["{0}"] = Blockly.Msg["{1}"];'
.format(key, synonym_defs[key]) for key in synonym_defs])
# Read in constants file, which must be output in every language.
constants_text = load_constants(os.path.join(os.curdir, args.source_constants_file))
# Create each output file.
for arg_file in args.files:
(_, filename) = os.path.split(arg_file)
target_lang = filename[:filename.index('.')]
if target_lang not in ('qqq', 'keys', 'synonyms', 'constants'):
target_defs = read_json_file(os.path.join(os.curdir, arg_file))
# Verify that keys are 'ascii'
bad_keys = [key for key in target_defs if not string_is_ascii(key)]
if bad_keys:
print(u'These keys in {0} contain non ascii characters: {1}'.format(
filename, ', '.join(bad_keys)))
# If there's a '\n' or '\r', remove it and print a warning.
for key, value in target_defs.items():
if _NEWLINE_PATTERN.search(value):
print(u'WARNING: definition of {0} in {1} contained '
'a newline character.'.
format(key, arg_file))
target_defs[key] = _NEWLINE_PATTERN.sub(' ', value)
# Output file.
outname = os.path.join(os.curdir, args.output_dir, target_lang + '.js')
with codecs.open(outname, 'w', 'utf-8') as outfile:
outfile.write(
"""// This file was automatically generated. Do not modify.
'use strict';
goog.provide('Blockly.Msg.{0}');
goog.require('Blockly.Msg');
""".format(target_lang.replace('-', '.')))
# For each key in the source language file, output the target value
# if present; otherwise, output the source language value with a
# warning comment.
for key in sorted_keys:
if key in target_defs:
value = target_defs[key]
comment = ''
del target_defs[key]
else:
value = source_defs[key]
comment = ' // untranslated'
value = value.replace('"', '\\"')
outfile.write(u'Blockly.Msg["{0}"] = "{1}";{2}\n'
.format(key, value, comment))
# Announce any keys defined only for target language.
if target_defs:
extra_keys = [key for key in target_defs if key not in synonym_defs]
synonym_keys = [key for key in target_defs if key in synonym_defs]
if not args.quiet:
if extra_keys:
print(u'These extra keys appeared in {0}: {1}'.format(
filename, ', '.join(extra_keys)))
if synonym_keys:
print(u'These synonym keys appeared in {0}: {1}'.format(
filename, ', '.join(synonym_keys)))
outfile.write(synonym_text)
outfile.write(constants_text)
if not args.quiet:
print('Created {0}.'.format(outname))
if __name__ == '__main__':
main()
|
NTUTVisualScript/Visual_Script
|
static/javascript/blockly/i18n/create_messages.py
|
Python
|
mit
| 6,374 | 0.010041 |
# -*- coding: utf-8 -*-
"""
pythoner.net
Copyright (C) 2013 PYTHONER.ORG
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.contrib import admin
from models import *
class ProfileAdmin(admin.ModelAdmin):
list_display = ('screen_name','city','introduction')
admin.site.register(UserProfile,ProfileAdmin)
|
yohn89/pythoner.net
|
pythoner/accounts/admin.py
|
Python
|
gpl-3.0
| 915 | 0.006557 |
# Copyright 2015 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.watchers.keypair
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Mike Grima <mgrima@netflix.com>
"""
import json
from security_monkey.decorators import record_exception
from security_monkey.decorators import iter_account_region
from security_monkey.watcher import Watcher, ChangeItem
from security_monkey.datastore import Account
from security_monkey import app, ARN_PREFIX
class ElasticSearchService(Watcher):
index = 'elasticsearchservice'
i_am_singular = 'ElasticSearch Service Access Policy'
i_am_plural = 'ElasticSearch Service Access Policies'
def __init__(self, accounts=None, debug=False):
super(ElasticSearchService, self).__init__(accounts=accounts, debug=debug)
def slurp(self):
"""
:returns: item_list - list of ElasticSearchService Items
:return: exception_map - A dict where the keys are a tuple containing the
location of the exception and the value is the actual exception
"""
self.prep_for_slurp()
@iter_account_region(index=self.index, accounts=self.accounts, service_name='es')
def slurp_items(**kwargs):
item_list = []
exception_map = {}
kwargs['exception_map'] = exception_map
account_db = Account.query.filter(Account.name == kwargs['account_name']).first()
account_num = account_db.identifier
es_info = self.get_all_es_domains_in_region(**kwargs)
if es_info is None:
return item_list, exception_map
(client, domains) = es_info
app.logger.debug("Found {} {}".format(len(domains), ElasticSearchService.i_am_plural))
for domain in domains:
if self.check_ignore_list(domain["DomainName"]):
continue
# Fetch the policy:
item = self.build_item(domain["DomainName"], client, account_num, **kwargs)
if item:
item_list.append(item)
return item_list, exception_map
return slurp_items()
@record_exception(source='{index}-watcher'.format(index=index), pop_exception_fields=False)
def get_all_es_domains_in_region(self, **kwargs):
from security_monkey.common.sts_connect import connect
client = connect(kwargs['account_name'], "boto3.es.client", region=kwargs['region'])
app.logger.debug("Checking {}/{}/{}".format(ElasticSearchService.index, kwargs['account_name'], kwargs['region']))
# No need to paginate according to: client.can_paginate("list_domain_names")
domains = self.wrap_aws_rate_limited_call(client.list_domain_names)["DomainNames"]
return client, domains
@record_exception(source='{index}-watcher'.format(index=index), pop_exception_fields=False)
def build_item(self, domain, client, account_num, **kwargs):
arn = ARN_PREFIX + ':es:{region}:{account_number}:domain/{domain_name}'.format(
region=kwargs['region'],
account_number=account_num,
domain_name=domain)
config = {
'arn': arn
}
domain_config = self.wrap_aws_rate_limited_call(client.describe_elasticsearch_domain_config,
DomainName=domain)
# Does the cluster have a policy?
if domain_config["DomainConfig"]["AccessPolicies"]["Options"] == "":
config['policy'] = {}
else:
config['policy'] = json.loads(domain_config["DomainConfig"]["AccessPolicies"]["Options"])
config['name'] = domain
return ElasticSearchServiceItem(region=kwargs['region'], account=kwargs['account_name'], name=domain, arn=arn, config=config)
class ElasticSearchServiceItem(ChangeItem):
def __init__(self, region=None, account=None, name=None, arn=None, config={}):
super(ElasticSearchServiceItem, self).__init__(
index=ElasticSearchService.index,
region=region,
account=account,
name=name,
arn=arn,
new_config=config)
|
stackArmor/security_monkey
|
security_monkey/watchers/elasticsearch_service.py
|
Python
|
apache-2.0
| 4,759 | 0.003572 |
outdata1 = divmod(20,8)
# prefix an argument with a star when calling a function to unpack tuple
t = (20,8)
outdata2 = divmod(*t)
import os
# Note that filename = hh.grad
_, filename = os.path.split('/nfs/j3/hh.grad')
# Using * to grab excess items
# Can be used in python3, but not in python2
# a, b, *rest = range(5)
# a, b, *rest = range(3)
# a, b, *rest = range(2)
# a, *body, c, d = range(5)
# *head, b, c, d = range(5)
# Nested tuple unpacking
a = [('good', (334,213)),
('bad', (231,234))]
for cond, (x, y) in a:
print('x = {0}, y = {1}'.format(x, y))
# Namedtuple
from collections import namedtuple
place = namedtuple('place', 'condition coordinate')
tokyo = place('good', (334,213))
print(tokyo)
# _fields class attribute, _make(iterable) class method, _asdict() instance method
print(place._fields)
LatLong = namedtuple('LatLong', 'lat long')
delhi_data = ('Delhi NCR', LatLong(28.61, 77.21))
delhi = place._make(delhi_data)
for key, value in delhi._asdict().items():
print(key + ':', value)
|
helloTC/LearnPython
|
fluent_python/array_of_sequences/tuple_as_record.py
|
Python
|
mit
| 1,025 | 0.00878 |
import datetime
import decimal
from time import time
from django.utils.hashcompat import md5_constructor
from django.utils.log import getLogger
logger = getLogger('django.db.backends')
class CursorDebugWrapper(object):
def __init__(self, cursor, db):
self.cursor = cursor
self.db = db # Instance of a BaseDatabaseWrapper subclass
def execute(self, sql, params=()):
start = time()
try:
return self.cursor.execute(sql, params)
finally:
stop = time()
duration = stop - start
sql = self.db.ops.last_executed_query(self.cursor, sql, params)
self.db.queries.append({
'sql': sql,
'time': "%.3f" % duration,
})
logger.debug('(%.3f) %s; args=%s' % (duration, sql, params),
extra={'duration':duration, 'sql':sql, 'params':params}
)
def executemany(self, sql, param_list):
start = time()
try:
return self.cursor.executemany(sql, param_list)
finally:
stop = time()
duration = stop - start
self.db.queries.append({
'sql': '%s times: %s' % (len(param_list), sql),
'time': "%.3f" % duration,
})
logger.debug('(%.3f) %s; args=%s' % (duration, sql, param_list),
extra={'duration':duration, 'sql':sql, 'params':param_list}
)
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
###############################################
# Converters from database (string) to Python #
###############################################
def typecast_date(s):
return s and datetime.date(*map(int, s.split('-'))) or None # returns None if s is null
def typecast_time(s): # does NOT store time zone information
if not s: return None
hour, minutes, seconds = s.split(':')
if '.' in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split('.')
else:
microseconds = '0'
return datetime.time(int(hour), int(minutes), int(seconds), int(float('.'+microseconds) * 1000000))
def typecast_timestamp(s): # does NOT store time zone information
# "2005-07-29 15:48:00.590358-05"
# "2005-07-29 09:56:00-05"
if not s: return None
if not ' ' in s: return typecast_date(s)
d, t = s.split()
# Extract timezone information, if it exists. Currently we just throw
# it away, but in the future we may make use of it.
if '-' in t:
t, tz = t.split('-', 1)
tz = '-' + tz
elif '+' in t:
t, tz = t.split('+', 1)
tz = '+' + tz
else:
tz = ''
dates = d.split('-')
times = t.split(':')
seconds = times[2]
if '.' in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split('.')
else:
microseconds = '0'
return datetime.datetime(int(dates[0]), int(dates[1]), int(dates[2]),
int(times[0]), int(times[1]), int(seconds), int((microseconds + '000000')[:6]))
def typecast_boolean(s):
if s is None: return None
if not s: return False
return str(s)[0].lower() == 't'
def typecast_decimal(s):
if s is None or s == '':
return None
return decimal.Decimal(s)
###############################################
# Converters from Python to database (string) #
###############################################
def rev_typecast_boolean(obj, d):
return obj and '1' or '0'
def rev_typecast_decimal(d):
if d is None:
return None
return str(d)
def truncate_name(name, length=None, hash_len=4):
"""Shortens a string to a repeatable mangled version with the given length.
"""
if length is None or len(name) <= length:
return name
hash = md5_constructor(name).hexdigest()[:hash_len]
return '%s%s' % (name[:length-hash_len], hash)
def format_number(value, max_digits, decimal_places):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
if isinstance(value, decimal.Decimal):
context = decimal.getcontext().copy()
context.prec = max_digits
return u'%s' % str(value.quantize(decimal.Decimal(".1") ** decimal_places, context=context))
else:
return u"%.*f" % (decimal_places, value)
|
rimbalinux/MSISDNArea
|
django/db/backends/util.py
|
Python
|
bsd-3-clause
| 4,684 | 0.007472 |
import xml.etree.cElementTree as et
from collections import OrderedDict
from tabletopscanner.boardgamegeekapi.parsers import Deserializer
class SearchParser(Deserializer):
def deserialize(self, xml):
tree = et.fromstring(xml)
return [SearchParser.__make_search_result(el) for el in tree.findall('item')]
@staticmethod
def __make_search_result(el):
geekid = geekid = el.attrib['id']
name = el.find('name').attrib['value']
yearpublished = el.find('yearpublished').attrib['value']
return OrderedDict({
'geekid': geekid,
'name': name,
'yearpublished': yearpublished
})
|
ramseyboy/tabletop-scanner
|
tabletopscanner/boardgamegeekapi/search.py
|
Python
|
apache-2.0
| 674 | 0.001484 |
# pylint: disable=C0111
# pylint: disable=W0621
from lettuce import world, step
from nose.tools import assert_true, assert_in, assert_false # pylint: disable=E0611
from auth.authz import get_user_by_email, get_course_groupname_for_role
from django.conf import settings
from selenium.webdriver.common.keys import Keys
import time
import os
from django.contrib.auth.models import Group
from logging import getLogger
logger = getLogger(__name__)
from terrain.browser import reset_data
TEST_ROOT = settings.COMMON_TEST_DATA_ROOT
@step('I (?:visit|access|open) the Studio homepage$')
def i_visit_the_studio_homepage(_step):
# To make this go to port 8001, put
# LETTUCE_SERVER_PORT = 8001
# in your settings.py file.
world.visit('/')
signin_css = 'a.action-signin'
assert world.is_css_present(signin_css)
@step('I am logged into Studio$')
def i_am_logged_into_studio(_step):
log_into_studio()
@step('I confirm the alert$')
def i_confirm_with_ok(_step):
world.browser.get_alert().accept()
@step(u'I press the "([^"]*)" delete icon$')
def i_press_the_category_delete_icon(_step, category):
if category == 'section':
css = 'a.delete-button.delete-section-button span.delete-icon'
elif category == 'subsection':
css = 'a.delete-button.delete-subsection-button span.delete-icon'
else:
assert False, 'Invalid category: %s' % category
world.css_click(css)
@step('I have opened a new course in Studio$')
def i_have_opened_a_new_course(_step):
open_new_course()
@step('(I select|s?he selects) the new course')
def select_new_course(_step, whom):
course_link_css = 'a.course-link'
world.css_click(course_link_css)
@step(u'I press the "([^"]*)" notification button$')
def press_the_notification_button(_step, name):
# Because the notification uses a CSS transition,
# Selenium will always report it as being visible.
# This makes it very difficult to successfully click
# the "Save" button at the UI level.
# Instead, we use JavaScript to reliably click
# the button.
btn_css = 'div#page-notification a.action-%s' % name.lower()
world.trigger_event(btn_css, event='focus')
world.browser.execute_script("$('{}').click()".format(btn_css))
world.wait_for_ajax_complete()
@step('I change the "(.*)" field to "(.*)"$')
def i_change_field_to_value(_step, field, value):
field_css = '#%s' % '-'.join([s.lower() for s in field.split()])
ele = world.css_find(field_css).first
ele.fill(value)
ele._element.send_keys(Keys.ENTER)
@step('I reset the database')
def reset_the_db(_step):
"""
When running Lettuce tests using examples (i.e. "Confirmation is
shown on save" in course-settings.feature), the normal hooks
aren't called between examples. reset_data should run before each
scenario to flush the test database. When this doesn't happen we
get errors due to trying to insert a non-unique entry. So instead,
we delete the database manually. This has the effect of removing
any users and courses that have been created during the test run.
"""
reset_data(None)
@step('I see a confirmation that my changes have been saved')
def i_see_a_confirmation(step):
confirmation_css = '#alert-confirmation'
assert world.is_css_present(confirmation_css)
def open_new_course():
world.clear_courses()
create_studio_user()
log_into_studio()
create_a_course()
def create_studio_user(
uname='robot',
email='robot+studio@edx.org',
password='test',
is_staff=False):
studio_user = world.UserFactory(
username=uname,
email=email,
password=password,
is_staff=is_staff)
registration = world.RegistrationFactory(user=studio_user)
registration.register(studio_user)
registration.activate()
return studio_user
def fill_in_course_info(
name='Robot Super Course',
org='MITx',
num='101',
run='2013_Spring'):
world.css_fill('.new-course-name', name)
world.css_fill('.new-course-org', org)
world.css_fill('.new-course-number', num)
world.css_fill('.new-course-run', run)
def log_into_studio(
uname='robot',
email='robot+studio@edx.org',
password='test',
name='Robot Studio'):
world.log_in(username=uname, password=password, email=email, name=name)
# Navigate to the studio dashboard
world.visit('/')
assert_in(uname, world.css_text('h2.title', timeout=10))
def add_course_author(user, course):
"""
Add the user to the instructor group of the course
so they will have the permissions to see it in studio
"""
for role in ("staff", "instructor"):
groupname = get_course_groupname_for_role(course.location, role)
group, __ = Group.objects.get_or_create(name=groupname)
user.groups.add(group)
user.save()
def create_a_course():
course = world.CourseFactory.create(org='MITx', course='999', display_name='Robot Super Course')
world.scenario_dict['COURSE'] = course
user = world.scenario_dict.get("USER")
if not user:
user = get_user_by_email('robot+studio@edx.org')
add_course_author(user, course)
# Navigate to the studio dashboard
world.visit('/')
course_link_css = 'a.course-link'
world.css_click(course_link_css)
course_title_css = 'span.course-title'
assert_true(world.is_css_present(course_title_css))
def add_section(name='My Section'):
link_css = 'a.new-courseware-section-button'
world.css_click(link_css)
name_css = 'input.new-section-name'
save_css = 'input.new-section-name-save'
world.css_fill(name_css, name)
world.css_click(save_css)
span_css = 'span.section-name-span'
assert_true(world.is_css_present(span_css))
def add_subsection(name='Subsection One'):
css = 'a.new-subsection-item'
world.css_click(css)
name_css = 'input.new-subsection-name-input'
save_css = 'input.new-subsection-name-save'
world.css_fill(name_css, name)
world.css_click(save_css)
def set_date_and_time(date_css, desired_date, time_css, desired_time):
world.css_fill(date_css, desired_date)
# hit TAB to get to the time field
e = world.css_find(date_css).first
# pylint: disable=W0212
e._element.send_keys(Keys.TAB)
world.css_fill(time_css, desired_time)
e = world.css_find(time_css).first
e._element.send_keys(Keys.TAB)
time.sleep(float(1))
@step('I have enabled the (.*) advanced module$')
def i_enabled_the_advanced_module(step, module):
step.given('I have opened a new course section in Studio')
world.css_click('.nav-course-settings')
world.css_click('.nav-course-settings-advanced a')
type_in_codemirror(0, '["%s"]' % module)
press_the_notification_button(step, 'Save')
@world.absorb
def create_course_with_unit():
"""
Prepare for tests by creating a course with a section, subsection, and unit.
Performs the following:
Clear out all courseware
Create a course with a section, subsection, and unit
Create a user and make that user a course author
Log the user into studio
Open the course from the dashboard
Expand the section and click on the New Unit link
The end result is the page where the user is editing the new unit
"""
world.clear_courses()
course = world.CourseFactory.create()
world.scenario_dict['COURSE'] = course
section = world.ItemFactory.create(parent_location=course.location)
world.ItemFactory.create(
parent_location=section.location,
category='sequential',
display_name='Subsection One',
)
user = create_studio_user(is_staff=False)
add_course_author(user, course)
log_into_studio()
world.css_click('a.course-link')
world.wait_for_js_to_load()
css_selectors = [
'div.section-item a.expand-collapse-icon', 'a.new-unit-item'
]
for selector in css_selectors:
world.css_click(selector)
world.wait_for_mathjax()
world.wait_for_xmodule()
assert world.is_css_present('ul.new-component-type')
@step('I have clicked the new unit button$')
@step(u'I am in Studio editing a new unit$')
def edit_new_unit(step):
create_course_with_unit()
@step('the save notification button is disabled')
def save_button_disabled(step):
button_css = '.action-save'
disabled = 'is-disabled'
assert world.css_has_class(button_css, disabled)
@step('the "([^"]*)" button is disabled')
def button_disabled(step, value):
button_css = 'input[value="%s"]' % value
assert world.css_has_class(button_css, 'is-disabled')
def _do_studio_prompt_action(intent, action):
"""
Wait for a studio prompt to appear and press the specified action button
See cms/static/js/views/feedback_prompt.js for implementation
"""
assert intent in ['warning', 'error', 'confirmation', 'announcement',
'step-required', 'help', 'mini']
assert action in ['primary', 'secondary']
world.wait_for_present('div.wrapper-prompt.is-shown#prompt-{}'.format(intent))
action_css = 'li.nav-item > a.action-{}'.format(action)
world.trigger_event(action_css, event='focus')
world.browser.execute_script("$('{}').click()".format(action_css))
world.wait_for_ajax_complete()
world.wait_for_present('div.wrapper-prompt.is-hiding#prompt-{}'.format(intent))
@world.absorb
def confirm_studio_prompt():
_do_studio_prompt_action('warning', 'primary')
@step('I confirm the prompt')
def confirm_the_prompt(step):
confirm_studio_prompt()
@step(u'I am shown a prompt$')
def i_am_shown_a_notification(step):
assert world.is_css_present('.wrapper-prompt')
def type_in_codemirror(index, text):
world.wait(1) # For now, slow this down so that it works. TODO: fix it.
world.css_click("div.CodeMirror-lines", index=index)
world.browser.execute_script("$('div.CodeMirror.CodeMirror-focused > div').css('overflow', '')")
g = world.css_find("div.CodeMirror.CodeMirror-focused > div > textarea")
if world.is_mac():
g._element.send_keys(Keys.COMMAND + 'a')
else:
g._element.send_keys(Keys.CONTROL + 'a')
g._element.send_keys(Keys.DELETE)
g._element.send_keys(text)
if world.is_firefox():
world.trigger_event('div.CodeMirror', index=index, event='blur')
world.wait_for_ajax_complete()
def upload_file(filename):
path = os.path.join(TEST_ROOT, filename)
world.browser.execute_script("$('input.file-input').css('display', 'block')")
world.browser.attach_file('file', os.path.abspath(path))
button_css = '.upload-dialog .action-upload'
world.css_click(button_css)
@step(u'"([^"]*)" logs in$')
def other_user_login(step, name):
step.given('I log out')
world.visit('/')
signin_css = 'a.action-signin'
world.is_css_present(signin_css)
world.css_click(signin_css)
def fill_login_form():
login_form = world.browser.find_by_css('form#login_form')
login_form.find_by_name('email').fill(name + '@edx.org')
login_form.find_by_name('password').fill("test")
login_form.find_by_name('submit').click()
world.retry_on_exception(fill_login_form)
assert_true(world.is_css_present('.new-course-button'))
world.scenario_dict['USER'] = get_user_by_email(name + '@edx.org')
@step(u'the user "([^"]*)" exists( as a course (admin|staff member|is_staff))?$')
def create_other_user(_step, name, has_extra_perms, role_name):
email = name + '@edx.org'
user = create_studio_user(uname=name, password="test", email=email)
if has_extra_perms:
if role_name == "is_staff":
user.is_staff = True
else:
if role_name == "admin":
# admins get staff privileges, as well
roles = ("staff", "instructor")
else:
roles = ("staff",)
location = world.scenario_dict["COURSE"].location
for role in roles:
groupname = get_course_groupname_for_role(location, role)
group, __ = Group.objects.get_or_create(name=groupname)
user.groups.add(group)
user.save()
@step('I log out')
def log_out(_step):
world.visit('logout')
|
abo-abo/edx-platform
|
cms/djangoapps/contentstore/features/common.py
|
Python
|
agpl-3.0
| 12,319 | 0.000812 |
import unittest
import os
import json
import time
from os import environ
from ConfigParser import ConfigParser
from pprint import pprint
from biokbase.workspace.client import Workspace as workspaceService
from MyContigFilter.MyContigFilterImpl import MyContigFilter
class MyContigFilterTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
token = environ.get('KB_AUTH_TOKEN', None)
cls.ctx = {'token': token, 'provenance': [{'service': 'MyContigFilter',
'method': 'please_never_use_it_in_production', 'method_params': []}],
'authenticated': 1}
config_file = environ.get('KB_DEPLOYMENT_CONFIG', None)
cls.cfg = {}
config = ConfigParser()
config.read(config_file)
for nameval in config.items('MyContigFilter'):
cls.cfg[nameval[0]] = nameval[1]
cls.wsURL = cls.cfg['workspace-url']
cls.wsClient = workspaceService(cls.wsURL, token=token)
cls.serviceImpl = MyContigFilter(cls.cfg)
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'wsName'):
cls.wsClient.delete_workspace({'workspace': cls.wsName})
print('Test workspace was deleted')
def getWsClient(self):
return self.__class__.wsClient
def getWsName(self):
if hasattr(self.__class__, 'wsName'):
return self.__class__.wsName
suffix = int(time.time() * 1000)
wsName = "test_MyContigFilter_" + str(suffix)
ret = self.getWsClient().create_workspace({'workspace': wsName})
self.__class__.wsName = wsName
return wsName
def getImpl(self):
return self.__class__.serviceImpl
def getContext(self):
return self.__class__.ctx
def test_filter_contigs_ok(self):
obj_name = "contigset.1"
contig1 = {'id': '1', 'length': 10, 'md5': 'md5', 'sequence': 'agcttttcat'}
contig2 = {'id': '2', 'length': 5, 'md5': 'md5', 'sequence': 'agctt'}
contig3 = {'id': '3', 'length': 12, 'md5': 'md5', 'sequence': 'agcttttcatgg'}
obj1 = {'contigs': [contig1, contig2, contig3], 'id': 'id', 'md5': 'md5', 'name': 'name',
'source': 'source', 'source_id': 'source_id', 'type': 'type'}
self.getWsClient().save_objects({'workspace': self.getWsName(), 'objects':
[{'type': 'KBaseGenomes.ContigSet', 'name': obj_name, 'data': obj1}]})
ret = self.getImpl().filter_contigs(self.getContext(), {'workspace': self.getWsName(),
'contigset_id': obj_name, 'min_length': '10', 'output_name': 'my_output'})
obj2 = self.getWsClient().get_objects([{'ref': self.getWsName()+'/'+'my_output'}])[0]['data']
self.assertEqual(len(obj2['contigs']), 2)
self.assertTrue(len(obj2['contigs'][0]['sequence']) >= 10)
self.assertTrue(len(obj2['contigs'][1]['sequence']) >= 10)
self.assertEqual(ret[0]['n_initial_contigs'], 3)
self.assertEqual(ret[0]['n_contigs_removed'], 1)
self.assertEqual(ret[0]['n_contigs_remaining'], 2)
def test_filter_contigs_err1(self):
with self.assertRaises(ValueError) as context:
self.getImpl().filter_contigs(self.getContext(), {'workspace': self.getWsName(),
'contigset_id': 'fake', 'min_length': 10, 'output_name': 'fake'})
self.assertTrue('Error loading original ContigSet object' in str(context.exception))
def test_filter_contigs_err2(self):
with self.assertRaises(ValueError) as context:
self.getImpl().filter_contigs(self.getContext(), {'workspace': self.getWsName(),
'contigset_id': 'fake', 'min_length': '-10', 'output_name': 'fake'})
self.assertTrue('min_length parameter shouldn\'t be negative' in str(context.exception))
def test_filter_contigs_err3(self):
with self.assertRaises(ValueError) as context:
self.getImpl().filter_contigs(self.getContext(), {'workspace': self.getWsName(),
'contigset_id': 'fake', 'min_length': 'ten', 'output_name': 'fake'})
self.assertTrue('Cannot parse integer from min_length parameter' in str(context.exception))
|
briehl/wjr_sdk_test
|
test/MyContigFilter_server_test.py
|
Python
|
mit
| 4,171 | 0.007672 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Here are all the test parameters and values for the each
`~astropy.modeling.FittableModel` defined. There is a dictionary for 1D and a
dictionary for 2D models.
Explanation of keywords of the dictionaries:
"parameters" : list or dict
Model parameters, the model is tested with. Make sure you keep the right
order. For polynomials you can also use a dict to specify the
coefficients. See examples below.
"x_values" : list
x values where the model is evaluated.
"y_values" : list
Reference y values for the in x_values given positions.
"z_values" : list
Reference z values for the in x_values and y_values given positions.
(2D model option)
"x_lim" : list
x test range for the model fitter. Depending on the model this can differ
e.g. the PowerLaw model should be tested over a few magnitudes.
"y_lim" : list
y test range for the model fitter. Depending on the model this can differ
e.g. the PowerLaw model should be tested over a few magnitudes. (2D model
option)
"log_fit" : bool
PowerLaw models should be tested over a few magnitudes. So log_fit should
be true.
"requires_scipy" : bool
If a model requires scipy (Bessel functions etc.) set this flag.
"integral" : float
Approximate value of the integral in the range x_lim (and y_lim).
"deriv_parameters" : list
If given the test of the derivative will use these parameters to create a
model (optional)
"deriv_initial" : list
If given the test of the derivative will use these parameters as initial
values for the fit (optional)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ..functional_models import (
Gaussian1D, Sine1D, Box1D, Linear1D, Lorentz1D,
MexicanHat1D, Trapezoid1D, Const1D, Moffat1D,
Gaussian2D, Const2D, Box2D, MexicanHat2D,
TrapezoidDisk2D, AiryDisk2D, Moffat2D, Disk2D,
Ring2D)
from ..polynomial import Polynomial1D, Polynomial2D
from ..powerlaws import (
PowerLaw1D, BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D,
LogParabola1D)
import numpy as np
#1D Models
models_1D = {
Gaussian1D: {
'parameters': [1, 0, 1],
'x_values': [0, np.sqrt(2), -np.sqrt(2)],
'y_values': [1.0, 0.367879, 0.367879],
'x_lim': [-10, 10],
'integral': np.sqrt(2 * np.pi)
},
Sine1D: {
'parameters': [1, 0.1],
'x_values': [0, 2.5],
'y_values': [0, 1],
'x_lim': [-10, 10],
'integral': 0
},
Box1D: {
'parameters': [1, 0, 10],
'x_values': [-5, 5, 0, -10, 10],
'y_values': [1, 1, 1, 0, 0],
'x_lim': [-10, 10],
'integral': 10
},
Linear1D: {
'parameters': [1, 0],
'x_values': [0, np.pi, 42, -1],
'y_values': [0, np.pi, 42, -1],
'x_lim': [-10, 10],
'integral': 0
},
Lorentz1D: {
'parameters': [1, 0, 1],
'x_values': [0, -1, 1, 0.5, -0.5],
'y_values': [1., 0.2, 0.2, 0.5, 0.5],
'x_lim': [-10, 10],
'integral': 1
},
MexicanHat1D: {
'parameters': [1, 0, 1],
'x_values': [0, 1, -1, 3, -3],
'y_values': [1.0, 0.0, 0.0, -0.088872, -0.088872],
'x_lim': [-20, 20],
'integral': 0
},
Trapezoid1D: {
'parameters': [1, 0, 2, 1],
'x_values': [0, 1, -1, 1.5, -1.5, 2, 2],
'y_values': [1, 1, 1, 0.5, 0.5, 0, 0],
'x_lim': [-10, 10],
'integral': 3
},
Const1D: {
'parameters': [1],
'x_values': [-1, 1, np.pi, -42., 0],
'y_values': [1, 1, 1, 1, 1],
'x_lim': [-10, 10],
'integral': 20
},
Moffat1D: {
'parameters': [1, 0, 1, 2],
'x_values': [0, 1, -1, 3, -3],
'y_values': [1.0, 0.25, 0.25, 0.01, 0.01],
'x_lim': [-10, 10],
'integral': 1,
'deriv_parameters': [23.4, 1.2, 2.1, 2.3],
'deriv_initial': [10, 1, 1, 1]
},
PowerLaw1D: {
'parameters': [1, 1, 2],
'constraints': {'fixed': {'x_0': True}},
'x_values': [1, 10, 100],
'y_values': [1.0, 0.01, 0.0001],
'x_lim': [1, 10],
'log_fit': True,
'integral': 0.99
},
BrokenPowerLaw1D: {
'parameters': [1, 1, 2, 3],
'constraints': {'fixed': {'x_break': True}},
'x_values': [0.1, 1, 10, 100],
'y_values': [1e2, 1.0, 1e-3, 1e-6],
'x_lim': [0.1, 100],
'log_fit': True
},
ExponentialCutoffPowerLaw1D: {
'parameters': [1, 1, 2, 3],
'constraints': {'fixed': {'x_0': True}},
'x_values': [0.1, 1, 10, 100],
'y_values': [9.67216100e+01, 7.16531311e-01, 3.56739933e-04,
3.33823780e-19],
'x_lim': [0.01, 100],
'log_fit': True
},
LogParabola1D: {
'parameters': [1, 2, 3, 0.1],
'constraints': {'fixed': {'x_0': True}},
'x_values': [0.1, 1, 10, 100],
'y_values': [3.26089063e+03, 7.62472488e+00, 6.17440488e-03,
1.73160572e-06],
'x_lim': [0.1, 100],
'log_fit': True
},
Polynomial1D: {
'parameters': {'degree': 2, 'c0': 1., 'c1': 1., 'c2': 1.},
'x_values': [1, 10, 100],
'y_values': [3, 111, 10101],
'x_lim': [-3, 3]
}
}
#2D Models
models_2D = {
Gaussian2D: {
'parameters': [1, 0, 0, 1, 1],
'constraints': {'fixed': {'theta': True}},
'x_values': [0, np.sqrt(2), -np.sqrt(2)],
'y_values': [0, np.sqrt(2), -np.sqrt(2)],
'z_values': [1, 1. / np.exp(1) ** 2, 1. / np.exp(1) ** 2],
'x_lim': [-10, 10],
'y_lim': [-10, 10],
'integral': 2 * np.pi,
'deriv_parameters': [137., 5.1, 5.4, 1.5, 2., np.pi/4],
'deriv_initial': [10, 5, 5, 4, 4, .5]
},
Const2D: {
'parameters': [1],
'x_values': [-1, 1, np.pi, -42., 0],
'y_values': [0, 1, 42, np.pi, -1],
'z_values': [1, 1, 1, 1, 1],
'x_lim': [-10, 10],
'y_lim': [-10, 10],
'integral': 400
},
Box2D: {
'parameters': [1, 0, 0, 10, 10],
'x_values': [-5, 5, -5, 5, 0, -10, 10],
'y_values': [-5, 5, 0, 0, 0, -10, 10],
'z_values': [1, 1, 1, 1, 1, 0, 0],
'x_lim': [-10, 10],
'y_lim': [-10, 10],
'integral': 100
},
MexicanHat2D: {
'parameters': [1, 0, 0, 1],
'x_values': [0, 0, 0, 0, 0, 1, -1, 3, -3],
'y_values': [0, 1, -1, 3, -3, 0, 0, 0, 0],
'z_values': [1.0, 0.303265, 0.303265, -0.038881, -0.038881,
0.303265, 0.303265, -0.038881, -0.038881],
'x_lim': [-10, 11],
'y_lim': [-10, 11],
'integral': 0
},
TrapezoidDisk2D: {
'parameters': [1, 0, 0, 1, 1],
'x_values': [0, 0.5, 0, 1.5],
'y_values': [0, 0.5, 1.5, 0],
'z_values': [1, 1, 0.5, 0.5],
'x_lim': [-3, 3],
'y_lim': [-3, 3]
},
AiryDisk2D: {
'parameters': [7, 0, 0, 10],
'x_values': [0, 1, -1, -0.5, -0.5],
'y_values': [0, -1, 0.5, 0.5, -0.5],
'z_values': [7., 6.50158267, 6.68490643, 6.87251093, 6.87251093],
'x_lim': [-10, 10],
'y_lim': [-10, 10],
'requires_scipy': True
},
Moffat2D: {
'parameters': [1, 0, 0, 1, 2],
'x_values': [0, 1, -1, 3, -3],
'y_values': [0, -1, 3, 1, -3],
'z_values': [1.0, 0.111111, 0.008264, 0.008264, 0.00277],
'x_lim': [-3, 3],
'y_lim': [-3, 3]
},
Polynomial2D: {
'parameters': {'degree': 1, 'c0_0': 1., 'c1_0': 1., 'c0_1': 1.},
'x_values': [1, 2, 3],
'y_values': [1, 3, 2],
'z_values': [3, 6, 6],
'x_lim': [1, 100],
'y_lim': [1, 100]
},
Disk2D: {
'parameters': [1, 0, 0, 5],
'x_values': [-5, 5, -5, 5, 0, -10, 10],
'y_values': [-5, 5, 0, 0, 0, -10, 10],
'z_values': [0, 0, 1, 1, 1, 0, 0],
'x_lim': [-10, 10],
'y_lim': [-10, 10],
'integral': np.pi * 5 ** 2
},
Ring2D: {
'parameters': [1, 0, 0, 5, 5],
'x_values': [-5, 5, -5, 5, 0, -10, 10],
'y_values': [-5, 5, 0, 0, 0, -10, 10],
'z_values': [1, 1, 1, 1, 0, 0, 0],
'x_lim': [-10, 10],
'y_lim': [-10, 10],
'integral': np.pi * (10 ** 2 - 5 ** 2)
}
}
|
piotroxp/scibibscan
|
scib/lib/python3.5/site-packages/astropy/modeling/tests/example_models.py
|
Python
|
mit
| 8,459 | 0.000236 |
DATA_DIR = '/media/d/ssd2/dstl/'
|
danzelmo/dstl-competition
|
global_vars.py
|
Python
|
mit
| 32 | 0.03125 |
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.template.defaultfilters import slugify
from django.conf import settings
from django.core.files.base import ContentFile
from django.template.loader import get_template
from django.template import TemplateDoesNotExist,Template,Context
from massmedia import settings as appsettings
from cStringIO import StringIO
import mimetypes
import os
import zipfile
from django_extensions.db.fields import AutoSlugField
# Patch mimetypes w/ any extra types
mimetypes.types_map.update(appsettings.EXTRA_MIME_TYPES)
try:
import cPickle as pickle
except ImportError:
import pickle
try:
from iptcinfo import IPTCInfo
iptc = 1
except ImportError:
iptc = 0
# Try to load a user-defined category model
if appsettings.CATEGORIES_MODULE:
CATEGORIES_MODULE = appsettings.CATEGORIES_MODULE
else:
# Otherwise use dummy category
CATEGORIES_MODULE = 'Category'
class Category(models.Model):
name = models.CharField(max_length=150)
def __unicode__(self): return self.name
try:
import Image as PilImage
except ImportError:
try:
from PIL import Image as PilImage
except ImportError:
PilImage = 0
try:
from hachoir_core.error import HachoirError
from hachoir_core.stream import InputStreamError
from hachoir_parser import createParser
from hachoir_metadata import extractMetadata
except ImportError:
extractMetadata = None
class upload_to(object):
"""
This tricky little bugger allows us to use all lowercase urls and stuff.
"""
def __init__(self, format, field='file'):
self.format = format
self.field = field
def __call__(self, instance, filename):
get_filename = instance._meta.get_field(self.field).get_filename
return os.path.join(self.get_directory_name(), get_filename(filename))
def get_directory_name(self):
import datetime
return os.path.normpath(datetime.datetime.now().strftime(self.format)).lower()
def parse_metadata(path):
try:
parser = createParser(unicode(path))
except InputStreamError:
return
if not parser:
return
try:
metadata = extractMetadata(parser, appsettings.INFO_QUALITY)
except HachoirError:
return
if not metadata:
return
data = {}
text = metadata.exportPlaintext(priority=None, human=False)
for line in text:
if not line.strip().startswith('-'):
key = line.strip().lower().split(':')[0]
value = []
else:
key = line.strip().split('- ')[1].split(': ')[0]
value = line.split(key)[1][2:]
if key in data:
if hasattr(data[key],'__iter__'):
value = data[key] + [value]
else:
value = [data[key],value]
if value:
data[key] = value
return data
class PickledObjectField(models.Field):
""" Django snippet - http://www.djangosnippets.org/snippets/513/ """
__metaclass__ = models.SubfieldBase
def to_python(self, value):
try:
return pickle.loads(str(value))
except:
# If an error was raised, just return the plain value
return value
def get_db_prep_save(self, value):
if value is not None:
value = pickle.dumps(value)
return str(value)
def get_internal_type(self):
return 'TextField'
def get_db_prep_lookup(self, lookup_type, value):
if lookup_type == 'exact':
value = self.get_db_prep_save(value)
return super(PickledObjectField, self).get_db_prep_lookup(lookup_type, value)
elif lookup_type == 'in':
value = [self.get_db_prep_save(v) for v in value]
return super(PickledObjectField, self).get_db_prep_lookup(lookup_type, value)
else:
raise TypeError('Lookup type %s is not supported.' % lookup_type)
class Media(models.Model):
title = models.CharField(max_length=255)
slug = AutoSlugField(max_length=50, overwrite=True, populate_from=("title",))
creation_date = models.DateTimeField(auto_now_add=True)
author = models.ForeignKey(User, blank=True, null=True, limit_choices_to={'is_staff':True})
one_off_author = models.CharField('one-off author', max_length=100, blank=True)
credit = models.CharField(max_length=150, blank=True)
caption = models.TextField(blank=True)
metadata = PickledObjectField(blank=True)
sites = models.ManyToManyField(Site,related_name='%(class)s_sites')
categories = models.ManyToManyField(CATEGORIES_MODULE, blank=True)
reproduction_allowed = models.BooleanField("we have reproduction rights for this media", default=True)
public = models.BooleanField(help_text="this media is publicly available", default=True)
external_url = models.URLField(blank=True,null=True,help_text="If this URLField is set, the media will be pulled externally")
mime_type = models.CharField(max_length=150,blank=True,null=True)
width = models.IntegerField(blank=True, null=True)
height = models.IntegerField(blank=True, null=True)
widget_template = models.CharField(max_length=255,blank=True,null=True,
help_text='The template name used to generate the widget (defaults to mime_type layout)')
class Meta:
ordering = ('-creation_date',)
abstract = True
unique_together = (('slug', 'creation_date'),)
def __unicode__(self):
return self.title
def get_absolute_url(self):
if self.external_url:
return self.external_url
if hasattr(self,'file') and getattr(self,'file',None):
return self.absolute_url((
settings.MEDIA_URL,
'/'.join([self.creation_date.strftime("%Y"), self.creation_date.strftime("%b").lower(), self.creation_date.strftime("%d")]),
os.path.basename(self.file.path)))
return ''
def absolute_url(self, format):
raise NotImplementedError
def save(self, *args, **kwargs):
if self.file and not self.mime_type:
self.mime_type = mimetypes.guess_type(self.file.path)[0]
if not(self.metadata) and self.file and extractMetadata:
self.metadata = parse_metadata(self.file.path) or ''
super(Media, self).save(*args, **kwargs)
def get_mime_type(self):
if self.mime_type:
return self.mime_type
if self.metadata and 'mime_type' in self.metadata:
return self.metadata['mime_type']
return
def get_template(self):
mime_type = self.get_mime_type()
if self.widget_template:
if appsettings.TEMPLATE_MODE == appsettings.FILE_SYSTEM:
return get_template(self.widget_template)
else:
return MediaTemplate.objects.get(name=self.widget_template).template()
elif mime_type is None:
if appsettings.TEMPLATE_MODE == appsettings.FILE_SYSTEM:
if appsettings.USE_VOXANT and isinstance(self, VoxantVideo):
return get_template('massmedia/voxant.html')
else:
return get_template('massmedia/generic.html')
else:
return MediaTemplate.objects.get(mimetype='').tempate()
else:
if appsettings.TEMPLATE_MODE == appsettings.FILE_SYSTEM:
try:
return get_template('massmedia/%s.html'%mime_type)
except TemplateDoesNotExist:
try:
return get_template('massmedia/%s/generic.html'%mime_type.split('/')[0])
except TemplateDoesNotExist:
return get_template('massmedia/generic.html')
else:
try:
return MediaTemplate.objects.get(mimetype=mime_type)
except MediaTemplate.DoesNotExist:
try:
return MediaTemplate.objects.get(mimetype=mime_type.split('/')[0])
except MediaTemplate.DoesNotExist:
return MediaTemplate.objects.get(mimetype='').tempate()
def render_template(self):
return self.get_template().render(Context({
'media':self,
'MEDIA_URL':settings.MEDIA_URL
}))
class Image(Media):
file = models.ImageField(upload_to=upload_to('img/%Y/%b/%d'), blank=True, null=True)
def save(self, *args, **kwargs):
if iptc:
try:
data.update(IPTCInfo(path).__dict__['_data'])
except:
pass
super(Image, self).save(*args, **kwargs)
def thumb(self):
if self.file:
thumbnail = '%s.thumb%s'%os.path.splitext(self.file.path)
thumburl = thumbnail[len(settings.MEDIA_ROOT)-1:]
if not os.path.exists(thumbnail):
im = PilImage.open(self.file)
im.thumbnail(appsettings.THUMB_SIZE,PilImage.ANTIALIAS)
try:
im.save(thumbnail,im.format)
except KeyError:
pass
return '<a href="%s"><img src="%s%s"/></a>'%\
(self.get_absolute_url(),settings.MEDIA_URL,thumburl)
elif self.external_url:
return '<a href="%s"><img src="%s"/></a>'%\
(self.get_absolute_url(),self.get_absolute_url())
thumb.allow_tags = True
thumb.short_description = 'Thumbnail'
def absolute_url(self, format):
return "%simg/%s/%s" % format
class Video(Media):
file = models.FileField(upload_to=upload_to('video/%Y/%b/%d'), blank=True, null=True)
thumbnail = models.ForeignKey(Image, null=True, blank=True)
def thumb(self):
return self.thumbnail.thumb()
thumb.allow_tags = True
thumb.short_description = 'Thumbnail'
def absolute_url(self, format):
return "%svideo/%s/%s" % format
if appsettings.USE_VOXANT:
class VoxantVideo(Video):
asset_id = models.CharField(max_length=255,help_text='Voxant video asset ID (the `a` parameter)')
layout_id = models.CharField(max_length=255,help_text='Voxant video asset ID (the `m` parameter)')
def absolute_url(self, format):
return "%svoxantvideo/%s/%s" % format
class Audio(Media):
file = models.FileField(upload_to=upload_to('audio/%Y/%b/%d'), blank=True, null=True)
class Meta:
verbose_name_plural = 'audio'
def absolute_url(self, format):
return "%saudio/%s/%s" % format
class Flash(Media):
file = models.FileField(upload_to=upload_to('flash/%Y/%b/%d'), blank=True, null=True)
class Meta:
verbose_name_plural = 'flash'
def absolute_url(self, format):
return "%sflash/%s/%s" % format
class Collection(models.Model):
creation_date = models.DateTimeField(auto_now_add=True)
title = models.CharField(max_length=255, unique=True)
slug = AutoSlugField(max_length=50, overwrite=True, populate_from=("title",))
caption = models.TextField(blank=True)
zip_file = models.FileField('Media files in a .zip', upload_to='tmp', blank=True,null=True,
help_text='Select a .zip file of media to upload into a the Collection.')
public = models.BooleanField(help_text="this collection is publicly available", default=True)
sites = models.ManyToManyField(Site)
categories = models.ManyToManyField(CATEGORIES_MODULE, blank=True)
class Meta:
ordering = ['-creation_date']
get_latest_by = 'creation_date'
def __unicode__(self):
return self.title
def save(self, *args, **kwargs):
super(Collection, self).save(*args, **kwargs)
self.process_zipfile()
def process_zipfile(self):
if self.zip_file and os.path.isfile(self.zip_file.path):
zip = zipfile.ZipFile(self.zip_file.path)
if zip.testzip():
raise Exception('"%s" in the .zip archive is corrupt.' % bad_file)
for filename in zip.namelist():
if filename.startswith('__'): # do not process meta files
continue
data = zip.read(filename)
size = len(data)
if size:
title,ext = os.path.splitext(os.path.basename(filename))
ext = ext[1:]
slug = slugify(title)
if ext in appsettings.IMAGE_EXTS:
model = Image
try:
trial_image = PilImage.open(StringIO(data))
trial_image.load()
trial_image = PilImage.open(StringIO(data))
trial_image.verify()
except Exception:
continue
elif ext in appsettings.VIDEO_EXTS:
model = Video
elif ext in appsettings.AUDIO_EXTS:
model = Audio
elif ext in appsettings.FLASH_EXTS:
model = Flash
else:
raise TypeError, 'Unknown media extension %s'%ext
try:
media = model.objects.get(slug=slug) #XXX
except model.DoesNotExist:
media = model(title=title, slug=slug)
media.file.save(filename, ContentFile(data))
# XXX: Make site relations possible, send signals
media.sites.add(Site.objects.get_current())
CollectionRelation(content_object=media,collection=self).save()
zip.close()
os.remove(self.zip_file.path)
self.zip_file.delete()
super(Collection, self).save(*(), **{})
collection_limits = {'model__in':('image','audio','video','flash')}
class CollectionRelation(models.Model):
collection = models.ForeignKey(Collection)
content_type = models.ForeignKey(ContentType, limit_choices_to=collection_limits)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type', 'object_id')
def __unicode__(self):
return unicode(self.content_object)
class MediaTemplate(models.Model):
name = models.CharField(max_length=255)
mimetype = models.CharField(max_length=255,null=True,blank=True)
content = models.TextField()
def __unicode__(self):
return self.name
def template(self):
return Template(self.content)
|
uclastudentmedia/django-massmedia
|
massmedia/models.py
|
Python
|
apache-2.0
| 14,915 | 0.007643 |
#!/usr/bin/env python
'''
2D Group Members:
> Charlotte Phang
> Lau Wenkie
> Mok Jun Neng
> Martin Tan
> Dicson Candra
'''
#Import relevant modules
import RPi.GPIO as GPIO
import os
import glob
import time
from PIDsm import PID_ControllerSM
### PIN NUMBERS ###
tempPin = 4
motorPin = 12
fanPin = 13
### PARAMETERS ###
pwmFreq = 100
#Code to read temperature from the ####################### sensor
class tempSensor:
#Location of file to read from for temperature: /sys/bus/w1/devices/28-000008ae29b8/w1_slave
#to manually read, "cat /sys/bus/w1/devices/28-000008ae29b8/w1_slave" in terminal
def __init__(self):
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
#define directory of the temperature data in the linux filesystem
self.base_dir = '/sys/bus/w1/devices/'
self.device_folder = glob.glob(self.base_dir + '28*')[0]
self.device_file = self.device_folder + '/w1_slave'
def read_temp_raw(self): #reading raw output of the 1 wire bus
f = open(self.device_file, 'r') #open file defined in self.device_file
lines = f.readlines()
f.close() #close file to reset the file pointer
return lines
def __call__(self): #function to extract temperature data from the raw data in string
lines = self.read_temp_raw()
while lines[0].strip()[-3:] != 'YES':
time.sleep(0.2)
lines = self.read_temp_raw()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
temp_c = float(temp_string) / 1000.0
return temp_c
#Set up global variables
GPIO.setmode(GPIO.BCM) #use BCM pin numbering system
GPIO.setup(tempPin, GPIO.IN, GPIO.PUD_UP) #set up the 1 wire interface
GPIO.setup(motorPin, GPIO.OUT) #setup the motor pin
GPIO.setup(fanPin, GPIO.OUT) #setup the fan pin
#define the fan and pump pins as PWM pins and initialise them at 0% PWM (off)
pump = GPIO.PWM(motorPin, pwmFreq)
pump.start(0.0)
fan = GPIO.PWM(fanPin, pwmFreq)
fan.start(0.0)
#create controller object from MotorSM class
targetTemperature = raw_input('Please key in your desired target temperature: ')
motorController = PID_ControllerSM(float(targetTemperature),30,0,10)
motorController.start()
fanController = PID_ControllerSM(float(targetTemperature),50,0,5)
fanController.start()
#create sensor object
temp = tempSensor()
def main(): #main code to loop indefinitely here
#check current temperature
currentTemp = temp()
print 'Current temp: %.3f' %(currentTemp) #for monitoring in the terminal
motorOutput = motorController.step(currentTemp) #get the amount of PWM to output to fan and pump from the state machine
fanOutput = fanController.step(currentTemp)
pump.ChangeDutyCycle(motorOutput) #output the pump PWM. ChangeDutyCycle takes a value from 0 to 100%
fan.ChangeDutyCycle(fanOutput) #output the fan PWM
#####################################################################################
### Run the main code unless user terminates using Ctrl+C. ###
### Before exiting, code will reset and release GPIO control to deactivate motor. ###
#####################################################################################
while True:
try:
main() #execute main()
except KeyboardInterrupt:
print 'Cleaning and Exiting...'
GPIO.cleanup() #clean up the pins and exit the program
print 'Done'
exit()
|
tgymartin/green-fingers-2d
|
DW/part2_and_part3/Cohort_6_Team_6/part3_code/prototype.py
|
Python
|
gpl-3.0
| 3,524 | 0.018729 |
from django.conf import settings as django_settings
# noinspection PyPep8Naming
class LazySettings:
@property
def REQUIRE_MAIN_NAME(self):
return getattr(django_settings, 'REQUIRE_MAIN_NAME', 'main')
@property
def DEFAULT_PAGINATE_BY(self):
return getattr(django_settings, 'DEFAULT_PAGINATE_BY', 30)
@property
def FILTER_SEARCH_INPUT_BY(self):
return getattr(django_settings, 'FILTER_SEARCH_INPUT_BY', 10)
@property
def AUTO_PAGE_SIZE(self):
return getattr(django_settings, 'AUTO_PAGE_SIZE', True)
@property
def AUTO_FORM_HEADLINE(self):
return getattr(django_settings, 'AUTO_FORM_HEADLINE', True)
@property
def CREATE_FORM_HEADLINE_PREFIX(self):
return getattr(django_settings, 'CREATE_FORM_HEADLINE_PREFIX', 'Add')
@property
def UPDATE_FORM_HEADLINE_PREFIX(self):
return getattr(django_settings, 'UPDATE_FORM_HEADLINE_PREFIX', 'Edit')
@property
def FORM_RELATED_OBJECT_IDS(self):
return getattr(django_settings, 'FORM_RELATED_OBJECT_IDS', True)
@property
def GENERIC_FORM_BASE_TEMPLATE(self):
return getattr(django_settings, 'GENERIC_FORM_BASE_TEMPLATE', 'ajaxviews/generic_form.html')
@property
def AUTO_DELETE_URL(self):
return getattr(django_settings, 'AUTO_DELETE_URL', True)
@property
def FORM_DELETE_CONFIRMATION(self):
return getattr(django_settings, 'FORM_DELETE_CONFIRMATION', True)
@property
def AUTO_SUCCESS_URL(self):
return getattr(django_settings, 'AUTO_SUCCESS_URL', True)
settings = LazySettings()
|
Pyco7/django-ajax-views
|
ajaxviews/conf.py
|
Python
|
mit
| 1,622 | 0.001233 |
import re
import time
class BaseCounters:
def __init__(self):
self.keyre = re.compile('\A[\w.]+\Z')
def ping(self, key):
self.validate_key(key)
self.do_ping(key, int(time.time()))
def hit(self, key, n=1):
self.validate_key(key)
self.do_hit(key, n)
def validate_key(self, key):
if re.match(self.keyre, key):
pass
else:
raise ValueError("Counters keys must only contain letters, numbers, the underscore (_) and fullstop (.), received \"%s\"" % key)
|
francois/pycounters
|
counters/base_counters.py
|
Python
|
mit
| 499 | 0.022044 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import sys
import os
import operator
import shlex
import warnings
import heapq
import bisect
import random
from subprocess import Popen, PIPE
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
from typing import (
Any,
Callable,
Dict,
Generic,
Hashable,
Iterable,
Iterator,
IO,
List,
NoReturn,
Optional,
Sequence,
Tuple,
Union,
TypeVar,
cast,
overload,
TYPE_CHECKING,
)
from pyspark.java_gateway import local_connect_and_auth
from pyspark.serializers import (
AutoBatchedSerializer,
BatchedSerializer,
NoOpSerializer,
CartesianDeserializer,
CloudPickleSerializer,
PairDeserializer,
CPickleSerializer,
Serializer,
pack_long,
read_int,
write_int,
)
from pyspark.join import (
python_join,
python_left_outer_join,
python_right_outer_join,
python_full_outer_join,
python_cogroup,
)
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resource.requests import ExecutorResourceRequests, TaskResourceRequests
from pyspark.resource.profile import ResourceProfile
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import (
Aggregator,
ExternalMerger,
get_used_memory,
ExternalSorter,
ExternalGroupBy,
)
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.util import fail_on_stopiteration, _parse_memory
if TYPE_CHECKING:
import socket
import io
from pyspark._typing import NonUDFType
from pyspark._typing import S, NumberOrArray
from pyspark.context import SparkContext
from pyspark.sql.pandas._typing import (
PandasScalarUDFType,
PandasGroupedMapUDFType,
PandasGroupedAggUDFType,
PandasWindowAggUDFType,
PandasScalarIterUDFType,
PandasMapIterUDFType,
PandasCogroupedMapUDFType,
ArrowMapIterUDFType,
)
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import AtomicType, StructType
from pyspark.sql._typing import AtomicValue, RowLike, SQLBatchedUDFType
from py4j.java_gateway import JavaObject # type: ignore[import]
from py4j.java_collections import JavaArray # type: ignore[import]
T = TypeVar("T")
T_co = TypeVar("T_co", covariant=True)
U = TypeVar("U")
K = TypeVar("K", bound=Hashable)
V = TypeVar("V")
V1 = TypeVar("V1")
V2 = TypeVar("V2")
V3 = TypeVar("V3")
__all__ = ["RDD"]
class PythonEvalType:
"""
Evaluation type of python rdd.
These values are internal to PySpark.
These values should match values in org.apache.spark.api.python.PythonEvalType.
"""
NON_UDF: "NonUDFType" = 0
SQL_BATCHED_UDF: "SQLBatchedUDFType" = 100
SQL_SCALAR_PANDAS_UDF: "PandasScalarUDFType" = 200
SQL_GROUPED_MAP_PANDAS_UDF: "PandasGroupedMapUDFType" = 201
SQL_GROUPED_AGG_PANDAS_UDF: "PandasGroupedAggUDFType" = 202
SQL_WINDOW_AGG_PANDAS_UDF: "PandasWindowAggUDFType" = 203
SQL_SCALAR_PANDAS_ITER_UDF: "PandasScalarIterUDFType" = 204
SQL_MAP_PANDAS_ITER_UDF: "PandasMapIterUDFType" = 205
SQL_COGROUPED_MAP_PANDAS_UDF: "PandasCogroupedMapUDFType" = 206
SQL_MAP_ARROW_ITER_UDF: "ArrowMapIterUDFType" = 207
def portable_hash(x: Hashable) -> int:
"""
This function returns consistent hash code for builtin types, especially
for None and tuple with None.
The algorithm is similar to that one used by CPython 2.7
Examples
--------
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if "PYTHONHASHSEED" not in os.environ:
raise RuntimeError("Randomness of hash of string should be disabled via PYTHONHASHSEED")
if x is None:
return 0
if isinstance(x, tuple):
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= sys.maxsize
h ^= len(x)
if h == -1:
h = -2
return int(h)
return hash(x)
class BoundedFloat(float):
"""
Bounded value is generated by approximate job, with confidence and low
bound and high bound.
Examples
--------
>>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
100.0
"""
confidence: float
low: float
high: float
def __new__(cls, mean: float, confidence: float, low: float, high: float) -> "BoundedFloat":
obj = float.__new__(cls, mean)
obj.confidence = confidence
obj.low = low
obj.high = high
return obj
def _create_local_socket(sock_info: "JavaArray") -> "io.BufferedRWPair":
"""
Create a local socket that can be used to load deserialized data from the JVM
Parameters
----------
sock_info : tuple
Tuple containing port number and authentication secret for a local socket.
Returns
-------
sockfile file descriptor of the local socket
"""
sockfile: "io.BufferedRWPair"
sock: "socket.socket"
port: int = sock_info[0]
auth_secret: str = sock_info[1]
sockfile, sock = local_connect_and_auth(port, auth_secret)
# The RDD materialization time is unpredictable, if we set a timeout for socket reading
# operation, it will very possibly fail. See SPARK-18281.
sock.settimeout(None)
return sockfile
def _load_from_socket(sock_info: "JavaArray", serializer: Serializer) -> Iterator[Any]:
"""
Connect to a local socket described by sock_info and use the given serializer to yield data
Parameters
----------
sock_info : tuple
Tuple containing port number and authentication secret for a local socket.
serializer : :py:class:`Serializer`
The PySpark serializer to use
Returns
-------
result of :py:meth:`Serializer.load_stream`,
usually a generator that yields deserialized data
"""
sockfile = _create_local_socket(sock_info)
# The socket will be automatically closed when garbage-collected.
return serializer.load_stream(sockfile)
def _local_iterator_from_socket(sock_info: "JavaArray", serializer: Serializer) -> Iterator[Any]:
class PyLocalIterable:
"""Create a synchronous local iterable over a socket"""
def __init__(self, _sock_info: "JavaArray", _serializer: Serializer):
port: int
auth_secret: str
jsocket_auth_server: "JavaObject"
port, auth_secret, self.jsocket_auth_server = _sock_info
self._sockfile = _create_local_socket((port, auth_secret))
self._serializer = _serializer
self._read_iter: Iterator[Any] = iter([]) # Initialize as empty iterator
self._read_status = 1
def __iter__(self) -> Iterator[Any]:
while self._read_status == 1:
# Request next partition data from Java
write_int(1, self._sockfile)
self._sockfile.flush()
# If response is 1 then there is a partition to read, if 0 then fully consumed
self._read_status = read_int(self._sockfile)
if self._read_status == 1:
# Load the partition data as a stream and read each item
self._read_iter = self._serializer.load_stream(self._sockfile)
for item in self._read_iter:
yield item
# An error occurred, join serving thread and raise any exceptions from the JVM
elif self._read_status == -1:
self.jsocket_auth_server.getResult()
def __del__(self) -> None:
# If local iterator is not fully consumed,
if self._read_status == 1:
try:
# Finish consuming partition data stream
for _ in self._read_iter:
pass
# Tell Java to stop sending data and close connection
write_int(0, self._sockfile)
self._sockfile.flush()
except Exception:
# Ignore any errors, socket is automatically closed when garbage-collected
pass
return iter(PyLocalIterable(sock_info, serializer))
class Partitioner:
def __init__(self, numPartitions: int, partitionFunc: Callable[[Any], int]):
self.numPartitions = numPartitions
self.partitionFunc = partitionFunc
def __eq__(self, other: Any) -> bool:
return (
isinstance(other, Partitioner)
and self.numPartitions == other.numPartitions
and self.partitionFunc == other.partitionFunc
)
def __call__(self, k: Any) -> int:
return self.partitionFunc(k) % self.numPartitions
class RDD(Generic[T_co]):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(
self,
jrdd: "JavaObject",
ctx: "SparkContext",
jrdd_deserializer: Serializer = AutoBatchedSerializer(CPickleSerializer()),
):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.has_resource_profile = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
self.partitioner: Optional[Partitioner] = None
def _pickled(self: "RDD[T]") -> "RDD[T]":
return self._reserialize(AutoBatchedSerializer(CPickleSerializer()))
def id(self) -> int:
"""
A unique ID for this RDD (within its SparkContext).
"""
return self._id
def __repr__(self) -> str:
return self._jrdd.toString()
def __getnewargs__(self) -> NoReturn:
# This method is called when attempting to pickle an RDD, which is always an error:
raise RuntimeError(
"It appears that you are attempting to broadcast an RDD or reference an RDD from an "
"action or transformation. RDD transformations and actions can only be invoked by the "
"driver, not inside of other transformations; for example, "
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
"transformation and count action cannot be performed inside of the rdd1.map "
"transformation. For more information, see SPARK-5063."
)
@property
def context(self) -> "SparkContext":
"""
The :class:`SparkContext` that this RDD was created on.
"""
return self.ctx
def cache(self: "RDD[T]") -> "RDD[T]":
"""
Persist this RDD with the default storage level (`MEMORY_ONLY`).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY)
return self
def persist(self: "RDD[T]", storageLevel: StorageLevel = StorageLevel.MEMORY_ONLY) -> "RDD[T]":
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
If no storage level is specified defaults to (`MEMORY_ONLY`).
Examples
--------
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> rdd.persist().is_cached
True
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self: "RDD[T]", blocking: bool = False) -> "RDD[T]":
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
.. versionchanged:: 3.0.0
Added optional argument `blocking` to specify whether to block until all
blocks are deleted.
"""
self.is_cached = False
self._jrdd.unpersist(blocking)
return self
def checkpoint(self) -> None:
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with :meth:`SparkContext.setCheckpointDir` and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory, otherwise saving it
on a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self) -> bool:
"""
Return whether this RDD is checkpointed and materialized, either reliably or locally.
"""
return self._jrdd.rdd().isCheckpointed()
def localCheckpoint(self) -> None:
"""
Mark this RDD for local checkpointing using Spark's existing caching layer.
This method is for users who wish to truncate RDD lineages while skipping the expensive
step of replicating the materialized data in a reliable distributed file system. This is
useful for RDDs with long lineages that need to be truncated periodically (e.g. GraphX).
Local checkpointing sacrifices fault-tolerance for performance. In particular, checkpointed
data is written to ephemeral local storage in the executors instead of to a reliable,
fault-tolerant storage. The effect is that if an executor fails during the computation,
the checkpointed data may no longer be accessible, causing an irrecoverable job failure.
This is NOT safe to use with dynamic allocation, which removes executors along
with their cached blocks. If you must use both features, you are advised to set
`spark.dynamicAllocation.cachedExecutorIdleTimeout` to a high value.
The checkpoint directory set through :meth:`SparkContext.setCheckpointDir` is not used.
"""
self._jrdd.rdd().localCheckpoint()
def isLocallyCheckpointed(self) -> bool:
"""
Return whether this RDD is marked for local checkpointing.
Exposed for testing.
"""
return self._jrdd.rdd().isLocallyCheckpointed()
def getCheckpointFile(self) -> Optional[str]:
"""
Gets the name of the file to which this RDD was checkpointed
Not defined if RDD is checkpointed locally.
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
return checkpointFile.get() if checkpointFile.isDefined() else None
def map(self: "RDD[T]", f: Callable[[T], U], preservesPartitioning: bool = False) -> "RDD[U]":
"""
Return a new RDD by applying a function to each element of this RDD.
Examples
--------
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_: int, iterator: Iterable[T]) -> Iterable[U]:
return map(fail_on_stopiteration(f), iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(
self: "RDD[T]", f: Callable[[T], Iterable[U]], preservesPartitioning: bool = False
) -> "RDD[U]":
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
Examples
--------
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(_: int, iterator: Iterable[T]) -> Iterable[U]:
return chain.from_iterable(map(fail_on_stopiteration(f), iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(
self: "RDD[T]", f: Callable[[Iterable[T]], Iterable[U]], preservesPartitioning: bool = False
) -> "RDD[U]":
"""
Return a new RDD by applying a function to each partition of this RDD.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(_: int, iterator: Iterable[T]) -> Iterable[U]:
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(
self: "RDD[T]",
f: Callable[[int, Iterable[T]], Iterable[U]],
preservesPartitioning: bool = False,
) -> "RDD[U]":
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(
self: "RDD[T]",
f: Callable[[int, Iterable[T]], Iterable[U]],
preservesPartitioning: bool = False,
) -> "RDD[U]":
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
.. deprecated:: 0.9.0
use :py:meth:`RDD.mapPartitionsWithIndex` instead.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn(
"mapPartitionsWithSplit is deprecated; use mapPartitionsWithIndex instead",
FutureWarning,
stacklevel=2,
)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self) -> int:
"""
Returns the number of partitions in RDD
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.partitions().size()
def filter(self: "RDD[T]", f: Callable[[T], bool]) -> "RDD[T]":
"""
Return a new RDD containing only the elements that satisfy a predicate.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator: Iterable[T]) -> Iterable[T]:
return filter(fail_on_stopiteration(f), iterator)
return self.mapPartitions(func, True)
def distinct(self: "RDD[T]", numPartitions: Optional[int] = None) -> "RDD[T]":
"""
Return a new RDD containing the distinct elements in this RDD.
Examples
--------
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return (
self.map(lambda x: (x, None))
.reduceByKey(lambda x, _: x, numPartitions)
.map(lambda x: x[0])
)
def sample(
self: "RDD[T]", withReplacement: bool, fraction: float, seed: Optional[int] = None
) -> "RDD[T]":
"""
Return a sampled subset of this RDD.
Parameters
----------
withReplacement : bool
can elements be sampled multiple times (replaced when sampled out)
fraction : float
expected size of the sample as a fraction of this RDD's size
without replacement: probability that each element is chosen; fraction must be [0, 1]
with replacement: expected number of times each element is chosen; fraction must be >= 0
seed : int, optional
seed for the random number generator
Notes
-----
This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
Examples
--------
>>> rdd = sc.parallelize(range(100), 4)
>>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
True
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(
self: "RDD[T]", weights: Sequence[Union[int, float]], seed: Optional[int] = None
) -> "List[RDD[T]]":
"""
Randomly splits this RDD with the provided weights.
weights : list
weights for splits, will be normalized if they don't sum to 1
seed : int, optional
random seed
Returns
-------
list
split RDDs in a list
Examples
--------
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [
self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])
]
# this is ported from scala/spark/RDD.scala
def takeSample(
self: "RDD[T]", withReplacement: bool, num: int, seed: Optional[int] = None
) -> List[T]:
"""
Return a fixed-size sampled subset of this RDD.
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
if num > maxSampleSize:
raise ValueError("Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxsize)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(
sampleSizeLowerBound: int, total: int, withReplacement: bool
) -> float:
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if sampleSizeLowerBound < 12:
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = -log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
def union(self: "RDD[T]", other: "RDD[U]") -> "RDD[Union[T, U]]":
"""
Return the union of this RDD and another one.
Examples
--------
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd: "RDD[Union[T, U]]" = RDD(
self._jrdd.union(other._jrdd), self.ctx, self._jrdd_deserializer
)
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx, self.ctx.serializer)
if (
self.partitioner == other.partitioner
and self.getNumPartitions() == rdd.getNumPartitions()
):
rdd.partitioner = self.partitioner
return rdd
def intersection(self: "RDD[T]", other: "RDD[T]") -> "RDD[T]":
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
Notes
-----
This method performs a shuffle internally.
Examples
--------
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return (
self.map(lambda v: (v, None))
.cogroup(other.map(lambda v: (v, None)))
.filter(lambda k_vs: all(k_vs[1]))
.keys()
)
def _reserialize(self: "RDD[T]", serializer: Optional[Serializer] = None) -> "RDD[T]":
serializer = serializer or self.ctx.serializer
if self._jrdd_deserializer != serializer:
self = self.map(lambda x: x, preservesPartitioning=True)
self._jrdd_deserializer = serializer
return self
def __add__(self: "RDD[T]", other: "RDD[U]") -> "RDD[Union[T, U]]":
"""
Return the union of this RDD and another one.
Examples
--------
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
@overload
def repartitionAndSortWithinPartitions(
self: "RDD[Tuple[S, V]]",
numPartitions: Optional[int] = ...,
partitionFunc: Callable[["S"], int] = ...,
ascending: bool = ...,
) -> "RDD[Tuple[S, V]]":
...
@overload
def repartitionAndSortWithinPartitions(
self: "RDD[Tuple[K, V]]",
numPartitions: Optional[int],
partitionFunc: Callable[[K], int],
ascending: bool,
keyfunc: Callable[[K], "S"],
) -> "RDD[Tuple[K, V]]":
...
@overload
def repartitionAndSortWithinPartitions(
self: "RDD[Tuple[K, V]]",
numPartitions: Optional[int] = ...,
partitionFunc: Callable[[K], int] = ...,
ascending: bool = ...,
*,
keyfunc: Callable[[K], "S"],
) -> "RDD[Tuple[K, V]]":
...
def repartitionAndSortWithinPartitions(
self: "RDD[Tuple[Any, Any]]",
numPartitions: Optional[int] = None,
partitionFunc: Callable[[Any], int] = portable_hash,
ascending: bool = True,
keyfunc: Callable[[Any], Any] = lambda x: x,
) -> "RDD[Tuple[Any, Any]]":
"""
Repartition the RDD according to the given partitioner and, within each resulting partition,
sort records by their keys.
Examples
--------
>>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
>>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, True)
>>> rdd2.glom().collect()
[[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator: Iterable[Tuple[K, V]]) -> Iterable[Tuple[K, V]]:
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))
return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
@overload
def sortByKey(
self: "RDD[Tuple[S, V]]",
ascending: bool = ...,
numPartitions: Optional[int] = ...,
) -> "RDD[Tuple[K, V]]":
...
@overload
def sortByKey(
self: "RDD[Tuple[K, V]]",
ascending: bool,
numPartitions: int,
keyfunc: Callable[[K], "S"],
) -> "RDD[Tuple[K, V]]":
...
@overload
def sortByKey(
self: "RDD[Tuple[K, V]]",
ascending: bool = ...,
numPartitions: Optional[int] = ...,
*,
keyfunc: Callable[[K], "S"],
) -> "RDD[Tuple[K, V]]":
...
def sortByKey(
self: "RDD[Tuple[K, V]]",
ascending: Optional[bool] = True,
numPartitions: Optional[int] = None,
keyfunc: Callable[[Any], Any] = lambda x: x,
) -> "RDD[Tuple[K, V]]":
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
Examples
--------
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator: Iterable[Tuple[K, V]]) -> Iterable[Tuple[K, V]]:
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1)
return self.mapPartitions(sortPartition, True)
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
samples = sorted(samples, key=keyfunc)
# we have numPartitions many parts but one of the them has
# an implicit boundary
bounds = [
samples[int(len(samples) * (i + 1) / numPartitions)]
for i in range(0, numPartitions - 1)
]
def rangePartitioner(k: K) -> int:
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p
else:
return numPartitions - 1 - p # type: ignore[operator]
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
def sortBy(
self: "RDD[T]",
keyfunc: Callable[[T], "S"],
ascending: bool = True,
numPartitions: Optional[int] = None,
) -> "RDD[T]":
"""
Sorts this RDD by the given keyfunc
Examples
--------
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return (
self.keyBy(keyfunc) # type: ignore[type-var]
.sortByKey(ascending, numPartitions)
.values()
)
def glom(self: "RDD[T]") -> "RDD[List[T]]":
"""
Return an RDD created by coalescing all elements within each partition
into a list.
Examples
--------
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator: Iterable[T]) -> Iterable[List[T]]:
yield list(iterator)
return self.mapPartitions(func)
def cartesian(self: "RDD[T]", other: "RDD[U]") -> "RDD[Tuple[T, U]]":
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements ``(a, b)`` where ``a`` is in `self` and
``b`` is in `other`.
Examples
--------
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer, other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(
self: "RDD[T]",
f: Callable[[T], K],
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, Iterable[T]]]":
"""
Return an RDD of grouped items.
Examples
--------
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc)
def pipe(
self, command: str, env: Optional[Dict[str, str]] = None, checkCode: bool = False
) -> "RDD[str]":
"""
Return an RDD created by piping elements to a forked external process.
Parameters
----------
command : str
command to run.
env : dict, optional
environment variables to set.
checkCode : bool, optional
whether or not to check the return value of the shell command.
Examples
--------
>>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
['1', '2', '', '3']
"""
if env is None:
env = dict()
def func(iterator: Iterable[T]) -> Iterable[str]:
pipe = Popen(shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out: IO[bytes]) -> None:
for obj in iterator:
s = str(obj).rstrip("\n") + "\n"
out.write(s.encode("utf-8"))
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
def check_return_code() -> Iterable[int]:
pipe.wait()
if checkCode and pipe.returncode:
raise RuntimeError(
"Pipe function `%s' exited "
"with error code %d" % (command, pipe.returncode)
)
else:
for i in range(0):
yield i
return (
cast(bytes, x).rstrip(b"\n").decode("utf-8")
for x in chain(
iter(cast(IO[bytes], pipe.stdout).readline, b""), check_return_code()
)
)
return self.mapPartitions(func)
def foreach(self: "RDD[T]", f: Callable[[T], None]) -> None:
"""
Applies a function to all elements of this RDD.
Examples
--------
>>> def f(x): print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
f = fail_on_stopiteration(f)
def processPartition(iterator: Iterable[T]) -> Iterable[Any]:
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
def foreachPartition(self: "RDD[T]", f: Callable[[Iterable[T]], None]) -> None:
"""
Applies a function to each partition of this RDD.
Examples
--------
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it: Iterable[T]) -> Iterable[Any]:
r = f(it)
try:
return iter(r) # type: ignore[call-overload]
except TypeError:
return iter([])
self.mapPartitions(func).count() # Force evaluation
def collect(self: "RDD[T]") -> List[T]:
"""
Return a list that contains all of the elements in this RDD.
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
"""
with SCCallSiteSync(self.context):
assert self.ctx._jvm is not None
sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
return list(_load_from_socket(sock_info, self._jrdd_deserializer))
def collectWithJobGroup(
self: "RDD[T]", groupId: str, description: str, interruptOnCancel: bool = False
) -> "List[T]":
"""
When collect rdd, use this method to specify job group.
.. versionadded:: 3.0.0
.. deprecated:: 3.1.0
Use :class:`pyspark.InheritableThread` with the pinned thread mode enabled.
"""
warnings.warn(
"Deprecated in 3.1, Use pyspark.InheritableThread with "
"the pinned thread mode enabled.",
FutureWarning,
)
with SCCallSiteSync(self.context):
assert self.ctx._jvm is not None
sock_info = self.ctx._jvm.PythonRDD.collectAndServeWithJobGroup(
self._jrdd.rdd(), groupId, description, interruptOnCancel
)
return list(_load_from_socket(sock_info, self._jrdd_deserializer))
def reduce(self: "RDD[T]", f: Callable[[T, T], T]) -> T:
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator. Currently reduces partitions locally.
Examples
--------
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
>>> sc.parallelize([]).reduce(add)
Traceback (most recent call last):
...
ValueError: Can not reduce() empty RDD
"""
f = fail_on_stopiteration(f)
def func(iterator: Iterable[T]) -> Iterable[T]:
iterator = iter(iterator)
try:
initial = next(iterator)
except StopIteration:
return
yield reduce(f, iterator, initial)
vals = self.mapPartitions(func).collect()
if vals:
return reduce(f, vals)
raise ValueError("Can not reduce() empty RDD")
def treeReduce(self: "RDD[T]", f: Callable[[T, T], T], depth: int = 2) -> T:
"""
Reduces the elements of this RDD in a multi-level tree pattern.
Parameters
----------
f : function
depth : int, optional
suggested depth of the tree (default: 2)
Examples
--------
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeReduce(add)
-5
>>> rdd.treeReduce(add, 1)
-5
>>> rdd.treeReduce(add, 2)
-5
>>> rdd.treeReduce(add, 5)
-5
>>> rdd.treeReduce(add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
# Use the second entry to indicate whether this is a dummy value.
zeroValue: Tuple[T, bool] = ( # type: ignore[assignment]
None,
True,
)
def op(x: Tuple[T, bool], y: Tuple[T, bool]) -> Tuple[T, bool]:
if x[1]:
return y
elif y[1]:
return x
else:
return f(x[0], y[0]), False # type: ignore[arg-type]
reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
if reduced[1]:
raise ValueError("Cannot reduce empty RDD.")
return reduced[0]
def fold(self: "RDD[T]", zeroValue: T, op: Callable[[T, T], T]) -> T:
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero value."
The function ``op(t1, t2)`` is allowed to modify ``t1`` and return it
as its result value to avoid object allocation; however, it should not
modify ``t2``.
This behaves somewhat differently from fold operations implemented
for non-distributed collections in functional languages like Scala.
This fold operation may be applied to partitions individually, and then
fold those results into the final result, rather than apply the fold
to each element sequentially in some defined ordering. For functions
that are not commutative, the result may differ from that of a fold
applied to a non-distributed collection.
Examples
--------
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
op = fail_on_stopiteration(op)
def func(iterator: Iterable[T]) -> Iterable[T]:
acc = zeroValue
for obj in iterator:
acc = op(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
def aggregate(
self: "RDD[T]", zeroValue: U, seqOp: Callable[[U, T], U], combOp: Callable[[U, U], U]
) -> U:
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
The functions ``op(t1, t2)`` is allowed to modify ``t1`` and return it
as its result value to avoid object allocation; however, it should not
modify ``t2``.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
an U and one operation for merging two U
Examples
--------
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
seqOp = fail_on_stopiteration(seqOp)
combOp = fail_on_stopiteration(combOp)
def func(iterator: Iterable[T]) -> Iterable[U]:
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(combOp, vals, zeroValue)
def treeAggregate(
self: "RDD[T]",
zeroValue: U,
seqOp: Callable[[U, T], U],
combOp: Callable[[U, U], U],
depth: int = 2,
) -> U:
"""
Aggregates the elements of this RDD in a multi-level tree
pattern.
depth : int, optional
suggested depth of the tree (default: 2)
Examples
--------
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeAggregate(0, add, add)
-5
>>> rdd.treeAggregate(0, add, add, 1)
-5
>>> rdd.treeAggregate(0, add, add, 2)
-5
>>> rdd.treeAggregate(0, add, add, 5)
-5
>>> rdd.treeAggregate(0, add, add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
if self.getNumPartitions() == 0:
return zeroValue
def aggregatePartition(iterator: Iterable[T]) -> Iterable[U]:
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
partiallyAggregated = self.mapPartitions(aggregatePartition)
numPartitions = partiallyAggregated.getNumPartitions()
scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
# If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
# aggregation.
while numPartitions > scale + numPartitions / scale:
numPartitions /= scale # type: ignore[assignment]
curNumPartitions = int(numPartitions)
def mapPartition(i: int, iterator: Iterable[U]) -> Iterable[Tuple[int, U]]:
for obj in iterator:
yield (i % curNumPartitions, obj)
partiallyAggregated = (
partiallyAggregated.mapPartitionsWithIndex(mapPartition)
.reduceByKey(combOp, curNumPartitions)
.values()
)
return partiallyAggregated.reduce(combOp)
@overload
def max(self: "RDD[S]") -> "S":
...
@overload
def max(self: "RDD[T]", key: Callable[[T], "S"]) -> T:
...
def max(self: "RDD[T]", key: Optional[Callable[[T], "S"]] = None) -> T:
"""
Find the maximum item in this RDD.
Parameters
----------
key : function, optional
A function used to generate key for comparing
Examples
--------
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max) # type: ignore[arg-type]
return self.reduce(lambda a, b: max(a, b, key=key)) # type: ignore[arg-type]
@overload
def min(self: "RDD[S]") -> "S":
...
@overload
def min(self: "RDD[T]", key: Callable[[T], "S"]) -> T:
...
def min(self: "RDD[T]", key: Optional[Callable[[T], "S"]] = None) -> T:
"""
Find the minimum item in this RDD.
Parameters
----------
key : function, optional
A function used to generate key for comparing
Examples
--------
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
2.0
>>> rdd.min(key=str)
10.0
"""
if key is None:
return self.reduce(min) # type: ignore[arg-type]
return self.reduce(lambda a, b: min(a, b, key=key)) # type: ignore[arg-type]
def sum(self: "RDD[NumberOrArray]") -> "NumberOrArray":
"""
Add up the elements in this RDD.
Examples
--------
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).fold( # type: ignore[return-value]
0, operator.add
)
def count(self) -> int:
"""
Return the number of elements in this RDD.
Examples
--------
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self: "RDD[NumberOrArray]") -> StatCounter:
"""
Return a :class:`StatCounter` object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter: StatCounter, right_counter: StatCounter) -> StatCounter:
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce( # type: ignore[arg-type]
redFunc
)
def histogram(
self: "RDD[S]", buckets: Union[int, List["S"], Tuple["S", ...]]
) -> Tuple[Sequence["S"], List[int]]:
"""
Compute a histogram using the provided buckets. The buckets
are all open to the right except for the last which is closed.
e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
which means 1<=x<10, 10<=x<20, 20<=x<=50. And on the input of 1
and 50 we would have a histogram of 1,0,1.
If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
this can be switched from an O(log n) insertion to O(1) per
element (where n is the number of buckets).
Buckets must be sorted, not contain any duplicates, and have
at least two elements.
If `buckets` is a number, it will generate buckets which are
evenly spaced between the minimum and maximum of the RDD. For
example, if the min value is 0 and the max is 100, given `buckets`
as 2, the resulting buckets will be [0,50) [50,100]. `buckets` must
be at least 1. An exception is raised if the RDD contains infinity.
If the elements in the RDD do not vary (max == min), a single bucket
will be used.
The return value is a tuple of buckets and histogram.
Examples
--------
>>> rdd = sc.parallelize(range(51))
>>> rdd.histogram(2)
([0, 25, 50], [25, 26])
>>> rdd.histogram([0, 5, 25, 50])
([0, 5, 25, 50], [5, 20, 26])
>>> rdd.histogram([0, 15, 30, 45, 60]) # evenly spaced buckets
([0, 15, 30, 45, 60], [15, 15, 15, 6])
>>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
>>> rdd.histogram(("a", "b", "c"))
(('a', 'b', 'c'), [2, 2])
"""
if isinstance(buckets, int):
if buckets < 1:
raise ValueError("number of buckets must be >= 1")
# filter out non-comparable elements
def comparable(x: Any) -> bool:
if x is None:
return False
if type(x) is float and isnan(x):
return False
return True
filtered = self.filter(comparable)
# faster than stats()
def minmax(a: Tuple["S", "S"], b: Tuple["S", "S"]) -> Tuple["S", "S"]:
return min(a[0], b[0]), max(a[1], b[1])
try:
minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
except TypeError as e:
if " empty " in str(e):
raise ValueError("can not generate buckets from empty RDD")
raise
if minv == maxv or buckets == 1:
return [minv, maxv], [filtered.count()]
try:
inc = (maxv - minv) / buckets # type: ignore[operator]
except TypeError:
raise TypeError("Can not generate buckets with non-number in RDD")
if isinf(inc):
raise ValueError("Can not generate buckets with infinite value")
# keep them as integer if possible
inc = int(inc)
if inc * buckets != maxv - minv: # type: ignore[operator]
inc = (maxv - minv) * 1.0 / buckets # type: ignore[operator]
buckets = [i * inc + minv for i in range(buckets)]
buckets.append(maxv) # fix accumulated error
even = True
elif isinstance(buckets, (list, tuple)):
if len(buckets) < 2:
raise ValueError("buckets should have more than one value")
if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
raise ValueError("can not have None or NaN in buckets")
if sorted(buckets) != list(buckets):
raise ValueError("buckets should be sorted")
if len(set(buckets)) != len(buckets):
raise ValueError("buckets should not contain duplicated values")
minv = buckets[0]
maxv = buckets[-1]
even = False
inc = None
try:
steps = [
buckets[i + 1] - buckets[i] # type: ignore[operator]
for i in range(len(buckets) - 1)
]
except TypeError:
pass # objects in buckets do not support '-'
else:
if max(steps) - min(steps) < 1e-10: # handle precision errors
even = True
inc = (maxv - minv) / (len(buckets) - 1) # type: ignore[operator]
else:
raise TypeError("buckets should be a list or tuple or number(int or long)")
def histogram(iterator: Iterable["S"]) -> Iterable[List[int]]:
counters = [0] * len(buckets) # type: ignore[arg-type]
for i in iterator:
if (
i is None
or (isinstance(i, float) and isnan(i)) # type: ignore[arg-type]
or i > maxv
or i < minv
):
continue
t = (
int((i - minv) / inc) # type: ignore[operator]
if even
else bisect.bisect_right(buckets, i) - 1 # type: ignore[arg-type]
)
counters[t] += 1
# add last two together
last = counters.pop()
counters[-1] += last
return [counters]
def mergeCounters(a: List[int], b: List[int]) -> List[int]:
return [i + j for i, j in zip(a, b)]
return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
def mean(self: "RDD[NumberOrArray]") -> "NumberOrArray":
"""
Compute the mean of this RDD's elements.
Examples
--------
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean() # type: ignore[return-value]
def variance(self: "RDD[NumberOrArray]") -> "NumberOrArray":
"""
Compute the variance of this RDD's elements.
Examples
--------
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance() # type: ignore[return-value]
def stdev(self: "RDD[NumberOrArray]") -> "NumberOrArray":
"""
Compute the standard deviation of this RDD's elements.
Examples
--------
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev() # type: ignore[return-value]
def sampleStdev(self: "RDD[NumberOrArray]") -> "NumberOrArray":
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
Examples
--------
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev() # type: ignore[return-value]
def sampleVariance(self: "RDD[NumberOrArray]") -> "NumberOrArray":
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
Examples
--------
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance() # type: ignore[return-value]
def countByValue(self: "RDD[K]") -> Dict[K, int]:
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
Examples
--------
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator: Iterable[K]) -> Iterable[Dict[K, int]]:
counts: Dict[K, int] = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1: Dict[K, int], m2: Dict[K, int]) -> Dict[K, int]:
for k, v in m2.items():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
@overload
def top(self: "RDD[S]", num: int) -> List["S"]:
...
@overload
def top(self: "RDD[T]", num: int, key: Callable[[T], "S"]) -> List[T]:
...
def top(self: "RDD[T]", num: int, key: Optional[Callable[[T], "S"]] = None) -> List[T]:
"""
Get the top N elements from an RDD.
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
It returns the list sorted in descending order.
Examples
--------
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
>>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
[4, 3, 2]
"""
def topIterator(iterator: Iterable[T]) -> Iterable[List[T]]:
yield heapq.nlargest(num, iterator, key=key)
def merge(a: List[T], b: List[T]) -> List[T]:
return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
@overload
def takeOrdered(self: "RDD[S]", num: int) -> List["S"]:
...
@overload
def takeOrdered(self: "RDD[T]", num: int, key: Callable[[T], "S"]) -> List[T]:
...
def takeOrdered(self: "RDD[T]", num: int, key: Optional[Callable[[T], "S"]] = None) -> List[T]:
"""
Get the N elements from an RDD ordered in ascending order or as
specified by the optional key function.
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def merge(a: List[T], b: List[T]) -> List[T]:
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
def take(self: "RDD[T]", num: int) -> List[T]:
"""
Take the first num elements of the RDD.
It works by first scanning one partition, and use the results from
that partition to estimate the number of additional partitions needed
to satisfy the limit.
Translated from the Scala implementation in RDD#take().
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
>>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
[91, 92, 93]
"""
items: List[T] = []
totalParts = self.getNumPartitions()
partsScanned = 0
while len(items) < num and partsScanned < totalParts:
# The number of partitions to try in this iteration.
# It is ok for this number to be greater than totalParts because
# we actually cap it at totalParts in runJob.
numPartsToTry = 1
if partsScanned > 0:
# If we didn't find any rows after the previous iteration,
# quadruple and retry. Otherwise, interpolate the number of
# partitions we need to try, but overestimate it by 50%.
# We also cap the estimation in the end.
if len(items) == 0:
numPartsToTry = partsScanned * 4
else:
# the first parameter of max is >=1 whenever partsScanned >= 2
numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
left = num - len(items)
def takeUpToNumLeft(iterator: Iterable[T]) -> Iterable[T]:
iterator = iter(iterator)
taken = 0
while taken < left:
try:
yield next(iterator)
except StopIteration:
return
taken += 1
p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
res = self.context.runJob(self, takeUpToNumLeft, p)
items += res
partsScanned += numPartsToTry
return items[:num]
def first(self: "RDD[T]") -> T:
"""
Return the first element in this RDD.
Examples
--------
>>> sc.parallelize([2, 3, 4]).first()
2
>>> sc.parallelize([]).first()
Traceback (most recent call last):
...
ValueError: RDD is empty
"""
rs = self.take(1)
if rs:
return rs[0]
raise ValueError("RDD is empty")
def isEmpty(self) -> bool:
"""
Returns true if and only if the RDD contains no elements at all.
Notes
-----
An RDD may be empty even when it has at least 1 partition.
Examples
--------
>>> sc.parallelize([]).isEmpty()
True
>>> sc.parallelize([1]).isEmpty()
False
"""
return self.getNumPartitions() == 0 or len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(
self: "RDD[Tuple[K, V]]",
conf: Dict[str, str],
keyConverter: Optional[str] = None,
valueConverter: Optional[str] = None,
) -> None:
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
converted for output using either user specified converters or, by default,
"org.apache.spark.api.python.JavaToWritableConverter".
Parameters
----------
conf : dict
Hadoop job configuration
keyConverter : str, optional
fully qualified classname of key converter (None by default)
valueConverter : str, optional
fully qualified classname of value converter (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
assert self.ctx._jvm is not None
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(
pickledRDD._jrdd, True, jconf, keyConverter, valueConverter, True
)
def saveAsNewAPIHadoopFile(
self: "RDD[Tuple[K, V]]",
path: str,
outputFormatClass: str,
keyClass: Optional[str] = None,
valueClass: Optional[str] = None,
keyConverter: Optional[str] = None,
valueConverter: Optional[str] = None,
conf: Optional[Dict[str, str]] = None,
) -> None:
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or "org.apache.spark.api.python.JavaToWritableConverter". The
`conf` is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
path : str
path to Hadoop file
outputFormatClass : str
fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
keyClass : str, optional
fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
valueClass : str, optional
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
keyConverter : str, optional
fully qualified classname of key converter (None by default)
valueConverter : str, optional
fully qualified classname of value converter (None by default)
conf : dict, optional
Hadoop job configuration (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
assert self.ctx._jvm is not None
self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(
pickledRDD._jrdd,
True,
path,
outputFormatClass,
keyClass,
valueClass,
keyConverter,
valueConverter,
jconf,
)
def saveAsHadoopDataset(
self: "RDD[Tuple[K, V]]",
conf: Dict[str, str],
keyConverter: Optional[str] = None,
valueConverter: Optional[str] = None,
) -> None:
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
converted for output using either user specified converters or, by default,
"org.apache.spark.api.python.JavaToWritableConverter".
Parameters
----------
conf : dict
Hadoop job configuration
keyConverter : str, optional
fully qualified classname of key converter (None by default)
valueConverter : str, optional
fully qualified classname of value converter (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
assert self.ctx._jvm is not None
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(
pickledRDD._jrdd, True, jconf, keyConverter, valueConverter, False
)
def saveAsHadoopFile(
self: "RDD[Tuple[K, V]]",
path: str,
outputFormatClass: str,
keyClass: Optional[str] = None,
valueClass: Optional[str] = None,
keyConverter: Optional[str] = None,
valueConverter: Optional[str] = None,
conf: Optional[Dict[str, str]] = None,
compressionCodecClass: Optional[str] = None,
) -> None:
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or "org.apache.spark.api.python.JavaToWritableConverter". The
`conf` is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
Parameters
----------
path : str
path to Hadoop file
outputFormatClass : str
fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
keyClass : str, optional
fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
valueClass : str, optional
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
keyConverter : str, optional
fully qualified classname of key converter (None by default)
valueConverter : str, optional
fully qualified classname of value converter (None by default)
conf : dict, optional
(None by default)
compressionCodecClass : str
fully qualified classname of the compression codec class
i.e. "org.apache.hadoop.io.compress.GzipCodec" (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
assert self.ctx._jvm is not None
self.ctx._jvm.PythonRDD.saveAsHadoopFile(
pickledRDD._jrdd,
True,
path,
outputFormatClass,
keyClass,
valueClass,
keyConverter,
valueConverter,
jconf,
compressionCodecClass,
)
def saveAsSequenceFile(
self: "RDD[Tuple[K, V]]", path: str, compressionCodecClass: Optional[str] = None
) -> None:
"""
Output a Python RDD of key-value pairs (of form ``RDD[(K, V)]``) to any Hadoop file
system, using the "org.apache.hadoop.io.Writable" types that we convert from the
RDD's key and value types. The mechanism is as follows:
1. Pickle is used to convert pickled Python RDD into RDD of Java objects.
2. Keys and values of this Java RDD are converted to Writables and written out.
Parameters
----------
path : str
path to sequence file
compressionCodecClass : str, optional
fully qualified classname of the compression codec class
i.e. "org.apache.hadoop.io.compress.GzipCodec" (None by default)
"""
pickledRDD = self._pickled()
assert self.ctx._jvm is not None
self.ctx._jvm.PythonRDD.saveAsSequenceFile(
pickledRDD._jrdd, True, path, compressionCodecClass
)
def saveAsPickleFile(self, path: str, batchSize: int = 10) -> None:
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is :class:`pyspark.serializers.CPickleSerializer`, default batch size
is 10.
Examples
--------
>>> from tempfile import NamedTemporaryFile
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
>>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
['1', '2', 'rdd', 'spark']
"""
ser: Serializer
if batchSize == 0:
ser = AutoBatchedSerializer(CPickleSerializer())
else:
ser = BatchedSerializer(CPickleSerializer(), batchSize)
self._reserialize(ser)._jrdd.saveAsObjectFile(path)
def saveAsTextFile(self, path: str, compressionCodecClass: Optional[str] = None) -> None:
"""
Save this RDD as a text file, using string representations of elements.
Parameters
----------
path : str
path to text file
compressionCodecClass : str, optional
fully qualified classname of the compression codec class
i.e. "org.apache.hadoop.io.compress.GzipCodec" (None by default)
Examples
--------
>>> from tempfile import NamedTemporaryFile
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
Empty lines are tolerated when saving to text files.
>>> from tempfile import NamedTemporaryFile
>>> tempFile2 = NamedTemporaryFile(delete=True)
>>> tempFile2.close()
>>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
>>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
'\\n\\n\\nbar\\nfoo\\n'
Using compressionCodecClass
>>> from tempfile import NamedTemporaryFile
>>> tempFile3 = NamedTemporaryFile(delete=True)
>>> tempFile3.close()
>>> codec = "org.apache.hadoop.io.compress.GzipCodec"
>>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
>>> from fileinput import input, hook_compressed
>>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
>>> ''.join([r.decode('utf-8') if isinstance(r, bytes) else r for r in result])
'bar\\nfoo\\n'
"""
def func(split: int, iterator: Iterable[Any]) -> Iterable[bytes]:
for x in iterator:
if isinstance(x, bytes):
yield x
elif isinstance(x, str):
yield x.encode("utf-8")
else:
yield str(x).encode("utf-8")
keyed = self.mapPartitionsWithIndex(func)
keyed._bypass_serializer = True # type: ignore[attr-defined]
assert self.ctx._jvm is not None
if compressionCodecClass:
compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
else:
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self: "RDD[Tuple[K, V]]") -> Dict[K, V]:
"""
Return the key-value pairs in this RDD to the master as a dictionary.
Notes
-----
This method should only be used if the resulting data is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self: "RDD[Tuple[K, V]]") -> "RDD[K]":
"""
Return an RDD with the keys of each tuple.
Examples
--------
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda x: x[0])
def values(self: "RDD[Tuple[K, V]]") -> "RDD[V]":
"""
Return an RDD with the values of each tuple.
Examples
--------
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda x: x[1])
def reduceByKey(
self: "RDD[Tuple[K, V]]",
func: Callable[[V, V], V],
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, V]]":
"""
Merge the values for each key using an associative and commutative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be partitioned with `numPartitions` partitions, or
the default parallelism level if `numPartitions` is not specified.
Default partitioner is hash-partition.
Examples
--------
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions, partitionFunc)
def reduceByKeyLocally(self: "RDD[Tuple[K, V]]", func: Callable[[V, V], V]) -> Dict[K, V]:
"""
Merge the values for each key using an associative and commutative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Examples
--------
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
func = fail_on_stopiteration(func)
def reducePartition(iterator: Iterable[Tuple[K, V]]) -> Iterable[Dict[K, V]]:
m: Dict[K, V] = {}
for k, v in iterator:
m[k] = func(m[k], v) if k in m else v
yield m
def mergeMaps(m1: Dict[K, V], m2: Dict[K, V]) -> Dict[K, V]:
for k, v in m2.items():
m1[k] = func(m1[k], v) if k in m1 else v
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self: "RDD[Tuple[K, V]]") -> Dict[K, int]:
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
Examples
--------
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, U]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, Tuple[V, U]]]":
"""
Return an RDD containing all pairs of elements with matching keys in
`self` and `other`.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in `self` and (k, v2) is in `other`.
Performs a hash join across the cluster.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, U]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, Tuple[V, Optional[U]]]]":
"""
Perform a left outer join of `self` and `other`.
For each element (k, v) in `self`, the resulting RDD will either
contain all pairs (k, (v, w)) for w in `other`, or the pair
(k, (v, None)) if no elements in `other` have key k.
Hash-partitions the resulting RDD into the given number of partitions.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, U]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, Tuple[Optional[V], U]]]":
"""
Perform a right outer join of `self` and `other`.
For each element (k, w) in `other`, the resulting RDD will either
contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
if no elements in `self` have key k.
Hash-partitions the resulting RDD into the given number of partitions.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, U]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, Tuple[Optional[V], Optional[U]]]]":
"""
Perform a right outer join of `self` and `other`.
For each element (k, v) in `self`, the resulting RDD will either
contain all pairs (k, (v, w)) for w in `other`, or the pair
(k, (v, None)) if no elements in `other` have key k.
Similarly, for each element (k, w) in `other`, the resulting RDD will
either contain all pairs (k, (v, w)) for v in `self`, or the pair
(k, (None, w)) if no elements in `self` have key k.
Hash-partitions the resulting RDD into the given number of partitions.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("c", 8)])
>>> sorted(x.fullOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
"""
return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
# portable_hash is used as default, because builtin hash of None is different
# cross machines.
def partitionBy(
self: "RDD[Tuple[K, V]]",
numPartitions: Optional[int],
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, V]]":
"""
Return a copy of the RDD partitioned using the specified partitioner.
Examples
--------
>>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
>>> sets = pairs.partitionBy(2).glom().collect()
>>> len(set(sets[0]).intersection(set(sets[1])))
0
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
partitioner = Partitioner(numPartitions, partitionFunc)
if self.partitioner == partitioner:
return self
# Transferring O(n) objects to Java is too expensive.
# Instead, we'll form the hash buckets in Python,
# transferring O(numPartitions) objects to Java.
# Each object is a (splitNumber, [objects]) pair.
# In order to avoid too huge objects, the objects are
# grouped into chunks.
outputSerializer = self.ctx._unbatched_serializer
limit = self._memory_limit() / 2
def add_shuffle_key(split: int, iterator: Iterable[Tuple[K, V]]) -> Iterable[bytes]:
buckets = defaultdict(list)
c, batch = 0, min(10 * numPartitions, 1000) # type: ignore[operator]
for k, v in iterator:
buckets[partitionFunc(k) % numPartitions].append((k, v)) # type: ignore[operator]
c += 1
# check used memory and avg size of chunk of objects
if c % 1000 == 0 and get_used_memory() > limit or c > batch:
n, size = len(buckets), 0
for split in list(buckets.keys()):
yield pack_long(split)
d = outputSerializer.dumps(buckets[split])
del buckets[split]
yield d
size += len(d)
avg = int(size / n) >> 20
# let 1M < avg < 10M
if avg < 1:
batch = min(sys.maxsize, batch * 1.5) # type: ignore[assignment]
elif avg > 10:
batch = max(int(batch / 1.5), 1)
c = 0
for split, items in buckets.items():
yield pack_long(split)
yield outputSerializer.dumps(items)
keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
keyed._bypass_serializer = True # type: ignore[attr-defined]
assert self.ctx._jvm is not None
with SCCallSiteSync(self.context):
pairRDD = self.ctx._jvm.PairwiseRDD(keyed._jrdd.rdd()).asJavaPairRDD()
jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions, id(partitionFunc))
jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
rdd: "RDD[Tuple[K, V]]" = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
rdd.partitioner = partitioner
return rdd
# TODO: add control over map-side aggregation
def combineByKey(
self: "RDD[Tuple[K, V]]",
createCombiner: Callable[[V], U],
mergeValue: Callable[[U, V], U],
mergeCombiners: Callable[[U, U], U],
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, U]]":
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C.
Users provide three functions:
- `createCombiner`, which turns a V into a C (e.g., creates
a one-element list)
- `mergeValue`, to merge a V into a C (e.g., adds it to the end of
a list)
- `mergeCombiners`, to combine two C's into a single one (e.g., merges
the lists)
To avoid memory allocation, both mergeValue and mergeCombiners are allowed to
modify and return their first argument instead of creating a new C.
In addition, users can control the partitioning of the output RDD.
Notes
-----
V and C can be different -- for example, one might group an RDD of type
(Int, Int) into an RDD of type (Int, List[Int]).
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
>>> def to_list(a):
... return [a]
...
>>> def append(a, b):
... a.append(b)
... return a
...
>>> def extend(a, b):
... a.extend(b)
... return a
...
>>> sorted(x.combineByKey(to_list, append, extend).collect())
[('a', [1, 2]), ('b', [1])]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
serializer = self.ctx.serializer
memory = self._memory_limit()
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combineLocally(iterator: Iterable[Tuple[K, V]]) -> Iterable[Tuple[K, U]]:
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def _mergeCombiners(iterator: Iterable[Tuple[K, U]]) -> Iterable[Tuple[K, U]]:
merger = ExternalMerger(agg, memory, serializer)
merger.mergeCombiners(iterator)
return merger.items()
return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
def aggregateByKey(
self: "RDD[Tuple[K, V]]",
zeroValue: U,
seqFunc: Callable[[U, V], U],
combFunc: Callable[[U, U], U],
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, U]]":
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
a U and one operation for merging two U's, The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
"""
def createZero() -> U:
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc
)
def foldByKey(
self: "RDD[Tuple[K, V]]",
zeroValue: V,
func: Callable[[V, V], V],
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, V]]":
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
(e.g., 0 for addition, or 1 for multiplication.).
Examples
--------
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> sorted(rdd.foldByKey(0, add).collect())
[('a', 2), ('b', 1)]
"""
def createZero() -> V:
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: func(createZero(), v), func, func, numPartitions, partitionFunc
)
def _memory_limit(self) -> int:
return _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
# TODO: support variant with custom partitioner
def groupByKey(
self: "RDD[Tuple[K, V]]",
numPartitions: Optional[int] = None,
partitionFunc: Callable[[K], int] = portable_hash,
) -> "RDD[Tuple[K, Iterable[V]]]":
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD with numPartitions partitions.
Notes
-----
If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey or aggregateByKey will
provide much better performance.
Examples
--------
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.groupByKey().mapValues(len).collect())
[('a', 2), ('b', 1)]
>>> sorted(rdd.groupByKey().mapValues(list).collect())
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x: V) -> List[V]:
return [x]
def mergeValue(xs: List[V], x: V) -> List[V]:
xs.append(x)
return xs
def mergeCombiners(a: List[V], b: List[V]) -> List[V]:
a.extend(b)
return a
memory = self._memory_limit()
serializer = self._jrdd_deserializer
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combine(iterator: Iterable[Tuple[K, V]]) -> Iterable[Tuple[K, List[V]]]:
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def groupByKey(it: Iterable[Tuple[K, List[V]]]) -> Iterable[Tuple[K, List[V]]]:
merger = ExternalGroupBy(agg, memory, serializer)
merger.mergeCombiners(it)
return merger.items()
return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(
self: "RDD[Tuple[K, V]]", f: Callable[[V], Iterable[U]]
) -> "RDD[Tuple[K, U]]":
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
Examples
--------
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
def flat_map_fn(kv: Tuple[K, V]) -> Iterable[Tuple[K, U]]:
return ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self: "RDD[Tuple[K, V]]", f: Callable[[V], U]) -> "RDD[Tuple[K, U]]":
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
Examples
--------
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
def map_values_fn(kv: Tuple[K, V]) -> Tuple[K, U]:
return kv[0], f(kv[1])
return self.map(map_values_fn, preservesPartitioning=True)
@overload
def groupWith(
self: "RDD[Tuple[K, V]]", other: "RDD[Tuple[K, V1]]"
) -> "RDD[Tuple[K, Tuple[ResultIterable[V], ResultIterable[V1]]]]":
...
@overload
def groupWith(
self: "RDD[Tuple[K, V]]", other: "RDD[Tuple[K, V1]]", __o1: "RDD[Tuple[K, V2]]"
) -> "RDD[Tuple[K, Tuple[ResultIterable[V], ResultIterable[V1], ResultIterable[V2]]]]":
...
@overload
def groupWith(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, V1]]",
_o1: "RDD[Tuple[K, V2]]",
_o2: "RDD[Tuple[K, V3]]",
) -> """RDD[
Tuple[
K,
Tuple[
ResultIterable[V],
ResultIterable[V1],
ResultIterable[V2],
ResultIterable[V3],
],
]
]""":
...
def groupWith( # type: ignore[misc]
self: "RDD[Tuple[Any, Any]]", other: "RDD[Tuple[Any, Any]]", *others: "RDD[Tuple[Any, Any]]"
) -> "RDD[Tuple[Any, Tuple[ResultIterable[Any], ...]]]":
"""
Alias for cogroup but with support for multiple RDDs.
Examples
--------
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
# TODO: add variant with custom partitioner
def cogroup(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, U]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, Tuple[ResultIterable[V], ResultIterable[U]]]]":
"""
For each key k in `self` or `other`, return a resulting RDD that
contains a tuple with the list of values for that key in `self` as
well as `other`.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def sampleByKey(
self: "RDD[Tuple[K, V]]",
withReplacement: bool,
fractions: Dict[K, Union[float, int]],
seed: Optional[int] = None,
) -> "RDD[Tuple[K, V]]":
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
Examples
--------
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True
)
def subtractByKey(
self: "RDD[Tuple[K, V]]",
other: "RDD[Tuple[K, Any]]",
numPartitions: Optional[int] = None,
) -> "RDD[Tuple[K, V]]":
"""
Return each (key, value) pair in `self` that has no pair with matching
key in `other`.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func(pair: Tuple[K, Tuple[V, Any]]) -> bool:
key, (val1, val2) = pair
return val1 and not val2 # type: ignore[return-value]
return (
self.cogroup(other, numPartitions)
.filter(filter_func) # type: ignore[arg-type]
.flatMapValues(lambda x: x[0])
)
def subtract(self: "RDD[T]", other: "RDD[T]", numPartitions: Optional[int] = None) -> "RDD[T]":
"""
Return each value in `self` that is not contained in `other`.
Examples
--------
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self: "RDD[T]", f: Callable[[T], K]) -> "RDD[Tuple[K, T]]":
"""
Creates tuples of the elements in this RDD by applying `f`.
Examples
--------
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
[(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
"""
return self.map(lambda x: (f(x), x))
def repartition(self: "RDD[T]", numPartitions: int) -> "RDD[T]":
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
Examples
--------
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
return self.coalesce(numPartitions, shuffle=True)
def coalesce(self: "RDD[T]", numPartitions: int, shuffle: bool = False) -> "RDD[T]":
"""
Return a new RDD that is reduced into `numPartitions` partitions.
Examples
--------
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
if shuffle:
# Decrease the batch size in order to distribute evenly the elements across output
# partitions. Otherwise, repartition will possibly produce highly skewed partitions.
batchSize = min(10, self.ctx._batchSize or 1024)
ser = BatchedSerializer(CPickleSerializer(), batchSize)
selfCopy = self._reserialize(ser)
jrdd_deserializer = selfCopy._jrdd_deserializer
jrdd = selfCopy._jrdd.coalesce(numPartitions, shuffle)
else:
jrdd_deserializer = self._jrdd_deserializer
jrdd = self._jrdd.coalesce(numPartitions, shuffle)
return RDD(jrdd, self.ctx, jrdd_deserializer)
def zip(self: "RDD[T]", other: "RDD[U]") -> "RDD[Tuple[T, U]]":
"""
Zips this RDD with another one, returning key-value pairs with the
first element in each RDD second element in each RDD, etc. Assumes
that the two RDDs have the same number of partitions and the same
number of elements in each partition (e.g. one was made through
a map on the other).
Examples
--------
>>> x = sc.parallelize(range(0,5))
>>> y = sc.parallelize(range(1000, 1005))
>>> x.zip(y).collect()
[(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
"""
def get_batch_size(ser: Serializer) -> int:
if isinstance(ser, BatchedSerializer):
return ser.batchSize
return 1 # not batched
def batch_as(rdd: "RDD[V]", batchSize: int) -> "RDD[V]":
return rdd._reserialize(BatchedSerializer(CPickleSerializer(), batchSize))
my_batch = get_batch_size(self._jrdd_deserializer)
other_batch = get_batch_size(other._jrdd_deserializer)
if my_batch != other_batch or not my_batch:
# use the smallest batchSize for both of them
batchSize = min(my_batch, other_batch)
if batchSize <= 0:
# auto batched or unlimited
batchSize = 100
other = batch_as(other, batchSize)
self = batch_as(self, batchSize)
if self.getNumPartitions() != other.getNumPartitions():
raise ValueError("Can only zip with RDD which has the same number of partitions")
# There will be an Exception in JVM if there are different number
# of items in each partitions.
pairRDD = self._jrdd.zip(other._jrdd)
deserializer = PairDeserializer(self._jrdd_deserializer, other._jrdd_deserializer)
return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self: "RDD[T]") -> "RDD[Tuple[T, int]]":
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition receives the largest index.
This method needs to trigger a spark job when this RDD contains
more than one partitions.
Examples
--------
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
[('a', 0), ('b', 1), ('c', 2), ('d', 3)]
"""
starts = [0]
if self.getNumPartitions() > 1:
nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
for i in range(len(nums) - 1):
starts.append(starts[-1] + nums[i])
def func(k: int, it: Iterable[T]) -> Iterable[Tuple[T, int]]:
for i, v in enumerate(it, starts[k]):
yield v, i
return self.mapPartitionsWithIndex(func)
def zipWithUniqueId(self: "RDD[T]") -> "RDD[Tuple[T, int]]":
"""
Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions. So there may exist gaps, but this
method won't trigger a spark job, which is different from
:meth:`zipWithIndex`.
Examples
--------
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
"""
n = self.getNumPartitions()
def func(k: int, it: Iterable[T]) -> Iterable[Tuple[T, int]]:
for i, v in enumerate(it):
yield v, i * n + k
return self.mapPartitionsWithIndex(func)
def name(self) -> Optional[str]:
"""
Return the name of this RDD.
"""
n = self._jrdd.name()
return n if n else None
def setName(self: "RDD[T]", name: str) -> "RDD[T]":
"""
Assign a name to this RDD.
Examples
--------
>>> rdd1 = sc.parallelize([1, 2])
>>> rdd1.setName('RDD1').name()
'RDD1'
"""
self._jrdd.setName(name)
return self
def toDebugString(self) -> Optional[bytes]:
"""
A description of this RDD and its recursive dependencies for debugging.
"""
debug_string = self._jrdd.toDebugString()
return debug_string.encode("utf-8") if debug_string else None
def getStorageLevel(self) -> StorageLevel:
"""
Get the RDD's current storage level.
Examples
--------
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.getStorageLevel()
StorageLevel(False, False, False, False, 1)
>>> print(rdd1.getStorageLevel())
Serialized 1x Replicated
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(
java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication(),
)
return storage_level
def _defaultReducePartitions(self) -> int:
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
def lookup(self: "RDD[Tuple[K, V]]", key: K) -> List[V]:
"""
Return the list of values in the RDD for key `key`. This operation
is done efficiently if the RDD has a known partitioner by only
searching the partition that the key maps to.
Examples
--------
>>> l = range(1000)
>>> rdd = sc.parallelize(zip(l, l), 10)
>>> rdd.lookup(42) # slow
[42]
>>> sorted = rdd.sortByKey()
>>> sorted.lookup(42) # fast
[42]
>>> sorted.lookup(1024)
[]
>>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey()
>>> list(rdd2.lookup(('a', 'b'))[0])
['c']
"""
values = self.filter(lambda kv: kv[0] == key).values()
if self.partitioner is not None:
return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)])
return values.collect()
def _to_java_object_rdd(self) -> "JavaObject":
"""Return a JavaRDD of Object by unpickling
It will convert each Python object into Java object by Pickle, whenever the
RDD is serialized in batch or not.
"""
rdd = self._pickled()
assert self.ctx._jvm is not None
return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout: int, confidence: float = 0.95) -> int:
"""
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
Examples
--------
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1000, 1.0)
1000
"""
drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))])
return int(drdd.sumApprox(timeout, confidence))
def sumApprox(
self: "RDD[Union[float, int]]", timeout: int, confidence: float = 0.95
) -> BoundedFloat:
"""
Approximate operation to return the sum within a timeout
or meet the confidence.
Examples
--------
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000))
>>> abs(rdd.sumApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
assert self.ctx._jvm is not None
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(
self: "RDD[Union[float, int]]", timeout: int, confidence: float = 0.95
) -> BoundedFloat:
"""
Approximate operation to return the mean within a timeout
or meet the confidence.
Examples
--------
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000)) / 1000.0
>>> abs(rdd.meanApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.map(float)._to_java_object_rdd()
assert self.ctx._jvm is not None
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self: "RDD[T]", relativeSD: float = 0.05) -> int:
"""
Return approximate number of distinct elements in the RDD.
Parameters
----------
relativeSD : float, optional
Relative accuracy. Smaller values create
counters that require more space.
It must be greater than 0.000017.
Notes
-----
The algorithm used is based on streamlib's implementation of
`"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Cardinality Estimation Algorithm", available here
<https://doi.org/10.1145/2452376.2452456>`_.
Examples
--------
>>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
>>> 900 < n < 1100
True
>>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
>>> 16 < n < 24
True
"""
if relativeSD < 0.000017:
raise ValueError("relativeSD should be greater than 0.000017")
# the hash space in Java is 2^32
hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self: "RDD[T]", prefetchPartitions: bool = False) -> Iterator[T]:
"""
Return an iterator that contains all of the elements in this RDD.
The iterator will consume as much memory as the largest partition in this RDD.
With prefetch it may consume up to the memory of the 2 largest partitions.
Parameters
----------
prefetchPartitions : bool, optional
If Spark should pre-fetch the next partition
before it is needed.
Examples
--------
>>> rdd = sc.parallelize(range(10))
>>> [x for x in rdd.toLocalIterator()]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
assert self.ctx._jvm is not None
with SCCallSiteSync(self.context):
sock_info = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(
self._jrdd.rdd(), prefetchPartitions
)
return _local_iterator_from_socket(sock_info, self._jrdd_deserializer)
def barrier(self: "RDD[T]") -> "RDDBarrier[T]":
"""
Marks the current stage as a barrier stage, where Spark must launch all tasks together.
In case of a task failure, instead of only restarting the failed task, Spark will abort the
entire stage and relaunch all tasks for this stage.
The barrier execution mode feature is experimental and it only handles limited scenarios.
Please read the linked SPIP and design docs to understand the limitations and future plans.
.. versionadded:: 2.4.0
Returns
-------
:class:`RDDBarrier`
instance that provides actions within a barrier stage.
See Also
--------
pyspark.BarrierTaskContext
Notes
-----
For additional information see
- `SPIP: Barrier Execution Mode <http://jira.apache.org/jira/browse/SPARK-24374>`_
- `Design Doc <https://jira.apache.org/jira/browse/SPARK-24582>`_
This API is experimental
"""
return RDDBarrier(self)
def _is_barrier(self) -> bool:
"""
Whether this RDD is in a barrier stage.
"""
return self._jrdd.rdd().isBarrier()
def withResources(self: "RDD[T]", profile: ResourceProfile) -> "RDD[T]":
"""
Specify a :class:`pyspark.resource.ResourceProfile` to use when calculating this RDD.
This is only supported on certain cluster managers and currently requires dynamic
allocation to be enabled. It will result in new executors with the resources specified
being acquired to calculate the RDD.
.. versionadded:: 3.1.0
Notes
-----
This API is experimental
"""
self.has_resource_profile = True
if profile._java_resource_profile is not None:
jrp = profile._java_resource_profile
else:
assert self.ctx._jvm is not None
builder = self.ctx._jvm.org.apache.spark.resource.ResourceProfileBuilder()
ereqs = ExecutorResourceRequests(self.ctx._jvm, profile._executor_resource_requests)
treqs = TaskResourceRequests(self.ctx._jvm, profile._task_resource_requests)
builder.require(ereqs._java_executor_resource_requests)
builder.require(treqs._java_task_resource_requests)
jrp = builder.build()
self._jrdd.withResources(jrp)
return self
def getResourceProfile(self) -> Optional[ResourceProfile]:
"""
Get the :class:`pyspark.resource.ResourceProfile` specified with this RDD or None
if it wasn't specified.
.. versionadded:: 3.1.0
Returns
-------
:py:class:`pyspark.resource.ResourceProfile`
The user specified profile or None if none were specified
Notes
-----
This API is experimental
"""
rp = self._jrdd.getResourceProfile()
if rp is not None:
return ResourceProfile(_java_resource_profile=rp)
else:
return None
@overload
def toDF(
self: "RDD[RowLike]",
schema: Optional[Union[List[str], Tuple[str, ...]]] = None,
sampleRatio: Optional[float] = None,
) -> "DataFrame":
...
@overload
def toDF(
self: "RDD[RowLike]", schema: Optional[Union["StructType", str]] = None
) -> "DataFrame":
...
@overload
def toDF(
self: "RDD[AtomicValue]",
schema: Union["AtomicType", str],
) -> "DataFrame":
...
def toDF(
self: "RDD[Any]", schema: Optional[Any] = None, sampleRatio: Optional[float] = None
) -> "DataFrame":
raise RuntimeError("""RDD.toDF was called before SparkSession was initialized.""")
def _prepare_for_python_RDD(sc: "SparkContext", command: Any) -> Tuple[bytes, Any, Any, Any]:
# the serialized command will be compressed by broadcast
ser = CloudPickleSerializer()
pickled_command = ser.dumps(command)
assert sc._jvm is not None
if len(pickled_command) > sc._jvm.PythonUtils.getBroadcastThreshold(sc._jsc): # Default 1M
# The broadcast will have same life cycle as created PythonRDD
broadcast = sc.broadcast(pickled_command)
pickled_command = ser.dumps(broadcast)
broadcast_vars = [x._jbroadcast for x in sc._pickled_broadcast_vars]
sc._pickled_broadcast_vars.clear()
return pickled_command, broadcast_vars, sc.environment, sc._python_includes
def _wrap_function(
sc: "SparkContext", func: Callable, deserializer: Any, serializer: Any, profiler: Any = None
) -> "JavaObject":
assert deserializer, "deserializer should not be empty"
assert serializer, "serializer should not be empty"
command = (func, profiler, deserializer, serializer)
pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
assert sc._jvm is not None
return sc._jvm.PythonFunction(
bytearray(pickled_command),
env,
includes,
sc.pythonExec,
sc.pythonVer,
broadcast_vars,
sc._javaAccumulator,
)
class RDDBarrier(Generic[T]):
"""
Wraps an RDD in a barrier stage, which forces Spark to launch tasks of this stage together.
:class:`RDDBarrier` instances are created by :func:`RDD.barrier`.
.. versionadded:: 2.4.0
Notes
-----
This API is experimental
"""
def __init__(self, rdd: RDD[T]):
self.rdd = rdd
def mapPartitions(
self, f: Callable[[Iterable[T]], Iterable[U]], preservesPartitioning: bool = False
) -> RDD[U]:
"""
Returns a new RDD by applying a function to each partition of the wrapped RDD,
where tasks are launched together in a barrier stage.
The interface is the same as :func:`RDD.mapPartitions`.
Please see the API doc there.
.. versionadded:: 2.4.0
Notes
-----
This API is experimental
"""
def func(s: int, iterator: Iterable[T]) -> Iterable[U]:
return f(iterator)
return PipelinedRDD(self.rdd, func, preservesPartitioning, isFromBarrier=True)
def mapPartitionsWithIndex(
self,
f: Callable[[int, Iterable[T]], Iterable[U]],
preservesPartitioning: bool = False,
) -> RDD[U]:
"""
Returns a new RDD by applying a function to each partition of the wrapped RDD, while
tracking the index of the original partition. And all tasks are launched together
in a barrier stage.
The interface is the same as :func:`RDD.mapPartitionsWithIndex`.
Please see the API doc there.
.. versionadded:: 3.0.0
Notes
-----
This API is experimental
"""
return PipelinedRDD(self.rdd, f, preservesPartitioning, isFromBarrier=True)
class PipelinedRDD(RDD[U], Generic[T, U]):
"""
Examples
--------
Pipelined maps:
>>> rdd = sc.parallelize([1, 2, 3, 4])
>>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
>>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
Pipelined reduces:
>>> from operator import add
>>> rdd.map(lambda x: 2 * x).reduce(add)
20
>>> rdd.flatMap(lambda x: [x, x]).reduce(add)
20
"""
def __init__(
self,
prev: RDD[T],
func: Callable[[int, Iterable[T]], Iterable[U]],
preservesPartitioning: bool = False,
isFromBarrier: bool = False,
):
if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
# This transformation is the first in its stage:
self.func = func
self.preservesPartitioning = preservesPartitioning
self._prev_jrdd = prev._jrdd
self._prev_jrdd_deserializer = prev._jrdd_deserializer
else:
prev_func: Callable[[int, Iterable[V]], Iterable[T]] = prev.func
def pipeline_func(split: int, iterator: Iterable[V]) -> Iterable[U]:
return func(split, prev_func(split, iterator))
self.func = pipeline_func
self.preservesPartitioning = prev.preservesPartitioning and preservesPartitioning
self._prev_jrdd = prev._prev_jrdd # maintain the pipeline
self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
self.is_cached = False
self.has_resource_profile = False
self.is_checkpointed = False
self.ctx = prev.ctx
self.prev = prev
self._jrdd_val: Optional["JavaObject"] = None
self._id = None
self._jrdd_deserializer = self.ctx.serializer
self._bypass_serializer = False
self.partitioner = prev.partitioner if self.preservesPartitioning else None
self.is_barrier = isFromBarrier or prev._is_barrier()
def getNumPartitions(self) -> int:
return self._prev_jrdd.partitions().size()
@property
def _jrdd(self) -> "JavaObject":
if self._jrdd_val:
return self._jrdd_val
if self._bypass_serializer:
self._jrdd_deserializer = NoOpSerializer()
if self.ctx.profiler_collector:
profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
else:
profiler = None
wrapped_func = _wrap_function(
self.ctx, self.func, self._prev_jrdd_deserializer, self._jrdd_deserializer, profiler
)
assert self.ctx._jvm is not None
python_rdd = self.ctx._jvm.PythonRDD(
self._prev_jrdd.rdd(), wrapped_func, self.preservesPartitioning, self.is_barrier
)
self._jrdd_val = python_rdd.asJavaRDD()
if profiler:
assert self._jrdd_val is not None
self._id = self._jrdd_val.id()
self.ctx.profiler_collector.add_profiler(self._id, profiler)
return self._jrdd_val
def id(self) -> int:
if self._id is None:
self._id = self._jrdd.id()
return self._id
def _is_pipelinable(self) -> bool:
return not (self.is_cached or self.is_checkpointed or self.has_resource_profile)
def _is_barrier(self) -> bool:
return self.is_barrier
def _test() -> None:
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs["sc"] = SparkContext("local[4]", "PythonTest")
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs["sc"].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
vinodkc/spark
|
python/pyspark/rdd.py
|
Python
|
apache-2.0
| 126,212 | 0.001965 |
from datetime import datetime
class PanoplyException(Exception):
def __init__(self, args=None, retryable=True):
super(PanoplyException, self).__init__(args)
self.retryable = retryable
class IncorrectParamError(Exception):
def __init__(self, msg: str = "Incorrect input parametr"):
super().__init__(msg)
class DataSourceException(Exception):
def __init__(self, message, code, exception_cls,
phase, source_type, source_id, database_id):
super().__init__(message)
self.message = message
self.code = code
self.phase = phase
self.source_type = source_type
self.source_id = source_id
self.database_id = database_id
self.exception_cls = exception_cls
self.created_at = datetime.utcnow()
class TokenValidationException(PanoplyException):
def __init__(self, original_error, args=None, retryable=True):
super().__init__(args, retryable)
self.original_error = original_error
|
panoplyio/panoply-python-sdk
|
panoply/errors/exceptions.py
|
Python
|
mit
| 1,017 | 0 |
"""
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print "Estimated coefficients (true, normal, RANSAC):"
print coef, model.coef_, model_ransac.estimator_.coef_
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
|
Tong-Chen/scikit-learn
|
examples/linear_model/plot_ransac.py
|
Python
|
bsd-3-clause
| 1,671 | 0 |
from Channel import Channel
import telepot
class AmbrosioBot(telepot.Bot):
"""AmbrosioBot is my telgram bot"""
def __init__(self, token):
super(AmbrosioBot, self).__init__(token)
self.clist = None
self.chat_id = None
def set_list(self,clist):
self.clist = clist
def on_chat_message(self, msg):
content_type, chat_type, chat_id, = telepot.glance(msg)
if content_type == 'text':
command =msg['text']
if self.clist is not None:
self.clist.append(command)
self.chat_id = chat_id
def respond(self, response):
if self.chat_id is not None:
self.sendMessage(self.chat_id, response)
class TelegramChannel(Channel):
"""channel class received commands from telegram"""
def __init__(self, name="TelegramChannel"):
super(TelegramChannel, self).__init__(name)
self.bot = AmbrosioBot("189884221:AAHls9d0EkCDfU0wgQ-acs5Z39aibA7BZmc")
self.messages = []
self.bot.set_list(self.messages)
self.bot.notifyOnMessage()
def get_msg(self):
if self.msg_avail():
return self.messages.pop(0)
def msg_avail(self):
return len(self.messages) > 0
def respond(self, response):
if response is None:
response = "Command not understand"
self.bot.respond(response)
|
oscarforri/ambrosio
|
ambrosio/channels/TelegramChannel.py
|
Python
|
gpl-3.0
| 1,405 | 0.003559 |
"""
This module contains a class to make requests to the Gemini API.
Author: Mike Marzigliano
"""
import time
import json
import hmac
import base64
import hashlib
import requests
class Geminipy(object):
"""
A class to make requests to the Gemini API.
Make public or authenticated requests according to the API documentation:
https://docs.gemini.com/
"""
live_url = 'https://api.gemini.com'
sandbox_url = 'https://api.sandbox.gemini.com'
base_url = sandbox_url
api_key = ''
secret_key = ''
def __init__(self, api_key='', secret_key='', live=False):
"""
Initialize the class.
Arguments:
api_key -- your Gemini API key
secret_key -- your Gemini API secret key for signatures
live -- use the live API? otherwise, use the sandbox (default False)
"""
self.api_key = api_key
self.secret_key = secret_key
if live:
self.base_url = self.live_url
# public requests
def symbols(self):
"""Send a request for all trading symbols, return the response."""
url = self.base_url + '/v1/symbols'
return requests.get(url)
def pubticker(self, symbol='btcusd'):
"""Send a request for latest ticker info, return the response."""
url = self.base_url + '/v1/pubticker/' + symbol
return requests.get(url)
def book(self, symbol='btcusd', limit_bids=0, limit_asks=0):
"""
Send a request to get the public order book, return the response.
Arguments:
symbol -- currency symbol (default 'btcusd')
limit_bids -- limit the number of bids returned (default 0)
limit_asks -- limit the number of asks returned (default 0)
"""
url = self.base_url + '/v1/book/' + symbol
params = {
'limit_bids': limit_bids,
'limit_asks': limit_asks
}
return requests.get(url, params)
def trades(self, symbol='btcusd', since=0, limit_trades=50,
include_breaks=0):
"""
Send a request to get all public trades, return the response.
Arguments:
symbol -- currency symbol (default 'btcusd')
since -- only return trades after this unix timestamp (default 0)
limit_trades -- maximum number of trades to return (default 50).
include_breaks -- whether to display broken trades (default False)
"""
url = self.base_url + '/v1/trades/' + symbol
params = {
'since': since,
'limit_trades': limit_trades,
'include_breaks': include_breaks
}
return requests.get(url, params)
def auction(self, symbol='btcusd'):
"""Send a request for latest auction info, return the response."""
url = self.base_url + '/v1/auction/' + symbol
return requests.get(url)
def auction_history(self, symbol='btcusd', since=0,
limit_auction_results=50, include_indicative=1):
"""
Send a request for auction history info, return the response.
Arguments:
symbol -- currency symbol (default 'btcusd')
since -- only return auction events after this timestamp (default 0)
limit_auction_results -- maximum number of auction events to return
(default 50).
include_indicative -- whether to include publication of indicative
prices and quantities. (default True)
"""
url = self.base_url + '/v1/auction/' + symbol + '/history'
params = {
'since': since,
'limit_auction_results': limit_auction_results,
'include_indicative': include_indicative
}
return requests.get(url, params)
# authenticated requests
def new_order(self, amount, price, side, client_order_id=None,
symbol='btcusd', type='exchange limit', options=None):
"""
Send a request to place an order, return the response.
Arguments:
amount -- quoted decimal amount of BTC to purchase
price -- quoted decimal amount of USD to spend per BTC
side -- 'buy' or 'sell'
client_order_id -- an optional client-specified order id (default None)
symbol -- currency symbol (default 'btcusd')
type -- the order type (default 'exchange limit')
"""
request = '/v1/order/new'
url = self.base_url + request
params = {
'request': request,
'nonce': self.get_nonce(),
'symbol': symbol,
'amount': amount,
'price': price,
'side': side,
'type': type
}
if client_order_id is not None:
params['client_order_id'] = client_order_id
if options is not None:
params['options'] = options
return requests.post(url, headers=self.prepare(params))
def cancel_order(self, order_id):
"""
Send a request to cancel an order, return the response.
Arguments:
order_id - the order id to cancel
"""
request = '/v1/order/cancel'
url = self.base_url + request
params = {
'request': request,
'nonce': self.get_nonce(),
'order_id': order_id
}
return requests.post(url, headers=self.prepare(params))
def cancel_session(self):
"""Send a request to cancel all session orders, return the response."""
request = '/v1/order/cancel/session'
url = self.base_url + request
params = {
'request': request,
'nonce': self.get_nonce()
}
return requests.post(url, headers=self.prepare(params))
def cancel_all(self):
"""Send a request to cancel all orders, return the response."""
request = '/v1/order/cancel/all'
url = self.base_url + request
params = {
'request': request,
'nonce': self.get_nonce()
}
return requests.post(url, headers=self.prepare(params))
def order_status(self, order_id):
"""
Send a request to get an order status, return the response.
Arguments:
order_id -- the order id to get information on
"""
request = '/v1/order/status'
url = self.base_url + request
params = {
'request': request,
'nonce': self.get_nonce(),
'order_id': order_id
}
return requests.post(url, headers=self.prepare(params))
def active_orders(self):
"""Send a request to get active orders, return the response."""
request = '/v1/orders'
url = self.base_url + request
params = {
'request': request,
'nonce': self.get_nonce()
}
return requests.post(url, headers=self.prepare(params))
def past_trades(self, symbol='btcusd', limit_trades=50, timestamp=0):
"""
Send a trade history request, return the response.
Arguements:
symbol -- currency symbol (default 'btcusd')
limit_trades -- maximum number of trades to return (default 50)
timestamp -- only return trades after this unix timestamp (default 0)
"""
request = '/v1/mytrades'
url = self.base_url + request
params = {
'request': request,
'nonce': self.get_nonce(),
'symbol': symbol,
'limit_trades': limit_trades,
'timestamp': timestamp
}
return requests.post(url, headers=self.prepare(params))
def tradevolume(self):
"""Send a request to get your trade volume, return the response."""
request = '/v1/tradevolume'
url = self.base_url + request
params = {
'request': request,
'nonce': self.get_nonce()
}
return requests.post(url, headers=self.prepare(params))
def balances(self):
"""Send an account balance request, return the response."""
request = '/v1/balances'
url = self.base_url + request
params = {
'request': request,
'nonce': self.get_nonce()
}
return requests.post(url, headers=self.prepare(params))
def newAddress(self, currency='btc', label=''):
"""
Send a request for a new cryptocurrency deposit address
with an optional label. Return the response.
Arguements:
currency -- a Gemini supported cryptocurrency (btc, eth)
label -- optional label for the deposit address
"""
request = '/v1/deposit/' + currency + '/newAddress'
url = self.base_url + request
params = {
'request': request,
'nonce': self.get_nonce()
}
if label != '':
params['label'] = label
return requests.post(url, headers=self.prepare(params))
def fees(self):
"""Send a request to get fee and notional volume, return the response."""
request = '/v1/notionalvolume'
url = self.base_url + request
params = {
'request': request,
'nonce': self.get_nonce()
}
return requests.post(url, headers=self.prepare(params))
def heartbeat(self):
"""Send a heartbeat message, return the response."""
request = '/v1/heartbeat'
url = self.base_url + request
params = {
'request': request,
'nonce': self.get_nonce()
}
return requests.post(url, headers=self.prepare(params))
def get_nonce(self):
"""Return the current millisecond timestamp as the nonce."""
return int(round(time.time() * 1000))
def prepare(self, params):
"""
Prepare, return the required HTTP headers.
Base 64 encode the parameters, sign it with the secret key,
create the HTTP headers, return the whole payload.
Arguments:
params -- a dictionary of parameters
"""
jsonparams = json.dumps(params)
payload = base64.b64encode(jsonparams.encode())
signature = hmac.new(self.secret_key.encode(), payload,
hashlib.sha384).hexdigest()
return {'X-GEMINI-APIKEY': self.api_key,
'X-GEMINI-PAYLOAD': payload,
'X-GEMINI-SIGNATURE': signature}
|
geminipy/geminipy
|
geminipy/__init__.py
|
Python
|
gpl-3.0
| 10,473 | 0.000095 |
import click
from complex.cli import pass_context
@click.command('status', short_help='Shows file changes.')
@pass_context
def cli(ctx):
"""Shows file changes in the current working directory."""
ctx.log('Changed files: none')
ctx.vlog('bla bla bla, debug info')
|
staranjeet/fjord
|
vendor/packages/click/examples/complex/complex/commands/cmd_status.py
|
Python
|
bsd-3-clause
| 277 | 0 |
from __future__ import absolute_import
import six
import logging
from .. import py3_errmsg
logger = logging.getLogger(__name__)
try:
import enaml
except ImportError:
if six.PY3:
logger.exception(py3_errmsg)
else:
raise
else:
from .model import (GetLastModel, DisplayHeaderModel, WatchForHeadersModel,
ScanIDSearchModel)
with enaml.imports():
from .view import (GetLastView, GetLastWindow, WatchForHeadersView,
ScanIDSearchView)
|
NSLS-II/replay
|
replay/search/__init__.py
|
Python
|
bsd-3-clause
| 527 | 0 |
import typer
from controller import log
from controller.app import Application
from controller.deploy.docker import Docker
@Application.app.command(help="Provide instructions to join new nodes")
def join(
manager: bool = typer.Option(
False, "--manager", show_default=False, help="join new node with manager role"
)
) -> None:
Application.print_command(
Application.serialize_parameter("--manager", manager, IF=manager),
)
Application.get_controller().controller_init()
docker = Docker()
manager_address = "N/A"
# Search for the manager address
for node in docker.client.node.list():
role = node.spec.role
state = node.status.state
availability = node.spec.availability
if (
role == "manager"
and state == "ready"
and availability == "active"
and node.manager_status
):
manager_address = node.manager_status.addr
if manager:
log.info("To add a manager to this swarm, run the following command:")
token = docker.swarm.get_token("manager")
else:
log.info("To add a worker to this swarm, run the following command:")
token = docker.swarm.get_token("worker")
print("")
print(f"docker swarm join --token {token} {manager_address}")
print("")
|
rapydo/do
|
controller/commands/swarm/join.py
|
Python
|
mit
| 1,350 | 0.000741 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('viewer', '0006_meter_on_auditlist'),
]
operations = [
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=64)),
],
options={
},
bases=(models.Model,),
),
migrations.RenameField(
model_name='profiledatapoint',
old_name='kwh',
new_name='kw',
),
migrations.AddField(
model_name='meter',
name='groups',
field=models.ManyToManyField(to='viewer.Group'),
preserve_default=True,
),
]
|
impactlab/jps-handoff
|
webapp/viewer/migrations/0007_auto_20150408_1402.py
|
Python
|
mit
| 935 | 0.00107 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converting code to AST.
Adapted from Tangent.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import textwrap
import gast
from tensorflow.python.util import tf_inspect
def parse_object(obj):
"""Return the AST of given object."""
return parse_str(tf_inspect.getsource(obj))
def parse_str(src):
"""Return the AST of given piece of code."""
return gast.parse(textwrap.dedent(src))
|
jwlawson/tensorflow
|
tensorflow/contrib/py2tf/pyct/parser.py
|
Python
|
apache-2.0
| 1,152 | 0.003472 |
# -*- coding: utf-8 -*-
import os
import ConfigParser
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.MIMEBase import MIMEBase
from email import encoders
from global_functions import app_dir
class Mailer():
"""
Instance to manage the mailing.
"""
def __init__(self):
"""
Setup all needed info.
"""
# Gets all the connection info from the .ini file
self.Config = ConfigParser.ConfigParser()
self.Config.read(os.path.join(app_dir, "institution.ini"))
self.server = unicode(self.Config.get("Mail", "server"))
self.port = int(self.Config.get("Mail", "port"))
self.email = unicode(self.Config.get("Mail", "email"))
self.password = unicode(self.Config.get("Mail", "password"))
def connect(self):
"""
Connects to the mail server using the .ini info.
"""
self.smtp_server = smtplib.SMTP(self.server, self.port)
self.smtp_server.ehlo()
self.smtp_server.starttls()
try:
self.smtp_server.login(self.email, self.password)
return 1
except:
return 0
def send_certificate(self, path, send_to):
"""
Send each certificate from the configured email.
"""
# Email info
msg = MIMEMultipart()
msg["From"] = self.email
msg["To"] = send_to
msg["Subject"] = u"Certificado"
body = u"""Em anexo a este e-mail encontra-se o seu certificado de participação de um de nossos eventos.
Qualquer problema, entre em contato respondendo a este e-mail ou procure-nos em:
{address}
Fone: {phone}
""".format(
address=unicode(self.Config.get("Contact", "address")),
phone=unicode(self.Config.get("Contact", "phone"))
)
msg.attach(MIMEText(unicode(body), 'plain', 'utf-8'))
# Add the certificate file
attachment = open(unicode(path), "rb")
filename = os.path.basename(unicode(path))
part = MIMEBase('application', 'octet-stream')
part.set_payload(attachment.read())
encoders.encode_base64(part)
part.add_header(u'Content-Disposition',
"attachment; filename= %s" % filename)
msg.attach(part)
text = msg.as_string()
# Send the email
self.smtp_server.sendmail(self.email, send_to, text)
def quit(self):
# Quits the connection
self.smtp_server.quit()
|
juliarizza/certificate_generator
|
models/mail.py
|
Python
|
gpl-3.0
| 2,605 | 0.001153 |
import os, sys
import unittest
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
import csv
from slicer.util import VTKObservationMixin
import platform
import time
import urllib
import shutil
from CommonUtilities import *
from packaging import version
def _setSectionResizeMode(header, *args, **kwargs):
if version.parse(qt.Qt.qVersion()) < version.parse("5.0.0"):
header.setResizeMode(*args, **kwargs)
else:
header.setSectionResizeMode(*args, **kwargs)
#
# ShapeAnalysisModule
#
class ShapeAnalysisModule(ScriptedLoadableModule):
"""Uses ScriptedLoadableModule base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "Shape Analysis Module"
self.parent.categories = ["SPHARM"]
self.parent.dependencies = []
self.parent.contributors = ["Laura Pascal (Kitware Inc.), Beatriz Paniagua (Kitware Inc.), Hina Shah (Kitware Inc.)"]
self.parent.helpText = """
SPHARM-PDM is a tool that computes point-based models using a parametric
boundary description for the computing of Shape Analysis.
"""
self.parent.acknowledgementText = """
This work was supported by NIH NIBIB R01EB021391
(Shape Analysis Toolbox for Medical Image Computing Projects).
"""
#
# ShapeAnalysisModuleWidget
#
class ShapeAnalysisModuleWidget(ScriptedLoadableModuleWidget):
"""Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setup(self):
ScriptedLoadableModuleWidget.setup(self)
#
# Global variables
#
self.Logic = ShapeAnalysisModuleLogic()
self.progressbars_layout = None
#
# Interface
#
loader = qt.QUiLoader()
self.moduleName = 'ShapeAnalysisModule'
scriptedModulesPath = eval('slicer.modules.%s.path' % self.moduleName.lower())
scriptedModulesPath = os.path.dirname(scriptedModulesPath)
path = os.path.join(scriptedModulesPath, 'Resources', 'UI', '%s.ui' % self.moduleName)
qfile = qt.QFile(path)
qfile.open(qt.QFile.ReadOnly)
widget = loader.load(qfile, self.parent)
self.layout = self.parent.layout()
self.widget = widget
self.layout.addWidget(widget)
# Global variables of the Interface
# Group Project IO
self.CollapsibleButton_GroupProjectIO = self.getWidget('CollapsibleButton_GroupProjectIO')
self.GroupProjectInputDirectory = self.getWidget('DirectoryButton_GroupProjectInputDirectory')
self.GroupProjectOutputDirectory = self.getWidget('DirectoryButton_GroupProjectOutputDirectory')
self.Debug = self.getWidget('checkBox_Debug')
# Post Processed Segmentation
self.CollapsibleButton_SegPostProcess = self.getWidget('CollapsibleButton_SegPostProcess')
self.OverwriteSegPostProcess = self.getWidget('checkBox_OverwriteSegPostProcess')
self.label_RescaleSegPostProcess = self.getWidget('label_RescaleSegPostProcess')
self.RescaleSegPostProcess = self.getWidget('checkBox_RescaleSegPostProcess')
self.sx = self.getWidget('SliderWidget_sx')
self.sy = self.getWidget('SliderWidget_sy')
self.sz = self.getWidget('SliderWidget_sz')
self.label_sx = self.getWidget('label_sx')
self.label_sy = self.getWidget('label_sy')
self.label_sz = self.getWidget('label_sz')
self.LabelState = self.getWidget('checkBox_LabelState')
self.label_ValueLabelNumber = self.getWidget('label_ValueLabelNumber')
self.ValueLabelNumber = self.getWidget('SliderWidget_ValueLabelNumber')
# Generate Mesh Parameters
self.CollapsibleButton_GenParaMesh = self.getWidget('CollapsibleButton_GenParaMesh')
self.OverwriteGenParaMesh = self.getWidget('checkBox_OverwriteGenParaMesh')
self.NumberofIterations = self.getWidget('SliderWidget_NumberofIterations')
# Parameters to SPHARM Mesh
self.CollapsibleButton_ParaToSPHARMMesh = self.getWidget('CollapsibleButton_ParaToSPHARMMesh')
self.OverwriteParaToSPHARMMesh = self.getWidget('checkBox_OverwriteParaToSPHARMMesh')
self.SubdivLevelValue = self.getWidget('SliderWidget_SubdivLevelValue')
self.SPHARMDegreeValue = self.getWidget('SliderWidget_SPHARMDegreeValue')
self.thetaIterationValue = self.getWidget('spinBox_thetaIterationValue')
self.phiIterationValue = self.getWidget('spinBox_phiIterationValue')
self.medialMesh = self.getWidget('checkBox_medialMesh')
# Advanced Post Processed Segmentation
self.CollapsibleButton_AdvancedPostProcessedSegmentation = self.getWidget('CollapsibleButton_AdvancedPostProcessedSegmentation')
self.GaussianFiltering = self.getWidget('checkBox_GaussianFiltering')
self.label_VarianceX = self.getWidget('label_VarianceX')
self.VarianceX = self.getWidget('SliderWidget_VarianceX')
self.label_VarianceY = self.getWidget('label_VarianceY')
self.VarianceY = self.getWidget('SliderWidget_VarianceY')
self.label_VarianceZ = self.getWidget('label_VarianceZ')
self.VarianceZ = self.getWidget('SliderWidget_VarianceZ')
# Advanced Parameters to SPHARM Mesh
self.CollapsibleButton_AdvancedParametersToSPHARMMesh = self.getWidget('CollapsibleButton_AdvancedParametersToSPHARMMesh')
self.useRegTemplate = self.getWidget('checkBox_useRegTemplate')
self.label_regTemplate = self.getWidget('label_regTemplate')
self.regTemplate = self.getWidget('PathLineEdit_regTemplate')
self.useFlipTemplate = self.getWidget('checkBox_useFlipTemplate')
self.label_flipTemplate = self.getWidget('label_flipTemplate')
self.flipTemplate = self.getWidget('PathLineEdit_flipTemplate')
self.choiceOfFlip = self.getWidget('comboBox_choiceOfFlip')
self.sameFlipForAll = self.getWidget('checkBox_sameFlipForAll')
self.tableWidget_ChoiceOfFlip = self.getWidget('tableWidget_ChoiceOfFlip')
# Visualization
self.CollapsibleButton_Visualization = self.getWidget('CollapsibleButton_Visualization')
self.visualizationInSPV = self.getWidget('pushButton_visualizationInSPV')
self.CheckableComboBox_visualization = self.getWidget('CheckableComboBox_visualization')
self.tableWidget_visualization = self.getWidget('tableWidget_visualization')
# Apply CLIs
self.ApplyButton = self.getWidget('applyButton')
self.progress_layout = self.getWidget('progress_layout')
# Connections
# Group Project IO
self.CollapsibleButton_GroupProjectIO.connect('clicked()',
lambda: self.onSelectedCollapsibleButtonOpen(
self.CollapsibleButton_GroupProjectIO))
self.GroupProjectInputDirectory.connect('directoryChanged(const QString &)', self.onInputDirectoryChanged)
self.GroupProjectOutputDirectory.connect('directoryChanged(const QString &)', self.onOutputDirectoryChanged)
self.Debug.connect('clicked(bool)', self.onDebug)
# Post Processed Segmentation
self.CollapsibleButton_SegPostProcess.connect('clicked()',
lambda: self.onSelectedCollapsibleButtonOpen(
self.CollapsibleButton_SegPostProcess))
self.OverwriteSegPostProcess.connect('clicked(bool)', self.onOverwriteFilesSegPostProcess)
self.RescaleSegPostProcess.connect('stateChanged(int)', self.onSelectSpacing)
self.sx.connect('valueChanged(double)', self.onSxValueChanged)
self.sy.connect('valueChanged(double)', self.onSyValueChanged)
self.sz.connect('valueChanged(double)', self.onSzValueChanged)
self.LabelState.connect('clicked(bool)', self.onSelectValueLabelNumber)
self.ValueLabelNumber.connect('valueChanged(double)', self.onLabelNumberValueChanged)
# Generate Mesh Parameters
self.CollapsibleButton_GenParaMesh.connect('clicked()',
lambda: self.onSelectedCollapsibleButtonOpen(
self.CollapsibleButton_GenParaMesh))
self.OverwriteGenParaMesh.connect('clicked(bool)', self.onOverwriteFilesGenParaMesh)
self.NumberofIterations.connect('valueChanged(double)', self.onNumberofIterationsValueChanged)
# Parameters to SPHARM Mesh
self.CollapsibleButton_ParaToSPHARMMesh.connect('clicked()',
lambda: self.onSelectedCollapsibleButtonOpen(
self.CollapsibleButton_ParaToSPHARMMesh))
self.OverwriteParaToSPHARMMesh.connect('clicked(bool)', self.onOverwriteFilesParaToSPHARMMesh)
self.SubdivLevelValue.connect('valueChanged(double)', self.onSubdivLevelValueChanged)
self.SPHARMDegreeValue.connect('valueChanged(double)', self.onSPHARMDegreeValueChanged)
self.thetaIterationValue.connect('valueChanged(int)', self.onThetaIterationValueChanged)
self.phiIterationValue.connect('valueChanged(int)', self.onPhiIterationValueChanged)
self.medialMesh.connect('clicked(bool)', self.onMedialMeshValueChanged)
# Advanced Post Processed Segmentation
self.CollapsibleButton_AdvancedPostProcessedSegmentation.connect('clicked()',
lambda: self.onSelectedCollapsibleButtonOpen(
self.CollapsibleButton_AdvancedPostProcessedSegmentation))
self.GaussianFiltering.connect('clicked(bool)', self.onSelectGaussianVariance)
self.VarianceX.connect('valueChanged(double)', self.onVarianceXValueChanged)
self.VarianceY.connect('valueChanged(double)', self.onVarianceYValueChanged)
self.VarianceZ.connect('valueChanged(double)', self.onVarianceZValueChanged)
# Advanced Parameters to SPHARM Mesh
self.CollapsibleButton_AdvancedParametersToSPHARMMesh.connect('clicked()',
lambda: self.onSelectedCollapsibleButtonOpen(
self.CollapsibleButton_AdvancedParametersToSPHARMMesh))
self.useRegTemplate.connect('clicked(bool)', self.onEnableRegTemplate)
self.regTemplate.connect('currentPathChanged(const QString)', self.onRegTemplateValueChanged)
self.useFlipTemplate.connect('clicked(bool)', self.onEnableFlipTemplate)
self.flipTemplate.connect('currentPathChanged(const QString)', self.onFlipTemplateValueChanged)
self.choiceOfFlip.connect('currentIndexChanged(int)', self.onChoiceOfFlipValueChanged)
self.sameFlipForAll.connect('clicked(bool)', self.onEnableFlipChoices)
# Visualization
self.CollapsibleButton_Visualization.connect('clicked()',
lambda: self.onSelectedCollapsibleButtonOpen(
self.CollapsibleButton_Visualization))
self.CheckableComboBox_visualization.connect('checkedIndexesChanged()', self.onCheckableComboBoxValueChanged)
self.visualizationInSPV.connect('clicked(bool)', self.onSPHARMMeshesVisualizationInSPV)
# Apply CLIs
self.ApplyButton.connect('clicked(bool)', self.onApplyButton)
slicer.mrmlScene.AddObserver(slicer.mrmlScene.EndCloseEvent, self.onCloseScene)
# Widget Configuration
# Table for the Flip Options
self.tableWidget_ChoiceOfFlip.setColumnCount(2)
self.tableWidget_ChoiceOfFlip.setHorizontalHeaderLabels([' Input Files ', ' Choice of Flip '])
self.tableWidget_ChoiceOfFlip.setColumnWidth(0, 400)
horizontalHeader = self.tableWidget_ChoiceOfFlip.horizontalHeader()
horizontalHeader.setStretchLastSection(False)
_setSectionResizeMode(horizontalHeader, 0, qt.QHeaderView.Stretch)
_setSectionResizeMode(horizontalHeader, 1, qt.QHeaderView.ResizeToContents)
self.tableWidget_ChoiceOfFlip.verticalHeader().setVisible(False)
# Progress Bar
self.progress_layout.addWidget(self.Logic.ProgressBar)
# Table for the visualization in SPV
self.tableWidget_visualization.setColumnCount(2)
self.tableWidget_visualization.setHorizontalHeaderLabels([' VTK Files ', ' Visualization '])
self.tableWidget_visualization.setColumnWidth(0, 400)
horizontalHeader = self.tableWidget_visualization.horizontalHeader()
horizontalHeader.setStretchLastSection(False)
_setSectionResizeMode(horizontalHeader, 0, qt.QHeaderView.Stretch)
_setSectionResizeMode(horizontalHeader, 1, qt.QHeaderView.ResizeToContents)
self.tableWidget_visualization.verticalHeader().setVisible(False)
# Configuration of the parameters of the widget
self.Logic.parameters.setTableForChoiceOfFlip(self.tableWidget_ChoiceOfFlip)
def enter(self):
if not hasattr(slicer.modules, 'shapepopulationviewer') and not hasattr(slicer.modules, 'launcher'):
messageBox = ctk.ctkMessageBox()
messageBox.setWindowTitle(' /!\ WARNING /!\ ')
messageBox.setIcon(messageBox.Warning)
messageBox.setText("Shape Population Viewer is not installed!")
messageBox.setInformativeText("To install Shape Population Viewer in order to display the SPHARM meshes outputs generated by Shape Analysis Module, you can:\n"
"Solution 1: \n"
" - Install it via the Extensions Managers\n"
" - Restart 3DSlicer\n"
"Solution 2: \n"
" - Download it on https://www.nitrc.org/projects/shapepopviewer/\n"
" - Add the folder where you stored it in Edit/Application Settings/Modules/Add\n"
" - Restart 3DSlicer")
messageBox.setStandardButtons(messageBox.Ok)
messageBox.exec_()
else:
self.CollapsibleButton_Visualization.enabled = True
def onCloseScene(self, obj, event):
# Group Project IO
self.CollapsibleButton_GroupProjectIO.setChecked(True)
self.Logic.InputCases = []
self.GroupProjectInputDirectory.directory = slicer.app.slicerHome
self.GroupProjectOutputDirectory.directory = slicer.app.slicerHome
self.Debug.setChecked(False)
# Post Processed Segmentation
self.CollapsibleButton_SegPostProcess.setChecked(False)
self.OverwriteSegPostProcess.setChecked(False)
self.RescaleSegPostProcess.setChecked(True)
self.sx.setValue(0.5)
self.sy.setValue(0.5)
self.sz.setValue(0.5)
self.LabelState.setChecked(False)
self.ValueLabelNumber.setValue(0)
# Generate Mesh Parameters
self.CollapsibleButton_GenParaMesh.setChecked(False)
self.OverwriteGenParaMesh.setChecked(False)
self.NumberofIterations.setValue(1000)
# Parameters to SPHARM Mesh
self.CollapsibleButton_ParaToSPHARMMesh.setChecked(False)
self.OverwriteParaToSPHARMMesh.setChecked(False)
self.SubdivLevelValue.setValue(10)
self.SPHARMDegreeValue.setValue(15)
self.thetaIterationValue.setValue(100)
self.phiIterationValue.setValue(100)
self.medialMesh.setChecked(False)
# Advanced Post Processed Segmentation
self.CollapsibleButton_AdvancedPostProcessedSegmentation.setChecked(False)
self.GaussianFiltering.setChecked(False)
self.VarianceX.setValue(10)
self.VarianceY.setValue(10)
self.VarianceZ.setValue(10)
# Advanced Parameters to SPHARM Mesh
self.CollapsibleButton_AdvancedParametersToSPHARMMesh.setChecked(False)
self.useRegTemplate.setChecked(False)
self.regTemplate.setCurrentPath(" ")
self.useFlipTemplate.setChecked(False)
self.flipTemplate.setCurrentPath(" ")
self.choiceOfFlip.setCurrentIndex(0)
self.choiceOfFlip.enabled = True
self.sameFlipForAll.setChecked(True)
self.tableWidget_ChoiceOfFlip.enabled = False
self.tableWidget_ChoiceOfFlip.clear()
self.tableWidget_ChoiceOfFlip.setColumnCount(2)
self.tableWidget_ChoiceOfFlip.setHorizontalHeaderLabels([' Input Files ', ' Choice of Flip '])
self.tableWidget_ChoiceOfFlip.setColumnWidth(0, 400)
horizontalHeader = self.tableWidget_ChoiceOfFlip.horizontalHeader()
horizontalHeader.setStretchLastSection(False)
_setSectionResizeMode(horizontalHeader, 0, qt.QHeaderView.Stretch)
_setSectionResizeMode(horizontalHeader, 1, qt.QHeaderView.ResizeToContents)
self.tableWidget_ChoiceOfFlip.verticalHeader().setVisible(False)
# Visualization
self.CollapsibleButton_Visualization.setChecked(False)
self.CheckableComboBox_visualization.model().clear()
self.tableWidget_visualization.clear()
self.tableWidget_visualization.setColumnCount(2)
self.tableWidget_visualization.setHorizontalHeaderLabels([' VTK Files ', ' Visualization '])
self.tableWidget_visualization.setColumnWidth(0, 400)
horizontalHeader = self.tableWidget_visualization.horizontalHeader()
horizontalHeader.setStretchLastSection(False)
_setSectionResizeMode(horizontalHeader, 0, qt.QHeaderView.Stretch)
_setSectionResizeMode(horizontalHeader, 1, qt.QHeaderView.ResizeToContents)
self.tableWidget_visualization.verticalHeader().setVisible(False)
# Apply
if self.ApplyButton.text == "Cancel":
self.ApplyButton.click()
self.Logic.ProgressBar.hide()
if self.progressbars_layout:
self.CLIProgressBars.hide()
# Functions to recover the widget in the .ui file
def getWidget(self, objectName):
return self.findWidget(self.widget, objectName)
def findWidget(self, widget, objectName):
if widget.objectName == objectName:
return widget
else:
for w in widget.children():
resulting_widget = self.findWidget(w, objectName)
if resulting_widget:
return resulting_widget
return None
# Only one tab can be displayed at the same time:
# When one tab is opened all the other tabs are closed
def onSelectedCollapsibleButtonOpen(self, selectedCollapsibleButton):
if selectedCollapsibleButton.isChecked():
collapsibleButtonList = [self.CollapsibleButton_GroupProjectIO,
self.CollapsibleButton_SegPostProcess,
self.CollapsibleButton_GenParaMesh,
self.CollapsibleButton_ParaToSPHARMMesh,
self.CollapsibleButton_AdvancedPostProcessedSegmentation,
self.CollapsibleButton_AdvancedParametersToSPHARMMesh,
self.CollapsibleButton_Visualization]
for collapsibleButton in collapsibleButtonList:
collapsibleButton.setChecked(False)
selectedCollapsibleButton.setChecked(True)
#
# Group Project IO
#
def onInputDirectoryChanged(self):
inputDirectory = self.GroupProjectInputDirectory.directory.encode('utf-8')
# Update of the input directory path
self.Logic.parameters.setInputDirectory(inputDirectory)
# Possible extensions
exts = [".gipl", ".gipl.gz", ".mgh", ".mgh,gz", ".nii", ".nii.gz",".nrrd", ".vtk", ".vtp", ".hdr", ".mhd"]
# Search cases and add the filename to a list
self.Logic.InputCases = []
for file in os.listdir(inputDirectory):
for ext in exts:
if file.endswith(ext):
self.Logic.InputCases.append(file)
if file.endswith(".nii") or file.endswith(".nii.gz"):
self.RescaleSegPostProcess.setCheckState(qt.Qt.Unchecked)
self.label_RescaleSegPostProcess.enabled = False
self.RescaleSegPostProcess.enabled = False
# Update of the output directory path
def onOutputDirectoryChanged(self):
outputDirectory = self.GroupProjectOutputDirectory.directory.encode('utf-8')
self.Logic.parameters.setOutputDirectory(outputDirectory)
# Update of the debug parameter
def onDebug(self):
self.Logic.parameters.setDebug(self.Debug.checkState())
#
# Post Processed Segmentation
#
def onOverwriteFilesSegPostProcess(self):
# Update of the overwrite boolean for the Post Processed Segmentation step
self.Logic.parameters.setOverwriteSegPostProcess(self.OverwriteSegPostProcess.checkState())
if self.OverwriteSegPostProcess.checkState():
# Message for the user
messageBox = ctk.ctkMessageBox()
messageBox.setWindowTitle(' /!\ WARNING /!\ ')
messageBox.setIcon(messageBox.Warning)
messageBox.setText("<p align='center'>Applying the overwrite option to Post Processed Segmentation step will also apply to the next steps</p>")
messageBox.setStandardButtons(messageBox.Ok)
messageBox.exec_()
# Check the overwrite option for the next steps
self.OverwriteGenParaMesh.setCheckState(qt.Qt.Checked)
self.Logic.parameters.setOverwriteGenParaMesh(self.OverwriteGenParaMesh.checkState())
self.OverwriteParaToSPHARMMesh.setCheckState(qt.Qt.Checked)
self.Logic.parameters.setOverwriteParaToSPHARMMesh(self.OverwriteParaToSPHARMMesh.checkState())
def onSelectSpacing(self):
# Update of the rescale boolean for the Post Processed Segmentation step
self.Logic.parameters.setRescaleSegPostProcess(self.RescaleSegPostProcess.checkState())
# Enable/Disable the spacing x,y, and z parameters in the UI
self.label_sx.enabled = self.RescaleSegPostProcess.checkState()
self.label_sy.enabled = self.RescaleSegPostProcess.checkState()
self.label_sz.enabled = self.RescaleSegPostProcess.checkState()
self.sx.enabled = self.RescaleSegPostProcess.checkState()
self.sy.enabled = self.RescaleSegPostProcess.checkState()
self.sz.enabled = self.RescaleSegPostProcess.checkState()
# Update of the spacing x parameter for the Post Processed Segmentation step
def onSxValueChanged(self):
self.Logic.parameters.setSx(self.sx.value)
# Update of the spacing y parameter for the Post Processed Segmentation step
def onSyValueChanged(self):
self.Logic.parameters.setSy(self.sy.value)
# Update of the spacing z parameter for the Post Processed Segmentation step
def onSzValueChanged(self):
self.Logic.parameters.setSz(self.sz.value)
# Enable/Disable the label number value in the UI
def onSelectValueLabelNumber(self):
self.label_ValueLabelNumber.enabled = self.LabelState.checkState()
self.ValueLabelNumber.enabled = self.LabelState.checkState()
# Update of the label parameter for the Post Processed Segmentation step
def onLabelNumberValueChanged(self):
self.Logic.parameters.setLabelNumber(self.ValueLabelNumber.value)
#
# Generate Mesh Parameters
#
def onOverwriteFilesGenParaMesh(self):
# If the overwrite option for GenParaMesh is unchecked
if not self.OverwriteGenParaMesh.checkState():
# If the overwrite option for the previous step is checked, the overwrite option need to be checked for this step too
if self.OverwriteSegPostProcess.checkState():
self.OverwriteGenParaMesh.setCheckState(qt.Qt.Checked)
# Message for the user
messageBox = ctk.ctkMessageBox()
messageBox.setWindowTitle(' /!\ WARNING /!\ ')
messageBox.setIcon(messageBox.Warning)
messageBox.setText("<p align='center'>The overwrite option need to be applied to this step as it is set for the previous step</p>")
messageBox.setStandardButtons(messageBox.Ok)
messageBox.exec_()
# If the overwrite option for GenParaMesh is checked
else:
# Message for the user
messageBox = ctk.ctkMessageBox()
messageBox.setWindowTitle(' /!\ WARNING /!\ ')
messageBox.setIcon(messageBox.Warning)
messageBox.setText("<p align='center'>Applying the overwrite option to Generate Mesh Parameters step will also apply to the next steps</p>")
messageBox.setStandardButtons(messageBox.Ok)
messageBox.exec_()
# Check the overwrite option for the next step
self.OverwriteParaToSPHARMMesh.setCheckState(qt.Qt.Checked)
self.Logic.parameters.setOverwriteParaToSPHARMMesh(self.OverwriteParaToSPHARMMesh.checkState())
# Update of the overwrite boolean for the Generate Mesh Parameters step
self.Logic.parameters.setOverwriteGenParaMesh(self.OverwriteGenParaMesh.checkState())
# Update of the iterations parameter for the Generate Mesh Parameters step
def onNumberofIterationsValueChanged(self):
self.Logic.parameters.setNumberofIterations(self.NumberofIterations.value)
#
# Parameters to SPHARM Mesh
#
def onOverwriteFilesParaToSPHARMMesh(self):
# If the overwrite option for ParaToSPHARMMesh is unchecked
if not self.OverwriteParaToSPHARMMesh.checkState():
# If the overwrite option for a previous step is checked, the overwrite option need to be checked for this step too
if self.OverwriteSegPostProcess.checkState() or self.OverwriteGenParaMesh.checkState():
self.OverwriteParaToSPHARMMesh.setCheckState(qt.Qt.Checked)
# Message for the user
messageBox = ctk.ctkMessageBox()
messageBox.setWindowTitle(' /!\ WARNING /!\ ')
messageBox.setIcon(messageBox.Warning)
messageBox.setText("<p align='center'>The overwrite option need to be applied to this step as it is set for the previous step</p>")
messageBox.setStandardButtons(messageBox.Ok)
messageBox.exec_()
# Update of the overwrite boolean for the Parameters to SPHARM Mesh step
self.Logic.parameters.setOverwriteParaToSPHARMMesh(self.OverwriteParaToSPHARMMesh.checkState())
# Update of the sub-division parameter for the Parameters to SPHARM Mesh step
def onSubdivLevelValueChanged(self):
self.Logic.parameters.setSubdivLevelValue(self.SubdivLevelValue.value)
# Update of the SPHARM degree parameter for the Parameters to SPHARM Mesh step
def onSPHARMDegreeValueChanged(self):
self.Logic.parameters.setSPHARMDegreeValue(self.SPHARMDegreeValue.value)
# Update of the theta iteration parameter for the Parameters to SPHARM Mesh step
def onThetaIterationValueChanged(self):
self.Logic.parameters.setThetaIterationValue(self.thetaIterationValue.value)
# Update of the phi iteration parameter for the Parameters to SPHARM Mesh step
def onPhiIterationValueChanged(self):
self.Logic.parameters.setPhiIterationValue(self.phiIterationValue.value)
# Update of the medial mesh boolean for the Parameters to SPHARM Mesh step
def onMedialMeshValueChanged(self):
self.Logic.parameters.setMedialMesh(self.medialMesh.checkState())
#
# Advanced Post Processed Segmentation
#
def onSelectGaussianVariance(self):
# Update of the gaussian variance boolean for the Post Processed Segmentation step
self.Logic.parameters.setGaussianFiltering(self.GaussianFiltering.checkState())
# Enable/Disable the gaussian variance parameters in the UI
self.label_VarianceX.enabled = self.GaussianFiltering.checkState()
self.VarianceX.enabled = self.GaussianFiltering.checkState()
self.label_VarianceY.enabled = self.GaussianFiltering.checkState()
self.VarianceY.enabled = self.GaussianFiltering.checkState()
self.label_VarianceZ.enabled = self.GaussianFiltering.checkState()
self.VarianceZ.enabled = self.GaussianFiltering.checkState()
# Update of the variance x parameter for the Post Processed Segmentation step
def onVarianceXValueChanged(self):
self.Logic.parameters.setVarianceX(self.VarianceX.value)
# Update of the variance y parameter for the Post Processed Segmentation step
def onVarianceYValueChanged(self):
self.Logic.parameters.setVarianceY(self.VarianceY.value)
# Update of the variance z parameter for the Post Processed Segmentation step
def onVarianceZValueChanged(self):
self.Logic.parameters.setVarianceZ(self.VarianceZ.value)
#
# Advanced Parameters to SPHARM Mesh
#
def onEnableRegTemplate(self):
# Update of the registration template boolean for the Parameters to SPHARM Mesh step
self.Logic.parameters.setUseRegTemplate(self.useRegTemplate.checkState())
# Enable/Disable the registration template path in the UI
self.label_regTemplate.enabled = self.useRegTemplate.checkState()
self.regTemplate.enabled = self.useRegTemplate.checkState()
# Update of the registration template path for the Parameters to SPHARM Mesh step
def onRegTemplateValueChanged(self):
self.Logic.parameters.setRegTemplate(self.regTemplate.currentPath)
def onEnableFlipTemplate(self):
# Update of the flip template boolean for the Parameters to SPHARM Mesh step
self.Logic.parameters.setUseFlipTemplate(self.useFlipTemplate.checkState())
# Enable/Disable the flip template path in the UI
self.label_flipTemplate.enabled = self.useFlipTemplate.checkState()
self.flipTemplate.enabled = self.useFlipTemplate.checkState()
# Update of the flip template path for the Parameters to SPHARM Mesh step
def onFlipTemplateValueChanged(self):
self.Logic.parameters.setFlipTemplate(self.flipTemplate.currentPath)
# Update of the flip parameter for the Parameters to SPHARM Mesh step
def onChoiceOfFlipValueChanged(self):
self.Logic.parameters.setChoiceOfFlip(self.choiceOfFlip.currentIndex)
def onEnableFlipChoices(self):
# Update of the flip option boolean for the Parameters to SPHARM Mesh step
self.Logic.parameters.setSameFlipForAll(self.sameFlipForAll.checkState())
self.choiceOfFlip.enabled = self.sameFlipForAll.checkState()
self.tableWidget_ChoiceOfFlip.enabled = not self.sameFlipForAll.checkState()
if not self.sameFlipForAll.checkState():
self.fillTableForFlipOptions()
#
# Apply CLIs
#
def onApplyButton(self):
# Run workflow
if not self.Logic.Node.IsBusy():
# Check the registration template file
if self.useRegTemplate.checkState():
if not os.path.exists(self.regTemplate.currentPath) or not self.regTemplate.currentPath.endswith(".vtk"):
slicer.util.errorDisplay("Invalid registration template file in Advanced Parameters to SPHARM Mesh Tab")
return
# Check the flip template file
if self.useFlipTemplate.checkState():
if not os.path.exists(self.flipTemplate.currentPath) or not self.flipTemplate.currentPath.endswith(".coef"):
slicer.util.errorDisplay("Invalid flip template file in Advanced Parameters to SPHARM Mesh Tab")
return
# Empty the output folders if the overwrite options are checked
self.Logic.cleanOutputFolders()
# Change the apply buttons
logging.info('Widget: Running ShapeAnalysisModule')
self.ApplyButton.setText("Cancel")
self.Logic.addObserver(self.Logic.Node, slicer.vtkMRMLCommandLineModuleNode().StatusModifiedEvent,
self.onLogicModified)
self.Logic.Node.SetStatus(self.Logic.Node.Scheduled)
self.Logic.allCaseStartTime = time.time()
self.Logic.ShapeAnalysisCases()
# Cancel Workflow
else:
logging.info("Widget: Cancelling ShapeAnalysisModule")
self.ApplyButton.setEnabled(False)
self.Logic.Cancel()
def onLogicModified(self, logic_node, event):
status = logic_node.GetStatusString()
logging.info('-- %s : ShapeAnalysisModule', status)
# if not busy (completed, error, cancelled)
if not logic_node.IsBusy():
self.Logic.removeObserver(logic_node, slicer.vtkMRMLCommandLineModuleNode().StatusModifiedEvent,
self.onLogicModified)
# Create Error Message
if status == 'Completed with errors' or status == 'Cancelled':
logging.error(self.Logic.ErrorMessage)
qt.QMessageBox.critical(slicer.util.mainWindow(),
'ShapeAnalysisModule',
self.Logic.ErrorMessage)
elif status == 'Completed':
self.configurationVisualization()
# Empty lists
self.Logic.pipeline = {}
self.Logic.completed = {}
# Change the apply buttons
self.ApplyButton.setEnabled(True)
self.ApplyButton.setText("Run ShapeAnalysisModule")
# if running, create some progress bars for each cases
elif status == 'Running':
self.Logic.ProgressBar.show()
if self.progressbars_layout:
self.CLIProgressBars.hide()
self.CLIProgressBars = ctk.ctkCollapsibleGroupBox()
self.CLIProgressBars.setTitle('Detail')
self.progress_layout.addWidget(self.CLIProgressBars)
self.progressbars_layout = qt.QVBoxLayout(self.CLIProgressBars)
for i in range(len(self.Logic.pipeline)):
self.progressbars_layout.addWidget(self.Logic.pipeline[i].ProgressBar)
# Function to update the checkable comboBox and the table's checkBoxes in the visualization tab according of the check of one checkBox in the checkable comboBox
def onCheckableComboBoxValueChanged(self):
currentText = self.CheckableComboBox_visualization.currentText
currentIndex = self.CheckableComboBox_visualization.currentIndex
currentItem = self.CheckableComboBox_visualization.model().item(currentIndex, 0)
# ******* Update the CheckableComboBox ******* #
# Check/Uncheck the "Case i: case_name [..]" checkboxes in the checkacle comboBox
if currentText == "All Models":
self.checkedItems("SPHARM", currentItem.checkState())
elif currentText == "All SPHARM Models":
self.checkedItems("SPHARM Models", currentItem.checkState())
elif currentText == "All SPHARM Ellipse Aligned Models":
self.checkedItems("SPHARM Ellipse Aligned Models", currentItem.checkState())
elif currentText == "All SPHARM Medial Meshes":
self.checkedItems("SPHARM Medial Meshes", currentItem.checkState())
elif currentText == "All SPHARM Procrustes Aligned Models":
self.checkedItems("SPHARM Procrustes Aligned Models", currentItem.checkState())
# Check/Uncheck the "All [..]" checkboxes in the checkacle comboBox
self.checkedAllItems()
self.CheckableComboBox_visualization.blockSignals(False)
# ******* Update the checkboxes in the table ******* #
for row in range(0, self.tableWidget_visualization.rowCount):
actionOnCheckBox = False
label = self.tableWidget_visualization.cellWidget(row, 0)
outputRootname = label.text
if currentText == "All Models":
actionOnCheckBox = True
elif currentText == "All SPHARM Models":
if not outputRootname.find("SPHARM") == -1 and outputRootname.find("SPHARM_ellalign") == -1 and outputRootname.find("SPHARMMedialMesh") == -1 and outputRootname.find("SPHARM_procalign") == -1:
actionOnCheckBox = True
elif currentText == "All SPHARM Ellipse Aligned Models":
if not outputRootname.find("SPHARM_ellalign") == -1:
actionOnCheckBox = True
elif currentText == "All SPHARM Medial Meshes":
if not outputRootname.find("SPHARMMedialMesh") == -1:
actionOnCheckBox = True
elif currentText == "All SPHARM Procrustes Aligned Models":
if not outputRootname.find("SPHARM_procalign") == -1:
actionOnCheckBox = True
else:
for inputFilename in self.Logic.InputCases:
inputRootname = inputFilename.split('/')[-1].split('.')[0]
if not currentText.find(inputRootname) == -1:
if not currentText.find("SPHARM Models") == -1:
if not outputRootname.find(inputRootname) == -1 and not outputRootname.find("SPHARM") == -1 and outputRootname.find("SPHARM_ellalign") == -1 and outputRootname.find("SPHARMMedialMesh") == -1 and outputRootname.find("SPHARM_procalign") == -1:
actionOnCheckBox = True
elif not currentText.find("SPHARM Ellipse Aligned Models") == -1:
if not outputRootname.find(inputRootname) == -1 and not outputRootname.find("SPHARM_ellalign") == -1:
actionOnCheckBox = True
elif not currentText.find("SPHARM Medial Meshes") == -1:
if not outputRootname.find(inputRootname) == -1 and not outputRootname.find("SPHARMMedialMesh") == -1:
actionOnCheckBox = True
elif not currentText.find("SPHARM Procrustes Aligned Models") == -1:
if not outputRootname.find(inputRootname) == -1 and not outputRootname.find("SPHARM_procalign") == -1:
actionOnCheckBox = True
# check/uncheck the checkBox at (row,1)
if actionOnCheckBox:
widget = self.tableWidget_visualization.cellWidget(row, 1)
tuple = widget.children()
checkBox = tuple[1]
checkBox.blockSignals(True)
item = self.CheckableComboBox_visualization.model().item(currentIndex, 0)
if item.checkState():
checkBox.setChecked(True)
else:
checkBox.setChecked(False)
checkBox.blockSignals(False)
# Function to update the checkboxes in the checkbable comboBox in the visualization tab according of the check of a checBox in the visualization tab
def onCheckBoxTableValueChanged(self):
self.CheckableComboBox_visualization.blockSignals(True)
list = self.CheckableComboBox_visualization.model()
table = self.tableWidget_visualization
allSPHARMMesdialMeshesIndex = self.CheckableComboBox_visualization.findText("All SPHARM Medial Meshes") # If == -1 "All SPHARM Medial Meshes" checkBox doesn't exist
allSPHARMProcrustesAlignedModelsIndex = self.CheckableComboBox_visualization.findText("All SPHARM Procrustes Aligned Models") # If == -1 "All SPHARM Procrustes Aligned Models" checkBox doesn't exist
for i in range(len(self.Logic.InputCases)):
allCaseSPHARMModelsChecked = True
allCaseSPHARMEllalignModelsChecked = True
allCaseSPHARMMedialMeshesChecked = True
allCaseSPHARMProcrustesAlignedModelsChecked = True
inputRootname = self.Logic.InputCases[i].split('/')[-1].split('.')[0]
for row in range(0,table.rowCount):
label = table.cellWidget(row, 0)
outputRootname = label.text
if not outputRootname.find(inputRootname) == -1:
widget = table.cellWidget(row, 1)
tuple = widget.children()
checkBox = tuple[1]
if not checkBox.checkState():
if not outputRootname.find("SPHARM") == -1 and outputRootname.find("SPHARM_ellalign") == -1 and outputRootname.find("SPHARMMedialMesh") == -1 and outputRootname.find("SPHARM_procalign") == -1:
allCaseSPHARMModelsChecked = False
if not outputRootname.find("SPHARM_ellalign") == -1:
allCaseSPHARMEllalignModelsChecked = False
if not allSPHARMMesdialMeshesIndex == -1:
if not outputRootname.find("SPHARMMedialMesh") == -1:
allCaseSPHARMMedialMeshesChecked = False
if not allSPHARMProcrustesAlignedModelsIndex == -1:
if not outputRootname.find("SPHARM_procalign") == -1:
allCaseSPHARMProcrustesAlignedModelsChecked = False
# Check/uncheck checbox case according of the checkbox in the table
text = "Case " + str(i) + ": " + inputRootname + " - SPHARM Models"
self.checkedCaseItem(text, allCaseSPHARMModelsChecked)
text = "Case " + str(i) + ": " + inputRootname + " - SPHARM Ellipse Aligned Models"
self.checkedCaseItem(text, allCaseSPHARMEllalignModelsChecked)
if not allSPHARMMesdialMeshesIndex == -1:
text = "Case " + str(i) + ": " + inputRootname + " - SPHARM Medial Meshes"
self.checkedCaseItem(text, allCaseSPHARMMedialMeshesChecked)
if not allSPHARMProcrustesAlignedModelsIndex == -1:
text = "Case " + str(i) + ": " + inputRootname + " - SPHARM Procrustes Aligned Models"
self.checkedCaseItem(text, allCaseSPHARMProcrustesAlignedModelsChecked)
# Check/Uncheck the "All [..]" checkboxes in the checkacle comboBox
self.checkedAllItems()
self.CheckableComboBox_visualization.blockSignals(False)
# Visualization of the SPHARM Mesh outputs in Shape Population Viewer
def onSPHARMMeshesVisualizationInSPV(self):
# Creation of a CSV file to load the vtk files in ShapePopulationViewer
filePathCSV = slicer.app.temporaryPath + '/' + 'PreviewForVisualizationInSPV.csv'
self.Logic.creationCSVFileForSPV(self.tableWidget_visualization, filePathCSV)
# Creation of the parameters of SPV
parameters = {}
parameters["CSVFile"] = filePathCSV
# If a binary of SPV has been installed
if hasattr(slicer.modules, 'shapepopulationviewer'):
SPV = slicer.modules.shapepopulationviewer
# If SPV has been installed via the Extension Manager
elif hasattr(slicer.modules, 'launcher'):
SPV = slicer.modules.launcher
# Launch SPV
slicer.cli.run(SPV, None, parameters, wait_for_completion=True)
# Deletion of the CSV files in the Slicer temporary directory
if os.path.exists(filePathCSV):
os.remove(filePathCSV)
# Function to fill the flip options table for all the SPHARM mesh outputs
# - Column 0: filename of the input files
# - Column 1: comboBox with the flip corresponding to the output file
def fillTableForFlipOptions(self):
table = self.tableWidget_ChoiceOfFlip
row = 0
for basename in self.Logic.InputCases:
table.setRowCount(row + 1)
# Column 0:
rootname = basename.split('/')[-1].split('.')[0]
labelVTKFile = qt.QLabel(rootname)
labelVTKFile.setAlignment(0x84)
table.setCellWidget(row, 0, labelVTKFile)
# Column 1:
widget = qt.QWidget()
layout = qt.QHBoxLayout(widget)
comboBox = qt.QComboBox()
comboBox.addItems(['No Flip',
'Flip Along Axis of x and y',
'Flip Along Axis of y and z',
'Flip Along Axis of x and z',
'Flip Along Axis of x',
'Flip Along Axis of y',
'Flip Along Axis of x, y and z',
'Flip Along Axis of z',
'All'])
comboBox.setCurrentIndex(self.choiceOfFlip.currentIndex)
layout.addWidget(comboBox)
layout.setAlignment(0x84)
layout.setContentsMargins(0, 0, 0, 0)
widget.setLayout(layout)
table.setCellWidget(row, 1, widget)
row = row + 1
# Function to configure the checkable comboBox and the table of the visualization tab
def configurationVisualization(self):
# Configuration of the checkable comboBox
checkableComboBox = self.CheckableComboBox_visualization
# clean the checkable comboBox
list = checkableComboBox.model()
list.clear()
# add items according of the SPHARM Mesh computed by ParaToSPHARMMesh
checkableComboBox.blockSignals(True)
checkableComboBox.addItem("All Models")
checkableComboBox.addItem("All SPHARM Models")
checkableComboBox.addItem("All SPHARM Ellipse Aligned Models")
if self.medialMesh.checkState():
checkableComboBox.addItem("All SPHARM Medial Meshes")
if self.useRegTemplate.checkState():
checkableComboBox.addItem("All SPHARM Procrustes Aligned Models")
# Fill the checkable comboBox
for i in range(len(self.Logic.InputCases)):
checkableComboBox.addItem("Case " + str(i) + ": " + self.Logic.InputCases[i].split('/')[-1].split('.')[0] + " - SPHARM Models")
checkableComboBox.addItem("Case " + str(i) + ": " + self.Logic.InputCases[i].split('/')[-1].split('.')[0] + " - SPHARM Ellipse Aligned Models")
if self.medialMesh.checkState():
checkableComboBox.addItem("Case " + str(i) + ": " + self.Logic.InputCases[i].split('/')[-1].split('.')[0] + " - SPHARM Medial Meshes")
if self.useRegTemplate.checkState():
checkableComboBox.addItem("Case " + str(i) + ": " + self.Logic.InputCases[i].split('/')[-1].split('.')[0] + " - SPHARM Procrustes Aligned Models")
checkableComboBox.blockSignals(False)
# Configuration of the table
# column 0: filename of the SPHARM Meshes generated by ParaToSPHARMMesh
# column 1: checkbox that allows to the user to select what output he wants to display in Shape Population Viewer
table = self.tableWidget_visualization
outputDirectory = self.GroupProjectOutputDirectory.directory.encode('utf-8')
SPHARMMeshOutputDirectory = outputDirectory + "/Step3_ParaToSPHARMMesh/"
row = 0
for filename in os.listdir(SPHARMMeshOutputDirectory):
if filename.endswith(".vtk") and not filename.endswith("_para.vtk") and not filename.endswith("SPHARMMedialAxis.vtk"):
table.setRowCount(row + 1)
# Column 0:
labelVTKFile = qt.QLabel(os.path.splitext(filename)[0])
labelVTKFile.setAlignment(0x84)
table.setCellWidget(row, 0, labelVTKFile)
# Column 1:
widget = qt.QWidget()
layout = qt.QHBoxLayout(widget)
checkBox = qt.QCheckBox()
layout.addWidget(checkBox)
layout.setAlignment(0x84)
layout.setContentsMargins(0, 0, 0, 0)
widget.setLayout(layout)
table.setCellWidget(row, 1, widget)
checkBox.connect('stateChanged(int)', self.onCheckBoxTableValueChanged)
row = row + 1
# Functions to update the checkable comboBox in the visualization tab
# Check/Uncheck checkBoxes with the label 'text'
def checkedItems(self, text, checkState):
list = self.CheckableComboBox_visualization.model()
for i in range(1, list.rowCount()):
item = list.item(i, 0)
if not item.text().find(text) == -1:
item.setCheckState(checkState)
# Check/Uncheck "All [..]" checkBoxes in the checkable comboBox
def checkedAllItems(self):
list = self.CheckableComboBox_visualization.model()
allIndex = self.CheckableComboBox_visualization.findText("All Models")
allItem = list.item(allIndex, 0)
allSPHARMIndex = self.CheckableComboBox_visualization.findText("All SPHARM Models")
allSPHARMItem = list.item(allSPHARMIndex, 0)
allSPHARMEllalignIndex = self.CheckableComboBox_visualization.findText("All SPHARM Ellipse Aligned Models")
allSPHARMEllalignItem = list.item(allSPHARMEllalignIndex, 0)
allSPHARMMesdialMeshesIndex = self.CheckableComboBox_visualization.findText("All SPHARM Medial Meshes")
if not allSPHARMMesdialMeshesIndex == -1:
allSPHARMMesdialMeshesItem = list.item(allSPHARMMesdialMeshesIndex, 0)
allSPHARMProcrustesAlignedModelsIndex = self.CheckableComboBox_visualization.findText("All SPHARM Procrustes Aligned Models")
if not allSPHARMProcrustesAlignedModelsIndex == -1:
allSPHARMProcrustesAlignedModelsItem = list.item(allSPHARMProcrustesAlignedModelsIndex, 0)
# Check/Uncheck "All SPHARM Models" checkBox
self.checkedAllItem("- SPHARM Models", allSPHARMItem)
# Check/Uncheck "All SPHARM Ellipse Aligned Models" checkBox
self.checkedAllItem("- SPHARM Ellipse Aligned Models", allSPHARMEllalignItem)
# Check/Uncheck "All SPHARM Medial Mesh" checkBox
if not allSPHARMMesdialMeshesIndex == -1:
self.checkedAllItem("- SPHARM Medial Meshes", allSPHARMMesdialMeshesItem)
# Check/Uncheck "All SPHARM Procrustes Aligned Models" checkBox
if not allSPHARMProcrustesAlignedModelsIndex == -1:
self.checkedAllItem("- SPHARM Procrustes Aligned Models", allSPHARMProcrustesAlignedModelsItem)
# Check/Uncheck "All Models" checkBox
if allSPHARMEllalignItem.checkState() and allSPHARMItem.checkState():
if allSPHARMMesdialMeshesIndex == -1 and allSPHARMProcrustesAlignedModelsIndex == -1:
allItem.setCheckState(qt.Qt.Checked)
return
elif not allSPHARMMesdialMeshesIndex == -1 and not allSPHARMProcrustesAlignedModelsIndex == -1:
if allSPHARMMesdialMeshesItem.checkState() and allSPHARMProcrustesAlignedModelsItem.checkState():
allItem.setCheckState(qt.Qt.Checked)
return
elif not allSPHARMMesdialMeshesIndex == -1 and allSPHARMProcrustesAlignedModelsIndex == -1:
if allSPHARMMesdialMeshesItem.checkState():
allItem.setCheckState(qt.Qt.Checked)
return
elif allSPHARMMesdialMeshesIndex == -1 and not allSPHARMProcrustesAlignedModelsIndex == -1:
if allSPHARMProcrustesAlignedModelsItem.checkState():
allItem.setCheckState(qt.Qt.Checked)
return
allItem.setCheckState(qt.Qt.Unchecked)
# Check/Uncheck "Case i: case_name - SPHARM [..]" checkBox in the checkable comboBox
def checkedCaseItem(self, text, doCheck):
list = self.CheckableComboBox_visualization.model()
item = list.findItems(text)[0]
if doCheck:
item.setCheckState(qt.Qt.Checked)
else:
item.setCheckState(qt.Qt.Unchecked)
# Check/Uncheck "All [..]" (except "All Models") checkBox in the checkable comboBox
def checkedAllItem(self, text, item):
if self.areAllCasesChecked(text):
item.setCheckState(qt.Qt.Checked)
else:
item.setCheckState(qt.Qt.Unchecked)
# Specify if all the "Case i: case_name - SPHARM [..]" checkBoxes of one type of Model are checked
def areAllCasesChecked(self, text):
list = self.CheckableComboBox_visualization.model()
isChecked = True
for i in range(3, list.rowCount()):
item = list.item(i, 0)
if not item.text().find(text) == -1:
if not item.checkState():
isChecked = False
return isChecked
def clearFlipOptionsTable(self):
table = self.tableWidget_ChoiceOfFlip
table.clear()
table.setColumnCount(2)
table.setHorizontalHeaderLabels([' Files ', ' Choice of Flip '])
table.setColumnWidth(0, 400)
horizontalHeader = table.horizontalHeader()
horizontalHeader.setStretchLastSection(False)
_setSectionResizeMode(horizontalHeader, 0, qt.QHeaderView.Stretch)
_setSectionResizeMode(horizontalHeader, 1, qt.QHeaderView.ResizeToContents)
table.verticalHeader().setVisible(False)
#
# ShapeAnalysisModuleParameters
#
class ShapeAnalysisModuleParameters(object):
def __init__(self):
#
self.waitForCompletion = False
# Group Project IO
self.inputDirectory = " "
self.outputDirectory = " "
self.debug = False
# Post Processed Segmentation
self.OverwriteSegPostProcess = False
self.RescaleSegPostProcess = True
self.sx = 0.5
self.sy = 0.5
self.sz = 0.5
self.labelNumber = 0
# Generate Mesh Parameters
self.OverwriteGenParaMesh = False
self.NumberofIterations = 1000
# Parameters to SPHARM Mesh
self.OverwriteParaToSPHARMMesh = False
self.SubdivLevelValue = 10
self.SPHARMDegreeValue = 15
self.thetaIterationValue = 100
self.phiIterationValue = 100
self.medialMesh = False
self.tableWidget_ChoiceOfFlip = None
# Advanced Post Processed Segmentation
self.GaussianFiltering = False
self.VarianceX = 10
self.VarianceY = 10
self.VarianceZ = 10
# Advanced Parameters to SPHARM Mesh
self.useRegTemplate = False
self.regTemplate = " "
self.useFlipTemplate = False
self.flipTemplate = " "
self.choiceOfFlip = 0
self.sameFlipForAll = True
def setWaitForCompletion(self, bool):
self.waitForCompletion = bool
def setInputDirectory(self, path):
self.inputDirectory = path
def setOutputDirectory(self, path):
self.outputDirectory = path
def setDebug(self, bool):
self.debug = bool
def setOverwriteSegPostProcess(self, bool):
self.OverwriteSegPostProcess = bool
def setRescaleSegPostProcess(self, bool):
self.RescaleSegPostProcess = bool
def setSx(self, value):
self.sx = value
def setSy(self, value):
self.sy = value
def setSz(self, value):
self.sz = value
def setLabelNumber(self, value):
self.labelNumber = value
def setOverwriteGenParaMesh(self, bool):
self.OverwriteGenParaMesh = bool
def setNumberofIterations(self, value):
self.NumberofIterations = value
def setOverwriteParaToSPHARMMesh(self, bool):
self.OverwriteParaToSPHARMMesh = bool
def setSubdivLevelValue(self, value):
self.SubdivLevelValue = value
def setSPHARMDegreeValue(self, value):
self.SPHARMDegreeValue = value
def setThetaIterationValue(self, value):
self.thetaIterationValue = value
def setPhiIterationValue(self, value):
self.phiIterationValue = value
def setMedialMesh(self, bool):
self.medialMesh = bool
def setTableForChoiceOfFlip(self, table):
self.tableWidget_ChoiceOfFlip = table
def setGaussianFiltering(self, bool):
self.GaussianFiltering = bool
def setVarianceX(self, value):
self.VarianceX = value
def setVarianceY(self, value):
self.VarianceY = value
def setVarianceZ(self, value):
self.VarianceZ = value
def setUseRegTemplate(self, bool):
self.useRegTemplate = bool
def setRegTemplate(self, path):
self.regTemplate = path
def setUseFlipTemplate(self, bool):
self.useFlipTemplate = bool
def setFlipTemplate(self, path):
self.flipTemplate = path
def setChoiceOfFlip(self, value):
self.choiceOfFlip = value
def setSameFlipForAll(self, bool):
self.sameFlipForAll = bool
#
# ShapeAnalysisModuleLogic
#
class ShapeAnalysisModuleLogic(LogicMixin):
"""
Uses ScriptedLoadableModuleLogic base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self):
LogicMixin.__init__(self, "ShapeAnalysisModule")
self.parameters = ShapeAnalysisModuleParameters()
def ShapeAnalysisCases(self):
# No cases
if not len(self.InputCases) > 0:
inputDirectory = self.parameters.inputDirectory
self.ErrorMessage = "No cases found in " + inputDirectory
self.Node.SetStatus(self.Node.CompletedWithErrors)
return -1
# Create pipelines
else:
logging.info('%d case(s) found', len(self.InputCases))
# Init
for i in range(len(self.InputCases)):
self.completed[i] = False
self.pipeline[i] = ShapeAnalysisModulePipeline(i, self.InputCases[i], self.parameters)
self.addObserver(self.pipeline[i].Node, slicer.vtkMRMLCommandLineModuleNode().StatusModifiedEvent,
self.onPipelineModified)
# Logic ready
self.Node.SetStatus(self.Node.Running)
# Launch Workflow
self.startPipeline(0)
return 0
# Empty the output folders if the overwrite option is checked
def cleanOutputFolders(self):
outputDirectory = self.parameters.outputDirectory
if self.parameters.OverwriteSegPostProcess:
PostProcessOutputDirectory = outputDirectory + "/Step1_SegPostProcess"
if os.path.exists(PostProcessOutputDirectory):
for filename in os.listdir(PostProcessOutputDirectory):
os.remove(os.path.join(PostProcessOutputDirectory, filename))
if self.parameters.OverwriteGenParaMesh:
GenParaMeshOutputDirectory = outputDirectory + "/Step2_GenParaMesh"
if os.path.exists(GenParaMeshOutputDirectory):
for filename in os.listdir(GenParaMeshOutputDirectory):
os.remove(os.path.join(GenParaMeshOutputDirectory, filename))
if self.parameters.OverwriteParaToSPHARMMesh:
SPHARMMeshOutputDirectory = outputDirectory + "/Step3_ParaToSPHARMMesh"
if os.path.exists(SPHARMMeshOutputDirectory):
for filename in os.listdir(SPHARMMeshOutputDirectory):
os.remove(os.path.join(SPHARMMeshOutputDirectory, filename))
# Function to create a CSV file containing all the SPHARM mesh output files
# that the user wants to display in ShapePopultaionViewer
def creationCSVFileForSPV(self, table, filepathCSV):
# Creation of a CSV file with a header 'VTK Files'
file = open(filepathCSV, 'w')
cw = csv.writer(file, delimiter=',')
cw.writerow(['VTK Files'])
# Add the filepath of the vtk file checked in the table
outputDirectory = self.parameters.outputDirectory
SPHARMMeshOutputDirectory = outputDirectory + "/Step3_ParaToSPHARMMesh/"
# Add the path of the vtk files if the users selected it
for row in range(0, table.rowCount):
# check the checkBox
widget = table.cellWidget(row, 1)
tuple = widget.children()
checkBox = tuple[1]
if checkBox.isChecked():
# Recovery of the vtk filename
qlabel = table.cellWidget(row, 0)
vtkRootname = qlabel.text
VTKfilepath = SPHARMMeshOutputDirectory + vtkRootname + ".vtk"
if os.path.exists(VTKfilepath):
cw.writerow([VTKfilepath])
file.close()
#
# ShapeAnalysisModulePipeline
#
class ShapeAnalysisModulePipeline(PipelineMixin):
def __init__(self, pipelineID, CaseInput, interface):
PipelineMixin.__init__(self, pipelineID, CaseInput, interface)
self.interface = interface
def setupSkipCLIs(self):
self.skip_meshToLabelMap = False
self.skip_segPostProcess = False
self.skip_genParaMesh = False
self.skip_paraToSPHARMMesh = False
outputDirectory = self.interface.outputDirectory
# Skip MeshToLabelMap?
if not self.inputExtension == "vtk" and not self.inputExtension == "vtp":
self.skip_meshToLabelMap = True
else:
MeshToLabelMapOutputDirectory = outputDirectory + "/Step0_MeshToLabelMap"
MeshToLabelMapOutputFilepath = MeshToLabelMapOutputDirectory + "/" + self.inputRootname + ".nrrd"
if os.path.exists(MeshToLabelMapOutputFilepath):
self.inputExtension = "nrrd"
self.skip_meshToLabelMap = True
# If MeshToLabelMap is not skipped, do not skip the next CLIs: SegPostProcess, GenParaMesh and ParaToSPHARMMesh
if self.skip_meshToLabelMap == False:
return
# Skip SegPostProcess ?
if not self.interface.OverwriteSegPostProcess:
PostProcessOutputDirectory = outputDirectory + "/Step1_SegPostProcess"
PostProcessOutputFilepath = PostProcessOutputDirectory + "/" + self.inputRootname + "_pp.nrrd"
if os.path.exists(PostProcessOutputFilepath):
self.skip_segPostProcess = True
# If SegPostProcess is not skip, do not skip the next CLIs: GenParaMesh and ParaToSPHARMMesh
if self.skip_segPostProcess == False:
return
# Skip GenParaMesh ?
if not self.interface.OverwriteGenParaMesh:
GenParaMeshOutputDirectory = outputDirectory + "/Step2_GenParaMesh"
ParaOutputFilepath = GenParaMeshOutputDirectory + "/" + self.inputRootname + "_pp_para.vtk"
SurfOutputFilepath = GenParaMeshOutputDirectory + "/" + self.inputRootname + "_pp_surf.vtk"
if os.path.exists(ParaOutputFilepath) and os.path.exists(SurfOutputFilepath):
self.skip_genParaMesh = True
# If GenParaMesh is not skipped, do not skip the next CLI: ParaToSPHARMMesh
if self.skip_genParaMesh == False:
return
# Skip ParaToSPHARMMesh ?
if not self.interface.OverwriteParaToSPHARMMesh:
SPHARMMeshOutputDirectory = outputDirectory + "/Step3_ParaToSPHARMMesh"
SPHARMMeshRootname = self.inputRootname + "_pp_surf"
if os.path.exists(SPHARMMeshOutputDirectory):
for file in os.listdir(SPHARMMeshOutputDirectory):
if not file.find(SPHARMMeshRootname) == -1:
self.skip_paraToSPHARMMesh = True
def setup(self):
# Initialization of global variables
self.setupGlobalVariables()
self.setupSkipCLIs()
inputDirectory = self.interface.inputDirectory
outputDirectory = self.interface.outputDirectory
## Mesh To Label Map: Transform model in label map
cli_nodes = list() # list of the nodes used in the Mesh to Label Map step
cli_dirnames = list() # list of the directory pathes where the nodes used in the Mesh to Label Map step are stored
MeshToLabelMapOutputDirectory = outputDirectory + "/Step0_MeshToLabelMap"
MeshToLabelMapOutputFilename = self.inputRootname + ".nrrd"
MeshToLabelMapOutputFilepath = os.path.join(MeshToLabelMapOutputDirectory, MeshToLabelMapOutputFilename)
if not self.skip_meshToLabelMap:
# Setup of the parameters of the CLI
self.ID += 1
cli_parameters = {}
model_input_node = MRMLUtility.loadMRMLNode(self.inputRootname, inputDirectory, self.CaseInput, 'ModelFile')
cli_parameters["mesh"] = model_input_node
meshtolabelmap_output_node = MRMLUtility.createNewMRMLNode(self.inputRootname, slicer.vtkMRMLLabelMapVolumeNode())
cli_parameters["labelMap"] = meshtolabelmap_output_node
cli_parameters["spacingVec"] = "0.1,0.1,0.1"
self.inputExtension = "nrrd"
self.setupModule(slicer.modules.meshtolabelmap, cli_parameters)
# Setup of the nodes created by the CLI
# Creation of a folder in the output folder : LabelMap
if not os.path.exists(MeshToLabelMapOutputDirectory):
os.makedirs(MeshToLabelMapOutputDirectory)
cli_nodes.append(model_input_node)
cli_nodes.append(meshtolabelmap_output_node)
cli_dirnames.append(inputDirectory)
cli_dirnames.append(MeshToLabelMapOutputDirectory)
self.setupNode(0, cli_nodes, cli_dirnames, [False, True], [True, True])
else:
if os.path.exists(MeshToLabelMapOutputFilepath):
# Setup of the nodes which will be used by the next CLI
meshtolabelmap_output_node = MRMLUtility.loadMRMLNode(self.inputRootname, MeshToLabelMapOutputDirectory, MeshToLabelMapOutputFilename, 'LabelMap')
cli_nodes.append(meshtolabelmap_output_node)
cli_dirnames.append(MeshToLabelMapOutputDirectory)
self.setupNode(0, cli_nodes, cli_dirnames, [False], [True])
## Post Processed Segmentation
cli_nodes = list() # list of the nodes used in the Post Processed Segmentation step
cli_dirnames = list() # list of the directory pathes where the nodes used in the Post Processed Segmentation step are stored
PostProcessOutputDirectory = outputDirectory + "/Step1_SegPostProcess"
PostProcessOutputRootname = self.inputRootname + "_pp"
PostProcessOutputFilename = self.inputRootname + "_pp.nrrd"
if not self.skip_segPostProcess:
# Setup of the parameters of the CLI
self.ID += 1
cli_parameters = {}
# IF Mesh To Label Map has been skipped AND the input given was already a label map
if self.skip_meshToLabelMap and not os.path.exists(MeshToLabelMapOutputFilepath):
PossProcessInputDirectory = inputDirectory
labelmap_input_node = MRMLUtility.loadMRMLNode(self.inputRootname, inputDirectory, self.CaseInput, 'LabelMap')
# ELSE the input given was a model which has been transformed by MeshToLabelMap and store in the folder LabelMap
else:
labelmap_input_node = meshtolabelmap_output_node
PossProcessInputDirectory = MeshToLabelMapOutputDirectory
cli_parameters["fileName"] = labelmap_input_node
pp_output_node = MRMLUtility.createNewMRMLNode(PostProcessOutputRootname, slicer.vtkMRMLLabelMapVolumeNode())
cli_parameters["outfileName"] = pp_output_node.GetID()
if self.interface.RescaleSegPostProcess:
cli_parameters["scaleOn"] = True
cli_parameters["spacing_vect"] = str(self.interface.sx) + "," + str(self.interface.sy) + "," + str(self.interface.sz)
cli_parameters["label"] = self.interface.labelNumber
if self.interface.debug:
cli_parameters["debug"] = True
# Advanced parameters
if self.interface.GaussianFiltering:
cli_parameters["gaussianOn"] = True
cli_parameters["variance_vect"] = str(self.interface.VarianceX) + "," + str(self.interface.VarianceY) + "," + str(self.interface.VarianceZ)
self.setupModule(slicer.modules.segpostprocessclp, cli_parameters)
# Setup of the nodes created by the CLI
# Creation of a folder in the output folder : Step1_SegPostProcess
if not os.path.exists(PostProcessOutputDirectory):
os.makedirs(PostProcessOutputDirectory)
cli_nodes.append(labelmap_input_node)
cli_nodes.append(pp_output_node)
cli_dirnames.append(PossProcessInputDirectory)
cli_dirnames.append(PostProcessOutputDirectory)
self.setupNode(1, cli_nodes, cli_dirnames, [False,True], [True,True])
else:
# Setup of the nodes which will be used by the next CLI
pp_output_node = MRMLUtility.loadMRMLNode(PostProcessOutputRootname, PostProcessOutputDirectory, PostProcessOutputFilename, 'LabelMap')
cli_nodes.append(pp_output_node)
cli_dirnames.append(PostProcessOutputDirectory)
self.setupNode(1, cli_nodes, cli_dirnames, [False], [True])
## Generate Mesh Parameters
cli_nodes = list() # list of the nodes used in the Generate Mesh Parameters step
cli_dirnames = list() # list of the directory pathes where the nodes used in the Generate Mesh Parameters step are stored
GenParaMeshOutputDirectory = outputDirectory + "/Step2_GenParaMesh"
GenParaMeshOutputParaRootname = PostProcessOutputRootname + "_para"
GenParaMeshOutputSurfRootname = PostProcessOutputRootname + "_surf"
GenParaMeshOutputParaFilename = PostProcessOutputRootname + "_para.vtk"
GenParaMeshOutputSurfFilename = PostProcessOutputRootname + "_surf.vtk"
if not self.skip_genParaMesh:
# Setup of the parameters of the CLI
self.ID += 1
cli_parameters = {}
cli_parameters["infile"] = pp_output_node
para_output_model = MRMLUtility.createNewMRMLNode(GenParaMeshOutputParaRootname, slicer.vtkMRMLModelNode())
cli_parameters["outParaName"] = para_output_model
surfmesh_output_model = MRMLUtility.createNewMRMLNode(GenParaMeshOutputSurfRootname, slicer.vtkMRMLModelNode())
cli_parameters["outSurfName"] = surfmesh_output_model
cli_parameters["numIterations"] = self.interface.NumberofIterations
if self.interface.debug:
cli_parameters["debug"] = True
self.setupModule(slicer.modules.genparameshclp, cli_parameters)
# Setup of the nodes created by the CLI
# Creation of a folder in the output folder : Step2_GenParaMesh
if not os.path.exists(GenParaMeshOutputDirectory):
os.makedirs(GenParaMeshOutputDirectory)
cli_nodes.append(para_output_model)
cli_nodes.append(surfmesh_output_model)
cli_dirnames.append(GenParaMeshOutputDirectory)
cli_dirnames.append(GenParaMeshOutputDirectory)
self.setupNode(2, cli_nodes, cli_dirnames, [True,True], [True,True])
else:
# Setup of the nodes which will be used by the next CLI
para_output_model = MRMLUtility.loadMRMLNode(GenParaMeshOutputParaRootname, GenParaMeshOutputDirectory, GenParaMeshOutputParaFilename, 'ModelFile')
surfmesh_output_model = MRMLUtility.loadMRMLNode(GenParaMeshOutputSurfRootname, GenParaMeshOutputDirectory, GenParaMeshOutputSurfFilename, 'ModelFile')
cli_nodes.append(para_output_model)
cli_nodes.append(surfmesh_output_model)
cli_dirnames.append(GenParaMeshOutputDirectory)
cli_dirnames.append(GenParaMeshOutputDirectory)
self.setupNode(2, cli_nodes, cli_dirnames, [False, False], [True, True])
## Parameters to SPHARM Mesh
cli_nodes = list() # list of the nodes used in the Parameters To SPHARM Mesh step
cli_dirnames = list() # list of the directory pathes where the nodes used in the Parameters To SPHARM Mesh step are stored
SPHARMMeshOutputDirectory = outputDirectory + "/Step3_ParaToSPHARMMesh"
if not self.skip_paraToSPHARMMesh:
# Search of the flip to apply:
# 1 = flip along axes of x & y,
# 2 = flip along y & z,
# 3 = flip along x & z
# 4 = flip along x,
# 5 = flip along y,
# 6 = flip along x & y & z,
# 7 = flip along z where y is the smallest, x is the second smallest and z is the long axis of the ellipsoid
# 8 = All the flips
if not self.interface.sameFlipForAll:
# Recovery of the flip chosen by the user
row = self.pipelineID
widget = self.interface.tableWidget_ChoiceOfFlip.cellWidget(row, 1)
tuple = widget.children()
comboBox = qt.QComboBox()
comboBox = tuple[1]
flipIndexToApply = comboBox.currentIndex
pass
else:
flipIndexToApply = self.interface.choiceOfFlip
# Only one flip to apply
if flipIndexToApply < 8:
L = [1]
# All the flips to apply
else:
L = range(1,8)
for i in L:
# Setup of the parameters of the CLI
self.ID += 1
cli_parameters = {}
cli_parameters["inParaFile"] = para_output_model
cli_parameters["inSurfFile"] = surfmesh_output_model
# Creation of a folder in the output folder : Step3_ParaToSPHARMMesh
if not os.path.exists(SPHARMMeshOutputDirectory):
os.makedirs(SPHARMMeshOutputDirectory)
if flipIndexToApply < 8:
SPHARMMeshRootname = SPHARMMeshOutputDirectory + "/" + GenParaMeshOutputSurfRootname
cli_parameters["outbase"] = SPHARMMeshRootname
# For each flip creation of an output filename
else:
flipName = ['AlongXY', 'AlongYZ', 'AlongXZ', 'AlongX', 'AlongY', 'AlongXYZ', 'AlongZ']
SPHARMMeshRootname = SPHARMMeshOutputDirectory + "/" + self.inputRootname + "_flip" + flipName[i - 1] + "_pp_surf"
cli_parameters["outbase"] = SPHARMMeshRootname
cli_parameters["subdivLevel"] = self.interface.SubdivLevelValue
cli_parameters["spharmDegree"] = self.interface.SPHARMDegreeValue
cli_parameters["thetaIteration"] = self.interface.thetaIterationValue
cli_parameters["phiIteration"] = self.interface.phiIterationValue
if self.interface.medialMesh:
cli_parameters["medialMesh"] = True
if self.interface.debug:
cli_parameters["debug"] = True
# Advanced parameters
if self.interface.useRegTemplate:
cli_parameters["regTemplateFileOn"] = True
regtemplate_filepath = self.interface.regTemplate
regtemplate_dir = os.path.split(regtemplate_filepath)[0]
regtemplate_rootname = os.path.split(regtemplate_filepath)[1].split(".")[0]
regtemplate_filename = os.path.split(regtemplate_filepath)[1]
regtemplate_model = MRMLUtility.loadMRMLNode(regtemplate_rootname, regtemplate_dir, regtemplate_filename, 'ModelFile')
cli_parameters["regTemplateFile"] = regtemplate_model
cli_nodes.append(regtemplate_model)
cli_dirnames.append(regtemplate_filepath)
self.setupNode(i + 2, cli_nodes, cli_dirnames, [False], [True])
if self.interface.useFlipTemplate:
cli_parameters["flipTemplateFileOn"] = True
cli_parameters["flipTemplateFile"] = self.interface.flipTemplate
if flipIndexToApply < 8 :
cli_parameters["finalFlipIndex"] = flipIndexToApply
else:
cli_parameters["finalFlipIndex"] = i
self.setupModule(slicer.modules.paratospharmmeshclp, cli_parameters)
class ShapeAnalysisModuleWrapper:
"""
This class should be called from an external python script to run SPHARM-PDM method on multiple cases thanks to SlicerSALT or 3DSlicer.
External python script (ex: SPHARM-PDM.py) should do the following:
from ShapeAnalasisModule import ShapeAnalysisModuleWrapper
from ConfigParser import SafeConfigParser
parser = SafeConfigParser()
parser.read(sys.argv[1]) #argv[1]: 'path/to/SPHARM-PDM-parameters.ini'
inputDirectoryPath = parser.get('section', 'input-directory-path')
[...]
ShapeAnalysisModuleInstance = ShapeAnalysisModuleWrapper(inputDirectoryPath, outputDirectoryPath, [...])
ShapeAnalysisModuleInstance.startProcessing()
The external python script can be run non-interactively using this command:
./SlicerSalt --no-main-window --python-script /path/to/SPHARM-PDM.py path/to/SPHARM-PDM-parameters.py
"""
def __init__(self, inputDirectoryPath, outputDirectoryPath,
RescaleSegPostProcess, sx, sy, sz, labelNumber,
GaussianFiltering, VarianceX, VarianceY, VarianceZ,
numberofIterations,
SubdivLevelValue, SPHARMDegreeValue,
medialMesh, thetaIterationValue, phiIterationValue,
useRegTemplate, regTemplate,
useFlipTemplate, flipTemplate, choiceOfFlip):
self.Logic = ShapeAnalysisModuleLogic()
self.Logic.parameters.setWaitForCompletion(True)
self.Logic.parameters.setInputDirectory(inputDirectoryPath)
self.Logic.parameters.setOutputDirectory(outputDirectoryPath)
self.Logic.parameters.setRescaleSegPostProcess(RescaleSegPostProcess)
self.Logic.parameters.setSx(sx)
self.Logic.parameters.setSy(sy)
self.Logic.parameters.setSz(sz)
self.Logic.parameters.setLabelNumber(labelNumber)
self.Logic.parameters.setGaussianFiltering(GaussianFiltering)
self.Logic.parameters.setVarianceX(VarianceX)
self.Logic.parameters.setVarianceY(VarianceY)
self.Logic.parameters.setVarianceZ(VarianceZ)
self.Logic.parameters.setNumberofIterations(numberofIterations)
self.Logic.parameters.setSubdivLevelValue(SubdivLevelValue)
self.Logic.parameters.setSPHARMDegreeValue(SPHARMDegreeValue)
self.Logic.parameters.setMedialMesh(medialMesh)
self.Logic.parameters.setThetaIterationValue(thetaIterationValue)
self.Logic.parameters.setPhiIterationValue(phiIterationValue)
self.Logic.parameters.setUseRegTemplate(useRegTemplate)
self.Logic.parameters.setRegTemplate(regTemplate)
self.Logic.parameters.setUseFlipTemplate(useFlipTemplate)
self.Logic.parameters.setFlipTemplate(flipTemplate)
self.Logic.parameters.setChoiceOfFlip(choiceOfFlip)
def startProcessing(self):
# Setup the inputCases
# Possible extensions
exts = [".gipl", ".gipl.gz", ".mgh", ".mgh,gz", ".nii", ".nii.gz",".nrrd", ".vtk", ".vtp", ".hdr", ".mhd"]
# Search cases and add the filename to a list
self.Logic.InputCases = []
for file in os.listdir(self.Logic.parameters.inputDirectory):
for ext in exts:
if file.endswith(ext):
self.Logic.InputCases.append(file)
self.Logic.ShapeAnalysisCases()
class ShapeAnalysisModuleTest(ScriptedLoadableModuleTest):
"""
This is the test case for your scripted module.
Uses ScriptedLoadableModuleTest base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setUp(self):
slicer.mrmlScene.Clear(0)
self.inputRootnames = list()
def runTest(self):
self.setUp()
self.delayDisplay('Starting the tests')
self.test_ShapeAnalysisModule_completedWithoutErrors()
def test_ShapeAnalysisModule_completedWithoutErrors(self):
self.delayDisplay('Test 1: Run Shape Analysis Module')
self.Logic = ShapeAnalysisModuleLogic()
# Creation of input folder
inputDirectoryPath = slicer.app.temporaryPath + '/InputShapeAnalysisModule'
if not os.path.exists(inputDirectoryPath):
os.makedirs(inputDirectoryPath)
# Download the label map in the input folder
input_downloads = (
('https://data.kitware.com/api/v1/file/59945eb38d777f7d33e9c3c4/download', 'InputImage.gipl'),
)
for i in range(len(input_downloads)):
self.inputRootnames.append(input_downloads[i][1].split(".")[0])
self.download_files(inputDirectoryPath, input_downloads)
# Creation of output folder
outputDirectoryPath = slicer.app.temporaryPath + '/OutputShapeAnalysisModule'
if not os.path.exists(outputDirectoryPath):
os.makedirs(outputDirectoryPath)
# Creation of a template folder
templateDirectoryPath = slicer.app.temporaryPath + '/TemplateShapeAnalysisModule'
if not os.path.exists(templateDirectoryPath):
os.makedirs(templateDirectoryPath)
else:
for filename in os.listdir(templateDirectoryPath):
os.remove(os.path.join(templateDirectoryPath, filename))
# Download the registration template in the template folder
template_downloads = (
('https://data.kitware.com/api/v1/file/599462f78d777f7d33e9c3e6/download', 'RegistrationTemplateForParaToSPHARMMesh.vtk'),
)
self.download_files(templateDirectoryPath, template_downloads)
#
# Inputs of Shape Analysis Module
#
self.Logic.parameters.setWaitForCompletion(True)
self.Logic.parameters.setInputDirectory(inputDirectoryPath)
self.Logic.parameters.setOutputDirectory(outputDirectoryPath)
self.Logic.parameters.setOverwriteSegPostProcess(True)
self.Logic.parameters.setOverwriteGenParaMesh(True)
self.Logic.parameters.setNumberofIterations(25)
self.Logic.parameters.setOverwriteParaToSPHARMMesh(True)
self.Logic.parameters.setMedialMesh(True)
self.Logic.parameters.setUseRegTemplate(True)
regTemplateFilePath = templateDirectoryPath + '/RegistrationTemplateForParaToSPHARMMesh.vtk'
self.Logic.parameters.setChoiceOfFlip(3)
self.Logic.parameters.setRegTemplate(regTemplateFilePath)
# Setup the inputCases
# Possible extensions
exts = [".gipl", ".gipl.gz", ".mgh", ".mgh,gz", ".nii", ".nii.gz",".nrrd", ".vtk", ".vtp", ".hdr", ".mhd"]
# Search cases and add the filename to a list
self.Logic.InputCases = []
for file in os.listdir(inputDirectoryPath):
for ext in exts:
if file.endswith(ext):
self.Logic.InputCases.append(file)
self.delayDisplay('Run Shape Analysis Module')
self.Logic.ShapeAnalysisCases()
self.assertTrue(self.comparisonOfOutputsSegPostProcess())
self.assertTrue(self.comparisonOfOutputsGenParaMesh())
self.assertTrue(self.comparisonOfOutputsParaToSPHARMMesh())
self.cleanSlicerTemporaryDirectory()
self.delayDisplay('Tests Passed!')
slicer.mrmlScene.Clear(0)
def comparisonOfOutputsSegPostProcess(self):
self.delayDisplay('Test 2: Comparison of the outputs generated by SegPostProcess CLI')
# Checking the existence of the output directory Step1_SegPostProcess
outputDirectoryPath = slicer.app.temporaryPath + '/OutputShapeAnalysisModule'
SegPostProcessOutputDirectoryPath = outputDirectoryPath + '/Step1_SegPostProcess'
if not os.path.exists(SegPostProcessOutputDirectoryPath):
return False
# Downloading output data to compare with the ones generated by Shape Analysis Module during the tests
output_downloads = (
('https://data.kitware.com/api/v1/file/59945ee08d777f7d33e9c3d3/download', 'OutputImageToCompareSegPostProcess.nrrd'),
)
self.download_files(SegPostProcessOutputDirectoryPath, output_downloads)
# Comparison of the Post Process Mesh Outputs
self.delayDisplay('Comparison of the Post Process Outputs')
output_filenames = list()
for inputRootname in self.inputRootnames:
output_filename = inputRootname + "_pp.nrrd"
output_filenames.append(output_filename)
for i in range(len(output_filenames)):
volume2_filepath = os.path.join(SegPostProcessOutputDirectoryPath, output_filenames[i])
# Checking the existence of the output files in the folder Step1_SegPostProcess
if not os.path.exists(volume2_filepath):
return False
# Loading the 2 volumes for comparison
volume1_rootname = output_filenames[i].split(".")[0]
volume2_rootname = output_downloads[i][1].split(".")[0]
volume1 = MRMLUtility.loadMRMLNode(volume1_rootname, SegPostProcessOutputDirectoryPath, output_downloads[i][1], 'LabelMap')
volume2 = MRMLUtility.loadMRMLNode(volume2_rootname, SegPostProcessOutputDirectoryPath, output_filenames[i], 'LabelMap')
# Comparison
if not self.volume_comparison(volume1, volume2):
return False
return True
def comparisonOfOutputsGenParaMesh(self):
self.delayDisplay('Test 3: Comparison of the outputs generated by GenParaMesh CLI')
# Checking the existence of the output directory Step2_GenParaMesh
outputDirectoryPath = slicer.app.temporaryPath + '/OutputShapeAnalysisModule'
GenParaMeshOutputDirectoryPath = outputDirectoryPath + '/Step2_GenParaMesh'
if not os.path.exists(GenParaMeshOutputDirectoryPath):
return False
# Downloading output data to compare with the ones generated by Shape Analysis Module during the tests
output_downloads = (
('https://data.kitware.com/api/v1/file/59af09588d777f7d33e9cf9d/download', 'OutputImageToCompareGenParaMesh_para.vtk'),
('https://data.kitware.com/api/v1/file/59945ece8d777f7d33e9c3c7/download', 'OutputImageToCompareGenParaMesh_surf.vtk'),
)
self.download_files(GenParaMeshOutputDirectoryPath, output_downloads)
# Comparison of the Parameters Mesh Outputs
self.delayDisplay('Comparison of the Parameters Mesh Outputs')
output_filenames = list()
for inputRootname in self.inputRootnames:
output_para_filename = inputRootname + "_pp_para.vtk"
output_surf_filename = inputRootname + "_pp_surf.vtk"
output_filenames.append(output_para_filename)
output_filenames.append(output_surf_filename)
for i in range(len(output_filenames)):
model2_filepath = os.path.join(GenParaMeshOutputDirectoryPath, output_filenames[i])
# Checking the existence of the output files in the folder Step2_GenParaMesh
if not os.path.exists(model2_filepath):
return False
# Loading the 2 models for comparison
model1_rootname = output_downloads[i][1].split(".")[0]
model2_rootname = output_filenames[i].split(".")[0]
model1 = MRMLUtility.loadMRMLNode(model1_rootname, GenParaMeshOutputDirectoryPath, output_downloads[i][1], 'ModelFile')
model2 = MRMLUtility.loadMRMLNode(model2_rootname, GenParaMeshOutputDirectoryPath,output_filenames[i], 'ModelFile')
# Comparison
if not self.polydata_comparison(model1, model2):
return False
return True
def comparisonOfOutputsParaToSPHARMMesh(self):
self.delayDisplay('Test 4: Comparison of the outputs generated by ParaToSPHARMMesh CLI')
# Checking the existence of the output directory Step3_ParaToSPHARMMesh
outputDirectoryPath = slicer.app.temporaryPath + '/OutputShapeAnalysisModule'
ParaToSPHARMMeshOutputDirectoryPath = outputDirectoryPath + '/Step3_ParaToSPHARMMesh'
if not os.path.exists(ParaToSPHARMMeshOutputDirectoryPath):
return False
# Downloading output data to compare with the ones generated by Shape Analysis Module during the tests
output_downloads = (
('https://data.kitware.com/api/v1/file/59af09028d777f7d33e9cf9a/download', 'OutputImageToCompareParaToSPHARMMesh_SPHARM.vtk'),
('https://data.kitware.com/api/v1/file/59af09018d777f7d33e9cf91/download', 'OutputImageToCompareParaToSPHARMMesh_SPHARM_ellalign.vtk'),
('https://data.kitware.com/api/v1/file/59af09018d777f7d33e9cf94/download', 'OutputImageToCompareParaToSPHARMMesh_MedialMesh.vtk'),
('https://data.kitware.com/api/v1/file/59af09028d777f7d33e9cf97/download', 'OutputImageToCompareParaToSPHARMMesh_SPHARM_procalign.vtk'),
)
self.download_files(ParaToSPHARMMeshOutputDirectoryPath, output_downloads)
# Comparison of the SPHARM Mesh Outputs
self.delayDisplay('Comparison of the SPHARM Mesh Outputs')
output_filenames = list()
for inputRootname in self.inputRootnames:
output_spharm_filename = inputRootname + "_pp_surf_SPHARM.vtk"
output_ellalign_filename = inputRootname + "_pp_surf_SPHARM_ellalign.vtk"
output_medialmesh_filename = inputRootname + "_pp_surf_SPHARMMedialMesh.vtk"
output_procalign_filename = inputRootname + "_pp_surf_SPHARM_procalign.vtk"
output_filenames.append(output_spharm_filename)
output_filenames.append(output_ellalign_filename)
output_filenames.append(output_medialmesh_filename)
output_filenames.append(output_procalign_filename)
for i in range(len(output_filenames)):
model2_filepath = os.path.join(ParaToSPHARMMeshOutputDirectoryPath, output_filenames[i])
# Checking the existence of the output files in the folder Step3_ParaToSPHARMMesh
if not os.path.exists(model2_filepath):
return False
# Loading the 2 models for comparison
model1_rootname = output_downloads[i][1].split(".")[0]
model2_rootname = output_filenames[i].split(".")[0]
model1 = MRMLUtility.loadMRMLNode(model1_rootname, ParaToSPHARMMeshOutputDirectoryPath, output_downloads[i][1], 'ModelFile')
model2 = MRMLUtility.loadMRMLNode(model2_rootname, ParaToSPHARMMeshOutputDirectoryPath, output_filenames[i], 'ModelFile')
# Comparison
if not self.polydata_comparison(model1, model2):
return False
return True
def volume_comparison(self, volume1, volume2):
imageData1 = volume1.GetImageData()
imageData2 = volume2.GetImageData()
nbPoints1 = imageData1.GetNumberOfPoints()
nbPoints2 = imageData2.GetNumberOfPoints()
if not nbPoints1 == nbPoints2:
return False
dimension1 = imageData1.GetDimensions()
dimension2 = imageData2.GetDimensions()
if not dimension1 == dimension2:
return False
for i in range(dimension1[0]):
for j in range(dimension1[1]):
for k in range(dimension1[2]):
if not imageData1.GetScalarComponentAsDouble(i,j,k,0) == imageData2.GetScalarComponentAsDouble(i,j,k,0):
return False
return True
def polydata_comparison(self, model1, model2):
polydata1 = model1.GetPolyData()
polydata2 = model2.GetPolyData()
# Number of points
nbPoints1 = polydata1.GetNumberOfPoints()
nbPoints2 = polydata2.GetNumberOfPoints()
if not nbPoints1 == nbPoints2:
return False
# Polydata
data1 = polydata1.GetPoints().GetData()
data2 = polydata2.GetPoints().GetData()
# Number of Components
nbComponents1 = data1.GetNumberOfComponents()
nbComponents2 = data2.GetNumberOfComponents()
if not nbComponents1 == nbComponents2:
return False
# Points value
for i in range(nbPoints1):
for j in range(nbComponents1):
if not data1.GetTuple(i)[j] == data2.GetTuple(i)[j]:
return False
# Area
nbAreas1 = polydata1.GetPointData().GetNumberOfArrays()
nbAreas2 = polydata2.GetPointData().GetNumberOfArrays()
if not nbAreas1 == nbAreas2:
return False
for l in range(nbAreas1):
area1 = polydata1.GetPointData().GetArray(l)
area2 = polydata2.GetPointData().GetArray(l)
# Name of the area
nameArea1 = area1.GetName()
nameArea2 = area2.GetName()
if not nameArea1 == nameArea2:
return False
# Number of Components of the area
nbComponents1 = area1.GetNumberOfComponents()
nbComponents2 = area2.GetNumberOfComponents()
if not nbComponents1 == nbComponents2:
return False
# Points value in the area
for i in range(nbPoints1):
for j in range(nbComponents1):
if not data1.GetTuple(i)[j] == data2.GetTuple(i)[j]:
return False
return True
def download_files(self, directoryPath, downloads):
self.delayDisplay('Starting download')
for url, name in downloads:
filePath = os.path.join(directoryPath, name)
if not os.path.exists(filePath) or os.stat(filePath).st_size == 0:
print 'Requesting download %s from %s...\n' % (name, url)
urllib.urlretrieve(url, filePath)
self.delayDisplay('Finished with download')
# Function to delete all the data needed for the tests
def cleanSlicerTemporaryDirectory(self):
# deletion of the SAM input folder
inputDirectoryPath = slicer.app.temporaryPath + '/InputShapeAnalysisModule'
if os.path.exists(inputDirectoryPath):
shutil.rmtree(inputDirectoryPath)
# deletion of the SAM output folder
outputDirectoryPath = slicer.app.temporaryPath + '/OutputShapeAnalysisModule'
if os.path.exists(outputDirectoryPath):
shutil.rmtree(outputDirectoryPath)
# deletion of the SAM template folder
templateDirectoryPath = slicer.app.temporaryPath + '/TemplateShapeAnalysisModule'
if os.path.exists(templateDirectoryPath):
shutil.rmtree(templateDirectoryPath)
|
bpaniagua/SPHARM-PDM
|
Modules/Scripted/ShapeAnalysisModule/ShapeAnalysisModule.py
|
Python
|
apache-2.0
| 88,012 | 0.010192 |
## FormEncode, a Form processor
## Copyright (C) 2003, Ian Bicking <ianb@colorstudy.com>
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this library; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## NOTE: In the context of the Python environment, I interpret "dynamic
## linking" as importing -- thus the LGPL applies to the contents of
## the modules, but make no requirements on code importing these
## modules.
"""
Validator/Converters for use with FormEncode.
"""
import re
DateTime = None
mxlookup = None
httplib = None
urlparse = None
socket = None
from interfaces import *
from api import *
sha = random = None
try:
import sets
except ImportError:
sets = None
import cgi
import fieldstorage
True, False = (1==1), (0==1)
############################################################
## Utility methods
############################################################
# These all deal with accepting both mxDateTime and datetime
# modules and types
datetime_module = None
mxDateTime_module = None
def import_datetime(module_type):
global datetime_module, mxDateTime_module
if module_type is None:
try:
if datetime_module is None:
import datetime as datetime_module
return datetime_module
except ImportError:
if mxDateTime_module is None:
from mx import DateTime as mxDateTime_module
return mxDateTime_module
module_type = module_type.lower()
assert module_type in ('datetime', 'mxdatetime')
if module_type == 'datetime':
if datetime_module is None:
import datetime as datetime_module
return datetime_module
else:
if mxDateTime_module is None:
from mx import DateTime as mxDateTime_module
return mxDateTime_module
def datetime_now(module):
if module.__name__ == 'datetime':
return module.datetime.now()
else:
return module.now()
def datetime_makedate(module, year, month, day):
if module.__name__ == 'datetime':
return module.date(year, month, day)
else:
try:
return module.DateTime(year, month, day)
except module.RangeError, e:
raise ValueError(str(e))
############################################################
## Wrapper Validators
############################################################
class ConfirmType(FancyValidator):
"""
Confirms that the input/output is of the proper type.
Uses the parameters:
subclass:
The class or a tuple of classes; the item must be an instance
of the class or a subclass.
type:
A type or tuple of types (or classes); the item must be of
the exact class or type. Subclasses are not allowed.
Examples::
>>> cint = ConfirmType(subclass=int)
>>> cint.to_python(True)
True
>>> cint.to_python('1')
Traceback (most recent call last):
...
Invalid: '1' is not a subclass of <type 'int'>
>>> cintfloat = ConfirmType(subclass=(float, int))
>>> cintfloat.to_python(1.0), cintfloat.from_python(1.0)
(1.0, 1.0)
>>> cintfloat.to_python(1), cintfloat.from_python(1)
(1, 1)
>>> cintfloat.to_python(None)
Traceback (most recent call last):
...
Invalid: None is not a subclass of one of the types <type 'float'>, <type 'int'>
>>> cint2 = ConfirmType(type=int)
>>> cint2(accept_python=False).from_python(True)
Traceback (most recent call last):
...
Invalid: True must be of the type <type 'int'>
"""
subclass = None
type = None
messages = {
'subclass': "%(object)r is not a subclass of %(subclass)s",
'inSubclass': "%(object)r is not a subclass of one of the types %(subclassList)s",
'inType': "%(object)r must be one of the types %(typeList)s",
'type': "%(object)r must be of the type %(type)s",
}
def __init__(self, *args, **kw):
FancyValidator.__init__(self, *args, **kw)
if self.subclass:
if isinstance(self.subclass, list):
self.subclass = tuple(self.subclass)
elif not isinstance(self.subclass, tuple):
self.subclass = (self.subclass,)
self.validate_python = self.confirm_subclass
if self.type:
if isinstance(self.type, list):
self.type = tuple(self.type)
elif not isinstance(self.type, tuple):
self.type = (self.type,)
self.validate_python = self.confirm_type
def confirm_subclass(self, value, state):
if not isinstance(value, self.subclass):
if len(self.subclass) == 1:
msg = self.message('subclass', state, object=value,
subclass=self.subclass[0])
else:
subclass_list = ', '.join(map(str, self.subclass))
msg = self.message('inSubclass', state, object=value,
subclassList=subclass_list)
raise Invalid(msg, value, state)
def confirm_type(self, value, state):
for t in self.type:
if type(value) is t:
break
else:
if len(self.type) == 1:
msg = self.message('type', state, object=value,
type=self.type[0])
else:
msg = self.message('inType', state, object=value,
typeList=', '.join(map(str, self.type)))
raise Invalid(msg, value, state)
return value
class Wrapper(FancyValidator):
"""
Used to convert functions to validator/converters.
You can give a simple function for `to_python`, `from_python`,
`validate_python` or `validate_other`. If that function raises an
exception, the value is considered invalid. Whatever value the
function returns is considered the converted value.
Unlike validators, the `state` argument is not used. Functions
like `int` can be used here, that take a single argument.
Examples::
>>> def downcase(v):
... return v.lower()
>>> wrap = Wrapper(to_python=downcase)
>>> wrap.to_python('This')
'this'
>>> wrap.from_python('This')
'This'
>>> wrap2 = Wrapper(from_python=downcase)
>>> wrap2.from_python('This')
'this'
>>> wrap2.from_python(1)
Traceback (most recent call last):
...
Invalid: 'int' object has no attribute 'lower'
>>> wrap3 = Wrapper(validate_python=int)
>>> wrap3.to_python('1')
'1'
>>> wrap3.to_python('a')
Traceback (most recent call last):
...
Invalid: invalid literal for int(): a
"""
func_to_python = None
func_from_python = None
func_validate_python = None
func_validate_other = None
def __init__(self, *args, **kw):
for n in ['to_python', 'from_python', 'validate_python',
'validate_other']:
if kw.has_key(n):
kw['func_%s' % n] = kw[n]
del kw[n]
FancyValidator.__init__(self, *args, **kw)
self._to_python = self.wrap(self.func_to_python)
self._from_python = self.wrap(self.func_from_python)
self.validate_python = self.wrap(self.func_validate_python)
self.validate_other = self.wrap(self.func_validate_other)
def wrap(self, func):
if not func:
return None
def result(value, state, func=func):
try:
return func(value)
except Exception, e:
raise Invalid(str(e), {}, value, state)
return result
class Constant(FancyValidator):
"""
This converter converts everything to the same thing.
I.e., you pass in the constant value when initializing, then all
values get converted to that constant value.
This is only really useful for funny situations, like::
fromEmailValidator = ValidateAny(
ValidEmailAddress(),
Constant('unknown@localhost'))
In this case, the if the email is not valid
``'unknown@localhost'`` will be used instead. Of course, you
could use ``if_invalid`` instead.
Examples::
>>> Constant('X').to_python('y')
'X'
"""
__unpackargs__ = ('value',)
def _to_python(self, value, state):
return self.value
_from_python = _to_python
############################################################
## Normal validators
############################################################
class MaxLength(FancyValidator):
"""
Invalid if the value is longer than `maxLength`. Uses len(),
so it can work for strings, lists, or anything with length.
Examples::
>>> max5 = MaxLength(5)
>>> max5.to_python('12345')
'12345'
>>> max5.from_python('12345')
'12345'
>>> max5.to_python('123456')
Traceback (most recent call last):
...
Invalid: Enter a value less than 5 characters long
>>> max5(accept_python=False).from_python('123456')
Traceback (most recent call last):
...
Invalid: Enter a value less than 5 characters long
>>> max5.to_python([1, 2, 3])
[1, 2, 3]
>>> max5.to_python([1, 2, 3, 4, 5, 6])
Traceback (most recent call last):
...
Invalid: Enter a value less than 5 characters long
>>> max5.to_python(5)
Traceback (most recent call last):
...
Invalid: Invalid value (value with length expected)
"""
__unpackargs__ = ('maxLength',)
messages = {
'tooLong': "Enter a value less than %(maxLength)i characters long",
'invalid': "Invalid value (value with length expected)",
}
def validate_python(self, value, state):
try:
if value and \
len(value) > self.maxLength:
raise Invalid(self.message('tooLong', state,
maxLength=self.maxLength),
value, state)
else:
return None
except TypeError:
raise Invalid(self.message('invalid', state),
value, state)
class MinLength(FancyValidator):
"""
Invalid if the value is shorter than `minlength`. Uses len(),
so it can work for strings, lists, or anything with length.
Examples::
>>> min5 = MinLength(5)
>>> min5.to_python('12345')
'12345'
>>> min5.from_python('12345')
'12345'
>>> min5.to_python('1234')
Traceback (most recent call last):
...
Invalid: Enter a value more than 5 characters long
>>> min5(accept_python=False).from_python('1234')
Traceback (most recent call last):
...
Invalid: Enter a value more than 5 characters long
>>> min5.to_python([1, 2, 3, 4, 5])
[1, 2, 3, 4, 5]
>>> min5.to_python([1, 2, 3])
Traceback (most recent call last):
...
Invalid: Enter a value more than 5 characters long
>>> min5.to_python(5)
Traceback (most recent call last):
...
Invalid: Invalid value (value with length expected)
"""
__unpackargs__ = ('minLength',)
messages = {
'tooShort': "Enter a value more than %(minLength)i characters long",
'invalid': "Invalid value (value with length expected)",
}
def validate_python(self, value, state):
try:
if len(value) < self.minLength:
raise Invalid(self.message('tooShort', state,
minLength=self.minLength),
value, state)
except TypeError:
raise Invalid(self.message('invalid', state),
value, state)
class NotEmpty(FancyValidator):
"""
Invalid if value is empty (empty string, empty list, etc).
Generally for objects that Python considers false, except zero
which is not considered invalid.
Examples::
>>> ne = NotEmpty(messages={'empty': 'enter something'})
>>> ne.to_python('')
Traceback (most recent call last):
...
Invalid: enter something
>>> ne.to_python(0)
0
"""
messages = {
'empty': "Please enter a value",
}
def validate_python(self, value, state):
if value == 0:
# This isn't "empty" for this definition.
return value
if not value:
raise Invalid(self.message('empty', state),
value, state)
class Empty(FancyValidator):
"""
Invalid unless the value is empty. Use cleverly, if at all.
Examples::
>>> Empty.to_python(0)
Traceback (most recent call last):
...
Invalid: You cannot enter a value here
"""
messages = {
'notEmpty': "You cannot enter a value here",
}
def validate_python(self, value, state):
if value or value == 0:
raise Invalid(self.message('notEmpty', state),
value, state)
class Regex(FancyValidator):
"""
Invalid if the value doesn't match the regular expression `regex`.
The regular expression can be a compiled re object, or a string
which will be compiled for you.
Use strip=True if you want to strip the value before validation,
and as a form of conversion (often useful).
Examples::
>>> cap = Regex(r'^[A-Z]+$')
>>> cap.to_python('ABC')
'ABC'
Note that ``.from_python()`` calls (in general) do not validate
the input::
>>> cap.from_python('abc')
'abc'
>>> cap(accept_python=False).from_python('abc')
Traceback (most recent call last):
...
Invalid: The input is not valid
>>> cap.to_python(1)
Traceback (most recent call last):
...
Invalid: The input must be a string (not a <type 'int'>: 1)
>>> Regex(r'^[A-Z]+$', strip=True).to_python(' ABC ')
'ABC'
>>> Regex(r'this', regexOps=('I',)).to_python('THIS')
'THIS'
"""
regexOps = ()
strip = False
regex = None
__unpackargs__ = ('regex',)
messages = {
'invalid': "The input is not valid",
}
def __init__(self, *args, **kw):
FancyValidator.__init__(self, *args, **kw)
if isinstance(self.regex, str):
ops = 0
assert not isinstance(self.regexOps, str), (
"regexOps should be a list of options from the re module "
"(names, or actual values)")
for op in self.regexOps:
if isinstance(op, str):
ops |= getattr(re, op)
else:
ops |= op
self.regex = re.compile(self.regex, ops)
def validate_python(self, value, state):
self.assert_string(value, state)
if self.strip and (isinstance(value, str) or isinstance(value, unicode)):
value = value.strip()
if not self.regex.search(value):
raise Invalid(self.message('invalid', state),
value, state)
def _to_python(self, value, state):
if self.strip and \
(isinstance(value, str) or isinstance(value, unicode)):
return value.strip()
return value
class PlainText(Regex):
"""
Test that the field contains only letters, numbers, underscore,
and the hyphen. Subclasses Regex.
Examples::
>>> PlainText.to_python('_this9_')
'_this9_'
>>> PlainText.from_python(' this ')
' this '
>>> PlainText(accept_python=False).from_python(' this ')
Traceback (most recent call last):
...
Invalid: Enter only letters, numbers, or _ (underscore)
>>> PlainText(strip=True).to_python(' this ')
'this'
>>> PlainText(strip=True).from_python(' this ')
'this'
"""
regex = r"^[a-zA-Z_\-0-9]*$"
messages = {
'invalid': 'Enter only letters, numbers, or _ (underscore)',
}
class OneOf(FancyValidator):
"""
Tests that the value is one of the members of a given list.
If ``testValueLists=True``, then if the input value is a list or
tuple, all the members of the sequence will be checked (i.e., the
input must be a subset of the allowed values).
Use ``hideList=True`` to keep the list of valid values out of the
error message in exceptions.
Examples::
>>> oneof = OneOf([1, 2, 3])
>>> oneof.to_python(1)
1
>>> oneof.to_python(4)
Traceback (most recent call last):
...
Invalid: Value must be one of: 1; 2; 3 (not 4)
>>> oneof(testValueList=True).to_python([2, 3, [1, 2, 3]])
[2, 3, [1, 2, 3]]
>>> oneof.to_python([2, 3, [1, 2, 3]])
Traceback (most recent call last):
...
Invalid: Value must be one of: 1; 2; 3 (not [2, 3, [1, 2, 3]])
"""
list = None
testValueList = False
hideList = False
__unpackargs__ = ('list',)
messages = {
'invalid': "Invalid value",
'notIn': "Value must be one of: %(items)s (not %(value)r)",
}
def validate_python(self, value, state):
if self.testValueList and isinstance(value, (list, tuple)):
for v in value:
self.validate_python(v, state)
else:
if not value in self.list:
if self.hideList:
raise Invalid(self.message('invalid', state),
value, state)
else:
items = '; '.join(map(str, self.list))
raise Invalid(self.message('notIn', state,
items=items,
value=value),
value, state)
class DictConverter(FancyValidator):
"""
Converts values based on a dictionary which has values as keys for
the resultant values.
If ``allowNull`` is passed, it will not balk if a false value
(e.g., '' or None) is given (it will return None in these cases).
to_python takes keys and gives values, from_python takes values and
gives keys.
If you give hideDict=True, then the contents of the dictionary
will not show up in error messages.
Examples::
>>> dc = DictConverter({1: 'one', 2: 'two'})
>>> dc.to_python(1)
'one'
>>> dc.from_python('one')
1
>>> dc.to_python(3)
Traceback (most recent call last):
Invalid: Enter a value from: 1; 2
>>> dc2 = dc(hideDict=True)
>>> dc2.hideDict
True
>>> dc2.dict
{1: 'one', 2: 'two'}
>>> dc2.to_python(3)
Traceback (most recent call last):
Invalid: Choose something
>>> dc.from_python('three')
Traceback (most recent call last):
Invalid: Nothing in my dictionary goes by the value 'three'. Choose one of: 'one'; 'two'
"""
dict = None
hideDict = False
__unpackargs__ = ('dict',)
messages = {
'keyNotFound': "Choose something",
'chooseKey': "Enter a value from: %(items)s",
'valueNotFound': "That value is not known",
'chooseValue': "Nothing in my dictionary goes by the value %(value)s. Choose one of: %(items)s",
}
def _to_python(self, value, state):
try:
return self.dict[value]
except KeyError:
if self.hideDict:
raise Invalid(self.message('keyNotFound', state),
value, state)
else:
items = '; '.join(map(repr, self.dict.keys()))
raise Invalid(self.message('chooseKey', state,
items=items),
value, state)
def _from_python(self, value, state):
for k, v in self.dict.items():
if value == v:
return k
if self.hideDict:
raise Invalid(self.message('valueNotFound', state),
value, state)
else:
items = '; '.join(map(repr, self.dict.values()))
raise Invalid(self.message('chooseValue', state,
value=repr(value),
items=items),
value, state)
class IndexListConverter(FancyValidator):
"""
Converts a index (which may be a string like '2') to the value in
the given list.
Examples::
>>> index = IndexListConverter(['zero', 'one', 'two'])
>>> index.to_python(0)
'zero'
>>> index.from_python('zero')
0
>>> index.to_python('1')
'one'
>>> index.to_python(5)
Traceback (most recent call last):
Invalid: Index out of range
>>> index.to_python(None)
Traceback (most recent call last):
Invalid: Must be an integer index
>>> index.from_python('five')
Traceback (most recent call last):
Invalid: Item 'five' was not found in the list
"""
list = None
__unpackargs__ = ('list',)
messages = {
'integer': "Must be an integer index",
'outOfRange': "Index out of range",
'notFound': "Item %(value)s was not found in the list",
}
def _to_python(self, value, state):
try:
value = int(value)
except (ValueError, TypeError):
raise Invalid(self.message('integer', state),
value, state)
try:
return self.list[value]
except IndexError:
raise Invalid(self.message('outOfRange', state),
value, state)
def _from_python(self, value, state):
for i in range(len(self.list)):
if self.list[i] == value:
return i
raise Invalid(self.message('notFound', state,
value=repr(value)),
value, state)
class DateValidator(FancyValidator):
"""
Validates that a date is within the given range. Be sure to call
DateConverter first if you aren't expecting mxDateTime input.
``earliest_date`` and ``latest_date`` may be functions; if so,
they will be called each time before validating.
``after_now`` means a time after the current timestamp; note that
just a few milliseconds before now is invalid! ``today_or_after``
is more permissive, and ignores hours and minutes.
Examples::
>>> from datetime import datetime, timedelta
>>> d = DateValidator(earliest_date=datetime(2003, 1, 1))
>>> d.to_python(datetime(2004, 1, 1))
datetime.datetime(2004, 1, 1, 0, 0)
>>> d.to_python(datetime(2002, 1, 1))
Traceback (most recent call last):
...
Invalid: Date must be after Wednesday, 01 January 2003
>>> d.to_python(datetime(2003, 1, 1))
datetime.datetime(2003, 1, 1, 0, 0)
>>> d = DateValidator(after_now=True)
>>> now = datetime.now()
>>> d.to_python(now+timedelta(seconds=5)) == now+timedelta(seconds=5)
True
>>> d.to_python(now-timedelta(days=1))
Traceback (most recent call last):
...
Invalid: The date must be sometime in the future
>>> d.to_python(now+timedelta(days=1)) > now
True
>>> d = DateValidator(today_or_after=True)
>>> d.to_python(now) == now
True
"""
earliest_date = None
latest_date = None
after_now = False
# Like after_now, but just after this morning:
today_or_after = False
# Use 'datetime' to force the Python 2.3+ datetime module, or
# 'mxDateTime' to force the mxDateTime module (None means use
# datetime, or if not present mxDateTime)
datetime_module = None
messages = {
'after': "Date must be after %(date)s",
'before': "Date must be before %(date)s",
# Double %'s, because this will be substituted twice:
'date_format': "%%A, %%d %%B %%Y",
'future': "The date must be sometime in the future",
}
def validate_python(self, value, state):
if self.earliest_date:
if callable(self.earliest_date):
earliest_date = self.earliest_date()
else:
earliest_date = self.earliest_date
if value < earliest_date:
date_formatted = earliest_date.strftime(
self.message('date_format', state))
raise Invalid(
self.message('after', state,
date=date_formatted),
value, state)
if self.latest_date:
if callable(self.latest_date):
latest_date = self.latest_date()
else:
latest_date = self.latest_date
if value > latest_date:
date_formatted = latest_date.strftime(
self.message('date_format', state))
raise Invalid(
self.message('before', state,
date=date_formatted),
value, state)
if self.after_now:
dt_mod = import_datetime(self.datetime_module)
now = datetime_now(dt_mod)
if value < now:
date_formatted = now.strftime(
self.message('date_format', state))
raise Invalid(
self.message('future', state,
date=date_formatted),
value, state)
if self.today_or_after:
dt_mod = import_datetime(self.datetime_module)
now = datetime_now(dt_mod)
today = datetime_makedate(dt_mod,
now.year, now.month, now.day)
value_as_date = datetime_makedate(
dt_mod, value.year, value.month, value.day)
if value_as_date < today:
date_formatted = now.strftime(
self.message('date_format', state))
raise Invalid(
self.message('future', state,
date=date_formatted),
value, state)
class Bool(FancyValidator):
"""
Always Valid, returns True or False based on the value and the
existance of the value.
If you want to convert strings like ``'true'`` to booleans, then
use ``StringBoolean``.
Examples::
>>> Bool.to_python(0)
False
>>> Bool.to_python(1)
True
>>> Bool.to_python('')
False
>>> Bool.to_python(None)
False
"""
if_missing = False
def _to_python(self, value, state):
return bool(value)
_from_python = _to_python
class Int(FancyValidator):
"""
Convert a value to an integer.
Example::
>>> Int.to_python('10')
10
>>> Int.to_python('ten')
Traceback (most recent call last):
...
Invalid: Please enter an integer value
"""
messages = {
'integer': "Please enter an integer value",
}
def _to_python(self, value, state):
try:
return int(value)
except (ValueError, TypeError):
raise Invalid(self.message('integer', state),
value, state)
_from_python = _to_python
class Number(FancyValidator):
"""
Convert a value to a float or integer. Tries to convert it to
an integer if no information is lost.
::
>>> Number.to_python('10')
10
>>> Number.to_python('10.5')
10.5
>>> Number.to_python('ten')
Traceback (most recent call last):
...
Invalid: Please enter a number
"""
messages = {
'number': "Please enter a number",
}
def _to_python(self, value, state):
try:
value = float(value)
if value == int(value):
return int(value)
return value
except ValueError:
raise Invalid(self.message('number', state),
value, state)
class String(FancyValidator):
"""
Converts things to string, but treats empty things as the empty
string.
Also takes a `max` and `min` argument, and the string length must
fall in that range.
::
>>> String(min=2).to_python('a')
Traceback (most recent call last):
...
Invalid: Enter a value 2 characters long or more
>>> String(max=10).to_python('xxxxxxxxxxx')
Traceback (most recent call last):
...
Invalid: Enter a value less than 10 characters long
>>> String().from_python(None)
''
>>> String().from_python([])
''
"""
min = None
max = None
messages = {
'tooLong': "Enter a value less than %(max)i characters long",
'tooShort': "Enter a value %(min)i characters long or more",
}
def validate_python(self, value, state):
if (self.max is not None and value is not None
and len(value) > self.max):
raise Invalid(self.message('tooLong', state,
max=self.max),
value, state)
if (self.min is not None
and (not value or len(value) < self.min)):
raise Invalid(self.message('tooShort', state,
min=self.min),
value, state)
def _from_python(self, value, state):
if value:
return str(value)
if value == 0:
return str(value)
return ""
def empty_value(self, value):
return ''
class Set(FancyValidator):
"""
This is for when you think you may return multiple values for a
certain field.
This way the result will always be a list, even if there's only
one result. It's equivalent to ForEach(convertToList=True).
If you give ``use_set=True``, then it will return an actual
``sets.Set`` object.
::
>>> Set.to_python(None)
[]
>>> Set.to_python('this')
['this']
>>> Set.to_python(('this', 'that'))
['this', 'that']
>>> s = Set(use_set=True)
>>> s.to_python(None)
Set([])
>>> s.to_python('this')
Set(['this'])
>>> s.to_python(('this',))
Set(['this'])
"""
use_set = False
def _to_python(self, value, state):
if self.use_set:
if isinstance(value, sets.Set):
return value
elif isinstance(value, (list, tuple)):
return sets.Set(value)
elif value is None:
return sets.Set()
else:
return sets.Set([value])
else:
if isinstance(value, list):
return value
elif sets and isinstance(value, sets.Set):
return list(value)
elif isinstance(value, tuple):
return list(value)
elif value is None:
return []
else:
return [value]
def empty_value(self, value):
return []
class Email(FancyValidator):
r"""
Validate an email address.
If you pass ``resolve_domain=True``, then it will try to resolve
the domain name to make sure it's valid. This takes longer, of
course. You must have the `pyDNS <http://pydns.sf.net>`_ modules
installed to look up MX records.
::
>>> e = Email()
>>> e.to_python(' test@foo.com ')
'test@foo.com'
>>> e.to_python('test')
Traceback (most recent call last):
...
Invalid: An email address must contain a single @
>>> e.to_python('test@foobar.com.5')
Traceback (most recent call last):
...
Invalid: The domain portion of the email address is invalid (the portion after the @: foobar.com.5)
>>> e.to_python('o*reilly@test.com')
'o*reilly@test.com'
>>> e = Email(resolve_domain=True)
>>> e.to_python('doesnotexist@colorstudy.com')
'doesnotexist@colorstudy.com'
>>> e.to_python('test@thisdomaindoesnotexistithink.com')
Traceback (most recent call last):
...
Invalid: The domain of the email address does not exist (the portion after the @: thisdomaindoesnotexistithink.com)
"""
resolve_domain = False
usernameRE = re.compile(r"^[^ \t\n\r@<>()]+$", re.I)
domainRE = re.compile(r"^[a-z0-9][a-z0-9\.\-_]*\.[a-z]+$", re.I)
messages = {
'empty': 'Please enter an email address',
'noAt': 'An email address must contain a single @',
'badUsername': 'The username portion of the email address is invalid (the portion before the @: %(username)s)',
'badDomain': 'The domain portion of the email address is invalid (the portion after the @: %(domain)s)',
'domainDoesNotExist': 'The domain of the email address does not exist (the portion after the @: %(domain)s)',
}
def __init__(self, *args, **kw):
global mxlookup
FancyValidator.__init__(self, *args, **kw)
if self.resolve_domain:
if mxlookup is None:
try:
import DNS.Base
DNS.Base.ParseResolvConf()
from DNS.lazy import mxlookup
except ImportError:
import warnings
warnings.warn(
"pyDNS <http://pydns.sf.net> is not installed on "
"your system (or the DNS package cannot be found). "
"I cannot resolve domain names in addresses")
raise
def validate_python(self, value, state):
if not value:
raise Invalid(
self.message('empty', state),
value, state)
value = value.strip()
splitted = value.split('@', 1)
if not len(splitted) == 2:
raise Invalid(
self.message('noAt', state),
value, state)
if not self.usernameRE.search(splitted[0]):
raise Invalid(
self.message('badUsername', state,
username=splitted[0]),
value, state)
if not self.domainRE.search(splitted[1]):
raise Invalid(
self.message('badDomain', state,
domain=splitted[1]),
value, state)
if self.resolve_domain:
domains = mxlookup(splitted[1])
if not domains:
raise Invalid(
self.message('domainDoesNotExist', state,
domain=splitted[1]),
value, state)
def _to_python(self, value, state):
return value.strip()
class URL(FancyValidator):
"""
Validate a URL, either http://... or https://. If check_exists
is true, then we'll actually make a request for the page.
If add_http is true, then if no scheme is present we'll add
http://
::
>>> u = URL(add_http=True)
>>> u.to_python('foo.com')
'http://foo.com'
>>> u.to_python('http://hahaha/bar.html')
Traceback (most recent call last):
...
Invalid: That is not a valid URL
>>> u.to_python('https://test.com')
'https://test.com'
>>> u = URL(add_http=False, check_exists=True)
>>> u.to_python('http://google.com')
'http://google.com'
>>> u.to_python('http://colorstudy.com/doesnotexist.html')
Traceback (most recent call last):
...
Invalid: The server responded that the page could not be found
>>> u.to_python('http://this.domain.does.not.exists.formencode.org/test.html')
Traceback (most recent call last):
...
Invalid: An error occured when trying to connect to the server: (-2, 'Name or service not known')
"""
check_exists = False
add_http = True
url_re = re.compile(r'^(http|https)://'
r'[a-z0-9][a-z0-9\-\._]*\.[a-z]+'
r'(?:[0-9]+)?'
r'(?:/.*)?$', re.I)
scheme_re = re.compile(r'^[a-zA-Z]+:')
messages = {
'noScheme': 'You must start your URL with http://, https://, etc',
'badURL': 'That is not a valid URL',
'httpError': 'An error occurred when trying to access the URL: %(error)s',
'socketError': 'An error occured when trying to connect to the server: %(error)s',
'notFound': 'The server responded that the page could not be found',
'status': 'The server responded with a bad status code (%(status)s)',
}
def _to_python(self, value, state):
value = value.strip()
if self.add_http:
if not self.scheme_re.search(value):
value = 'http://' + value
match = self.scheme_re.search(value)
if not match:
raise Invalid(
self.message('noScheme', state),
value, state)
value = match.group(0).lower() + value[len(match.group(0)):]
if not self.url_re.search(value):
raise Invalid(
self.message('badURL', state),
value, state)
if self.check_exists and (value.startswith('http://')
or value.startswith('https://')):
self._check_url_exists(value, state)
return value
def _check_url_exists(self, url, state):
global httplib, urlparse, socket
if httplib is None:
import httplib
if urlparse is None:
import urlparse
if socket is None:
import socket
scheme, netloc, path, params, query, fragment = urlparse.urlparse(
url, 'http')
if scheme == 'http':
ConnClass = httplib.HTTPConnection
else:
ConnClass = httplib.HTTPSConnection
try:
conn = ConnClass(netloc)
if params:
path += ';' + params
if query:
path += '?' + query
conn.request('HEAD', path)
res = conn.getresponse()
except httplib.HTTPException, e:
raise Invalid(
self.message('httpError', state, error=e),
state, url)
except socket.error, e:
raise Invalid(
self.message('socketError', state, error=e),
state, url)
else:
if res.status == 404:
raise Invalid(
self.message('notFound', state),
state, url)
if (res.status < 200
or res.status >= 500):
raise Invalid(
self.message('status', state, status=res.status),
state, url)
class StateProvince(FancyValidator):
"""
Valid state or province code (two-letter).
Well, for now I don't know the province codes, but it does state
codes. Give your own `states` list to validate other state-like
codes; give `extra_states` to add values without losing the
current state values.
::
>>> s = StateProvince('XX')
>>> s.to_python('IL')
'IL'
>>> s.to_python('XX')
'XX'
>>> s.to_python('xx')
'XX'
>>> s.to_python('YY')
Traceback (most recent call last):
...
Invalid: That is not a valid state code
"""
states = ['AK', 'AL', 'AR', 'AZ', 'CA', 'CO', 'CT', 'DC', 'DE',
'FL', 'GA', 'HI', 'IA', 'ID', 'IN', 'IL', 'KS', 'KY',
'LA', 'MA', 'MD', 'ME', 'MI', 'MN', 'MO', 'MS', 'MT',
'NC', 'ND', 'NE', 'NH', 'NJ', 'NM', 'NV', 'NY', 'OH',
'OK', 'OR', 'PA', 'RI', 'SC', 'SD', 'TN', 'TX', 'UT',
'VA', 'VT', 'WA', 'WI', 'WV', 'WY']
extra_states = []
__unpackargs__ = ('extra_states',)
messages = {
'empty': 'Please enter a state code',
'wrongLength': 'Please enter a state code with TWO letters',
'invalid': 'That is not a valid state code',
}
def validate_python(self, value, state):
value = str(value).strip().upper()
if not value:
raise Invalid(
self.message('empty', state),
value, state)
if not value or len(value) != 2:
raise Invalid(
self.message('wrongLength', state),
value, state)
if value not in self.states \
and not (self.extra_states and value in self.extra_states):
raise Invalid(
self.message('invalid', state),
value, state)
def _to_python(self, value, state):
return str(value).strip().upper()
class PhoneNumber(FancyValidator):
"""
Validates, and converts to ###-###-####, optionally with
extension (as ext.##...)
@@: should add international phone number support
::
>>> p = PhoneNumber()
>>> p.to_python('333-3333')
Traceback (most recent call last):
...
Invalid: Please enter a number, with area code, in the form ###-###-####, optionally with "ext.####"
>>> p.to_python('555-555-5555')
'555-555-5555'
>>> p.to_python('1-393-555-3939')
'1-393-555-3939'
>>> p.to_python('321.555.4949')
'321.555.4949'
>>> p.to_python('3335550000')
'3335550000'
"""
# for emacs: "
_phoneRE = re.compile(r'^\s*(?:1-)?(\d\d\d)[\- \.]?(\d\d\d)[\- \.]?(\d\d\d\d)(?:\s*ext\.?\s*(\d+))?\s*$', re.I)
messages = {
'phoneFormat': 'Please enter a number, with area code, in the form ###-###-####, optionally with "ext.####"',
}
def _to_python(self, value, state):
self.assert_string(value, state)
match = self._phoneRE.search(value)
if not match:
raise Invalid(
self.message('phoneFormat', state),
value, state)
return value
def _from_python(self, value, state):
self.assert_string(value, state)
match = self._phoneRE.search(value)
if not match:
raise Invalid(self.message('phoneFormat', state),
value, state)
result = '%s-%s-%s' % (match.group(1), match.group(2), match.group(3))
if match.group(4):
result = result + " ext.%s" % match.group(4)
return result
class FieldStorageUploadConverter(FancyValidator):
"""
Converts a cgi.FieldStorage instance to
a value that FormEncode can use for file
uploads.
"""
def _to_python(self, value, state):
if isinstance(value, cgi.FieldStorage):
return fieldstorage.convert_fieldstorage(value)
else:
return value
class FileUploadKeeper(FancyValidator):
"""
Takes two inputs (a dictionary with keys ``static`` and
``upload``) and converts them into one value on the Python side (a
dictionary with ``filename`` and ``content`` keys). The upload
takes priority over the static value. The filename may be None if
it can't be discovered.
Handles uploads of both text and ``cgi.FieldStorage`` upload
values.
This is basically for use when you have an upload field, and you
want to keep the upload around even if the rest of the form
submission fails. When converting *back* to the form submission,
there may be extra values ``'original_filename'`` and
``'original_content'``, which may want to use in your form to show
the user you still have their content around.
"""
upload_key = 'upload'
static_key = 'static'
def _to_python(self, value, state):
upload = value.get(self.upload_key)
static = value.get(self.static_key, '').strip()
filename = content = None
if isinstance(upload, cgi.FieldStorage):
filename = upload.filename
content = upload.value
elif isinstance(upload, str) and upload:
filename = None
content = upload
if not content and static:
filename, content = static.split(None, 1)
if filename == '-':
filename = ''
else:
filename = filename.decode('base64')
content = content.decode('base64')
return {'filename': filename, 'content': content}
def _from_python(self, value, state):
filename = value.get('filename', '')
content = value.get('content', '')
if filename or content:
result = self.pack_content(filename, content)
return {self.upload_key: '',
self.static_key: result,
'original_filename': filename,
'original_content': content}
else:
return {self.upload_key: '',
self.static_key: ''}
def pack_content(self, filename, content):
enc_filename = self.base64encode(filename) or '-'
enc_content = (content or '').encode('base64')
result = '%s %s' % (enc_filename, enc_content)
return result
class DateConverter(FancyValidator):
"""
Validates and converts a textual date, like mm/yy, dd/mm/yy,
dd-mm-yy, etc, always assumes month comes second value is the
month.
Accepts English month names, also abbreviated. Returns value as
mx.DateTime object. Two year dates are assumed to be within
1950-2020, with dates from 21-49 being ambiguous and signaling an
error.
Use accept_day=False if you just want a month/year (like for a
credit card expiration date).
::
>>> d = DateConverter()
>>> d.to_python('12/3/09')
datetime.date(2009, 12, 3)
>>> d.to_python('12/3/2009')
datetime.date(2009, 12, 3)
>>> d.to_python('2/30/04')
Traceback (most recent call last):
...
Invalid: That month only has 29 days
>>> d.to_python('13/2/05')
Traceback (most recent call last):
...
Invalid: Please enter a month from 1 to 12
"""
## @@: accepts only US-style dates
accept_day = True
# also allowed: 'dd/mm/yyyy'
month_style = 'mm/dd/yyyy'
# Use 'datetime' to force the Python 2.3+ datetime module, or
# 'mxDateTime' to force the mxDateTime module (None means use
# datetime, or if not present mxDateTime)
datetime_module = None
_day_date_re = re.compile(r'^\s*(\d\d?)[\-\./\\](\d\d?|jan|january|feb|febuary|mar|march|apr|april|may|jun|june|jul|july|aug|august|sep|sept|september|oct|october|nov|november|dec|december)[\-\./\\](\d\d\d?\d?)\s*$', re.I)
_month_date_re = re.compile(r'^\s*(\d\d?|jan|january|feb|febuary|mar|march|apr|april|may|jun|june|jul|july|aug|august|sep|sept|september|oct|october|nov|november|dec|december)[\-\./\\](\d\d\d?\d?)\s*$', re.I)
_month_names = {
'jan': 1, 'january': 1,
'feb': 2, 'febuary': 2,
'mar': 3, 'march': 3,
'apr': 4, 'april': 4,
'may': 5,
'jun': 6, 'june': 6,
'jul': 7, 'july': 7,
'aug': 8, 'august': 8,
'sep': 9, 'sept': 9, 'september': 9,
'oct': 10, 'october': 10,
'nov': 11, 'november': 11,
'dec': 12, 'december': 12,
}
## @@: Feb. should be leap-year aware (but mxDateTime does catch that)
_monthDays = {
1: 31, 2: 29, 3: 31, 4: 30, 5: 31, 6: 30, 7: 31, 8: 31,
9: 30, 10: 31, 11: 30, 12: 31}
messages = {
'badFormat': 'Please enter the date in the form %(format)s',
'monthRange': 'Please enter a month from 1 to 12',
'invalidDay': 'Please enter a valid day',
'dayRange': 'That month only has %(days)i days',
'invalidDate': 'That is not a valid day (%(exception)s)',
'unknownMonthName': "Unknown month name: %(month)s",
'invalidYear': 'Please enter a number for the year',
'fourDigitYear': 'Please enter a four-digit year',
'wrongFormat': 'Please enter the date in the form %(format)s',
}
def _to_python(self, value, state):
if self.accept_day:
return self.convert_day(value, state)
else:
return self.convert_month(value, state)
def convert_day(self, value, state):
self.assert_string(value, state)
match = self._day_date_re.search(value)
if not match:
raise Invalid(self.message('badFormat', state,
format=self.month_style),
value, state)
day = int(match.group(1))
try:
month = int(match.group(2))
except TypeError:
month = self.make_month(match.group(2), state)
else:
if self.month_style == 'mm/dd/yyyy':
month, day = day, month
year = self.make_year(match.group(3), state)
if month > 12 or month < 1:
raise Invalid(self.message('monthRange', state),
value, state)
if day < 1:
raise Invalid(self.message('invalidDay', state),
value, state)
if self._monthDays[month] < day:
raise Invalid(self.message('dayRange', state,
days=self._monthDays[month]),
value, state)
dt_mod = import_datetime(self.datetime_module)
try:
return datetime_makedate(dt_mod, year, month, day)
except ValueError, v:
raise Invalid(self.message('invalidDate', state,
exception=str(v)),
value, state)
def make_month(self, value, state):
try:
return int(value)
except ValueError:
value = value.lower().strip()
if self._month_names.has_key(value):
return self._month_names[value]
else:
raise Invalid(self.message('unknownMonthName', state,
month=value),
value, state)
def make_year(self, year, state):
try:
year = int(year)
except ValueError:
raise Invalid(self.message('invalidYear', state),
year, state)
if year <= 20:
year = year + 2000
if year >= 50 and year < 100:
year = year + 1900
if year > 20 and year < 50:
raise Invalid(self.message('fourDigitYear', state),
year, state)
return year
def convert_month(self, value, state):
match = self._month_date_re.search(value)
if not match:
raise Invalid(self.message('wrongFormat', state,
format='mm/yyyy'),
value, state)
month = self.make_month(match.group(1), state)
year = self.make_year(match.group(2), state)
if month > 12 or month < 1:
raise Invalid(self.message('monthRange', state),
value, state)
dt_mod = import_datetime(self.datetime_module)
return datetime_makedate(dt_mod, year, month, 1)
def _from_python(self, value, state):
if self.if_empty is not NoDefault and not value:
return ''
if self.accept_day:
return self.unconvert_day(value, state)
else:
return self.unconvert_month(value, state)
def unconvert_day(self, value, state):
# @@ ib: double-check, improve
return value.strftime("%m/%d/%Y")
def unconvert_month(self, value, state):
# @@ ib: double-check, improve
return value.strftime("%m/%Y")
class TimeConverter(FancyValidator):
"""
Converts times in the format HH:MM:SSampm to (h, m, s).
Seconds are optional.
For ampm, set use_ampm = True. For seconds, use_seconds = True.
Use 'optional' for either of these to make them optional.
Examples::
>>> tim = TimeConverter()
>>> tim.to_python('8:30')
(8, 30)
>>> tim.to_python('20:30')
(20, 30)
>>> tim.to_python('30:00')
Traceback (most recent call last):
...
Invalid: You must enter an hour in the range 0-23
>>> tim.to_python('13:00pm')
Traceback (most recent call last):
...
Invalid: You must enter an hour in the range 1-12
>>> tim.to_python('12:-1')
Traceback (most recent call last):
...
Invalid: You must enter a minute in the range 0-59
>>> tim.to_python('12:02pm')
(12, 2)
>>> tim.to_python('12:02am')
(0, 2)
>>> tim.to_python('1:00PM')
(13, 0)
>>> tim.from_python((13, 0))
'13:00:00'
>>> tim2 = tim(use_ampm=True, use_seconds=False)
>>> tim2.from_python((13, 0))
'1:00pm'
>>> tim2.from_python((0, 0))
'12:00am'
>>> tim2.from_python((12, 0))
'12:00pm'
"""
use_ampm = 'optional'
prefer_ampm = False
use_seconds = 'optional'
messages = {
'noAMPM': 'You must indicate AM or PM',
'tooManyColon': 'There are two many :\'s',
'noSeconds': 'You may not enter seconds',
'secondsRequired': 'You must enter seconds',
'minutesRequired': 'You must enter minutes (after a :)',
'badNumber': 'The %(part)s value you gave is not a number: %(number)r',
'badHour': 'You must enter an hour in the range %(range)s',
'badMinute': 'You must enter a minute in the range 0-59',
'badSecond': 'You must enter a second in the range 0-59',
}
def _to_python(self, value, state):
time = value.strip()
explicit_ampm = False
if self.use_ampm:
last_two = time[-2:].lower()
if last_two not in ('am', 'pm'):
if self.use_ampm != 'optional':
raise Invalid(
self.message('noAMPM', state),
value, state)
else:
offset = 0
else:
explicit_ampm = True
if last_two == 'pm':
offset = 12
else:
offset = 0
time = time[:-2]
else:
offset = 0
parts = time.split(':')
if len(parts) > 3:
raise Invalid(
self.message('tooManyColon', state),
value, state)
if len(parts) == 3 and not self.use_seconds:
raise Invalid(
self.message('noSeconds', state),
value, state)
if (len(parts) == 2
and self.use_seconds
and self.use_seconds != 'optional'):
raise Invalid(
self.message('secondsRequired', state),
value, state)
if len(parts) == 1:
raise Invalid(
self.message('minutesRequired', state),
value, state)
try:
hour = int(parts[0])
except ValueError:
raise Invalid(
self.message('badNumber', state, number=parts[0], part='hour'),
value, state)
if explicit_ampm:
if hour > 12 or hour < 1:
raise Invalid(
self.message('badHour', state, number=hour, range='1-12'),
value, state)
if hour == 12 and offset == 12:
# 12pm == 12
pass
elif hour == 12 and offset == 0:
# 12am == 0
hour = 0
else:
hour += offset
else:
if hour > 23 or hour < 0:
raise Invalid(
self.message('badHour', state,
number=hour, range='0-23'),
value, state)
try:
minute = int(parts[1])
except ValueError:
raise Invalid(
self.message('badNumber', state,
number=parts[1], part='minute'),
value, state)
if minute > 59 or minute < 0:
raise Invalid(
self.message('badMinute', state, number=minute),
value, state)
if len(parts) == 3:
try:
second = int(parts[2])
except ValueError:
raise Invalid(
self.message('badNumber', state,
number=parts[2], part='second'))
if second > 59 or second < 0:
raise Invalid(
self.message('badSecond', state, number=second),
value, state)
else:
second = None
if second is None:
return (hour, minute)
else:
return (hour, minute, second)
def _from_python(self, value, state):
if isinstance(value, (str, unicode)):
return value
if hasattr(value, 'hour'):
hour, minute = value.hour, value.minute
elif len(value) == 3:
hour, minute, second = value
elif len(value) == 2:
hour, minute = value
second = 0
ampm = ''
if ((self.use_ampm == 'optional' and self.prefer_ampm)
or (self.use_ampm and self.use_ampm != 'optional')):
ampm = 'am'
if hour > 12:
hour -= 12
ampm = 'pm'
elif hour == 12:
ampm = 'pm'
elif hour == 0:
hour = 12
if self.use_seconds:
return '%i:%02i:%02i%s' % (hour, minute, second, ampm)
else:
return '%i:%02i%s' % (hour, minute, ampm)
class PostalCode(Regex):
"""
US Postal codes (aka Zip Codes).
::
>>> PostalCode.to_python('55555')
'55555'
>>> PostalCode.to_python('55555-5555')
'55555-5555'
>>> PostalCode.to_python('5555')
Traceback (most recent call last):
...
Invalid: Please enter a zip code (5 digits)
"""
regex = r'^\d\d\d\d\d(?:-\d\d\d\d)?$'
strip = True
messages = {
'invalid': 'Please enter a zip code (5 digits)',
}
class StripField(FancyValidator):
"""
Take a field from a dictionary, removing the key from the
dictionary.
``name`` is the key. The field value and a new copy of the
dictionary with that field removed are returned.
>>> StripField('test').to_python({'a': 1, 'test': 2})
(2, {'a': 1})
>>> StripField('test').to_python({})
Traceback (most recent call last):
...
Invalid: The name 'test' is missing
"""
__unpackargs__ = ('name',)
messages = {
'missing': 'The name %(name)s is missing',
}
def _to_python(self, valueDict, state):
v = valueDict.copy()
try:
field = v[self.name]
del v[self.name]
except KeyError:
raise Invalid(self.message('missing', state,
name=repr(self.name)),
valueDict, state)
return field, v
class StringBool(FancyValidator):
# Originally from TurboGears
"""
Converts a string to a boolean.
Values like 'true' and 'false' are considered True and False,
respectively; anything in ``true_values`` is true, anything in
``false_values`` is false, case-insensitive). The first item of
those lists is considered the preferred form.
::
>>> s = StringBoolean()
>>> s.to_python('yes'), s.to_python('no')
(True, False)
>>> s.to_python(1), s.to_python('N')
(True, False)
>>> s.to_python('ye')
Traceback (most recent call last):
...
Invalid: Value should be 'true' or 'false'
"""
true_values = ['true', 't', 'yes', 'y', 'on', '1']
false_values = ['false', 'f', 'no', 'n', 'off', '0']
messages = { "string" : "Value should be %(true)r or %(false)r" }
def _to_python(self, value, state):
if isinstance(value, (str, unicode)):
value = value.strip().lower()
if value in self.true_values:
return True
if not value or value in self.false_values:
return False
raise Invalid(self.message("string", state,
true=self.true_values[0],
false=self.false_values[0]),
value, state)
return bool(value)
def _from_python(self, value, state):
if value:
return self.true_values[0]
else:
return self.false_values[0]
# Should deprecate:
StringBoolean = StringBool
class SignedString(FancyValidator):
"""
Encodes a string into a signed string, and base64 encodes both the
signature string and a random nonce.
It is up to you to provide a secret, and to keep the secret handy
and consistent.
"""
messages = {
'malformed': 'Value does not contain a signature',
'badsig': 'Signature is not correct',
}
secret = None
nonce_length = 4
def _to_python(self, value, state):
global sha
if not sha:
import sha
assert self.secret is not None, (
"You must give a secret")
parts = value.split(None, 1)
if not parts or len(parts) == 1:
raise Invalid(self.message('malformed', state),
value, state)
sig, rest = parts
sig = sig.decode('base64')
rest = rest.decode('base64')
nonce = rest[:self.nonce_length]
rest = rest[self.nonce_length:]
expected = sha.new(str(self.secret)+nonce+rest).digest()
if expected != sig:
raise Invalid(self.message('badsig', state),
value, state)
return rest
def _from_python(self, value, state):
global sha
if not sha:
import sha
nonce = self.make_nonce()
value = str(value)
digest = sha.new(self.secret+nonce+value).digest()
return self.encode(digest)+' '+self.encode(nonce+value)
def encode(self, value):
return value.encode('base64').strip().replace('\n', '')
def make_nonce(self):
global random
if not random:
import random
return ''.join([
chr(random.randrange(256))
for i in range(self.nonce_length)])
class FormValidator(FancyValidator):
"""
A FormValidator is something that can be chained with a
Schema. Unlike normal chaining the FormValidator can
validate forms that aren't entirely valid.
The important method is .validate(), of course. It gets passed a
dictionary of the (processed) values from the form. If you have
.validate_partial_form set to True, then it will get the incomplete
values as well -- use .has_key() to test if the field was able to
process any particular field.
Anyway, .validate() should return a string or a dictionary. If a
string, it's an error message that applies to the whole form. If
not, then it should be a dictionary of fieldName: errorMessage.
The special key "form" is the error message for the form as a whole
(i.e., a string is equivalent to {"form": string}).
Return None on no errors.
"""
validate_partial_form = False
validate_partial_python = None
validate_partial_other = None
class FieldsMatch(FormValidator):
"""
Tests that the given fields match, i.e., are identical. Useful
for password+confirmation fields. Pass the list of field names in
as `field_names`.
::
>>> f = FieldsMatch('pass', 'conf')
>>> f.to_python({'pass': 'xx', 'conf': 'xx'})
{'conf': 'xx', 'pass': 'xx'}
>>> f.to_python({'pass': 'xx', 'conf': 'yy'})
Traceback (most recent call last):
...
Invalid: conf: Fields do not match
"""
show_match = False
field_names = None
validate_partial_form = True
__unpackargs__ = ('*', 'field_names')
messages = {
'invalid': "Fields do not match (should be %(match)s)",
'invalidNoMatch': "Fields do not match",
}
def validate_partial(self, field_dict, state):
for name in self.field_names:
if not field_dict.has_key(name):
return
self.validate_python(field_dict, state)
def validate_python(self, field_dict, state):
ref = field_dict[self.field_names[0]]
errors = {}
for name in self.field_names[1:]:
if field_dict.get(name, '') != ref:
if self.show_match:
errors[name] = self.message('invalid', state,
match=ref)
else:
errors[name] = self.message('invalidNoMatch', state)
if errors:
error_list = errors.items()
error_list.sort()
error_message = '<br>\n'.join(
['%s: %s' % (name, value) for name, value in error_list])
raise Invalid(error_message,
field_dict, state,
error_dict=errors)
class CreditCardValidator(FormValidator):
"""
Checks that credit card numbers are valid (if not real).
You pass in the name of the field that has the credit card
type and the field with the credit card number. The credit
card type should be one of "visa", "mastercard", "amex",
"dinersclub", "discover", "jcb".
You must check the expiration date yourself (there is no
relation between CC number/types and expiration dates).
::
>>> cc = CreditCardValidator()
>>> cc.to_python({'ccType': 'visa', 'ccNumber': '4111111111111111'})
{'ccNumber': '4111111111111111', 'ccType': 'visa'}
>>> cc.to_python({'ccType': 'visa', 'ccNumber': '411111111111111'})
Traceback (most recent call last):
...
Invalid: ccNumber: You did not enter a valid number of digits
>>> cc.to_python({'ccType': 'visa', 'ccNumber': '411111111111112'})
Traceback (most recent call last):
...
Invalid: ccNumber: You did not enter a valid number of digits
"""
validate_partial_form = True
cc_type_field = 'ccType'
cc_number_field = 'ccNumber'
__unpackargs__ = ('cc_type_field', 'cc_number_field')
messages = {
'notANumber': "Please enter only the number, no other characters",
'badLength': "You did not enter a valid number of digits",
'invalidNumber': "That number is not valid",
}
def validate_partial(self, field_dict, state):
if not field_dict.get(self.cc_type_field, None) \
or not field_dict.get(self.cc_number_field, None):
return None
self.validate_python(field_dict, state)
def validate_python(self, field_dict, state):
errors = self._validateReturn(field_dict, state)
if errors:
error_list = errors.items()
error_list.sort()
raise Invalid(
'<br>\n'.join(["%s: %s" % (name, value)
for name, value in error_list]),
field_dict, state, error_dict=errors)
def _validateReturn(self, field_dict, state):
ccType = field_dict[self.cc_type_field].lower().strip()
number = field_dict[self.cc_number_field].strip()
number = number.replace(' ', '')
number = number.replace('-', '')
try:
long(number)
except ValueError:
return {self.cc_number_field: self.message('notANumber', state)}
assert self._cardInfo.has_key(ccType), (
"I can't validate that type of credit card")
foundValid = False
validLength = False
for prefix, length in self._cardInfo[ccType]:
if len(number) == length:
validLength = True
if (len(number) == length
and number.startswith(prefix)):
foundValid = True
break
if not validLength:
return {self.cc_number_field: self.message('badLength', state)}
if not foundValid:
return {self.cc_number_field: self.message('invalidNumber', state)}
if not self._validateMod10(number):
return {self.cc_number_field: self.message('invalidNumber', state)}
return None
def _validateMod10(self, s):
"""
This code by Sean Reifschneider, of tummy.com
"""
double = 0
sum = 0
for i in range(len(s) - 1, -1, -1):
for c in str((double + 1) * int(s[i])):
sum = sum + int(c)
double = (double + 1) % 2
return((sum % 10) == 0)
_cardInfo = {
"visa": [('4', 16),
('4', 13)],
"mastercard": [('51', 16),
('52', 16),
('53', 16),
('54', 16),
('55', 16)],
"discover": [('6011', 16)],
"amex": [('34', 15),
('37', 15)],
"dinersclub": [('300', 14),
('301', 14),
('302', 14),
('303', 14),
('304', 14),
('305', 14),
('36', 14),
('38', 14)],
"jcb": [('3', 16),
('2131', 15),
('1800', 15)],
}
__all__ = []
for name, value in globals().items():
if isinstance(value, type) and issubclass(value, Validator):
__all__.append(name)
|
fregaham/DISP
|
formencode/validators.py
|
Python
|
gpl-2.0
| 71,779 | 0.001936 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.