repo_name stringlengths 5–100 | path stringlengths 4–231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6–947k | score float64 0–0.34 | prefix stringlengths 0–8.16k | middle stringlengths 3–512 | suffix stringlengths 0–8.17k |
---|---|---|---|---|---|---|---|---|
kubeflow/kubeflow | components/crud-web-apps/volumes/backend/apps/common/status.py | Python | apache-2.0 | 2,168 | 0 |
from kubeflow.kubeflow.crud_backend import api, status
def pvc_status(pvc):
"""
Set the status of the pvc
"""
if pvc.metadata.deletion_timestamp is not None:
return status.create_status(status.STATUS_PHASE.TERMINATING,
"Deleting Volume...")
if pvc.status.phase == "Bound":
return status.create_status(status.STATUS_PHASE.READY, "Bound")
# The PVC is in Pending state, we check the Events to find out why
evs = api.v1_core.list_namespaced_event(
namespace=pvc.metadata.namespace,
field_selector=api.events_field_selector(
"PersistentVolumeClaim", pvc.metadata.name
),
).items
# If there are no events, then the PVC was just created
if len(evs) == 0:
return status.create_status(status.STATUS_PHASE.WAITING,
"Provisioning Volume...")
msg = f"Pending: {evs[0].message}"
state = evs[0].reason
if evs[0].reason == "WaitForFirstConsumer":
phase = status.STATUS_PHASE.UNAVAILABLE
msg = (
"Pending: This volume will be bound when its first consumer"
" is created. E.g., when you first browse its contents, or"
" attach it to a notebook server"
)
elif evs[0].reason == "Provisioning":
phase = status.STATUS_PHASE.WAITING
elif evs[0].reason == "FailedBinding":
phase = status.STATUS_PHASE.WARNING
elif evs[0].type == "Warning":
phase = status.STATUS_PHASE.WARNING
elif evs[0].type == "Normal":
phase = status.STATUS_PHASE.READY
return status.create_status(phase, msg, state)
def viewer_status(viewer):
"""
Return a string representing the status of that viewer. If a deletion
timestamp is set we want to return a `Terminating` state.
"""
try:
ready = viewer["status"]["ready"]
except KeyError:
return status.STATUS_PHASE.UNINITIALIZED
if "deletionTimestamp" in viewer["metadata"]:
return status.STATUS_PHASE.TERMINATING
if not ready:
return status.STATUS_PHASE.WAITING
return status.STATUS_PHASE.READY
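# A brief hedged example of the mapping implemented above (the dict literal is
# illustrative, not a real Viewer custom resource):
def _viewer_status_example():
    viewer = {"metadata": {}, "status": {"ready": True}}
    return viewer_status(viewer)  # -> status.STATUS_PHASE.READY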
|
SwankSwashbucklers/bottle-builder | bottle-builder.py | Python | mit | 18,381 | 0.013111 |
"""
"""
################################################################################
##### Command Line Interface ###################################################
################################################################################
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from tempfile import gettempdir
import os
parser = ArgumentParser(
formatter_class=ArgumentDefaultsHelpFormatter,
description=__doc__ )
parser.add_argument("-p", "--path",
type=str,
help="the path to the desired location of the generated site")
parser.add_argument("-d", "--deploy",
action="store_true",
help="package site for movement to deployment server. Default path is the "
"current working directory, but the path flag will override that value" )
parser.add_argument("-r", "--reuse",
action="store_true",
help="if an already built website exists at the targeted path, attempt to "
"reuse already present resources (i.e. images, favicon elements and other "
"static resources)" )
args = parser.parse_args()
if args.path is None:
args.path = os.getcwd()
# if args.deploy:
# args.path = os.getcwd()
# else:
# args.path = gettempdir()
################################################################################
##### Overrides ################################################################
################################################################################
from string import Template
from re import compile
class TemplateWrapper():
def __init__(self, cls):
PYTHON_LL = 80
HTML_LL = 112
self.cls = cls
self.headers = [
( # Primary python file header template
compile(r'\$ph{(.*?)}'),
lambda x: "\n\n{1}\n##### {0} {2}\n{1}\n".format(
x.upper(), '#'*PYTHON_LL, '#'*(PYTHON_LL-len(x)-7) )
),
( # Secondary python file header template
compile(r'\$sh{(.*?)}'),
lambda x: "\n### {0} {1}".format(
x, '#'*(PYTHON_LL-len(x)-5) )
),
( # HTML file header template
compile(r'\$wh{(.*?)}'),
lambda x: "<!-- ***** {0} {1} -->".format(
x, '*'*(HTML_LL-len(x)-16) )
)
]
def __call__(self, template):
for header in self.headers:
ptn, tpl = header
for match in ptn.finditer(template):
replacements = ( match.group(0), tpl(match.group(1)) )
template = template.replace(*replacements)
template_obj = self.cls(template)
template_obj.populate = self.populate
return template_obj
@staticmethod
def populate(template, filepath, **kwargs):
for key, value in kwargs.items():
if isinstance(value, list):
kwargs[key] = "\n".join(
[ t[0].safe_substitute(**t[1]) for t in value ]
)
try:
with open(filepath, 'w') as f:
f.write(template.safe_substitute(**kwargs))
except Exception as exception:
raise exception
Template = TemplateWrapper(Template)
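# A hedged illustration of the wrapper above (the section name and field are
# made up): $ph/$sh/$wh header markers are expanded when the template is built,
# while ordinary ${...} fields are left for a later substitute call.
_DEMO_TEMPLATE = Template("$sh{Demo Section}\nvalue = ${value}\n")
# _DEMO_TEMPLATE.safe_substitute(value="42") yields a '### Demo Section ##...'
# banner line followed by 'value = 42'.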
from subprocess import Popen, call, DEVNULL, STDOUT, PIPE
from sys import executable
def sPopen(*args):
command, shell = list(args), True
if command[0] == 'python':
command[0] = executable
shell = False
if os.name == 'nt':
from subprocess import CREATE_NEW_CONSOLE
return Popen( command, shell=shell, creationflags=CREATE_NEW_CONSOLE )
else:
return Popen( command, shell=shell )
def sCall(*args):
command, shell = list(args), True
if command[0] == 'python':
command[0] = executable
shell = False
if os.name != 'nt':
shell = False
call( command, shell=shell, stdout=DEVNULL, stderr=STDOUT )
################################################################################
##### Templates ################################################################
################################################################################
APP_PY_TEMPLATE = Template("""\
\"""
${doc_string}
\"""
from bottle import run, route, get, post, error
from bottle import static_file, template, request
from bottle import HTTPError
$ph{Command Line Interface}
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from inspect import getframeinfo, currentframe
from os.path import dirname, abspath
import os
parser = ArgumentParser(
formatter_class=ArgumentDefaultsHelpFormatter,
description=__doc__ )
parser.add_argument('-d', '--deploy',
action='store_true',
help='Run server for deployment' )
parser.add_argument('-i', '--ip',
type=str,
default="127.0.0.1",
help='ip to run the server against, default localhost' )
parser.add_argument('-p', '--port',
type=str,
default="8080",
help='port to run server on' )
args = parser.parse_args()
# change working directory to script directory
os.chdir(dirname(abspath(getframeinfo(currentframe()).filename)))
$ph{Main Site Routes}
${main_routes}
$ph{API and Additional Site Routes}
${api_routes}
$ph{Static Routes}
${static_routes}
$sh{Favicon Routes}
${favicon_routes}
$sh{Image Routes}
${image_routes}
$sh{Font Routes}
${font_routes}
$sh{Stylesheet Routes}
${css_routes}
$sh{Javascript Routes}
${js_routes}
$ph{Error Routes}
@error(404)
def error404(error):
|
return 'nothing to see here'
$ph{Run Server}
if args.deploy:
run(host=args.ip, port=args.port, server='cherrypy') #deployment
else:
run(host=args.ip, port=args.port, debug=True, reloader=True) #development
""" )
MAIN_ROUTE_TEMPLATE = Template("""\
@route('/${path}')
def ${method_name}():
return template('${template}', request=request, template='${template}')
""" )
STATIC_ROUTE_TEMPLATE = Template("""\
@get('/${path}')
def load_resource():
return static_file('${file}', root='${root}')
""" )
WATCH_SASS_SCRIPT = Template("""\
from sys import argv, exit
from signal import signal, SIGTERM, SIGINT
from shutil import rmtree
from subprocess import Popen
from inspect import getframeinfo, currentframe
from os.path import dirname, abspath, isdir, isfile
from os import chdir, remove
def signal_term_handler(signal, frame):
if not p is None: p.kill()
if isfile("_all.scss"): remove("_all.scss")
if isdir(".sass-cache"): rmtree(".sass-cache")
print(argv[0])
remove("watch.py") # argv[0] contains full path
exit(0)
p = None
signal(SIGTERM, signal_term_handler)
signal(SIGINT, signal_term_handler)
chdir(dirname(abspath(getframeinfo(currentframe()).filename)))
command = "sass --watch"
for x in range(1, len(argv)):
command += " {0}.scss:../../www/static/css/{0}.css".format(argv[x])
p = Popen(command, shell=True)
p.wait()
""" )
################################################################################
##### Script Body ##############################################################
################################################################################
from os.path import relpath, abspath, normpath, join, isfile, isdir, splitext
from shutil import copy, copyfileobj, rmtree
from urllib.request import urlopen
from time import sleep
from re import match, search
from sys import exit
SCRIPT_DIR = os.getcwd()
PROJECT_NAME = relpath(SCRIPT_DIR, "..")
STATIC_ROUTE = lambda p, f, r: \
( STATIC_ROUTE_TEMPLATE, { "path": p, "file": f, "root": r } )
MAIN_ROUTE = lambda p, m, t: \
( MAIN_ROUTE_TEMPLATE, { "path": p, "method_name": m, "template": t } )
def migrate_files(directory, destination):
src_path = join(SCRIPT_DIR, directory)
if not isdir(destination): os.makedirs(destination)
for root, dirs, files in os.walk(src_path):
for dirname in dirs:
if dirname.startswith('!') or dirname in ['.DS_STORE']:
dirs.remove(dirname)
for filename in files:
if not filename.startswith('!') and filename not in ['.DS_Store']:
if not isfile(filename): #added for the reuse flag
copy(join(root, fil
|
simplegeo/sqlalchemy | examples/beaker_caching/model.py | Python | mit | 3,110 | 0.006752 |
"""Model. We are modeling Person objects with a collection
of Address objects. Each Address has a PostalCode, which
in turn references a City and then a Country:
Person --(1..n)--> Address
Address --(has a)--> PostalCode
PostalCode --(has a)--> City
City --(has a)--> Country
"""
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship
from meta import Base, FromCache, Session, RelationshipCache
class Country(Base):
__tablename__ = 'country'
id = Column(Integer, primary_key=True)
name = Column(String(100), nullable=False)
def __init__(self, name):
self.name = name
class City(Base):
__tablename__ = 'city'
id = Column(Integer, primary_key=True)
name = Column(String(100), nullable=False)
country_id = Column(Integer, ForeignKey('country.id'), nullable=False)
country = relationship(Country)
def __init__(self, name, country):
self.name = name
self.country = country
class PostalCode(Base):
__tablename__ = 'postal_code'
id = Column(Integer, primary_key=True)
code = Column(String(10), nullable=False)
city_id = Column(Integer, ForeignKey('city.id'), nullable=False)
city = relationship(City)
@property
def country(self):
return self.city.country
def __init__(self, code, city):
self.code = code
self.city = city
class Address(Base):
__tablename__ = 'address'
id = Column(Integer, primary_key=True)
person_id = Column(Integer, ForeignKey('person.id'), nullable=False)
street = Column(String(200), nullable=False)
postal_code_id = Column(Integer, ForeignKey('postal_code.id'))
postal_code = relationship(PostalCode)
@property
def city(self):
return self.postal_code.city
@property
def country(self):
return self.postal_code.country
def __str__(self):
return "%s\t"\
"%s, %s\t"\
"%s" % (self.street, self.city.name,
self.postal_code.code, self.country.name)
class Person(Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
name = Column(String(100), nullable=False)
addresses = relationship(Address, collection_class=set)
def __init__(self, name, *addresses):
self.name = name
self.addresses = set(addresses)
def __str__(self):
return self.name
def __repr__(self):
return "Person(name=%r)" % self.name
def format_full(self):
return "\t".join([str(x) for x in [self] + list(self.addresses)])
|
# Caching options. A set of three RelationshipCache options
# which can be applied to Query(), causing the "lazy load"
# of these attributes to be loaded from cache.
cache_address_bits = RelationshipCache("default", "byid", PostalCode.city).\
and_(
RelationshipCache("default", "byid", City.country)
).and_(
RelationshipCache("default", "byid", Address.postal_code)
)
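# A hedged usage sketch (assumes the Session configured in meta.py, imported
# above; the name value is illustrative):
def _query_with_relationship_cache(name):
    # With these options, the postal_code, city and country lazy loads on the
    # returned objects can be served from the cache region instead of the DB.
    q = Session.query(Person).filter(Person.name == name)
    return q.options(cache_address_bits).all()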
|
kmacinnis/sympy | sympy/core/numbers.py | Python | bsd-3-clause | 81,489 | 0.000577 |
from __future__ import print_function, division
import decimal
import math
import re as regex
import sys
from collections import defaultdict
from .core import C
from .sympify import converter, sympify, _sympify, SympifyError
from .singleton import S, Singleton
from .expr import Expr, AtomicExpr
from .decorators import _sympifyit, deprecated
from .cache import cacheit, clear_cache
from sympy.core.compatibility import (
as_int, integer_types, long, string_types, with_metaclass, HAS_GMPY,
SYMPY_INTS)
import sympy.mpmath as mpmath
import sympy.mpmath.libmp as mlib
from sympy.mpmath.libmp import mpf_pow, mpf_pi, mpf_e, phi_fixed
from sympy.mpmath.ctx_mp import mpnumeric
from sympy.mpmath.libmp.libmpf import (
finf as _mpf_inf, fninf as _mpf_ninf,
fnan as _mpf_nan, fzero as _mpf_zero, _normalize as mpf_normalize,
prec_to_dps)
rnd = mlib.round_nearest
_LOG2 = math.log(2)
def mpf_norm(mpf, prec):
"""Return the mpf tuple normalized appropriately for the indicated
precision after doing a check to see if zero should be returned or
not when the mantissa is 0. ``mpf_normalize`` always assumes that this
is zero, but it may not be, since the mpf values "+inf",
"-inf" and "nan" also have a mantissa of zero.
Note: this is not intended to validate a given mpf tuple, so sending
mpf tuples that were not created by mpmath may produce bad results. This
is only a wrapper to ``mpf_normalize`` which provides the check for non-
zero mpfs that have a 0 for the mantissa.
"""
sign, man, expt, bc = mpf
if not man:
# hack for mpf_normalize which does not do this;
# it assumes that if man is zero the result is 0
# (see issue 3540)
if not bc:
return _mpf_zero
else:
# don't change anything; this should already
# be a well formed mpf tuple
return mpf
rv = mpf_normalize(sign, man, expt, bc, prec, rnd)
return rv
# TODO: we should use the warnings module
_errdict = {"divide": False}
def seterr(divide=False):
"""
Should sympy raise an exception on 0/0 or return a nan?
divide == True .... raise an exception
divide == False ... return nan
"""
if _errdict["divide"] != divide:
clear_cache()
_errdict["divide"] = divide
def _as_integer_ratio(p):
"""compatibility implementation for python < 2.6"""
neg_pow, man, expt, bc = getattr(p, '_mpf_', mpmath.mpf(p)._mpf_)
p = [1, -1][neg_pow % 2]*man
if expt < 0:
q = 2**-expt
else:
q = 1
p *= 2**expt
return int(p), int(q)
def _decimal_to_Rational_prec(dec):
"""Convert an ordinary decimal instance to a Rational."""
if not dec.is_finite(): # NOTE: this is_finite is not SymPy's
raise TypeError("dec must be finite, got %s." % dec)
s, d, e = dec.as_tuple()
prec = len(d)
if e >= 0: # it's an integer
rv = Integer(int(dec))
else:
s = (-1)**s
d = sum([di*10**i for i, di in enumerate(reversed(d))])
rv = Rational(s*d, 10**-e)
return rv, prec
def _literal_float(f):
"""Return True if n can be interpreted as a floating point number."""
pat = r"[-+]?((\d*\.\d+)|(\d+\.?))([eE][-+]?\d+)?"
return bool(regex.match(pat, f))
# (a,b) -> gcd(a,b)
_gcdcache = {}
# TODO caching with decorator, but not to degrade performance
def igcd(a, b):
"""Computes positive, integer greatest common divisor of two numbers.
The algorithm is based on the well known Euclid's algorithm. To
improve speed, igcd() has its own caching mechanism implemented.
"""
try:
return _gcdcache[(a, b)]
except KeyError:
a, b = as_int(a), as_int(b)
if a and b:
if b < 0:
b = -b
while b:
a, b = b, a % b
else:
a = abs(a or b)
_gcdcache[(a, b)] = a
return a
def ilcm(a, b):
"""Computes integer least common multiple of two numbers. """
if a == 0 and b == 0:
return 0
else:
return a*b // igcd(a, b)
def igcdex(a, b):
"""Returns x, y, g such that g = x*a + y*b = gcd(a, b).
>>> from sympy.core.numbers import igcdex
>>> igcdex(2, 3)
(-1, 1, 1)
>>> igcdex(10, 12)
(-1, 1, 2)
>>> x, y, g = igcdex(100, 2004)
>>> x, y, g
(-20, 1, 4)
>>> x*100 + y*2004
4
"""
if (not a) and (not b):
return (0, 1, 0)
if not a:
return (0, b//abs(b), abs(b))
if not b:
return (a//abs(a), 0, abs(a))
if a < 0:
a, x_sign = -a, -1
else:
x_sign = 1
if b < 0:
b, y_sign = -b, -1
else:
y_sign = 1
x, y, r, s = 1, 0, 0, 1
while b:
(c, q) = (a % b, a // b)
(a, b, r, s, x, y) = (b, c, x - q*r, y - q*s, r, s)
return (x*x_sign, y*y_sign, a)
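# A hedged sanity check of the helpers above (values chosen for illustration):
def _gcd_helpers_demo():
    return igcd(12, -8), ilcm(4, 6), igcdex(10, 12)  # -> (4, 12, (-1, 1, 2))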
class Number(AtomicExpr):
"""
Represents any kind of number in sympy.
Floating point numbers are represented by the Float class.
Integer numbers (of any size), together with rational numbers (again,
there is no limit on their size) are represented by the Rational class.
If you want to represent, for example, ``1+sqrt(2)``, then you need to do::
Rational(1) + sqrt(Rational(2))
"""
is_commutative = True
is_number = True
__slots__ = []
# Used to make max(x._prec, y._prec) return x._prec when only x is a float
_prec = -1
is_Number = True
def __new__(cls, *obj):
if len(obj) == 1:
obj = obj[0]
if isinstance(obj, Number):
return obj
if isinstance(obj, SYMPY_INTS):
return Integer(obj)
if isinstance(obj, tuple) and len(obj) == 2:
return Rational(*obj)
if isinstance(obj, (float, mpmath.mpf, decimal.Decimal)):
return Float(obj)
if isinstance(obj, string_types):
val = sympify(obj)
if isinstance(val, Number):
return val
else:
raise ValueError('String "%s" does not denote a Number' % obj)
if isinstance(obj, Number):
return obj
msg = "expected str|int|long|float|Decimal|Number object but got %r"
raise TypeError(msg % type(obj).__name__)
def __divmod__(self, other):
from .containers import Tuple
from sympy.functions.elementary.complexes import sign
try:
other = Number(other)
except TypeError:
msg = "unsupported operand type(s) for divmod(): '%s' and '%s'"
raise TypeError(msg % (type(self).__name__, type(other).__name__))
if not other:
raise ZeroDivisionError('modulo by zero')
if self.is_Integer and other.is_Integer:
return Tuple(*divmod(self.p, other.p))
else:
rat = self/other
w = sign(rat)*int(abs(rat)) # = rat.floor()
r = self - other*w
#w*other + r == self
return Tuple(w, r)
def __rdivmod__(self, other):
try:
other = Number(other)
except TypeError:
msg = "unsupported operand type(s) for divmod(): '%s' and '%s'"
raise TypeError(msg % (type(other).__name__, type(self).__name__))
return divmod(other, self)
def __round__(self, *args):
return round(float(self), *args)
def _as_mpf_val(self, prec):
"""Evaluation of mpf tuple accurate to at least prec bits."""
raise NotImplementedError('%s needs ._as_mpf_val() method' %
(self.__class__.__name__))
def _eval_evalf(self, prec):
return Float._new(self._as_mpf_val(prec), prec)
def _as_mpf_op(self, prec):
prec = max(prec, self._prec)
return self._as_mpf_val(prec), prec
def __float__(self):
return mlib.to_float(self._as_mpf_val(53))
def _eval_conjugate(self):
return self
def _eval_order(self, *symbols):
# Order(5, x, y) -> Order(1,x,y)
return C.Order(S.One, *symbols)
def _eval_subs(self, old, new):
if
|
our-iot-project-org/pingow-web-service | src/posts/views.py | Python | mit | 5,217 | 0.000767 |
from django.core import serializers
from rest_framework.response import Response
from django.http import JsonResponse
try:
from urllib import quote_plus # python 2
except:
pass
try:
from urllib.parse import quote_plus # python 3
except:
pass
from django.contrib import messages
from django.contrib.contenttypes.models import ContentType
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from comments.forms import CommentForm
from comments.models import Comment
from .forms import PostForm
from .models import Post
def post_create(request):
if not request.user.is_staff or not request.user.is_superuser:
raise Http404
form = PostForm(request.POST or None, request.FILES or None)
if form.is_valid():
instance = form.save(commit=False)
instance.user = request.user
instance.save()
# message success
messages.success(request, "Successfully Created")
return HttpResponseRedirect(instance.get_absolute_url())
context = {
"form": form,
}
return render(request, "post_form.html", context)
def post_detail(request, slug=None):
instance = get_object_or_404(Post, slug=slug)
if instance.publish > timezone.now().date() or instance.draft:
if not request.user.is_staff or not request.user.is_superuser:
raise Http404
share_string = quote_plus(instance.content)
initial_data = {
"content_type": instance.get_content_type,
"object_id": instance.id
}
form = CommentForm(request.POST or None, initial=initial_data)
if form.is_valid() and request.user.is_authenticated():
c_type = form.cleaned_data.get("content_type")
content_type = ContentType.objects.get(model=c_type)
obj_id = form.cleaned_data.get('object_id')
content_data = form.cleaned_data.get("content")
parent_obj = None
try:
parent_id = int(request.POST.get("parent_id"))
except:
parent_id = None
if parent_id:
parent_qs = Comment.objects.filter(id=parent_id)
if parent_qs.exists() and parent_qs.count() == 1:
parent_obj = parent_qs.first()
new_comment, created = Comment.objects.get_or_create(
user=request.user,
content_type=content_type,
object_id=obj_id,
content=content_data,
parent=parent_obj,
)
return HttpResponseRedirect(new_comment.content_object.get_absolute_url())
comments = instance.comments
context = {
"title": instance.title,
"instance": instance,
"share_string": share_string,
"comments": comments,
"comment_form": form,
}
return render(request, "post_detail.html", context)
def post_list(request):
today = timezone.now().date()
queryset_list = Post.objects.active() # .order_by("-timestamp")
if request.user.is_staff or request.user.is_superuser:
queryset_list = Post.objects.all()
query = request.GET.get("q")
if query:
queryset_list = queryset_list.filter(
Q(title__icontains=query) |
Q(content__icontains=query) |
Q(user__first_name__icontains=query) |
Q(user__last_name__icontains=query)
).distinct()
paginator = Paginator(queryset_list, 8) # Show 25 contacts per page
page_request_var = "page"
page = request.GET.get(page_request_var)
try:
queryset = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
queryset = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
queryset = paginator.page(paginator.num_pages)
context = {
"object_list": queryset,
"title": "List",
"page_request_var": page_request_var,
"today": today,
}
return render(request, "post_list.html", context)
def post_update(request, slug=None):
if not request.user.is_staff or not request.user.is_superuser:
raise Http404
instance = get_object_or_404(Post, slug=slug)
form = PostForm(request.POST or None,
request.FILES or None, instance=instance)
if form.is_valid():
instance = form.save(commit=False)
instance.save()
messages.success(request, "<a href='#'>Item</a> Saved",
extra_tags='html_safe')
return HttpResponseRedirect(instance.get_absolute_url())
context = {
"title": instance.title,
"instance": instance,
"form": form,
}
return render(request, "post_form.html", context)
def post_delete(request, slug=None):
if not request.user.is_staff or not request.user.is_superuser:
raise Http404
instance = get_object_or_404(Post, slug=slug)
instance.delete()
messages.success(request, "Successfully deleted")
return redirect("posts:list")
|
buzztroll/staccato | staccato/cmd/api.py | Python | apache-2.0 | 899 | 0.001112 |
import eventlet
import gettext
import sys
from staccato.common import config
import staccato.openstack.common.wsgi as os_wsgi
import staccato.openstack.common.pastedeploy as os_pastedeploy
# Monkey patch socket and time
eventlet.patcher.monkey_patch(all=False, socket=True, time=True)
gettext.install('staccato', unicode=1)
def fail(returncode, e):
sys.stderr.write("ERROR: %s\n" % e)
sys.exit(returncode)
def main():
try:
conf = config.get_config_object()
paste_file = conf.find_file(conf.paste_deploy.config_file)
wsgi_app = os_pastedeploy.paste_deploy_app(paste_file,
                                           'staccato-api',
                                           conf)
server = os_wsgi.Service(wsgi_app, conf.bind_port)
server.start()
server.wait()
except RuntimeError as e:
fail(1, e)
main()
|
verejnedigital/verejne.digital | data/prod_generation/entity_tools.py | Python | apache-2.0 | 3,557 | 0.000562 |
"""Utility methods for handling Entities.
These methods can be shared between entity generation (invoked through
the Entities class) at the start of prod data generation, and between
post processing methods (such as adding edges between family members
and neighbours).
"""
import codecs
import collections
import re
def get_surnames():
"""Retrieves a set of surnames from a provided data file."""
path_surnames = 'prod_generation/surnames.txt'
with codecs.open(path_surnames, 'r') as f:
return set(line.strip().lower() for line in f.readlines())
def get_academic_titles_parser():
"""Returns a regular expression for parsing academic titles."""
# Read list of academic titles from the data file.
path_titles = 'prod_generation/academic_titles.txt'
with codecs.open(path_titles, 'r') as f:
titles = set(line.strip() for line in f.readlines())
# Compile the regular expression.
re_titles = "|".join(titles)
re_name = ("^(?P<titles_pre>((%s)\.?( |,))*)"
"(?P<name_clean>.*?)"
"(?P<titles_suffix>(( |,)*(%s)\.?)*)$" % (
re_titles, re_titles))
return re.compile(re_name)
# NamedTuple for parsed entity names:
# - `titles_pre` is a string of academic titles detected before name
# - `firstnames` is a non-empty list of given names
# - `surname` is a string
# - `titles_suf` is a string of academic titles detected after name
ParsedName = collections.namedtuple(
"ParsedName",
["titles_prefix", "firstnames", "surname", "titles_suffix"]
)
def parse_entity_name(entity_name, titles_parser, surnames,
verbose=False):
"""Parses an entity name into a ParsedName, or returns None."""
if verbose:
print('entity_name = |%s|' % (entity_name))
# Remove newlines from `entity_name`:
entity_name = entity_name.replace("\n", " ")
# Trim name of Zivnost, followed by first occurrence of (' - ').
p = entity_name.find(' - ')
if (p > 0):
name = entity_name[:p]
else:
name = entity_name
if verbose:
print('name = |%s|' % (name))
# Trim academic titles from the start and end of the name.
match = titles_parser.match(name).groupdict()
titles_pre = match['titles_pre'] if 'titles_pre' in match else ''
titles_suf = match['titles_suf'] if 'titles_suf' in match else ''
name_clean = match['name_clean']
if verbose:
print('name_clean = |%s|' % (name_clean))
# Split cleaned name on spaces (it should now be a list of
# firstnames, followed by a surname).
names = name_clean.split()
# Lowercase the names, so that we get case-insensitive matching on
# both surnames and firstnames downstream.
names = [name.lower() for name in names]
# Strict matching: Check that last name is a surname
# if len(names) >= 2 and names[-1] in surnames:
# return {
# 'titles_pre': titles_pre,
# 'firstnames': names[:-1],
# 'surname': names[-1],
# 'titles_suf': titles_suf,
# }
# Less conservative matching: Find the last token that is a surname,
# and take the rest before it as given names
i = len(names) - 1
while (i >= 1) and (names[i] not in surnames):
i -= 1
if i >= 1:
return ParsedName(
titles_prefix=titles_pre,
firstnames=names[:i],
surname=names[i],
titles_suffix=titles_suf
)
else:
if verbose:
print('Parse failed')
return None
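# A hedged usage sketch (assumes the surname and title data files referenced
# above exist and contain the relevant entries; the input string is made up):
def _parse_entity_name_demo():
    surnames = get_surnames()
    titles_parser = get_academic_titles_parser()
    # Strips the academic title, drops the trade-name tail after ' - ', and
    # splits given names from the surname (case-insensitively).
    return parse_entity_name("Ing. Jan Novak - Zivnost", titles_parser, surnames)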
|
leleobhz/scripts | python/chat_back_machine/engine/PyDbLite/MySQL.py | Python | gpl-2.0 | 11,904 | 0.015793 |
"""PyDbLite.py adapted for MySQL backend
Differences with PyDbLite:
- pass the connection to the MySQL db as argument to Base()
- in create(), field definitions must specify a type
- no index
- the Base() instance has a cursor attribute, so that SQL requests
can be executed :
db.cursor.execute(an_sql_request)
result = db.cursor.fetchall()
Fields must be declared
Syntax :
from PyDbLite.MySQL import Base
import MySQLdb
# connect to a MySQL server and use database "test"
connection = MySQLdb.connect("localhost","root","admin")
connection.cursor().execute("USE test")
# pass the connection as argument to Base creation
db = Base('dummy',connection)
# create new base with field names
db.create(('name','INTEGER'),('age','INTEGER'),('size','REAL'))
# existing base
db.open()
# insert new record
db.insert(name='homer',age=23,size=1.84)
# records are dictionaries with a unique integer key __id__
# selection by list comprehension
res = [ r for r in db if 30 > r['age'] >= 18 and r['size'] < 2 ]
# or generator expression
for r in (r for r in db if r['name'] in ('homer','marge') ):
# simple selection (equality test)
res = db(age=30)
# delete a record or a list of records
db.delete(one_record)
db.delete(list_of_records)
# delete a record by its id
del db[rec_id]
# direct access by id
record = db[rec_id] # the record such that record['__id__'] == rec_id
# update
db.update(record,age=24)
# add and drop fields
db.add_field('new_field')
db.drop_field('name')
# save changes on disk
db.commit()
"""
import os
import datetime  # used by _conv() below for date formatting
import cPickle
import bisect
import MySQLdb
# compatibility with Python 2.3
try:
set([])
except NameError:
from sets import Set as set
class Base:
def __init__(self,basename,connection):
"""basename = name of the PyDbLite database = a MySQL table
connection = a connection to a MySQL database"""
self.name = basename
self.conn = connection
self.cursor = connection.cursor()
self._iterating = False
def create(self,*fields,**kw):
"""Create a new base with specified field names
A keyword argument mode can be specified ; it is used if a file
with the base name already exists
- if mode = 'open' : open the existing base, ignore the fields
- if mode = 'override' : erase the existing base and create a
new one with the specified fields"""
self.mode = mode = kw.get("mode",None)
if self._table_exists():
if mode == "override":
self.cursor.execute("DROP TABLE %s" %self.name)
elif mode == "open":
return self.open()
else:
raise IOError,"Base %s already exists" %self.name
self.fields = [ f[0] for f in fields ]
self.all_fields = ["__id__","__version__"]+self.fields
_types = ["INTEGER PRIMARY KEY AUTO_INCREMENT","INTEGER"] + \
[f[1] for f in fields]
f_string = [ "%s %s" %(f,t) for (f,t) in zip(self.all_fields,_types)]
sql = "CREATE TABLE %s (%s)" %(self.name,
",".join(f_string))
self.cursor.execute(sql)
return self
def open(self):
"""Open an existing database"""
if self._table_exists():
self.mode = "open"
self._get_table_info()
return self
# table not found
raise IOError,"Table %s doesn't exist" %self.name
def _table_exists(self):
"""Database-specific method to see if the table exists"""
self.cursor.execute("SHOW TABLES")
for table in self.cursor.fetchall():
if table[0].lower() == self.name.lower():
return True
return False
def _get_table_info(self):
"""Database-specific method to get field names"""
self.cursor.execute('DESCRIBE %s' %self.name)
self.all_fields = [ f[0] for f in self.cursor.fetchall() ]
self.fields = self.all_fields[2:]
def commit(self):
"""No use here ???"""
pass
def insert(self,*args,**kw):
"""Insert a record in the database
Parameters can be positional or keyword arguments. If positional
they must be in the same order as in the create() method
If some of the fields are missing the value is set to None
Returns the record identifier
"""
if args:
kw = dict([(f,arg) for f,arg in zip(self.all_fields[2:],args)])
kw["__version__"] = 0
vals = self._make_sql_params(kw)
sql = "INSERT INTO %s SET %s" %(self.name,",".join(vals))
res = self.cursor.execute(sql)
self.cursor.execute("SELECT LAST_INSERT_ID()")
__id__ = self.cursor.fetchone()[0]
return __id__
def delete(self,removed):
"""Remove a single record, or the records in an iterable
Before starting deletion, test if all records are in the base
and don't have twice the same __id__
Return the number of deleted items
"""
if isinstance(removed,dict):
# remove a single record
removed = [removed]
else:
# convert iterable into a list (to be able to sort it)
removed = [ r for r in removed ]
if not removed:
return 0
_ids = [ r['__id__'] for r in removed ]
_ids.sort()
sql = "DELETE FROM %s WHERE __id__ IN (%s)" %(self.name,
",".join([str(_id) for _id in _ids]))
self.cursor.execute(sql)
return len(removed)
def update(self,record,**kw):
"""Update the record with new keys and values"""
# increment version number
kw["__version__"] = record["__version__"] + 1
vals = self._make_sql_params(kw)
sql = "UPDATE %s SET %s WHERE __id__=%s" %(self.name,
",".join(vals),record["__id__"])
self.cursor.execute(sql)
def _make_sql_params(self,kw):
"""Make a list of strings to pass to an SQL statement
from the dictionary kw with Python types"""
vals = []
for k,v in kw.iteritems():
vals.append('%s=%s' %(k,self._conv(v)))
return vals
def _conv(self,v):
if isinstance(v,str):
v = v.replace('"','""')
return '"%s"' %v
elif isinstance(v,datetime.date):
return v.strftime("%Y%m%d")
else:
return v
def _make_record(self,row):
"""Make a record dictionary from the result of a fetch_"""
return dict(zip(self.all_fields,row))
def add_field(self,field,default=None):
fname,ftype = field
if fname in self.all_fields:
raise ValueError,'Field "%s" already defined' %fname
sql = "ALTER TABLE %s ADD %s %s" %(self.name,fname,ftype)
if default is not None:
sql += " DEFAULT %s" %self._conv(default)
self.cursor.execute(sql)
self.commit()
self._get_table_info()
def drop_field(self,field):
if field in ["__id__","__version__"]:
raise ValueError,"Can't delete field %s" %field
if not field in self.fields:
raise ValueError,"Field %s not found in base" %field
sql = "ALTER TABLE %s DROP %s" %(self.name,field)
self.cursor.execute(sql)
self._get_table_info()
def __call__(self,**kw):
"""Selection by field values
db(key=value) returns the list of records where r[key] = value"""
for key in kw:
if not key in self.all_fields:
raise ValueError,"Field %s not in the database" %key
vals = self._make_sql_params(kw)
sql = "SELECT * FROM %s WHERE %s" %(self.name,",".join(vals))
self.cursor.execute(sql)
return [self._make_record(row) for row in self.cursor.fetchall(
|
Accelerite/cinder | cinder/tests/targets/test_tgt_driver.py | Python | apache-2.0 | 11,627 | 0.000172 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import tempfile
import mock
from oslo_concurrency import processutils as putils
from oslo_utils import timeutils
from cinder import context
from cinder import test
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.targets import tgt
from cinder.volume import utils as vutils
class TestTgtAdmDriver(test.TestCase):
def setUp(self):
super(TestTgtAdmDriver, self).setUp()
self.configuration = conf.Configuration(None)
self.configuration.append_config_values = mock.Mock(return_value=0)
self.configuration.iscsi_ip_address = '10.9.8.7'
self.fake_volumes_dir = tempfile.mkdtemp()
self.fake_id_1 = 'ed2c1fd4-5fc0-11e4-aa15-123b93f75cba'
self.fake_id_2 = 'ed2c2222-5fc0-11e4-aa15-123b93f75cba'
self.stubs.Set(self.configuration, 'safe_get', self.fake_safe_get)
self.target = tgt.TgtAdm(root_helper=utils.get_root_helper(),
configuration=self.configuration)
self.testvol_1 =\
{'project_id': self.fake_id_1,
'name': 'testvol',
'size': 1,
'id': self.fake_id_2,
'volume_type_id': None,
'provider_location': '10.9.8.7:3260 '
'iqn.2010-10.org.openstack:'
'volume-%s 0' % self.fake_id_2,
'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2'
'c76370d66b 2FE0CQ8J196R',
'provider_geometry': '512 512',
'created_at': timeutils.utcnow(),
'host': 'fake_host@lvm#lvm'}
self.expected_iscsi_properties = \
{'auth_method': 'CHAP',
'auth_password': '2FE0CQ8J196R',
'auth_username': 'stack-1-a60e2611875f40199931f2c76370d66b',
'encrypted': False,
'logical_block_size': '512',
'physical_block_size': '512',
'target_discovered': False,
'target_iqn': 'iqn.2010-10.org.openstack:volume-%s' %
self.fake_id_2,
'target_lun': 0,
'target_portal': '10.9.8.7:3260',
'volume_id': self.fake_id_2}
self.fake_iscsi_scan =\
('Target 1: iqn.2010-10.org.openstack:volume-83c2e877-feed-46be-8435-77884fe55b45\n' # noqa
' System information:\n'
' Driver: iscsi\n'
' State: ready\n'
' I_T nexus information:\n'
' LUN information:\n'
' LUN: 0\n'
' Type: controller\n'
' SCSI ID: IET 00010000\n'
' SCSI SN: beaf10\n'
' Size: 0 MB, Block size: 1\n'
' Online: Yes\n'
' Removable media: No\n'
' Prevent removal: No\n'
' Readonly: No\n'
' SWP: No\n'
' Thin-provisioning: No\n'
' Backing store type: null\n'
' Backing store path: None\n'
' Backing store flags:\n'
' LUN: 1\n'
' Type: disk\n'
' SCSI ID: IET 00010001\n'
' SCSI SN: beaf11\n'
' Size: 1074 MB, Block size: 512\n'
' Online: Yes\n'
' Removable media: No\n'
|
' Prevent removal: No\n'
' Readonly: No\n'
' SWP: No\n'
|
' Thin-provisioning: No\n'
' Backing store type: rdwr\n'
' Backing store path: /dev/stack-volumes-lvmdriver-1/volume-83c2e877-feed-46be-8435-77884fe55b45\n' # noqa
' Backing store flags:\n'
' Account information:\n'
' mDVpzk8cZesdahJC9h73\n'
' ACL information:\n'
' ALL"\n')
def fake_safe_get(self, value):
if value == 'volumes_dir':
return self.fake_volumes_dir
elif value == 'iscsi_protocol':
return self.configuration.iscsi_protocol
def test_iscsi_protocol(self):
self.assertEqual(self.target.iscsi_protocol, 'iscsi')
def test_get_target(self):
def _fake_execute(*args, **kwargs):
return self.fake_iscsi_scan, None
self.stubs.Set(utils,
'execute',
_fake_execute)
self.assertEqual('1',
self.target._get_target('iqn.2010-10.org.openstack:'
'volume-83c2e877-feed-46be-'
'8435-77884fe55b45'))
def test_verify_backing_lun(self):
def _fake_execute(*args, **kwargs):
return self.fake_iscsi_scan, None
self.stubs.Set(utils,
'execute',
_fake_execute)
self.assertTrue(self.target._verify_backing_lun(
'iqn.2010-10.org.openstack:'
'volume-83c2e877-feed-46be-'
'8435-77884fe55b45', '1'))
# Test the failure case
bad_scan = self.fake_iscsi_scan.replace('LUN: 1', 'LUN: 3')
def _fake_execute_bad_lun(*args, **kwargs):
return bad_scan, None
self.stubs.Set(utils,
'execute',
_fake_execute_bad_lun)
self.assertFalse(self.target._verify_backing_lun(
'iqn.2010-10.org.openstack:'
'volume-83c2e877-feed-46be-'
'8435-77884fe55b45', '1'))
def test_get_target_chap_auth(self):
persist_file =\
'<target iqn.2010-10.org.openstack:volume-83c2e877-feed-46be-8435-77884fe55b45>\n'\
' backing-store /dev/stack-volumes-lvmdriver-1/volume-83c2e877-feed-46be-8435-77884fe55b45\n'\
' driver iscsi\n'\
' incominguser otzLy2UYbYfnP4zXLG5z 234Zweo38VGBBvrpK9nt\n'\
' write-cache on\n'\
'</target>'
test_vol =\
'iqn.2010-10.org.openstack:'\
'volume-83c2e877-feed-46be-8435-77884fe55b45'
with open(os.path.join(self.fake_volumes_dir,
test_vol.split(':')[1]),
'wb') as tmp_file:
tmp_file.write(persist_file)
expected = ('otzLy2UYbYfnP4zXLG5z', '234Zweo38VGBBvrpK9nt')
self.assertEqual(expected, self.target._get_target_chap_auth(test_vol))
def test_create_iscsi_target(self):
def _fake_execute(*args, **kwargs):
return '', ''
self.stubs.Set(utils,
'execute',
_fake_execute)
self.stubs.Set(self.target,
'_get_target',
lambda x: 1)
self.stubs.Set(self.target,
'_verify_backing_lun',
lambda x, y: True)
test_vol = 'iqn.2010-10.org.openstack:'\
'volume-83c2e877-feed-46be-8435-77884fe55b45'
self.assertEqual(
1,
self.target.create_iscsi_target(
test_vol,
1,
0,
self.fake_volumes_dir))
def test_create_iscsi_target_already_exists(self):
def _fake_execute(*args, **kwargs):
if 'update' in args:
|
michellemorales/OpenMM | models/lfads/utils.py | Python | gpl-2.0 | 12,183 | 0.010671 |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from __future__ import print_function
import os
import h5py
import json
import numpy as np
import tensorflow as tf
def log_sum_exp(x_k):
"""Computes log \sum exp in a numerically stable way.
log ( sum_i exp(x_i) )
log ( sum_i exp(x_i - m + m) ), with m = max(x_i)
log ( sum_i exp(x_i - m)*exp(m) )
log ( sum_i exp(x_i - m) + m
Args:
x_k - k -dimensional list of arguments to log_sum_exp.
Returns:
log_sum_exp of the arguments.
"""
m = tf.reduce_max(x_k)
x1_k = x_k - m
u_k = tf.exp(x1_k)
z = tf.reduce_sum(u_k)
return tf.log(z) + m
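# A hedged numerical sketch (assumes a TensorFlow 1.x session, matching the API
# used in this file): a naive log(sum(exp(x))) overflows for large inputs,
# while log_sum_exp stays near max(x) + log(n).
def _log_sum_exp_demo():
    x_k = tf.constant([1000.0, 1000.0])
    with tf.Session() as sess:
        return sess.run(log_sum_exp(x_k))  # ~= 1000.6931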
def linear(x, out_size, do_bias=True, alpha=1.0, identity_if_possible=False,
normalized=False, name=None, collections=None):
"""Linear (affine) transformation, y = x W + b, for a variety of
configurations.
Args:
x: input The tensor to tranformation.
out_size: The integer size of non-batch output dimension.
do_bias (optional): Add a learnable bias vector to the operation.
alpha (optional): A multiplicative scaling for the weight initialization
of the matrix, in the form \alpha * 1/\sqrt{x.shape[1]}.
identity_if_possible (optional): just return identity,
if x.shape[1] == out_size.
normalized (optional): Option to divide out by the norms of the rows of W.
name (optional): The name prefix to add to variables.
collections (optional): List of additional collections. (Placed in
tf.GraphKeys.GLOBAL_VARIABLES already, so no need for that.)
Returns:
In the equation, y = x W + b, returns the tensorflow op that yields y.
"""
in_size = int(x.get_shape()[1]) # from Dimension(10) -> 10
stddev = alpha/np.sqrt(float(in_size))
mat_init = tf.random_normal_initializer(0.0, stddev)
wname = (name + "/W") if name else "/W"
if identity_if_possible and in_size == out_size:
# Sometimes linear layers are nothing more than size adapters.
return tf.identity(x, name=(wname+'_ident'))
W,b = init_linear(in_size, out_size, do_bias=do_bias, alpha=alpha,
normalized=normalized, name=name, collections=collections)
if do_bias:
return tf.matmul(x, W) + b
else:
return tf.matmul(x, W)
def init_linear(in_size, out_size, do_bias=True, mat_init_value=None,
bias_init_value=None, alpha=1.0, identity_if_possible=False,
normalized=False, name=None, collections=None):
"""Linear (affine) transformation, y = x W + b, for a variety of
configurations.
Args:
in_size: The integer size of the non-batch input dimension. [(x),y]
out_size: The integer size of non-batch output dimension. [x,(y)]
do_bias (optional): Add a learnable bias vector to the operation.
mat_init_value (optional): numpy constant for matrix initialization, if None
, do random, with additional parameters.
alpha (optional): A multiplicative scaling for the weight initialization
of the matrix, in the form \alpha * 1/\sqrt{x.shape[1]}.
identity_if_possible (optional): just return identity,
if x.shape[1] == out_size.
normalized (optional): Option to divide out by the norms of the rows of W.
name (optional): The name prefix to add to variables.
collections (optional): List of additional collections. (Placed in
tf.GraphKeys.GLOBAL_VARIABLES already, so no need for that.)
Returns:
In the equation, y = x W + b, returns the pair (W, b).
"""
if mat_init_value is not None and mat_init_value.shape != (in_size, out_size):
raise ValueError(
'Provided mat_init_value must have shape [%d, %d].'%(in_size, out_size))
if bias_init_value is not None and bias_init_value.shape != (1,out_size):
raise ValueError(
'Provided bias_init_value must have shape [1,%d].'%(out_size,))
if mat_init_value is None:
stddev = alpha/np.sqrt(float(in_size))
mat_init = tf.random_normal_initializer(0.0, stddev)
wname = (name + "/W") if name else "/W"
if identity_if_possible and in_size == out_size:
return (tf.constant(np.eye(in_size).astype(np.float32)),
tf.zeros(in_size))
# Note the use of get_variable vs. tf.Variable. this is because get_variable
# does not allow the initialization of the variable with a value.
if normalized:
w_collections = [tf.GraphKeys.GLOBAL_VARIABLES, "norm-variables"]
if collections:
w_collections += collections
if mat_init_value is not None:
w = tf.Variable(mat_init_value, name=wname, collections=w_collections)
else:
w = tf.get_variable(wname, [in_size, out_size], initializer=mat_init,
collections=w_collections)
w = tf.nn.l2_normalize(w, dim=0) # x W, so xW_j = \sum_i x_bi W_ij
else:
w_collections = [tf.GraphKeys.GLOBAL_VARIABLES]
if collections:
w_collections += collections
if mat_init_value is not None:
w = tf.Variable(mat_init_value, name=wname, collections=w_collections)
else:
w = tf.get_variable(wname, [in_size, out_size], initializer=mat_init,
collections=w_collections)
b = None
if do_bias:
b_collections = [tf.GraphKeys.GLOBAL_VARIABLES]
if collections:
b_collections += collections
bname = (name + "/b") if name else "/b"
if bias_init_value is None:
b = tf.get_variable(bname, [1, out_size],
initializer=tf.zeros_initializer(),
collections=b_collections)
else:
b = tf.Variable(bias_init_value, name=bname,
collections=b_collections)
return (w, b)
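# A hedged shape sketch (assumes a TF 1.x graph context; the placeholder shape
# and name are illustrative):
def _linear_demo():
    x = tf.placeholder(tf.float32, [None, 10])
    # Maps a [batch, 10] input to [batch, 3], creating linear_demo/W and
    # linear_demo/b through init_linear above.
    return linear(x, 3, name="linear_demo")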
def write_data(data_fname, data_dict, use_json=False, compression=None):
"""Write data in HD5F format.
Args:
data_fname: The filename of the file in which to write the data.
data_dict: The dictionary of data to write. The keys are strings
and the values are numpy arrays.
use_json (optional): human readable format for simple items
compression (optional): The compression to use for h5py (disabled by
default because the library borks on scalars, otherwise try 'gzip').
"""
dir_name = os.path.dirname(data_fname)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
if use_json:
the_file = open(data_fname,'w')
json.dump(data_dict, the_file)
the_file.close()
else:
try:
with h5py.File(data_fname, 'w') as hf:
for k, v in data_dict.items():
clean_k = k.replace('/', '_')
if clean_k is not k:
print('Warning: saving variable with name: ', k, ' as ', clean_k)
else:
print('Saving variable with name: ', clean_k)
hf.create_dataset(clean_k, data=v, compression=compression)
except IOError:
print("Cannot open %s for writing.", data_fname)
raise
def read_data(data_fname):
""" Read saved data in
HDF5 format.
Args:
data_fname: The filename of the file from which to read the data.
Returns:
A dictionary whose keys will vary depending on dataset (but should
always contain the keys 'train_data' and 'valid_data') and whose
values are numpy arrays.
"""
try:
with h5py.File(data_fname, 'r') as hf:
data_dict = {k: np.array(v) for k, v in hf.items()}
return data_dict
except IOError:
print("Cannot open %s for reading." % data_fname)
raise
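# A hedged round-trip sketch for the two helpers above (the file path is
# illustrative):
def _hdf5_roundtrip_demo(path="/tmp/lfads_io_demo.h5"):
    data = {"train_data": np.arange(6).reshape(2, 3),
            "valid_data": np.zeros((2, 3))}
    write_data(path, data)
    return read_data(path)["train_data"]  # same values as written above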
def write_datasets(data_path, data_fname_stem, dataset_dict, compression=None):
"""Write datasets in HD5F format.
This function assumes the dataset_dict is a mapping ( string
|
antoinecarme/pyaf | tests/artificial/transf_Anscombe/trend_PolyTrend/cycle_0/ar_12/test_artificial_1024_Anscombe_PolyTrend_0_12_20.py | Python | bsd-3-clause | 265 | 0.086792 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 0, transform = "Anscombe", sigma = 0.0, exog_count = 20, ar_order = 12);
|
kaushik94/sympy | sympy/external/tests/test_importtools.py | Python | bsd-3-clause | 1,405 | 0.004982 |
from sympy.external import import_module
from sympy.utilities.pytest import warns
# fixes issue that arose in addressing issue 6533
def test_no_stdlib_collections():
'''
make sure we get the right collections when it is not part of a
larger list
'''
import collections
matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['cm', 'collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
assert collections != matplotlib.collections
def test_no_stdlib_collections2():
'''
make sure we get the right collections when it is not part of a
larger list
'''
import collections
matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
assert collections != matplotlib.collections
def test_no_stdlib_collections3():
'''make sure we get the right collections with no catch'''
|
import collections
matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['cm', 'collections']},
min_module_version='1.1.0')
if matplotlib:
assert collections != matplotlib.collections
def test_min_module_version_python3_basestring_error():
with warns(UserWarning):
import_module('mpmath', min_module_version='1000.0.1')
|
kinow-io/kinow-python-sdk | kinow_client/models/prepayment_bonus_response.py | Python | apache-2.0 | 7,163 | 0.000419 |
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class PrepaymentBonusResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id=None, name=None, id_product=None, id_product_attribute=None, amount=None, type=None, date_add=None, date_upd=None):
"""
PrepaymentBonusResponse - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'int',
'name': 'str',
'id_product': 'int',
'id_product_attribute': 'int',
'amount': 'float',
'type': 'str',
'date_add': 'str',
'date_upd': 'str'
}
self.attribute_map = {
'id': 'id',
'name': 'name',
'id_product': 'id_product',
'id_product_attribute': 'id_product_attribute',
'amount': 'amount',
'type': 'type',
'date_add': 'date_add',
'date_upd': 'date_upd'
}
self._id = id
self._name = name
self._id_product = id_product
self._id_product_attribute = id_product_attribute
self._amount = amount
self._type = type
self._date_add = date_add
self._date_upd = date_upd
@property
def id(self):
"""
Gets the id of this PrepaymentBonusResponse.
:return: The id of this PrepaymentBonusResponse.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this PrepaymentBonusResponse.
:param id: The id of this PrepaymentBonusResponse.
:type: int
"""
self._id = id
@property
def name(self):
"""
Gets the name of this PrepaymentBonusResponse.
:return: The name of this PrepaymentBonusResponse.
:rtype: str
"""
|
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this PrepaymentBonusResponse.
:param name: The name of this PrepaymentBonusResponse.
:type: str
"""
self._name = name
@property
def id_product(self):
"""
Gets the id_product of this PrepaymentBonusResponse.
:return: The id_product of this PrepaymentBonusResponse.
:rtype: int
"""
return self._id_product
@id_product.setter
def id_product(self, id_product):
"""
Sets the id_product of this PrepaymentBonusResponse.
:param id_product: The id_product of this PrepaymentBonusResponse.
:type: int
"""
self._id_product = id_product
@property
def id_product_attribute(self):
"""
Gets the id_product_attribute of this PrepaymentBonusResponse.
:return: The id_product_attribute of this PrepaymentBonusResponse.
:rtype: int
"""
return self._id_product_attribute
@id_product_attribute.setter
def id_product_attribute(self, id_product_attribute):
"""
Sets the id_product_attribute of this PrepaymentBonusResponse.
:param id_product_attribute: The id_product_attribute of this PrepaymentBonusResponse.
:type: int
"""
self._id_product_attribute = id_product_attribute
@property
def amount(self):
"""
Gets the amount of this PrepaymentBonusResponse.
:return: The amount of this PrepaymentBonusResponse.
:rtype: float
"""
return self._amount
@amount.setter
def amount(self, amount):
"""
Sets the amount of this PrepaymentBonusResponse.
:param amount: The amount of this PrepaymentBonusResponse.
:type: float
"""
self._amount = amount
@property
def type(self):
"""
Gets the type of this PrepaymentBonusResponse.
:return: The type of this PrepaymentBonusResponse.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this PrepaymentBonusResponse.
:param type: The type of this PrepaymentBonusResponse.
:type: str
"""
self._type = type
@property
def date_add(self):
"""
Gets the date_add of this PrepaymentBonusResponse.
:return: The date_add of this PrepaymentBonusResponse.
:rtype: str
"""
return self._date_add
@date_add.setter
def date_add(self, date_add):
"""
Sets the date_add of this PrepaymentBonusResponse.
:param date_add: The date_add of this PrepaymentBonusResponse.
:type: str
"""
self._date_add = date_add
@property
def date_upd(self):
"""
Gets the date_upd of this PrepaymentBonusResponse.
:return: The date_upd of this PrepaymentBonusResponse.
:rtype: str
"""
return self._date_upd
@date_upd.setter
def date_upd(self, date_upd):
"""
Sets the date_upd of this PrepaymentBonusResponse.
:param date_upd: The date_upd of this PrepaymentBonusResponse.
:type: str
"""
self._date_upd = date_upd
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
viswimmer1/PythonGenerator | data/python_files/31890725/gamestate.py | Python | gpl-2.0 | 5,483 | 0.001094 |
import pygame
from pygame.locals import *
import random
import itertools
import state
import block
import tetros
import states
from text import Text
from colors import Colors
from engine import Engine
from playfield import Playfield
from countdown import Countdown
class GameState(state.State):
tetro_classes = (tetros.Leftsnake, tetros.Rightsnake, tetros.Stick,
tetros.Square, tetros.Tee, tetros.Leftgun,
tetros.Rightgun)
tetro_colors = (Colors.ORANGE, Colors.RED, Colors.BLUE, Colors.YELLOW)
def __init__(self):
super(GameState, self).__init__()
self.falling_tetro = None
# nrows should be 22
self.playfield = Playfield(10, 15)
self.playfield.rect.centerx = Engine.screenrect.centerx
self.playfield.rect.bottom = Engine.screenrect.bottom - block.SIZE
self.members.append(self.playfield)
#
# self.kill()
# # start a countdown, and revive ourself when done
# self.intro = Countdown(3000, 256, self.revive)
# self.intro.rect.center = Engine.screenrect.center
# self.members.append(self.intro)
def update(self):
# escape back to main menu
if Engine.is_just_pressed(K_ESCAPE):
Engine.switch(states.MainMenuState())
if not self.alive:
super(GameState, self).update()
return
# update falling tetro
# X movements
if self.falling_tetro is not None:
dx = 0
#
if Engine.pressed(K_LEFT):
dx = -block.SIZE
if Engine.pressed(K_RIGHT):
dx = block.SIZE
#
if dx != 0:
self.falling_tetro.move(dx, 0)
# move it back if any of it's block are now outside the
# playfield
for tblock in self.falling_tetro.members:
if (tblock.rect.x < self.playfield.rect.x
or tblock.rect.right > self.playfield.rect.right):
self.falling_tetro.move(-dx, 0)
break
else:
# not colliding with "walls" check against well blocks
well_blocks = self.playfield.get_well_blocks()
for tblock, wblock in itertools.product(
self.falling_tetro.members, well_blocks):
if tblock.rect.colliderect(wblock.rect):
# move it back and land
self.falling_tetro.move(-dx, 0)
break
else:
self.falling_tetro.col += 1 if dx > 0 else -1
# Y movements
if (self.falling_tetro is not None and self.falling_tetro.dropping):
self.falling_tetro.drop_delay_counter += Engine.elapsed
if self.falling_tetro.drop_delay_counter > self.falling_tetro.drop_delay:
# move and check for collisions
dy = block.SIZE
self.falling_tetro.move(0, dy)
#
well_blocks = self.playfield.get_well_blocks()
# collision with well bottom
for tblock in self.falling_tetro.members:
if tblock.rect.bottom > self.playfield.rect.bottom:
# move it back and land
self.falling_tetro.move(0, -dy)
if self.falling_tetro.row < 0:
self.kill()
return
self.falling_tetro.land(self.playfield)
self.falling_tetro = None
break
else:
# collision with blocks in the well
for tblock, wblock in itertools.product(
self.falling_tetro.members, well_blocks):
if tblock.rect.colliderect(wblock.rect):
# move it back and land
self.falling_tetro.move(0, -dy)
if self.falling_tetro.row < 0:
self.kill()
return
self.falling_tetro.land(self.playfield)
                                self.falling_tetro = None
break
else:
# update row
self.falling_tetro.row += 1
# reset counter
self.falling_tetro.drop_delay_counter = 0
# new tetro if needed
if self.falling_tetro is None:
color = random.choice(self.tetro_colors)
tetro_cls = random.choice(self.tetro_classes)
#
# not giving the startx-y may get the tetromino and playfield out
# of sync because startx-y default to zero
startx = self.playfield.rect.x + block.SIZE * 4
starty = self.playfield.rect.y - block.SIZE * 4
self.falling_tetro = tetro_cls(color,
startx=startx,
starty=starty,
drop_delay=50)
#
self.members.append(self.falling_tetro)
self.falling_tetro.drop()
super(GameState, self).update()
|
Yelp/pyes
|
tests/test_facets.py
|
Python
|
bsd-3-clause
| 6,193 | 0.004198 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
from .estestcase import ESTestCase
from pyes.facets import DateHistogramFacet
from pyes.filters import TermFilter, RangeFilter
from pyes.query import FilteredQuery, MatchAllQuery, Search
from pyes.utils import ESRange
import datetime
class FacetSearchTestCase(ESTestCase):
def setUp(self):
super(FacetSearchTestCase, self).setUp()
mapping = {u'parsedtext': {'boost': 1.0,
'index': 'analyzed',
'store': 'yes',
'type': u'string',
"term_vector": "with_positions_offsets"},
u'name': {'boost': 1.0,
'index': 'analyzed',
'store': 'yes',
'type': u'string',
"term_vector": "with_positions_offsets"},
u'title': {'boost': 1.0,
'index': 'analyzed',
'store': 'yes',
'type': u'string',
"term_vector": "with_positions_offsets"},
u'position': {'store': 'yes',
'type': u'integer'},
u'tag': {'store': 'yes',
'type': u'string'},
u'date': {'store': 'yes',
'type': u'date'},
u'uuid': {'boost': 1.0,
'index': 'not_analyzed',
'store': 'yes',
'type': u'string'}}
self.conn.create_index(self.index_name)
self.conn.put_mapping(self.document_type, {'properties': mapping}, self.index_name)
self.conn.index({"name": "Joe Tester",
"parsedtext": "Joe Testere nice guy",
"uuid": "11111",
"position": 1,
"tag": "foo",
"date": datetime.date(2011, 5, 16)},
self.index_name, self.document_type, 1)
self.conn.index({"name": " Bill Baloney",
"parsedtext": "Bill Testere nice guy",
"uuid": "22222",
"position": 2,
"tag": "foo",
"date": datetime.date(2011, 4, 16)},
self.index_name, self.document_type, 2)
self.conn.index({"name": "Bill Clinton",
"parsedtext": "Bill is not nice guy",
"uuid": "33333",
"position": 3,
"tag": "bar",
"date": datetime.date(2011, 4, 28)},
self.index_name, self.document_type, 3)
self.conn.refresh(self.index_name)
def test_terms_facet(self):
q = MatchAllQuery()
q = q.search()
q.facet.add_term_facet('tag')
resultset = self.conn.search(query=q, indices=self.index_name, doc_types=[self.document_type])
self.assertEquals(resultset.total, 3)
self.assertEquals(resultset.facets.tag.terms, [{u'count': 2, u'term': u'foo'},
{u'count': 1, u'term': u'bar'}])
q2 = MatchAllQuery()
q2 = q2.search()
q2.facet.add_term_facet('tag')
q3 = MatchAllQuery()
q3 = q3.search()
q3.facet.add_term_facet('tag')
self.assertEquals(q2, q3)
q4 = MatchAllQuery()
q4 = q4.search()
q4.facet.add_term_facet('bag')
self.assertNotEquals(q2, q4)
def test_terms_facet_filter(self):
q = MatchAllQuery()
q = FilteredQuery(q, TermFilter('tag', 'foo'))
q = q.search()
q.facet.add_term_facet('tag')
resultset = self.conn.search(query=q, indices=self.index_name, doc_types=[self.document_type])
self.assertEquals(resultset.total, 2)
self.assertEquals(resultset.facets['tag']['terms'], [{u'count': 2, u'term': u'foo'}])
self.assertEquals(resultset.facets.tag.terms, [{u'count': 2, u'term': u'foo'}])
q2 = MatchAllQuery()
q2 = FilteredQuery(q2, TermFilter('tag', 'foo'))
q2 = q2.search()
q2.facet.add_term_facet('tag')
q3 = MatchAllQuery()
q3 = FilteredQuery(q3, TermFilter('tag', 'foo'))
q3 = q3.search()
q3.facet.add_term_facet('tag')
self.assertEquals(q2, q3)
q4 = MatchAllQuery()
q4 = FilteredQuery(q4, TermFilter('tag', 'foo'))
q4 = q4.search()
q4.facet.add_term_facet('bag')
self.assertNotEquals(q3, q4)
def test_date_facet(self):
q = MatchAllQuery()
q = q.search()
q.facet.facets.append(DateHistogramFacet('date_facet',
field='date',
interval='month'))
resultset = self.conn.search(query=q, indices=self.index_name, doc_types=[self.document_type])
self.assertEquals(resultset.total, 3)
self.assertEquals(resultset.facets.date_facet.entries, [{u'count': 2, u'time': 1301616000000},
{u'count': 1, u'time': 1304208000000}])
self.assertEquals(datetime.datetime.fromtimestamp(1301616000000 / 1000.).date(),
datetime.date(2011, 04, 01))
self.assertEquals(datetime.datetime.fromtimestamp(1304208000000 / 1000.).date(),
datetime.date(2011, 05, 01))
def test_date_facet_filter(self):
q = MatchAllQuery()
q = FilteredQuery(q, RangeFilter(qrange=ESRange('date',
datetime.date(2011, 4, 1),
datetime.date(2011, 5, 1),
include_upper=False)))
q = q.search()
q.facet.facets.append(DateHistogramFacet('date_facet',
field='date',
interval='month'))
resultset = self.conn.search(query=q, indices=self.index_name, doc_types=[self.document_type])
self.assertEquals(resultset.total, 2)
self.assertEquals(resultset.facets['date_facet']['entries'], [{u'count': 2, u'time': 1301616000000}])
if __name__ == "__main__":
unittest.main()
|
be-cloud-be/horizon-addons
|
server/addons/project/project.py
|
Python
|
agpl-3.0
| 53,981 | 0.005131 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime, date
from lxml import etree
import time
from openerp import api
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.exceptions import UserError
class project_task_type(osv.osv):
_name = 'project.task.type'
_description = 'Task Stage'
_order = 'sequence'
_columns = {
'name': fields.char('Stage Name', required=True, translate=True),
'description': fields.text('Description', translate=True),
'sequence': fields.integer('Sequence'),
'project_ids': fields.many2many('project.project', 'project_task_type_rel', 'type_id', 'project_id', 'Projects'),
'legend_priority': fields.char(
'Priority Management Explanation', translate=True,
help='Explanation text to help users using the star and priority mechanism on stages or issues that are in this stage.'),
'legend_blocked': fields.char(
'Kanban Blocked Explanation', translate=True,
help='Override the default value displayed for the blocked state for kanban selection, when the task or issue is in that stage.'),
'legend_done': fields.char(
'Kanban Valid Explanation', translate=True,
help='Override the default value displayed for the done state for kanban selection, when the task or issue is in that stage.'),
'legend_normal': fields.char(
'Kanban Ongoing Explanation', translate=True,
help='Override the default value displayed for the normal state for kanban selection, when the task or issue is in that stage.'),
'fold': fields.boolean('Folded in Tasks Pipeline',
help='This stage is folded in the kanban view when '
                                    'there are no records in that stage to display.'),
}
def _get_default_project_ids(self, cr, uid, ctx=None):
if ctx is None:
ctx = {}
default_project_id = ctx.get('default_project_id')
return [default_project_id] if default_project_id else None
_defaults = {
'sequence': 1,
        'project_ids': _get_default_project_ids,
}
_order = 'sequence'
class project(osv.osv):
_name = "project.project"
_description = "Project"
_inherits = {'account.analytic.account': "analytic_account_id",
"mail.alias": "alias_id"}
_inherit = ['mail.thread', 'ir.needaction_mixin']
_period_number = 5
def _auto_init(self, cr, context=None):
""" Installation hook: aliases, project.project """
# create aliases for all projects and avoid constraint errors
alias_context = dict(context, alias_model_name='project.task')
return self.pool.get('mail.alias').migrate_to_alias(cr, self._name, self._table, super(project, self)._auto_init,
'project.task', self._columns['alias_id'], 'id', alias_prefix='project+', alias_defaults={'project_id':'id'}, context=alias_context)
def onchange_partner_id(self, cr, uid, ids, part=False, context=None):
partner_obj = self.pool.get('res.partner')
val = {}
if not part:
return {'value': val}
if 'pricelist_id' in self.fields_get(cr, uid, context=context):
pricelist = partner_obj.read(cr, uid, part, ['property_product_pricelist'], context=context)
pricelist_id = pricelist.get('property_product_pricelist', False) and pricelist.get('property_product_pricelist')[0] or False
val['pricelist_id'] = pricelist_id
return {'value': val}
def unlink(self, cr, uid, ids, context=None):
alias_ids = []
mail_alias = self.pool.get('mail.alias')
analytic_account_to_delete = set()
for proj in self.browse(cr, uid, ids, context=context):
if proj.tasks:
raise UserError(_('You cannot delete a project containing tasks. You can either delete all the project\'s tasks and then delete the project or simply deactivate the project.'))
elif proj.alias_id:
alias_ids.append(proj.alias_id.id)
if proj.analytic_account_id and not proj.analytic_account_id.line_ids:
analytic_account_to_delete.add(proj.analytic_account_id.id)
res = super(project, self).unlink(cr, uid, ids, context=context)
mail_alias.unlink(cr, uid, alias_ids, context=context)
self.pool['account.analytic.account'].unlink(cr, uid, list(analytic_account_to_delete), context=context)
return res
def _get_attached_docs(self, cr, uid, ids, field_name, arg, context):
res = {}
attachment = self.pool.get('ir.attachment')
task = self.pool.get('project.task')
for id in ids:
project_attachments = attachment.search(cr, uid, [('res_model', '=', 'project.project'), ('res_id', '=', id)], context=context, count=True)
task_ids = task.search(cr, uid, [('project_id', '=', id)], context=context)
task_attachments = attachment.search(cr, uid, [('res_model', '=', 'project.task'), ('res_id', 'in', task_ids)], context=context, count=True)
res[id] = (project_attachments or 0) + (task_attachments or 0)
return res
def _task_count(self, cr, uid, ids, field_name, arg, context=None):
if context is None:
context = {}
res={}
for project in self.browse(cr, uid, ids, context=context):
res[project.id] = len(project.task_ids)
return res
def _task_needaction_count(self, cr, uid, ids, field_name, arg, context=None):
Task = self.pool['project.task']
res = dict.fromkeys(ids, 0)
projects = Task.read_group(cr, uid, [('project_id', 'in', ids), ('message_needaction', '=', True)], ['project_id'], ['project_id'], context=context)
res.update({project['project_id'][0]: int(project['project_id_count']) for project in projects})
return res
def _get_alias_models(self, cr, uid, context=None):
""" Overriden in project_issue to offer more options """
return [('project.task', "Tasks")]
def _get_visibility_selection(self, cr, uid, context=None):
""" Overriden in portal_project to offer more options """
return [('portal', _('Customer Project: visible in portal if the customer is a follower')),
('employees', _('All Employees Project: all employees can access')),
('followers', _('Private Project: followers only'))]
def attachment_tree_view(self, cr, uid, ids, context):
task_ids = self.pool.get('project.task').search(cr, uid, [('project_id', 'in', ids)])
domain = [
'|',
'&', ('res_model', '=', 'project.project'), ('res_id', 'in', ids),
'&', ('res_model', '=', 'project.task'), ('res_id', 'in', task_ids)]
res_id = ids and ids[0] or False
return {
'name': _('Attachments'),
'domain': domain,
'res_model': 'ir.attachment',
'type': 'ir.actions.act_window',
'view_id': False,
'view_mode': 'kanban,tree,form',
'view_type': 'form',
'help': _('''<p class="oe_view_nocontent_create">
Documents are attached to the tasks and issues of your project.</p><p>
Send messages or log internal notes with attachments to link
documents to your project.
</p>'''),
'limit': 80,
'context': "{'default_res_model': '%s','default_res_id': %d}" % (self._name, res_id)
}
# Lambda indirection method to avoid passing a copy of the overridable method when declaring the field
_alias_models = lambda self, *args, **kwargs: self._get_alias_models(*args, **kwargs)
_visibility_selection = lambda self, *args, **kwargs: self._get_visibility_selection(*args, **kwargs)
_columns = {
'active': fields.boolean('Active', help="If the active field i
|
psykohack/crowdsource-platform
|
crowdsourcing/models.py
|
Python
|
mit
| 21,348 | 0.001499 |
from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone
from oauth2client.django_orm import FlowField, CredentialsField
from crowdsourcing.utils import get_delimiter
import pandas as pd
import os
class RegistrationModel(models.Model):
user = models.OneToOneField(User)
activation_key = models.CharField(max_length=40)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class PasswordResetModel(models.Model):
user = models.OneToOneField(User)
reset_key = models.CharField(max_length=40)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Region(models.Model):
name = models.CharField(max_length=64, error_messages={'required': 'Please specify the region!', })
code = models.CharField(max_length=16, error_messages={'required': 'Please specify the region code!', })
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Country(models.Model):
name = models.CharField(max_length=64, error_messages={'required': 'Please specify the country!', })
code = models.CharField(max_length=8, error_messages={'required': 'Please specify the country code!', })
region = models.ForeignKey(Region)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
def __unicode__(self):
return u'%s' % (self.name)
class City(models.Model):
name = models.CharField(max_length=64, error_messages={'required': 'Please specify the city!', })
country = models.ForeignKey(Country)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
def __unicode__(self):
return u'%s' % (self.name)
class Address(models.Model):
street = models.CharField(max_length=128, error_messages={'required': 'Please specify the street name!', })
country = models.ForeignKey(Country)
city = models.ForeignKey(City)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
def __unicode__(self):
return u'%s, %s, %s' % (self.street, self.city, self.country)
class Role(models.Model):
name = models.CharField(max_length=32, unique=True, error_messages={'required': 'Please specify the role name!',
'unique': 'The role %(value)r already exists. Please provide another name!'})
is_active = models.BooleanField(default=True)
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Language(models.Model):
name = models.CharField(max_length=64, error_messages={'required': 'Please specify the language!'})
iso_code = models.CharField(max_length=8)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class UserProfile(models.Model):
user = models.OneToOneField(User)
gender_choices = (('M', 'Male'), ('F', 'Female'))
gender = models.CharField(max_length=1, choices=gender_choices)
address = models.ForeignKey(Address, null=True)
birthday = models.DateField(null=True, error_messages={'invalid': "Please enter a correct date format"})
nationality = models.ManyToManyField(Country, through='UserCountry')
verified = models.BooleanField(default=False)
picture = models.BinaryField(null=True)
friends = models.ManyToManyField('self', through='Friendship',
symmetrical=False)
roles = models.ManyToManyField(Role, through='UserRole')
deleted = models.BooleanField(default=False)
languages = models.ManyToManyField(Language, through='UserLanguage')
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class UserCountry(models.Model):
country = models.ForeignKey(Country)
user = models.ForeignKey(UserProfile)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Skill(models.Model):
name = models.CharField(max_length=128, error_messages={'required': "Please enter the skill name!"})
description = models.CharField(max_length=512, error_messages={'required': "Please enter the skill description!"})
verified = models.BooleanField(default=False)
parent = models.ForeignKey('self', null=True)
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Worker(models.Model):
profile = models.OneToOneField(UserProfile)
skills = models.ManyToManyField(Skill, through='WorkerSkill')
deleted = models.BooleanField(default=False)
alias = models.CharField(max_length=32, error_messages={'required': "Please enter an alias!"})
class WorkerSkill(models.Model):
worker = models.ForeignKey(Worker)
skill = models.ForeignKey(Skill)
level = models.IntegerField(null=True)
verified = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
unique_together = ('worker', 'skill')
class Requester(models.Model):
profile = models.OneToOneField(UserProfile)
alias = models.CharField(max_length=32, error_messages={'required': "Please enter an alias!"})
class UserRole(models.Model):
user_profile = models.ForeignKey(UserProfile)
role = models.ForeignKey(Role)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Friendship(models.Model):
user_source = models.ForeignKey(UserProfile, related_name='user_source')
user_target = models.ForeignKey(UserProfile, related_name='user_target')
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Category(models.Model):
name = models.CharField(max_length=128, error_messages={'required': "Please enter the category name!"})
parent = models.ForeignKey('self', null=True)
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Project(models.Model):
name = models.CharField(max_length=128, error_messages={'required': "Please enter the project name!"})
start_date = models.DateTimeField(auto_now_add=True, auto_now=False)
end_date = models.DateTimeField(auto_now_add=True, auto_now=False)
owner = models.ForeignKey(Requester, related_name='project_owner')
description = models.CharField(max_length=1024, default='')
collaborators = models.ManyToManyField(Requester, through='ProjectRequester')
keywords = models.TextField(null=True)
save_to_drive = models.BooleanField(default=False)
deleted = models.BooleanField(default=False)
categories = models.ManyToManyField(Category, through='ProjectCategory')
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class ProjectRequester(models.Model):
"""
Tracks the list
|
wizzat/wizzat.py
|
tests/testcase.py
|
Python
|
mit
| 630 | 0.022222 |
from __future__ import (absolute_import, division, print_function, unicode_literals)
from builtins import *
import wizzat.testutil
import wizzat.pghelper
class DBTestCase(wizzat.testutil.TestCase):
db_info = {
'host' : 'localhost',
'port' : 5432,
'user' : 'wizzat',
'password' : 'wizzat',
'database' : 'wizzatpy_testdb',
        'autocommit' : False,
}
db_mgr = wizzat.pghelper.ConnMgr(db_info,
        max_objs = 3,
)
def conn(self, name = 'testconn'):
conn = self.db_mgr.name(name)
conn.autocommit = True
return conn
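# --- Editorial usage sketch (not part of the original repo) -----------------
# Exercises DBTestCase.conn() above. Assumes wizzat.testutil.TestCase exposes
# the standard unittest assertions and that ConnMgr.name() hands back a
# DB-API/psycopg2-style connection; the table-free query keeps it self-contained.
class ConnSmokeTest(DBTestCase):
    def test_select_literal(self):
        conn = self.conn()            # named, autocommitting pooled connection
        cur = conn.cursor()
        cur.execute("SELECT 1")       # trivial round-trip to verify connectivity
        self.assertEqual(cur.fetchone()[0], 1)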
|
berquist/programming_party
|
eric/project12/davidson.py
|
Python
|
mpl-2.0
| 2,084 | 0.00144 |
"""A block Davidson solver for finding a fixed number of eigenvalues.
Adapted from https://joshuagoings.com/2013/08/23/davidsons-method/
"""
import time
from typing import Tuple
import numpy as np
from tqdm import tqdm
def davidson(A: np.ndarray, k: int, eig: int) -> Tuple[np.ndarray, np.ndarray]:
assert len(A.shape) == 2
assert A.shape[0] == A.shape[1]
n = A.shape[0]
## set up subspace and trial vectors
# set of k unit vectors as guess
t = np.eye(n, k)
# hold guess vectors
V = np.zeros((n, n))
I = np.eye(n)
    for m in tqdm(range(k, mmax, k)):
if m <= k:
for j in range(k):
V[:, j] = t[:, j] / np.linalg.norm(t[:, j])
theta_old = 1
elif m > k:
theta_old = theta[:eig]
V, R = np.linalg.qr(V)
T = V[:, : (m + 1)].T @ A @ V[:, : (m + 1)]
THETA, S = np.linalg.eig(T)
idx = THETA.argsort()
theta = THETA[idx]
s = S[:, idx]
for j in range(k):
w = (A - theta[j] * I) @ V[:, : (m + 1)] @ s[:, j]
q = w / (theta[j] - A[j, j])
V[:, (m + j + 1)] = q
norm = np.linalg.norm(theta[:eig] - theta_old)
if norm < tol:
break
return theta, V
if __name__ == "__main__":
# dimension of problem
n = 1200
# convergence tolerance
tol = 1e-8
# maximum number of iterations
mmax = n // 2
## set up fake Hamiltonian
sparsity = 1.0e-4
A = np.zeros((n, n))
for i in range(0, n):
A[i, i] = i + 1
A = A + sparsity * np.random.randn(n, n)
A = (A.T + A) / 2
# number of initial guess vectors
k = 8
# number of eigenvalues to solve
eig = 4
start_davidson = time.time()
theta, V = davidson(A, k, eig)
end_davidson = time.time()
print(f"davidson = {theta[:eig]}; {end_davidson - start_davidson} seconds")
start_numpy = time.time()
E, Vec = np.linalg.eig(A)
E = np.sort(E)
end_numpy = time.time()
print(f"numpy = {E[:eig]}; {end_numpy - start_numpy} seconds")
|
happz/settlers
|
tests/units/tournaments/randomized.py
|
Python
|
mit
| 746 | 0.009383 |
import tests.units.tournaments
import lib.datalayer
import games
import games.settlers
import tournaments
import hruntime
from tests import *
from tests.units.tournaments import create_and_populate_tournament
class Tests(TestCase):
@classmethod
def setup_class(cls):
super(Tests, cls).setup_class()
hruntime.dbroot = lib.datalayer.Root()
        hruntime.dbroot.users['SYSTEM'] = tests.DummyUser('SYSTEM')
def test_sanity(self):
patched_events = {
'tournament.Created': 2,
'tournament.PlayerJoined': 12,
'game.GameCreated': 8,
'game.PlayerJoined': 4,
'game.PlayerInvited': 8
}
with EventPatcherWithCounter(patched_events):
T = create_and_populate_tournament(engine = 'randomized')
|
ecreall/nova-ideo
|
novaideo/views/smart_folder_management/remove_smart_folder.py
|
Python
|
agpl-3.0
| 2,248 | 0.00089 |
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import deform
from pyramid.view import view_config
from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS
from pontus.default_behavior import Cancel
from pontus.form import FormView
from pontus.view import BasicView
from pontus.view_operation import MultipleView
from novaideo.content.processes.smart_folder_management.behaviors import (
RemoveSmartFolder)
from novaideo.content.smart_folder import SmartFolder
from novaideo import _
class RemoveSmartFolderViewStudyReport(BasicView):
title = 'Alert for remove'
name = 'alertforremove'
template = 'novaideo:views/smart_folder_management/templates/alert_smartfolder_remove.pt'
def update(self):
result = {}
values = {'context': self.context}
body = self.content(args=values, template=self.template)['body']
item = self.adapt_item(body, self.viewid)
result['coordinates'] = {self.coordinates: [item]}
return result
class RemoveSmartFolderView(FormView):
title = _('Remove')
name = 'removesmartfolderform'
formid = 'formremovesmartfolder'
behaviors = [RemoveSmartFolder, Cancel]
validate_behaviors = False
def before_update(self):
self.action = self.request.resource_url(
self.context, 'novaideoapi',
query={'op': 'update_action_view',
'node_id': RemoveSmartFolder.node_definition.id})
self.schema.widget = deform.widget.FormWidget(
css_class='deform novaideo-ajax-form')
@view_config(
name='removesmartfolder',
context=SmartFolder,
renderer='pontus:templates/views_templates/grid.pt',
)
class RemoveSmartFolderViewMultipleView(MultipleView):
title = _('Remove the topic of interest')
    name = 'removesmartfolder'
viewid = 'removesmartfolder'
template = 'pontus:templates/views_templates/simple_multipleview.pt'
views = (RemoveSmartFolderViewStudyReport, RemoveSmartFolderView)
validators = [RemoveSmartFolder.get_validator()]
DEFAULTMAPPING_ACTIONS_VIEWS.update(
    {RemoveSmartFolder: RemoveSmartFolderViewMultipleView})
|
jonathf/matlab2cpp
|
src/matlab2cpp/rules/_cell.py
|
Python
|
bsd-3-clause
| 495 | 0.00404 |
from .variables import *
def Cell(node):
# cells must stand on own line
if node.parent.cls not in ("Assign", "Assigns"):
node.auxiliary("cell")
return "{", ",", "}"
def Assign(node):
    if node.name == 'varargin':
out = "%(0)s = va_arg(varargin, " + node[0].type + ") ;"
    else:
out = "%(0)s.clear() ;"
# append to cell, one by one
for elem in node[1]:
out = out + "\n%(0)s.push_back(" + str(elem) + ") ;"
return out
|
FlowFX/unkenmathe.de
|
src/um/exercises/migrations/0005_auto_20170826_0942.py
|
Python
|
agpl-3.0
| 578 | 0.00173 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-26 09:42
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('exercises', '0004_exercise_author'),
]
operations = [
migrations.AlterField(
model_name='exercise',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
),
]
|
gnumdk/eolie
|
eolie/helper_dbus.py
|
Python
|
gpl-3.0
| 3,499 | 0 |
# Copyright (c) 2017 Cedric Bellegarde <cedric.bellegarde@adishatz.org>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gio
from eolie.define import PROXY_BUS, PROXY_PATH, PROXY_INTERFACE, El
class DBusHelper:
"""
Simpler helper for DBus
"""
def __init__(self):
self.__signals = {}
def call(self, call, page_id, dbus_args=None, callback=None, *args):
"""
Call function
@param call as str
            @param page_id as int
@param dbus_args as GLib.Variant()/None
@param callback as function
"""
try:
bus = El().get_dbus_connection()
proxy_bus = PROXY_BUS % page_id
Gio.DBusProxy.new(bus, Gio.DBusProxyFlags.NONE, None,
proxy_bus,
PROXY_PATH,
PROXY_INTERFACE, None,
self.__on_get_proxy,
call, dbus_args, callback, *args)
except Exception as e:
print("DBusHelper::call():", e)
def connect(self, signal, callback, page_id):
"""
Connect callback to object signals
@param signal as str
@param callback as function
@param page_id as int
"""
try:
bus = El().get_dbus_connection()
proxy_bus = PROXY_BUS % page_id
subscribe_id = bus.signal_subscribe(None, proxy_bus, signal,
PROXY_PATH, None,
Gio.DBusSignalFlags.NONE,
callback)
self.__signals[page_id] = (bus, subscribe_id)
except Exception as e:
print("DBusHelper::connect():", e)
def disconnect(self, page_id):
"""
Disconnect signal
@param page_id as int
"""
if page_id in self.__signals.keys():
(bus, subscribe_id) = self.__signals[page_id]
bus.signal_unsubscribe(subscribe_id)
del self.__signals[page_id]
#######################
# PRIVATE #
#######################
def __on_get_proxy(self, source, result, call, dbus_args, callback, *args):
"""
Launch call and connect it to callback
@param source as GObject.Object
@param result as Gio.AsyncResult
@param call as str
@param dbus_args as GLib.Variant()/None
@param callback as function
"""
try:
proxy = source.new_finish(result)
proxy.call(call, dbus_args, Gio.DBusCallFlags.NO_AUTO_START,
1000, None, callback, *args)
except Exception as e:
print("DBusHelper::__on_get_proxy():", e)
callback(None, None, *args)
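# --- Editorial usage sketch (not part of Eolie) ------------------------------
# Shows how DBusHelper.call() is typically driven. "GetSelection" and the page
# id are illustrative names only, and El() must already point at a running
# Eolie application for the proxy lookup to succeed.
def _on_result(proxy, result, *args):
    # Gio hands back (source, async result); a real handler would usually
    # finish the call, e.g. proxy.call_finish(result). On error this helper
    # invokes the callback with (None, None).
    print("proxy answered:", result)

helper = DBusHelper()
helper.call("GetSelection", page_id=42, dbus_args=None, callback=_on_result)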
|
projectarkc/arkc-server
|
arkcserver/pyotp/totp.py
|
Python
|
gpl-2.0
| 2,787 | 0.001435 |
from __future__ import print_function, unicode_literals, division, absolute_import
import datetime
import time
import ntplib
from pyotp import utils
from pyotp.otp import OTP
class TOTP(OTP):
systime_offset = None
def __init__(self, *args, **kwargs):
"""
@option options [Integer] interval (30) the time interval in seconds
        for OTP. This defaults to 30, which is standard.
"""
        self.interval = kwargs.pop('interval', 30)
if self.systime_offset is None:
try:
c = ntplib.NTPClient()
TOTP.systime_offset = int(c.request(
'pool.ntp.org', version=3).offset)
except Exception:
self.systime_offset = 0
super(TOTP, self).__init__(*args, **kwargs)
def at(self, for_time, counter_offset=0):
"""
Accepts either a Unix timestamp integer or a Time object.
        Time objects will be adjusted to UTC automatically
@param [Time/Integer] time the time to generate an OTP for
@param [Integer] counter_offset an amount of ticks to add to the time counter
"""
if not isinstance(for_time, datetime.datetime):
for_time = datetime.datetime.fromtimestamp(int(for_time))
return self.generate_otp(self.timecode(for_time) + counter_offset)
def now(self):
"""
Generate the current time OTP
@return [Integer] the OTP as an integer
"""
return self.generate_otp(self.timecode(datetime.datetime.now()))
def verify(self, otp, for_time=None, valid_window=0):
"""
Verifies the OTP passed in against the current time OTP
@param [String/Integer] otp the OTP to check against
@param [Integer] valid_window extends the validity to this many counter ticks before and after the current one
"""
if for_time is None:
for_time = datetime.datetime.now()
if valid_window:
for i in range(-valid_window, valid_window + 1):
if utils.strings_equal(str(otp), str(self.at(for_time, i))):
return True
return False
return utils.strings_equal(str(otp), str(self.at(for_time)))
def provisioning_uri(self, name, issuer_name=None):
"""
Returns the provisioning URI for the OTP
This can then be encoded in a QR Code and used
to provision the Google Authenticator app
@param [String] name of the account
@return [String] provisioning uri
"""
return utils.build_uri(self.secret, name, issuer_name=issuer_name)
def timecode(self, for_time):
i = time.mktime(for_time.timetuple()) + self.systime_offset
return int(i / self.interval)
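# --- Editorial usage sketch (not part of the original module) ----------------
# Assumes the OTP base class accepts a base32 secret, as in upstream pyotp;
# the secret below is illustrative. Note that the first instantiation attempts
# an NTP query (pool.ntp.org) and silently falls back to a zero offset offline.
totp = TOTP("JBSWY3DPEHPK3PXP", interval=30)
code = totp.now()                          # code for the NTP-corrected current time
assert totp.verify(code, valid_window=1)   # tolerate one 30 s step of clock drift
print(totp.provisioning_uri("alice@example.com", issuer_name="example"))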
|
treycucco/py-utils
|
idb/spreadsheet/csv.py
|
Python
|
bsd-3-clause
| 862 | 0.012761 |
import csv
from . import WorksheetBase, WorkbookBase, CellMode
class CSVWorksheet(WorksheetBase):
def __init__(self, raw_sheet, ordinal):
super().__init__(raw_sheet, ordinal)
self.name = "Sheet 1"
self.nrows = len(self.raw_sheet)
self.ncols = max([len(r) for r in self.raw_sheet])
def parse_cell(self, cell, coords, cell_mode=CellMode.cooked):
try:
return int(cell)
except ValueError:
pass
try:
return float(cell)
except ValueError:
pass
# TODO Check for dates?
return cell
    def get_row(self, row_index):
return self.raw_sheet[row_index]
class CSVWorkbook(WorkbookBase):
def iterate_sheets(self):
with open(self.filename, "r") as rf:
reader = csv.reader(rf)
yield list(reader)
def get_worksheet(self, raw_sheet, index):
return CSVWorksheet(raw_sheet, index)
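# --- Editorial usage sketch (not part of the original module) ----------------
# Builds a CSVWorksheet directly from parsed rows, using only the API visible
# in this file; "prices.csv" is an illustrative path, and WorksheetBase.__init__
# is assumed to simply record the raw sheet and ordinal.
with open("prices.csv") as rf:
    rows = list(csv.reader(rf))
sheet = CSVWorksheet(rows, ordinal=0)
print(sheet.name, sheet.nrows, sheet.ncols)
print(sheet.parse_cell(sheet.get_row(0)[0], (0, 0)))   # numeric strings come back as int/float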
|
bdfoster/blumate
|
tests/components/test_graphite.py
|
Python
|
mit
| 8,152 | 0 |
"""The tests for the Graphite component."""
import socket
import unittest
from unittest import mock
import blumate.core as ha
import blumate.components.graphite as graphite
from blumate.const import (
EVENT_STATE_CHANGED,
EVENT_BLUMATE_START, EVENT_BLUMATE_STOP,
STATE_ON, STATE_OFF)
from tests.common import get_test_home_assistant
class TestGraphite(unittest.TestCase):
"""Test the Graphite component."""
def setup_method(self, method):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.hass.config.latitude = 32.87336
self.hass.config.longitude = 117.22743
self.gf = graphite.GraphiteFeeder(self.hass, 'foo', 123, 'bm')
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
@mock.patch('blumate.components.graphite.GraphiteFeeder')
def test_minimal_config(self, mock_gf):
"""Test setup with minimal configuration."""
self.assertTrue(graphite.setup(self.hass, {}))
mock_gf.assert_called_once_with(self.hass, 'localhost', 2003, 'bm')
@mock.patch('blumate.components.graphite.GraphiteFeeder')
def test_full_config(self, mock_gf):
"""Test setup with full configuration."""
config = {
'graphite': {
'host': 'foo',
'port': 123,
'prefix': 'me',
}
}
self.assertTrue(graphite.setup(self.hass, config))
mock_gf.assert_called_once_with(self.hass, 'foo', 123, 'me')
@mock.patch('blumate.components.graphite.GraphiteFeeder')
def test_config_bad_port(self, mock_gf):
"""Test setup with invalid port."""
config = {
'graphite': {
'host': 'foo',
'port': 'wrong',
}
}
self.assertFalse(graphite.setup(self.hass, config))
self.assertFalse(mock_gf.called)
def test_subscribe(self):
"""Test the subscription."""
fake_hass = mock.MagicMock()
gf = graphite.GraphiteFeeder(fake_hass, 'foo', 123, 'bm')
fake_hass.bus.listen_once.has_calls([
mock.call(EVENT_BLUMATE_START, gf.start_listen),
mock.call(EVENT_BLUMATE_STOP, gf.shutdown),
])
fake_hass.bus.listen.assert_called_once_with(
EVENT_STATE_CHANGED, gf.event_listener)
def test_start(self):
"""Test the start."""
with mock.patch.object(self.gf, 'start') as mock_start:
self.gf.start_listen('event')
mock_start.assert_called_once_with()
def test_shutdown(self):
"""Test the shutdown."""
with mock.patch.object(self.gf, '_queue') as mock_queue:
self.gf.shutdown('event')
mock_queue.put.assert_called_once_with(self.gf._quit_object)
def test_event_listener(self):
"""Test the event listener."""
with mock.patch.object(self.gf, '_queue') as mock_queue:
self.gf.event_listener('foo')
mock_queue.put.assert_called_once_with('foo')
@mock.patch('time.time')
def test_report_attributes(self, mock_time):
"""Test the reporting with attri
|
butes."""
mock_time.return_value = 12345
attrs = {'foo': 1,
'bar': 2.0,
'baz': True,
'bat': 'NaN',
}
expected = [
'bm.entity.state 0.000000 12345',
'bm.entity.foo 1.000000 12345',
'bm.entity.bar 2.000000 12345',
'bm.entity.baz 1.000000 12345',
]
state = mock.MagicMock(state=0, attributes=attrs)
with mock.patch.object(self.gf, '_send_to_graphite') as mock_send:
self.gf._report_attributes('entity', state)
actual = mock_send.call_args_list[0][0][0].split('\n')
self.assertEqual(sorted(expected), sorted(actual))
@mock.patch('time.time')
def test_report_with_string_state(self, mock_time):
"""Test the reporting with strings."""
mock_time.return_value = 12345
expected = [
'bm.entity.foo 1.000000 12345',
'bm.entity.state 1.000000 12345',
]
state = mock.MagicMock(state='above_horizon', attributes={'foo': 1.0})
with mock.patch.object(self.gf, '_send_to_graphite') as mock_send:
self.gf._report_attributes('entity', state)
actual = mock_send.call_args_list[0][0][0].split('\n')
self.assertEqual(sorted(expected), sorted(actual))
@mock.patch('time.time')
def test_report_with_binary_state(self, mock_time):
"""Test the reporting with binary state."""
mock_time.return_value = 12345
state = ha.State('domain.entity', STATE_ON, {'foo': 1.0})
with mock.patch.object(self.gf, '_send_to_graphite') as mock_send:
self.gf._report_attributes('entity', state)
expected = ['bm.entity.foo 1.000000 12345',
'bm.entity.state 1.000000 12345']
actual = mock_send.call_args_list[0][0][0].split('\n')
self.assertEqual(sorted(expected), sorted(actual))
state.state = STATE_OFF
with mock.patch.object(self.gf, '_send_to_graphite') as mock_send:
self.gf._report_attributes('entity', state)
expected = ['bm.entity.foo 1.000000 12345',
'bm.entity.state 0.000000 12345']
actual = mock_send.call_args_list[0][0][0].split('\n')
self.assertEqual(sorted(expected), sorted(actual))
@mock.patch('time.time')
def test_send_to_graphite_errors(self, mock_time):
"""Test the sending with errors."""
mock_time.return_value = 12345
state = ha.State('domain.entity', STATE_ON, {'foo': 1.0})
with mock.patch.object(self.gf, '_send_to_graphite') as mock_send:
mock_send.side_effect = socket.error
self.gf._report_attributes('entity', state)
mock_send.side_effect = socket.gaierror
self.gf._report_attributes('entity', state)
@mock.patch('socket.socket')
def test_send_to_graphite(self, mock_socket):
"""Test the sending of data."""
self.gf._send_to_graphite('foo')
mock_socket.assert_called_once_with(socket.AF_INET,
socket.SOCK_STREAM)
sock = mock_socket.return_value
sock.connect.assert_called_once_with(('foo', 123))
sock.sendall.assert_called_once_with('foo'.encode('ascii'))
sock.send.assert_called_once_with('\n'.encode('ascii'))
sock.close.assert_called_once_with()
def test_run_stops(self):
"""Test the stops."""
with mock.patch.object(self.gf, '_queue') as mock_queue:
mock_queue.get.return_value = self.gf._quit_object
self.assertEqual(None, self.gf.run())
mock_queue.get.assert_called_once_with()
mock_queue.task_done.assert_called_once_with()
def test_run(self):
"""Test the running."""
runs = []
event = mock.MagicMock(event_type=EVENT_STATE_CHANGED,
data={'entity_id': 'entity',
'new_state': mock.MagicMock()})
def fake_get():
if len(runs) >= 2:
return self.gf._quit_object
elif runs:
runs.append(1)
return mock.MagicMock(event_type='somethingelse',
data={'new_event': None})
else:
runs.append(1)
return event
with mock.patch.object(self.gf, '_queue') as mock_queue:
with mock.patch.object(self.gf, '_report_attributes') as mock_r:
mock_queue.get.side_effect = fake_get
self.gf.run()
# Twice for two events, once for the stop
self.assertEqual(3, mock_queue.task_done.call_count)
mock_r.assert_called_once_with(
'entity',
event.data['new_state'])
|
punalpatel/st2
|
st2reactor/st2reactor/container/manager.py
|
Python
|
apache-2.0
| 6,484 | 0.002159 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import signal
import eventlet
from st2common import log as logging
from st2reactor.container.process_container import ProcessSensorContainer
from st2common.services.sensor_watcher import SensorWatcher
from st2common.models.system.common import ResourceReference
LOG = logging.getLogger(__name__)
class SensorContainerManager(object):
def __init__(self, sensors_partitioner):
self._sensor_container = None
self._sensors_watcher = SensorWatcher(create_handler=self._handle_create_sensor,
update_handler=self._handle_update_sensor,
delete_handler=self._handle_delete_sensor,
queue_suffix='sensor_container')
self._container_thread = None
if not sensors_partitioner:
raise ValueError('sensors_partitioner should be non-None.')
self._sensors_partitioner = sensors_partitioner
def run_sensors(self):
"""
Run all sensors as determined by sensors_partitioner.
"""
sensors = self._sensors_partitioner.get_sensors()
if sensors:
LOG.info('Setting up container to run %d sensors.', len(sensors))
LOG.info('\tSensors list - %s.', [self._get_sensor_ref(sensor) for sensor in sensors])
sensors_to_run = []
for sensor in sensors:
# TODO: Directly pass DB object to the ProcessContainer
sensors_to_run.append(self._to_sensor_object(sensor))
LOG.info('(PID:%s) SensorContainer started.', os.getpid())
self._setup_sigterm_handler()
self._spin_container_and_wait(sensors_to_run)
def _spin_container_and_wait(self, sensors):
try:
self._sensor_container = ProcessSensorContainer(sensors=sensors)
self._container_thread = eventlet.spawn(self._sensor_container.run)
LOG.debug('Starting sensor CUD watcher...')
self._sensors_watcher.start()
exit_code = self._container_thread.wait()
LOG.error('Process container quit with exit_code %d.', exit_code)
LOG.error('(PID:%s) SensorContainer stopped.', os.getpid())
except (KeyboardInterrupt, SystemExit):
self._sensor_container.shutdown()
self._sensors_watcher.stop()
LOG.info('(PID:%s) SensorContainer stopped. Reason - %s', os.getpid(),
sys.exc_info()[0].__name__)
eventlet.kill(self._container_thread)
self._container_thread = None
return 0
def _setup_sigterm_handler(self):
def sigterm_handler(signum=None, frame=None):
# This will cause SystemExit to be throw and we call sensor_container.shutdown()
# there which cleans things up.
sys.exit(0)
# Register a SIGTERM signal handler which calls sys.exit which causes SystemExit to
# be thrown. We catch SystemExit and handle cleanup there.
signal.signal(signal.SIGTERM, sigterm_handler)
def _to_sensor_object(self, sensor_db):
file_path = sensor_db.artifact_uri.replace('file://', '')
class_name = sensor_db.entry_point.split('.')[-1]
sensor_obj = {
'pack': sensor_db.pack,
'file_path': file_path,
'class_name': class_name,
'trigger_types': sensor_db.trigger_types,
'poll_interval': sensor_db.poll_interval,
'ref': self._get_sensor_ref(sensor_db)
}
return sensor_obj
#################################################
# Event handler methods for the sensor CUD events
#################################################
def _handle_create_sensor(self, sensor):
if not self._sensors_partitioner.is_sensor_owner(sensor):
LOG.info('sensor %s is not supported. Ignoring create.', self._get_sensor_ref(sensor))
return
if not sensor.enabled:
LOG.info('sensor %s is not enabled.', self._get_sensor_ref(sensor))
return
LOG.info('Adding sensor %s.', self._get_sensor_ref(sensor))
self._sensor_container.add_sensor(sensor=self._to_sensor_object(sensor))
def _handle_update_sensor(self, sensor):
if not self._sensors_partitioner.is_sensor_owner(sensor):
LOG.info('sensor %s is not supported. Ignoring update.', self._get_sensor_ref(sensor))
return
sensor_ref = self._get_sensor_ref(sensor)
sensor_obj = self._to_sensor_object(sensor)
# Handle disabling sensor
if not sensor.enabled:
LOG.info('Sensor %s disabled. Unloading sensor.', sensor_ref)
            self._sensor_container.remove_sensor(sensor=sensor_obj)
return
LOG.info('Sensor %s updated. Reloading sensor.', sensor_ref)
try:
self._sensor_container.remove_sensor(sensor=sensor_obj)
except:
LOG.exception('Failed to reload sensor %s', sensor_ref)
else:
self._sensor_container.add_sensor(sensor=sensor_obj)
LOG.info('Sensor %s reloaded.', sensor_ref)
    def _handle_delete_sensor(self, sensor):
if not self._sensors_partitioner.is_sensor_owner(sensor):
LOG.info('sensor %s is not supported. Ignoring delete.', self._get_sensor_ref(sensor))
return
LOG.info('Unloading sensor %s.', self._get_sensor_ref(sensor))
self._sensor_container.remove_sensor(sensor=self._to_sensor_object(sensor))
def _get_sensor_ref(self, sensor):
return ResourceReference.to_string_reference(pack=sensor.pack, name=sensor.name)
|
google/swift-jupyter
|
test/fast_test.py
|
Python
|
apache-2.0
| 196 | 0 |
"""Runs
|
fast tests."""
import unittest
from tests.kernel_tests import SwiftKernelTests, OwnKernelTests
from tests.simple_notebook_tests import *
if __name__ == '__main__':
unittest.main()
|
lixiangning888/whole_project
|
modules/signatures_orignal/banker_cridex.py
|
Python
|
lgpl-3.0
| 2,206 | 0.00544 |
# Copyright (C) 2014 Robby Zeitfuchs (@robbyFux)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class Cridex(Signature):
name = "banker_cridex"
description = "Cridex banking trojan"
severity = 3
alert = True
categories = ["Banking", "Trojan"]
families = ["Cridex"]
authors = ["Robby Zeitfuchs", "@robbyFux"]
minimum = "0.5"
references = ["http://stopmalvertising.com/rootkits/analysis-of-cridex.html",
"http://sempersecurus.blogspot.de/2012/08/cridex-analysis-using-volatility.html",
"http://labs.m86security.com/2012/03/the-cridex-trojan-targets-137-financial-organizations-in-one-go/",
"https://malwr.co
|
m/analysis/NDU2ZWJjZTIwYmRiNGVmNWI3MDUyMGExMGQ0MmVhYTY/",
"https://malwr
|
.com/analysis/MTA5YmU4NmIwMjg5NDAxYjlhYzZiZGIwYjZkOTFkOWY/"]
def run(self):
indicators = [".*Local.QM.*",
".*Local.XM.*"]
match_file = self.check_file(pattern=".*\\KB[0-9]{8}\.exe", regex=True)
match_batch_file = self.check_file(pattern=".*\\\\Temp\\\\\S{4}\.tmp\.bat", regex=True)
if match_file and match_batch_file:
self.data.append({"file": match_file})
self.data.append({"batchfile": match_batch_file})
for indicator in indicators:
match_mutex = self.check_mutex(pattern=indicator, regex=True)
if match_mutex:
self.data.append({"mutex": match_mutex})
return True
return False
|
georgemarshall/django
|
django/contrib/gis/db/backends/base/operations.py
|
Python
|
bsd-3-clause
| 6,371 | 0.002197 |
from django.contrib.gis.db.models import GeometryField
from django.contrib.gis.db.models.functions import Distance
from django.contrib.gis.measure import (
Area as AreaMeasure, Distance as DistanceMeasure,
)
from django.db.utils import NotSupportedError
from django.utils.functional import cached_property
class BaseSpatialOperations:
# Quick booleans for the type of this spatial backend, and
# an attribute for the spatial database version tuple (if applicable)
postgis = False
spatialite = False
mysql = False
oracle = False
spatial_version = None
# How the geometry column should be selected.
select = '%s'
@cached_property
def select_extent(self):
return self.select
# Does the spatial database have a geometry or geography type?
geography = False
geometry = False
# Aggregates
disallowed_aggregates = ()
geom_func_prefix = ''
# Mapping between Django function names and backend names, when names do not
# match; used in spatial_function_name().
function_names = {}
# Blacklist/set of known unsupported functions of the backend
unsupported_functions = {
'Area', 'AsGeoJSON', 'AsGML', 'AsKML', 'AsSVG', 'Azimuth',
'BoundingCircle', 'Centroid', 'Difference', 'Distance', 'Envelope',
'GeoHash', 'GeometryDistance', 'Intersection', 'IsValid', 'Length',
'LineLocatePoint', 'MakeValid', 'MemSize', 'NumGeometries',
'NumPoints', 'Perimeter', 'PointOnSurface', 'Reverse', 'Scale',
'SnapToGrid', 'SymDifference', 'Transform', 'Translate', 'Union',
}
# Constructors
from_text = False
# Default conversion functions for aggregates; will be overridden if implemented
# for the spatial backend.
def convert_extent(self, box, srid):
raise NotImplementedError('Aggregate extent not implemented for this spatial backend.')
def convert_extent3d(self, box, srid):
raise NotImplementedError('Aggregate 3D extent not implemented for this spatial backend.')
# For quoting column values, rather than columns.
def geo_quote_name(self, name):
return "'%s'" % name
# GeometryField operations
def geo_db_type(self, f):
"""
Return the database column type for the geometry field on
the spatial backend.
"""
raise NotImplementedError('subclasses of BaseSpatialOperations must provide a geo_db_type() method')
def get_distance(self, f, value, lookup_type):
"""
Return the distance parameters for the given geometry field,
lookup value, and lookup type.
"""
raise NotImplementedError('Distance operations not available on this spatial backend.')
    def get_geom_placeholder(self, f, value, compiler):
"""
Return the placeholder for the given geometry field with the given
value. Depending on the spatial backend, the placeholder may contain a
stored procedure call to the transformation function of the spatial
backend.
"""
def transform_value(value, field):
return value is not None and value.srid != field.srid
if hasattr(value, 'as_sql'):
return (
                '%s(%%s, %s)' % (self.spatial_function_name('Transform'), f.srid)
if transform_value(value.output_field, f)
else '%s'
)
if transform_value(value, f):
# Add Transform() to the SQL placeholder.
return '%s(%s(%%s,%s), %s)' % (
self.spatial_function_name('Transform'),
self.from_text, value.srid, f.srid,
)
elif self.connection.features.has_spatialrefsys_table:
return '%s(%%s,%s)' % (self.from_text, f.srid)
else:
# For backwards compatibility on MySQL (#27464).
return '%s(%%s)' % self.from_text
def check_expression_support(self, expression):
if isinstance(expression, self.disallowed_aggregates):
raise NotSupportedError(
"%s spatial aggregation is not supported by this database backend." % expression.name
)
super().check_expression_support(expression)
def spatial_aggregate_name(self, agg_name):
raise NotImplementedError('Aggregate support not implemented for this spatial backend.')
def spatial_function_name(self, func_name):
if func_name in self.unsupported_functions:
raise NotSupportedError("This backend doesn't support the %s function." % func_name)
return self.function_names.get(func_name, self.geom_func_prefix + func_name)
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
raise NotImplementedError('Subclasses of BaseSpatialOperations must provide a geometry_columns() method.')
def spatial_ref_sys(self):
raise NotImplementedError('subclasses of BaseSpatialOperations must a provide spatial_ref_sys() method')
distance_expr_for_lookup = staticmethod(Distance)
def get_db_converters(self, expression):
converters = super().get_db_converters(expression)
if isinstance(expression.output_field, GeometryField):
converters.append(self.get_geometry_converter(expression))
return converters
def get_geometry_converter(self, expression):
raise NotImplementedError(
'Subclasses of BaseSpatialOperations must provide a '
'get_geometry_converter() method.'
)
def get_area_att_for_field(self, field):
if field.geodetic(self.connection):
if self.connection.features.supports_area_geodetic:
return 'sq_m'
raise NotImplementedError('Area on geodetic coordinate systems not supported.')
else:
units_name = field.units_name(self.connection)
if units_name:
return AreaMeasure.unit_attname(units_name)
def get_distance_att_for_field(self, field):
dist_att = None
if field.geodetic(self.connection):
if self.connection.features.supports_distance_geodetic:
dist_att = 'm'
else:
units = field.units_name(self.connection)
if units:
dist_att = DistanceMeasure.unit_attname(units)
return dist_att
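# --- Editorial skeleton (not Django source) ----------------------------------
# Illustrates the contract defined above: a concrete backend subclasses
# BaseSpatialOperations (together with its DatabaseOperations class) and fills
# in the NotImplementedError hooks. The bodies here are placeholders, not the
# real PostGIS/SpatiaLite implementations.
class ExampleSpatialOperations(BaseSpatialOperations):
    geom_func_prefix = 'ST_'

    def geo_db_type(self, f):
        # Column type for a geometry field, e.g. 'geometry(Point, 4326)'
        return 'geometry(%s, %s)' % (f.geom_type, f.srid)

    def get_distance(self, f, value, lookup_type):
        # Raw parameters handed to distance lookups; real backends convert
        # Distance measures into the unit of the field here.
        return [value]

    def get_geometry_converter(self, expression):
        # Converts database values back into geometries; identity for brevity.
        def converter(value, expression, connection):
            return value
        return converter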
|
openkamer/openkamer
|
parliament/migrations/0003_auto_20161126_2342.py
|
Python
|
mit
| 467 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-26 22:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('parliament', '0002_auto_20161123_1157'),
]
    operations = [
migrations.AlterField(
model_name='politicalparty',
name='name_short',
field=models.CharField(max_length=200),
),
]
|
0--key/lib
|
portfolio/Python/scrapy/axemusic/tomleemusic_ca.py
|
Python
|
apache-2.0
| 5,954 | 0.003359 |
import re
import logging
import urllib
import csv
import os
import shutil
from datetime import datetime
import StringIO
from scrapy.spider import BaseSpider
from scrapy import signals
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from scrapy.xlib.pydispatch import dispatcher
from scrapy.exceptions import CloseSpider
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from scrapy import log
HERE = os.path.abspath(os.path.dirname(__file__))
class TomLeeMusicCaSpider(BaseSpider):
name = 'tomleemusic.ca'
allowed_domains = ['tomleemusic.ca', 'competitormonitor.com']
def __init__(self, *args, **kwargs):
super(TomLeeMusicCaSpider, self).__init__(*args, **kwargs)
dispatcher.connect(self.spider_closed, signals.spider_closed)
def start_requests(self):
if self.full_run_required():
start_req = self._start_requests_full()
log.msg('Full run')
else:
start_req = self._start_requests_simple()
log.msg('Simple run')
for req in start_req:
yield req
def spider_closed(self, spider):
if spider.name == self.name:
shutil.copy('data/%s_products.csv' % spider.crawl_id, os.path.join(HERE, 'tomleemusic_products.csv'))
def _start_requests_full(self):
yield Request('http://www.tomleemusic.ca/main/products.cfm', callback=self.parse_full)
def _start_requests_simple(self):
yield Request('http://competitormonitor.com/login.html?action=get_products_api&website_id=470333&matched=1',
callback=self.parse_simple)
def full_run_required(self):
if not os.path.exists(os.path.join(HERE, 'tomleemusic_products.csv')):
return True
#run full only on Mondays
return datetime.now().weekday() == 1
def parse_full(self, response):
hxs = HtmlXPathSelector(response)
for url in hxs.select(u'//a[@class="catLink"]/@href').extract():
yield Request(url, callback=self.parse_product_list)
def parse_product_list(self, response):
hxs = HtmlXPathSelector(response)
for url in hxs.select(u'//a[@class="catLink"]/@href').extract():
yield Request(url, callback=self.parse_product_list)
for url in hxs.select(u'//a[@class="productListLink"]/@href').extract():
            url = urljoin_rfc(get_base_url(response), url)
yield Request(url, callback=self.parse_product)
next_page = hxs.select(u'//a[@class="smallPrint" and contains(text(),"Next")]/@href').extract()
if next_page:
url = urljoin_rfc(get_base_url(response), next_page[0])
yield Request(url, callback=self.parse_product_list)
def parse_product(self, response):
hxs = HtmlXPathSelector(response)
product_loader = ProductLoader(item=Product(), selector=hxs)
product_loader.add_value('url', response.url)
product_loader.add_xpath('name', u'//h1[@class="productDetailHeader"]/text()')
if hxs.select(u'//span[@class="productDetailSelling"]/text()'):
product_loader.add_xpath('price', u'//span[@class="productDetailSelling"]/text()')
else:
product_loader.add_value('price', '')
product_loader.add_xpath('sku', u'//input[@type="hidden" and (@name="hidProductId" or @name="inv")]/@value')
product_loader.add_xpath('category', u'//td[@class="smallPrint"]/a[position()=2 and contains(text(),"Products")]/../a[3]/text()')
img = hxs.select(u'//a[@class="smallPrint" and @rel="lightbox"]/@href').extract()
if img:
img = urljoin_rfc(get_base_url(response), img[0])
product_loader.add_value('image_url', img)
if hxs.select(u'//a[contains(@href,"BrandName")]/@href'):
product_loader.add_xpath('brand', u'substring-after(//a[contains(@href,"BrandName")]/@href,"=")')
else:
brands = hxs.select(u'//strong[@class="sideBarText"]/text()').extract()
brands = [b.strip() for b in brands]
for brand in brands:
if product_loader.get_output_value('name').startswith(brand):
product_loader.add_value('brand', brand)
break
else:
product_loader.add_xpath('brand', u'normalize-space(substring-before(substring-after(//title/text(), " - "), " - "))')
# product_loader.add_xpath('shipping_cost', u'//div[@class="DetailRow"]/div[contains(text(),"Shipping")]/../div[2]/text()')
yield product_loader.load_item()
def parse_simple(self, response):
f = StringIO.StringIO(response.body)
hxs = HtmlXPathSelector()
reader = csv.DictReader(f)
self.matched = set()
for row in reader:
self.matched.add(row['url'])
for url in self.matched:
yield Request(url, self.parse_product)
with open(os.path.join(HERE, 'tomleemusic_products.csv')) as f:
reader = csv.DictReader(f)
for row in reader:
if row['url'] not in self.matched:
loader = ProductLoader(selector=hxs, item=Product())
loader.add_value('url', row['url'])
loader.add_value('sku', row['sku'])
loader.add_value('identifier', row['identifier'])
loader.add_value('name', row['name'])
loader.add_value('price', row['price'])
loader.add_value('category', row['category'])
loader.add_value('brand', row['brand'])
loader.add_value('image_url', row['image_url'])
loader.add_value('shipping_cost', row['shipping_cost'])
yield loader.load_item()
|
DQE-Polytech-University/Beamplex
|
src/laserstructure.py
|
Python
|
mit
| 3,771 | 0.008221 |
import matplotlib.pyplot as plt
#stores information about laser structure
#saves refraction and electric field profiles in text and graphic form to HDD
class Laser:
refraction = []
field = []
gridX = []
gridN = []
field = []
def __init__(self, (wavelength, concentration, thickness)):
if isinstance(wavelength, (int, float)) == False:
raise TypeError("wavelength should be a number")
if isinstance(concentration, list) == False:
raise TypeError("concentration should be a list")
if isinstance( thickness, (list)) == False:
raise TypeError("thickness should be a list")
for i in range(5):
if isinstance(concentration[i], (int, float)) == False or isinstance( thickness[i], (int, float)) == False:
raise TypeError("concentration and thickness elements should be numbers")
if wavelength is None:
raise ValueError("wavelength is undefined")
if concentration is None:
raise ValueError("concentration is undefined")
if thickness is None:
raise ValueError("thickness is undefined")
if wavelength < 0.85 or wavelength > 1.5:
raise ValueError("wavelength out of range")
self.wavelength = wavelength
self.concentration = concentration
self.thickness = thickness
#refraction profile output
def plotRefraction(self):
if isinstance(self.gridX, list) == False:
raise TypeError("self.gridX should be a list")
if isinstance(self.gridN, list) == False:
raise TypeError("self.gridN should be a list")
if len(self.gridX) <= 20:
raise ValueError("len(self.gridX) out of range")
if len(self.gridN) <= 20:
raise ValueError("len(self.gridN) out of range")
if (len(self.gridX) == len(self.gridN)) == False:
raise IndexError("self.gridX should be the same dimension as self.gridN")
plt.plot(self.gridX, self.gridN)
plt.xlabel('position, micrometers')
plt.ylabel('refraction index, arb. units')
plt.title('Refraction Index Profile')
plt.savefig('refraction.png', format='png', dpi=100)
plt.clf()
refractionFile = open("refraction.txt", "w")
for i in range(len(self.gridN)):
            refractionFile.write(str(self.gridX[i]) + ": " + str(self.gridN[i]) + "\n")
refractionFile.close()
#field profile output
def plotField(self):
        if isinstance(self.gridX, list) == False:
raise TypeError("self.gridX should be a list")
if isinstance(self.field, list) == False:
raise TypeError("self.field should be a list")
if len(self.gridX) <= 20:
raise ValueError("len(self.gridX) out of range")
if len(self.field) <= 20:
raise ValueError("len(self.field) out of range")
if (len(self.gridX) == len(self.field)) == False:
raise TypeError("self.gridX should be the same dimension as self.field")
for i in range(len(self.field)):
self.field[i] = self.field[i] ** 2
plt.plot(self.gridX, self.field)
plt.xlabel('position, micrometers')
plt.ylabel('electric field, arb. units')
plt.title('Electric field in laser structure')
plt.savefig('field.png', format='png', dpi=100)
plt.clf()
fieldFile = open("field.txt", "w")
for i in range(len(self.gridN)):
fieldFile.write(str(self.gridX[i]) + ": " + str(self.field[i]) + "\n")
fieldFile.close()
|
rmit-ir/SummaryRank
|
summaryrank/__main__.py
|
Python
|
mit
| 2,623 | 0.000381 |
"""
The main script
"""
import argparse
import summaryrank.features
import summaryrank.importers
import summaryrank.tools
DESCRIPTION = '''
SummaryRank is a set of tools that help producing machine-learned
summary/sentence rankers. It supports a wide range of functions such
as generating judgments in trec_eval format or creating feature
vectors in the SVMLight format.
corpora tools:
{}
representations and features:
{}
commands:
{}
'''
IMPORTER_FUNCTIONS = [
    ("import_webap", summaryrank.importers.import_webap),
("import_trec_novelty", summaryrank.importers.import_trec_novelty),
("import_mobileclick", summaryrank.importers.import_mobileclick),
]
FEATURE_FUNCTIONS = [
("gen_term", summaryrank.features.gen_term),
("gen_freqstats", summaryrank.features.gen_freqstats),
("gen_esa", summaryrank.features.gen_esa),
("gen_tagme", summaryrank.features.gen_tagme),
("extract", summaryrank.features.extract),
("contextualize", summaryrank.features.contextualize),
]
GENERAL_FUNCTIONS = [
("describe", summaryrank.tools.describe),
("cut", summaryrank.tools.cut),
("join", summaryrank.tools.join),
("shuffle", summaryrank.tools.shuffle),
("split", summaryrank.tools.split),
("normalize", summaryrank.tools.normalize),
]
def _make_command_list(functions):
""" Prepare a formatted list of commands. """
return [' {:24}{}\n'.format(name, func.__doc__.strip().splitlines()[0])
for name, func in functions]
if __name__.endswith('__main__'):
importer_commands = ''.join(_make_command_list(IMPORTER_FUNCTIONS))
feature_commands = ''.join(_make_command_list(FEATURE_FUNCTIONS))
general_commands = ''.join(_make_command_list(GENERAL_FUNCTIONS))
parser = argparse.ArgumentParser(
prog='summaryrank',
formatter_class=argparse.RawDescriptionHelpFormatter,
usage='%(prog)s [options..] command [args..]',
add_help=False,
description=DESCRIPTION.format(
importer_commands, feature_commands, general_commands)
)
parser.add_argument('command', nargs='?', help=argparse.SUPPRESS)
parser.add_argument('argv', nargs=argparse.REMAINDER, help=argparse.SUPPRESS)
args = parser.parse_args()
commands = dict()
commands.update(IMPORTER_FUNCTIONS)
commands.update(FEATURE_FUNCTIONS)
commands.update(GENERAL_FUNCTIONS)
if args.command in commands:
commands[args.command](args.argv)
else:
if args.command is not None:
parser.error("invalid command '{}'".format(args.command))
else:
parser.print_help()
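# --- Illustrative note (not part of the original script) ---
# Dispatch follows the usage string above: the first positional argument names a
# sub-command from the tables and the remaining argv is forwarded to it.
# File names below are hypothetical:
#
#     python -m summaryrank describe features.txt
#     python -m summaryrank cut [args..]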
|
pmverdugo/fiware-validator
|
validator/tests/api/middleware/test_ssl.py
|
Python
|
apache-2.0
| 1,568 | 0.000638 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Tests for validator.api.middleware.ssl """
from __future__ import unicode_literals
import mock
from validator.api.middleware.ssl import SSLMiddleware
from validator.tests.base import ValidatorTestCase
class SSLMiddlewareTestCase(ValidatorTestCase):
""" Tests for class SSLMiddleware """
def setUp(self):
""" Create a SSLMiddleware instance """
super(SSLMiddlewareTestCase, self).setUp()
self.item = SSLMiddleware()
def test_process_request(self):
""" Tests for method process_request """
self.item.external = mock.MagicMock()
input = "MyInput"
        expected = "OK"
self.item.external.return_value = "OK"
observed = self.item.process_request(input)
self.assertEqual(expected, observed)
def tearDown(self):
""" Cleanup the SSLMiddleware instance """
        super(SSLMiddlewareTestCase, self).tearDown()
self.m.UnsetStubs()
self.m.ResetAll()
|
nickgashkov/virtualspace
|
virtualspace/utils/exceptions.py
|
Python
|
mit
| 290 | 0 |
# Copyright (c) 2017 Nick Gashkov
#
# Distributed under MIT License. See LICENSE file for details.
class ValidationError(Exception):
def __init__(self, *args, **kwargs):
self.error_dict = kwargs.pop('error_dict')
        super(ValidationError, self).__init__(*args, **kwargs)
|
forYali/yali
|
yali/gui/ScrBootloader.py
|
Python
|
gpl-2.0
| 4,461 | 0.003362 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2010 TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
import gettext
_ = gettext.translation('yali', fallback=True).ugettext
from PyQt5.Qt import QWidget, pyqtSignal
import yali.util
import yali.context as ctx
from yali.gui import ScreenWidget
from yali.gui.Ui.bootloaderwidget import Ui_BootLoaderWidget
from yali.storage.bootloader import BOOT_TYPE_NONE, BOOT_TYPE_PARTITION, BOOT_TYPE_MBR, BOOT_TYPE_RAID
class Widget(QWidget, ScreenWidget):
name = "bootloadersetup"
def __init__(self):
QWidget.__init__(self)
self.ui = Ui_BootLoaderWidget()
self.ui.setupUi(self)
self.bootloader = None
self.default = None
self.device = None
self.boot_disk = None
self.boot_partition = None
self.ui.defaultSettings.toggled[bool].connect(self.showDefaultSettings)
self.ui.noInstall.toggled[bool].connect(self.deactivateBootloader)
# self.ui.installPartition.toggled[bool].connect(self.activateInstallPartition)
self.ui.drives.currentIndexChanged[int].connect(self.currentDeviceChanged)
self.ui.advancedSettingsBox.show()
self.ui.defaultSettings.setChecked(True)
def fillDrives(self):
self.ui.drives.clear()
for drive in self.bootloader.drives:
device = ctx.storage.devicetree.getDeviceByName(drive)
item = u"%s" % (device.name)
self.ui.drives.addItem(item, device)
def shown(self):
if ctx.flags.install_type == ctx.STEP_RESCUE:
ctx.mainScreen.disableBack()
self.bootloader = ctx.bootloader
self.bootloader.storage = ctx.storage
self.fillDrives()
self.activateChoices()
def backCheck(self):
if ctx.storage.doAutoPart:
ctx.mainScreen.step_increment = 2
ctx.storage.reset()
return True
def execute(self):
self.bootloader.stage1Device = self.device
if self.ui.noInstall.isChecked():
self.bootloader.bootType = BOOT_TYPE_NONE
# elif self.ui.installPartition.isChecked():
# self.bootloader.bootType = BOOT_TYPE_PARTITION
elif self.ui.installMBR.isChecked():
self.bootloader.bootType = BOOT_TYPE_MBR
if ctx.flags.install_type == ctx.STEP_RESCUE:
ctx.mainScreen.step_increment = 2
else:
if ctx.flags.collection:
ctx.collections = yali.util.get_collections()
if len(ctx.collections) <= 1:
ctx.flags.collection = False
ctx.mainScreen.step_increment = 2
else:
ctx.mainScreen.step_increment = 2
return True
def showDefaultSettings(self, state):
if state:
self.device = self.default
self.ui.advancedSettingsBox.hide()
else:
self.ui.advancedSettingsBox.show()
def activateChoices(self):
for choice in self.bootloader.choices.keys():
            if choice == BOOT_TYPE_MBR:
self.ui.installMBR.setText(_("The first sector of"))
self.boot_disk = self.bootloader.choices[BOOT_TYPE_MBR][0]
# elif choice == BOOT_TYPE_RAID:
# self.ui.installPartition.setText("The RAID array where Pardus is installed")
# self.boot_partition = self.bootloader.choices[BOOT_TYPE_RAID][0]
# elif choice == BOOT_TYPE_PARTITION:
# self.ui.installPartition.setText(_("The partition where Pardus is installed"))
# self.boot_partition = self.bootloader.choices[BOOT_TYPE_PARTITION][0]
if self.boot_disk:
self.default = self.boot_disk
self.ui.installMBR.setChecked(True)
# else:
# self.default = self.boot_partition
# self.ui.installPartition.setChecked(True)
def deactivateBootloader(self):
self.device = None
def activateInstallPartition(self, state):
if state:
self.device = self.boot_partition
def currentDeviceChanged(self, index):
if index != -1:
self.device = self.ui.drives.itemData(index).name
|
ajrichards/GenesDI
|
genesdi/RunMakeFigures.py
|
Python
|
gpl-3.0
| 5,628 | 0.022388 |
#!/usr/bin/python
#
# to run an example
# python RunMakeFigures.py -p Demo -i 0 -j 1 -f 3FITC_4PE_004.fcs -h ./projects/Demo
#
import getopt,sys,os
import numpy as np
## important line to fix popup error in mac osx
import matplotlib
matplotlib.use('Agg')
from cytostream import Model
import matplotlib.pyplot as plt
## parse inputs
def bad_input():
print "\nERROR: incorrect args"
print sys.argv[0] + "-p projectID -i channel1 -j channel2 -f selectedFile -a alternateDirectory -s subset -t modelType -h homeDir"
print " projectID (-p) project name"
print " channel1 (-i) channel 1 name"
print " channel2 (-j) channel 2 name"
print " homeDir (-h) home directory of current project"
print " selectedFile (-f) name of selected file"
print " altDir (-a) alternative directory (optional)"
print " subset (-s) subsampling number (optional)"
print " modelName (-m) model name"
print " modelType (-t) model type"
print "\n"
sys.exit()
try:
optlist, args = getopt.getopt(sys.argv[1:],'i:j:s:a:p:f:m:t:h:')
except getopt.GetoptError:
print getopt.GetoptError
bad_input()
projectID = None
channel1 = None
channel2 = None
selectedFile = None
altDir = None
homeDir = None
modelType = None
modelName = None
subset = "all"
run = True
for o, a in optlist:
if o == '-i':
channel1 = a
if o == '-j':
channel2 = a
if o == '-f':
selectedFile = a
if o == '-a':
altDir = a
if o == '-p':
projectID = a
if o == '-s':
subset = a
if o == '-m':
modelName = a
if o == '-t':
modelType = a
if o == '-h':
homeDir = a
def make_scatter_plot(model,selectedFile,channel1Ind,channel2Ind,subset='all',labels=None,buff=0.02,altDir=None):
#fig = pyplot.figure(figsize=(7,7))
markerSize = 5
alphaVal = 0.5
fontName = 'arial'
fontSize = 12
plotType = 'png'
## prepare figure
fig = plt.figure(figsize=(7,7))
ax = fig.add_subplot(111)
## specify channels
fileChannels = model.get_file_channel_list(selectedFile)
index1 = int(channel1Ind)
index2 = int(channel2Ind)
channel1 = fileChannels[index1]
channel2 = fileChannels[index2]
data = model.pyfcm_load_fcs_file(selectedFile)
## subset give an numpy array of indices
if subset != "all":
subsampleIndices = model.get_subsample_indices(subset)
data = data[subsampleIndices,:]
## make plot
totalPoints = 0
if labels == None:
ax.scatter([data[:,index1]],[data[:,index2]],color='blue',s=markerSize)
else:
if type(np.array([])) != type(labels):
labels = np.array(labels)
numLabels = np.unique(labels).size
maxLabel = np.max(labels)
cmp = model.get_n_color_colorbar(maxLabel+1)
for l in np.sort(np.unique(labels)):
rgbVal = tuple([val * 256 for val in cmp[l,:3]])
hexColor = model.rgb_to_hex(rgbVal)[:7]
x = data[:,index1][np.where(labels==l)[0]]
y = data[:,index2][np.where(labels==l)[0]]
totalPoints+=x.size
if x.size == 0:
continue
ax.scatter(x,y,color=hexColor,s=markerSize)
#ax.scatter(x,y,color=hexColor,s=markerSize)
## handle data edge buffers
bufferX = buff * (data[:,index1].max() - data[:,index1].min())
bufferY = buff * (data[:,index2].max() - data[:,index2].min())
ax.set_xlim([data[:,index1].min()-bufferX,data[:,index1].max()+bufferX])
ax.set_ylim([data[:,index2].min()-bufferY,data[:,index2].max()+bufferY])
## save file
fileName = selectedFile
ax.set_title("%s_%s_%s"%(channel1,channel2,fileName),fontname=fontName,fontsize=fontSize)
ax.set_xlabel(channel1,fontname=fontName,fontsize=fontSize)
ax.set_ylabel(channel2,fontname=fontName,fontsize=fontSize)
if altDir == None:
fileName = os.path.join(model.homeDir,'figs',"%s_%s_%s.%s"%(selectedFile[:-4],channel1,channel2,plotType))
fig.savefig(fileName,transparent=False,dpi=50)
else:
fileName = os.path.join(altDir,"%s_%s_%s.%s"%(selectedFile[:-4],channel1,channel2,plotType))
fig.savefig(fileName,transparent=False,dpi=50)
## error checking
if altDir == 'None':
altDir = None
if homeDir == 'None':
homeDir = None
if modelName == 'None':
modelName = None
statModel,statModelClasses = None,None
if altDir == None and homeDir == None:
bad_input()
run = False
print "WARNING: RunMakeFigures failed errorchecking"
if projectID == None or channel1 == None or channel2 == None or selectedFile == None:
bad_input()
run = False
print "WARNING: RunMakeFigures failed errorchecking"
if os.path.isdir(homeDir) == False:
print "ERROR: homedir does not exist -- bad project name", projectID, homeDir
run = False
if altDir != None and os.path.isdir(altDir) == False:
print "ERROR: specified alternative dir does not exist\n", altDir
run = False
if run == True:
model = Model()
model.initialize(projectID,homeDir)
if modelName == None:
make_scatter_plot(model,selectedFile,channel1,channel2,subset=subset,altDir=altDir)
else:
statModel,statModelClasses = model.load_model_results_pickle(modelName,modelType)
make_scatter_plot(model,selectedFile,channel1,channel2,labels=statModelClasses,subset=subset,altDir=altDir)
|
r0h4n/commons
|
tendrl/commons/objects/node_context/__init__.py
|
Python
|
lgpl-2.1
| 7,441 | 0 |
import json
import os
import socket
import sys
import uuid
import etcd
from tendrl.commons import objects
from tendrl.commons.utils import etcd_utils
from tendrl.commons.utils import event_utils
from tendrl.commons.utils import log_utils as logger
NODE_ID = None
class NodeContext(objects.BaseObject):
def __init__(self, node_id=None, fqdn=None, ipv4_addr=None,
tags=None, status=None, sync_status=None,
last_sync=None, pkey=None,
locked_by=None, *args, **kwargs):
super(NodeContext, self).__init__(*args, **kwargs)
self.node_id = node_id or self._get_node_id() or self._create_node_id()
self.fqdn = fqdn
self.ipv4_addr = ipv4_addr
if self.fqdn:
self.ipv4_addr = socket.gethostbyname(self.fqdn)
        self.locked_by = locked_by
curr_tags = []
try:
_nc_data = etcd_utils.read(
"/nodes/%s/NodeContext/data" % self.node_id
).value
curr_tags = json.loads(_nc_data)['tags']
except etcd.EtcdKeyNotFound:
pass
try:
curr_tags = json.loads(curr_tags)
except (ValueError, TypeError):
# No existing tags
pass
        self.tags = tags or []
self.tags += NS.config.data.get('tags', [])
self.tags += curr_tags
self.tags = list(set(self.tags))
self.status = status or "UP"
self.sync_status = sync_status
self.last_sync = last_sync
self.pkey = pkey or self.fqdn
self.value = 'nodes/{0}/NodeContext'
def _create_node_id(self):
node_id = str(uuid.uuid4())
try:
logger.log(
"debug",
NS.publisher_id,
{"message": "Registered Node (%s) with " % node_id}
)
except KeyError:
sys.stdout.write("message: Registered Node (%s) \n" % node_id)
local_node_id = "/var/lib/tendrl/node_id"
if not os.path.exists(os.path.dirname(local_node_id)):
os.makedirs(os.path.dirname(local_node_id))
with open(local_node_id, 'wb+') as f:
f.write(node_id)
global NODE_ID
NODE_ID = node_id
return node_id
def _get_node_id(self):
if NODE_ID:
return NODE_ID
local_node_id = "/var/lib/tendrl/node_id"
if os.path.isfile(local_node_id):
with open(local_node_id) as f:
node_id = f.read()
global NODE_ID
NODE_ID = node_id
return node_id
def render(self):
self.value = self.value.format(self.node_id or NS.node_context.node_id)
return super(NodeContext, self).render()
def save(self, update=True, ttl=None):
super(NodeContext, self).save(update)
status = self.value + "/status"
if ttl:
self._ttl = ttl
try:
etcd_utils.refresh(status, ttl)
except etcd.EtcdKeyNotFound:
pass
def on_change(self, attr, prev_value, current_value):
if attr == "status":
_tc = NS.tendrl.objects.TendrlContext(
node_id=self.node_id
).load()
if current_value is None:
self.status = "DOWN"
self.save()
msg = "Node {0} is DOWN".format(self.fqdn)
event_utils.emit_event(
"node_status",
self.status,
msg,
"node_{0}".format(self.fqdn),
"WARNING",
node_id=self.node_id,
integration_id=_tc.integration_id
)
# Load cluster_node_context will load node_context
# and it will be updated with latest values
cluster_node_context = NS.tendrl.objects.ClusterNodeContext(
node_id=self.node_id,
integration_id=_tc.integration_id
)
cluster_node_context.save()
del cluster_node_context
global_details = NS.tendrl.objects.GlobalDetails(
integration_id=_tc.integration_id).load()
if global_details.status.lower() == "healthy":
global_details.status = "unhealthy"
global_details.save()
_cluster = NS.tendrl.objects.Cluster(
integration_id=_tc.integration_id
).load()
msg = "Cluster:%s is %s" % (
_cluster.short_name, "unhealthy")
instance = "cluster_%s" % _tc.integration_id
event_utils.emit_event(
"cluster_health_status",
"unhealthy",
msg,
instance,
'WARNING',
integration_id=_tc.integration_id
)
_tag = "provisioner/%s" % _tc.integration_id
if _tag in self.tags:
_index_key = "/indexes/tags/%s" % _tag
self.tags.remove(_tag)
self.save()
etcd_utils.delete(_index_key)
_msg = "node_sync, STALE provisioner node "\
"found! re-configuring monitoring "\
"(job-id: %s) on this node"
payload = {
"tags": ["tendrl/node_%s" % self.node_id],
"run": "tendrl.flows.ConfigureMonitoring",
"status": "new",
"parameters": {
'TendrlContext.integration_id': _tc.integration_id
},
"type": "node"
}
_job_id = str(uuid.uuid4())
NS.tendrl.objects.Job(
job_id=_job_id,
status="new",
payload=payload
).save()
logger.log(
"debug",
NS.publisher_id,
{"message": _msg % _job_id}
)
if _tc.sds_name in ["gluster", "RHGS"]:
bricks = etcd_utils.read(
"clusters/{0}/Bricks/all/{1}".format(
_tc.integration_id,
self.fqdn
)
)
for brick in bricks.leaves:
try:
etcd_utils.write(
"{0}/status".format(brick.key),
"Stopped"
)
except (etcd.EtcdAlreadyExist, etcd.EtcdKeyNotFound):
pass
elif current_value == "UP":
msg = "{0} is UP".format(self.fqdn)
event_utils.emit_event(
"node_status",
"UP",
msg,
"node_{0}".format(self.fqdn),
"INFO",
node_id=self.node_id,
integration_id=_tc.integration_id
)
|
jrha/aquilon
|
build/bootstrap_ms/ms/version/__init__.py
|
Python
|
apache-2.0
| 726 | 0 |
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2011,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def addpkg(*args, **kwargs):
pass
|
xlk521/cloudguantou
|
friendships/urls.py
|
Python
|
bsd-3-clause
| 252 | 0.019841 |
# coding=utf8
'''
Created on 2012-9-19
@author: senon
'''
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('friendships.views',
url(r'^concerned_about_friends/', 'concerned_about_friends')
)
|
jshou/bsi
|
bsi/bsi_parser.py
|
Python
|
mit
| 930 | 0.012903 |
import ply.yacc as yacc
from bsi_lexer import tokens
from bsi_object import BsiObject
from bsi_array import BsiArray
def p_object_pairs(p):
'obj : pairs'
p[0] = BsiObject()
for pair in p[1]:
p[0].set(pair[0], pair[1])
def p_pairs_pair(p):
'pairs : pair'
p[0] = [p[1]]
def p_pairs_pair_pairs(p):
'pairs : pair pairs'
p[0] = [p[1]] + p[2]
def p_pair_key_eq_value(p):
'pair : KEY EQ val'
p[0] = (p[1], p[3])
def p_val_num(p):
'val : NUM'
p[0] = p[1]
def p_val_string(p):
'val : STRING'
p[0] = p[1]
def p_val_array(p):
'val : L_SQ_BR vals R_SQ_BR'
p[0] = BsiArray(p[2])
def p_array_val(p):
'vals : val'
p[0] = [p[1]]
def p_array_vals(p):
'vals : val vals'
p[0] = [p[1]] + p[2]
def p_val_nested_obj(p):
'val : L_BRACE obj R_BRACE'
p[0] = p[2]
def p_error(p):
print p
print "Syntax error in input!"
bsi_parser = yacc.yacc()
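# --- Illustrative usage (not part of the original module) ---
# The grammar above builds a BsiObject out of KEY EQ val pairs, where a value is
# a number, a string, an L_SQ_BR ... R_SQ_BR array, or an L_BRACE ... R_BRACE
# nested object. Assuming the companion lexer in bsi_lexer tokenizes source text
# accordingly, parsing reduces to:
#
#     result = bsi_parser.parse(source_text)   # -> BsiObject
#     result.get('some_key')                   # assuming BsiObject exposes a get() matching set()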
|
Fokko/incubator-airflow
|
airflow/task/task_runner/__init__.py
|
Python
|
apache-2.0
| 1,828 | 0.001094 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.task.task_runner.standard_task_runner import StandardTaskRunner
_TASK_RUNNER = conf.get('core', 'TASK_RUNNER')
def get_task_runner(local_task_job):
"""
Get the task runner that can be used to run the given job.
:param local_task_job: The LocalTaskJob associated with the TaskInstance
that needs to be executed.
:type local_task_job: airflow.jobs.LocalTaskJob
:return: The task runner to use to run the task.
:rtype: airflow.task.task_runner.base_task_runner.BaseTaskRunner
"""
if _TASK_RUNNER == "StandardTaskRunner":
return StandardTaskRunner(local_task_job)
elif _TASK_RUNNER == "CgroupTaskRunner":
from airflow.task.task_runner.cgroup_task_runner import CgroupTaskRunner
return CgroupTaskRunner(local_task_job)
else:
raise AirflowException("Unknown task runner type {}".format(_TASK_RUNNER))
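# --- Illustrative note (not part of the original module) ---
# _TASK_RUNNER comes from conf.get('core', 'TASK_RUNNER'), so switching runners
# is a configuration change in airflow.cfg rather than a code change, e.g.:
#
#     [core]
#     task_runner = CgroupTaskRunner
#
# get_task_runner(local_task_job) then returns the matching runner instance.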
|
avedaee/DIRAC
|
Core/Utilities/Statistics.py
|
Python
|
gpl-3.0
| 2,281 | 0.021043 |
##################################################################################################
# $HeadURL$
##################################################################################################
"""Collection of DIRAC useful statistics related modules.
.. warning::
By default on Error they return None.
"""
__RCSID__ = "$Id$"
from math import sqrt # Mathematical functions.
def getMean( numbers ):
"""Returns the arithmetic mean of a numeric list.
:param list numbers: data sample
"""
if len(numbers):
numbers = sorted([float(x) for x in numbers])
return sum(numbers)/float(len(numbers))
def getMedian( numbers ):
""" Return the median of the list of numbers.
:param list numbers: data sample
"""
  # Sort the list and take the middle element.
nbNum = len(numbers)
if not nbNum:
return
copy = sorted( [float(x) for x in numbers] )
if nbNum & 1: # There is an odd number of elements
return copy[nbNum//2]
else:
return 0.5*(copy[nbNum//2 - 1] + copy[nbNum//2])
def getVariance( numbers, posMean='Empty' ):
"""Determine the measure of the spread of the data set about the mean.
Sample variance is determined by default; population variance can be
determined by setting population attribute to True.
:param list numbers: data sample
:param mixed posMean: mean of a sample or 'Empty' str
"""
if not len(numbers):
return
if posMean == 'Empty':
mean = getMean(numbers)
else:
mean = posMean
numbers = sorted( [float(x) for x in numbers] )
# Subtract the mean from each data item and square the difference.
# Sum all the squared deviations.
return sum([(float(item)-mean)**2.0 for item in numbers ])/len(numbers)
def getStandardDeviation(numbers, variance='Empty', mean='Empty'):
"""Determine the measure of the dispersion of the data set based on the
variance.
:param list numbesr: data sample
:param mixed variance: variance or str 'Empty'
:param mixed mean: mean or str 'Empty'
"""
if not len(numbers):
return
# Take the square root of the variance.
if variance == 'Empty':
if mean == 'Empty':
variance = getVariance(numbers)
else:
variance = getVariance(numbers, posMean=mean)
return sqrt(variance)
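# --- Illustrative usage (not part of the original module) ---
# A quick sanity check of the helpers above on a small sample; the expected
# values follow directly from the definitions used in this module.
if __name__ == "__main__":
  _sample = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
  print( "mean=%s median=%s variance=%s stddev=%s" % ( getMean( _sample ),
                                                       getMedian( _sample ),
                                                       getVariance( _sample ),
                                                       getStandardDeviation( _sample ) ) )
  # -> mean=5.0 median=4.5 variance=4.0 stddev=2.0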
|
jojoriveraa/titulacion-NFCOW
|
venv/bin/django-admin.py
|
Python
|
apache-2.0
| 192 | 0 |
#!/home/jojoriveraa/Dropbox/Capacitación/Platzi/Python-Django/NFCow/venv/bin/python3
from django.core import management
if __name__ == "__main__":
    management.execute_from_command_line()
|
plaid/plaid-python
|
plaid/model/transactions_rule_field.py
|
Python
|
mit
| 6,963 | 0.000718 |
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class TransactionsRuleField(ModelSimple):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('value',): {
'TRANSACTION_ID': "TRANSACTION_ID",
'NAME': "NAME",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'value': (str,),
}
@cached_property
def discriminator():
return None
attribute_map = {}
_composed_schemas = None
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
"""TransactionsRuleField - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] (str): Transaction field for which the rule is defined.., must be one of ["TRANSACTION_ID", "NAME", ] # noqa: E501
Keyword Args:
value (str): Transaction field for which the rule is defined.., must be one of ["TRANSACTION_ID", "NAME", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
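# --- Illustrative usage (not part of the generated module) ---
# The enum wrapper accepts its value positionally or by keyword; values outside
# allowed_values are expected to be rejected by the model validation machinery
# in plaid.model_utils:
#
#     TransactionsRuleField("NAME")                   # ok
#     TransactionsRuleField(value="TRANSACTION_ID")   # ok
#     TransactionsRuleField("AMOUNT")                 # not an allowed value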
|
sysid/nbs
|
lstm/lstm_text_generation.py
|
Python
|
mit
| 3,355 | 0.000596 |
'''Example script to generate text from Nietzsche's writings.
At least 20 epochs are required before the generated text
starts sounding coherent.
It is recommended to run this script on GPU, as recurrent
networks are quite computationally intensive.
If you try this script on new data, make sure your corpus
has at least ~100k characters. ~1M is better.
'''
from __future__ import print_function
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import LSTM
from keras.optimizers import RMSprop
from keras.utils.data_utils import get_file
import numpy as np
import random
import sys
path = get_file('nietzsche.txt', origin="https://s3.amazonaws.com/text-datasets/nietzsche.txt")
text = open(path).read().lower()
print('corpus length:', len(text))
chars = sorted(list(set(text)))
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
# cut the text in semi-redundant sequences of maxlen characters
maxlen = 40
step = 3
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
sentences.append(text[i: i + maxlen])
next_chars.append(text[i + maxlen])
print('nb sequences:', len(sentences))
print('Vectorization...')
X = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)
y = np.zeros((len(sentences), len(chars)), dtype=np.bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
X[i, t, char_indices[char]] = 1
y[i, char_indices[next_chars[i]]] = 1
# build the model: a single LSTM
print('Build model...')
model = Sequential()
model.add(LSTM(128, input_shape=(maxlen, len(chars))))
model.add(Dense(len(chars)))
model.add(Activation('softmax'))
optimizer = RMSprop(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
def sample(preds, temperature=1.0):
# helper function to sample an index from a probability array
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
# train the model, output generated text after each iteration
for iteration in range(1, 60):
print()
print('-' * 50)
print('Iteration', iteration)
model.fit(X, y, batch_size=128, nb_epoch=1)
start_index = random.randint(0, len(text) - maxlen - 1)
for diversity in [0.2, 0.5, 1.0, 1.2]:
print()
print('----- diversity:', diversity)
generated = ''
sentence = text[start_index: start_index + maxlen]
generated += sentence
print('----- Generating with seed: "' + sentence + '"')
sys.stdout.write(generated)
for i in range(400):
x = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(sentence):
x[0, t, char_indices[char]] = 1.
preds = model.predict(x, verbose=0)[0]
next_index = sample(preds, diversity)
next_char = indices_char[next_index]
generated += next_char
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
print()
model.save_weights('data/nietzsche_simple_TF.h5')
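# --- Illustrative note (not part of the original script) ---
# sample() above implements temperature sampling: dividing the log-probabilities
# by a temperature below 1 sharpens the distribution (greedier picks), while a
# temperature above 1 flattens it (more surprising characters). A toy check:
#
#     toy = np.array([0.1, 0.2, 0.7])
#     sharp = np.exp(np.log(toy) / 0.2)
#     sharp / sharp.sum()   # -> roughly [0.000, 0.002, 0.998]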
|
ajtowns/bitcoin
|
test/functional/feature_anchors.py
|
Python
|
mit
| 2,924 | 0.000684 |
#!/usr/bin/env python3
# Copyright (c) 2020-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test block-relay-only anchors functionality"""
import os
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import check_node_connections
INBOUND_CONNECTIONS = 5
BLOCK_RELAY_CONNECTIONS = 2
class AnchorsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.disable_autoconnect = False
def run_test(self):
node_anchors_path = os.path.join(
self.nodes[0].datadir, "regtest", "anchors.dat"
)
self.log.info("When node starts, check if anchors.dat doesn't exist")
assert not os.path.exists(node_anchors_path)
self.log.info(f"Add {BLOCK_RELAY_CONNECTIONS} block-relay-only connections to node")
for i in range(BLOCK_RELAY_CONNECTIONS):
self.log.debug(f"block-relay-only: {i}")
self.nodes[0].add_outbound_p2p_connection(
P2PInterface(), p2p_idx=i, connection_type="block-relay-only"
)
self.log.info(f"Add {INBOUND_CONNECTIONS} inbound connections to node")
for i in range(INBOUND_CONNECTIONS):
self.log.debug(f"inbound: {i}")
self.nodes[0].add_p2p_connection(P2PInterface())
self.log.info("Check node connections")
check_node_connections(node=self.nodes[0], num_in=5, num_out=2)
# 127.0.0.1
ip = "7f000001"
# Since the ip is always 127.0.0.1 for this case,
# we store only the port to identify the peers
block_relay_nodes_port = []
inbound_nodes_port = []
for p in self.nodes[0].getpeerinfo():
addr_split = p["addr"].split(":")
if p["connection_type"] == "block-relay-only":
block_relay_nodes_port.append(hex(int(addr_split[1]))[2:])
else:
inbound_nodes_port.append(hex(int(addr_split[1]))[2:])
        self.log.info("Stop node 0")
self.stop_node(0)
# It should contain only the block-relay-only addresses
self.log.info("Check the addresses in anchors.dat")
with open(node_anchors_path, "rb") as file_handler:
anchors = file_handler.read().hex()
for port in block_relay_nodes_port:
ip_port = ip + port
assert ip_port in anchors
for port in inbound_nodes_port:
ip_port = ip + port
assert ip_port not in anchors
self.log.info("Start node")
self.start_node(0)
self.log.info("When node starts, check if anchors.dat doesn't exist anymore")
assert not os.path.exists(node_anchors_path)
if __name__ == "__main__":
AnchorsTest().main()
|
ray-project/ray
|
python/ray/tests/test_node_manager.py
|
Python
|
apache-2.0
| 1,376 | 0.000727 |
import ray
from ray._private.test_utils import run_string_as_driver
# This tests the queue transitions for infeasible tasks. This has been an issue
# in the past, e.g., https://github.com/ray-project/ray/issues/3275.
def test_infeasible_tasks(ray_start_cluster):
cluster = ray_start_cluster
@ray.remote
def f():
return
cluster.add_node(resources={str(0): 100})
ray.init(address=cluster.address)
# Submit an infeasible task.
x_id = f._remote(args=[], kwargs={}, resources={str(1): 1})
# Add a node that makes the task feasible and make sure we can get the
# result.
cluster.add_node(resources={str(1): 100})
ray.get(x_id)
# Start a driver that submits an infeasible task and then let it exit.
driver_script = """
import ray
ray.init(address="{}")
@ray.remote(resources={})
def f():
{}pass # This is a weird hack to insert some blank space.
f.remote()
""".format(
cluster.address, "{str(2): 1}", " "
)
run_string_as_driver(driver_script)
    # Now add a new node that makes the task feasible.
cluster.add_node(resources={str(2): 100})
# Make sure we can still run tasks on all nodes.
ray.get([f._remote(args=[], kwargs={}, resources={str(i): 1}) for i in range(3)])
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
|
feer56/Kitsune1
|
scripts/fix_tb_basedon.py
|
Python
|
bsd-3-clause
| 1,520 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Finds revisions from the Thunderbird migration that don't have based_on
set correctly, and are still relavent, and fixes that.
Run this script like `./manage.py runscript fix_tb_basedon`.
"""
import sys
from traceback import print_exc
from django.db.models import Q
from kitsune.wiki.models import Document, Revision
def run():
try:
run_()
except Exception:
print_exc()
raise
class Progress():
def __init__(self, total):
self.current = 0
        self.total = total
def tick(self, incr=1):
self.current += incr
self.draw()
def draw(self):
self._wr('{0.current} / {0.total}\r'.format(self))
def _wr(self, s):
sys.stdout.write(s)
sys.stdout.flush()
def run_():
to_process = list(Document.objects.filter(
~Q(parent=None),
current_revision__based_on=None,
products__slug='thunderbird'))
if len(to_process) == 0:
print 'Nothing to do.'
prog = Progress(len(to_process))
for doc in to_process:
prog.tick()
oldest_parent_rev = (Revision.objects.filter(document=doc.parent)
.order_by('id')[0])
# It has localizations, clearly it should be localizable.
if not doc.parent.is_localizable:
doc.parent.is_localizable = True
doc.parent.save()
doc.current_revision.based_on = oldest_parent_rev
doc.current_revision.save()
|
adalke/rdkit
|
rdkit/sping/WX/__init__.py
|
Python
|
bsd-3-clause
| 20 | 0 |
from pidWX import *
|
mfraezz/osf.io
|
addons/wiki/utils.py
|
Python
|
apache-2.0
| 7,955 | 0.001383 |
# -*- coding: utf-8 -*-
import os
from future.moves.urllib.parse import quote
import uuid
import ssl
from pymongo import MongoClient
import requests
from django.apps import apps
from addons.wiki import settings as wiki_settings
from addons.wiki.exceptions import InvalidVersionError
from osf.utils.permissions import ADMIN, READ, WRITE
# MongoDB forbids field names that begin with "$" or contain ".". These
# utilities map to and from Mongo field names.
mongo_map = {
'.': '__!dot!__',
'$': '__!dollar!__',
}
def to_mongo(item):
for key, value in mongo_map.items():
item = item.replace(key, value)
return item
def to_mongo_key(item):
return to_mongo(item).strip().lower()
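# Illustrative examples (not part of the original module): to_mongo_key maps a
# wiki page name to a Mongo-safe, canonical key, e.g.
#   to_mongo('price.usd$')      -> 'price__!dot!__usd__!dollar!__'
#   to_mongo_key(' Home.Page ') -> 'home__!dot!__page'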
def generate_private_uuid(node, wname):
"""
Generate private uuid for internal use in sharejs namespacing.
Note that this will NEVER be passed to to the client or sharejs.
"""
private_uuid = str(uuid.uuid1())
wiki_key = to_mongo_key(wname)
node.wiki_private_uuids[wiki_key] = private_uuid
node.save()
return private_uuid
def get_sharejs_uuid(node, wname):
"""
Format private uuid into the form used in mongo and sharejs.
This includes node's primary ID to prevent fork namespace collision
"""
wiki_key = to_mongo_key(wname)
private_uuid = node.wiki_private_uuids.get(wiki_key)
return str(uuid.uuid5(
uuid.UUID(private_uuid),
str(node._id)
)) if private_uuid else None
def delete_share_doc(node, wname):
"""Deletes share document and removes namespace from model."""
db = share_db()
sharejs_uuid = get_sharejs_uuid(node, wname)
db['docs'].remove({'_id': sharejs_uuid})
db['docs_ops'].remove({'name': sharejs_uuid})
wiki_key = to_mongo_key(wname)
del node.wiki_private_uuids[wiki_key]
node.save()
def migrate_uuid(node, wname):
"""Migrates uuid to new namespace."""
db = share_db()
old_sharejs_uuid = get_sharejs_uuid(node, wname)
broadcast_to_sharejs('lock', old_sharejs_uuid)
generate_private_uuid(node, wname)
    new_sharejs_uuid = get_sharejs_uuid(node, wname)
    doc_item = db['docs'].find_one({'_id': old_sharejs_uuid})
if doc_item:
doc_item['_id'] = new_sharejs_uuid
db['docs'].insert(doc_item)
db['docs'].remove({'_id': old_sharejs_uuid})
ops_items = [item for item in db['docs_ops'].find({'name': old_sharejs_uuid})]
if ops_items:
for item in ops_items:
item['_id'] = item['_id'].replace(old_sharejs_uuid, new_sharejs_uuid)
item['name'] = new_sharejs_uuid
db['docs_ops'].insert(ops_items)
db['docs_ops'].remove({'name': old_sharejs_uuid})
write_contributors = [
user._id for user in node.contributors
if node.has_permission(user, WRITE)
]
broadcast_to_sharejs('unlock', old_sharejs_uuid, data=write_contributors)
def share_db():
"""Generate db client for sharejs db"""
client = MongoClient(wiki_settings.SHAREJS_DB_URL, ssl_cert_reqs=ssl.CERT_NONE)
return client[wiki_settings.SHAREJS_DB_NAME]
def get_sharejs_content(node, wname):
db = share_db()
sharejs_uuid = get_sharejs_uuid(node, wname)
doc_item = db['docs'].find_one({'_id': sharejs_uuid})
return doc_item['_data'] if doc_item else ''
def broadcast_to_sharejs(action, sharejs_uuid, node=None, wiki_name='home', data=None):
"""
Broadcast an action to all documents connected to a wiki.
Actions include 'lock', 'unlock', 'redirect', and 'delete'
'redirect' and 'delete' both require a node to be specified
'unlock' requires data to be a list of contributors with write permission
"""
url = 'http://{host}:{port}/{action}/{id}/'.format(
host=wiki_settings.SHAREJS_HOST,
port=wiki_settings.SHAREJS_PORT,
action=action,
id=sharejs_uuid
)
if action == 'redirect' or action == 'delete':
redirect_url = quote(
node.web_url_for('project_wiki_view', wname=wiki_name, _guid=True),
safe='',
)
url = os.path.join(url, redirect_url)
try:
requests.post(url, json=data)
except requests.ConnectionError:
pass # Assume sharejs is not online
def format_wiki_version(version, num_versions, allow_preview):
"""
:param str version: 'preview', 'current', 'previous', '1', '2', ...
:param int num_versions:
:param allow_preview: True if view, False if compare
"""
if not version:
return
if version.isdigit():
version = int(version)
if version > num_versions or version < 1:
raise InvalidVersionError
elif version == num_versions:
return 'current'
elif version == num_versions - 1:
return 'previous'
elif version != 'current' and version != 'previous':
if allow_preview and version == 'preview':
return version
raise InvalidVersionError
elif version == 'previous' and num_versions == 0:
raise InvalidVersionError
return version
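# Illustrative examples (not part of the original module), with num_versions=5:
#   format_wiki_version('5', 5, True)        -> 'current'
#   format_wiki_version('4', 5, True)        -> 'previous'
#   format_wiki_version('2', 5, True)        -> 2
#   format_wiki_version('preview', 5, True)  -> 'preview'
#   format_wiki_version('preview', 5, False) raises InvalidVersionError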
def serialize_wiki_settings(user, nodes):
""" Format wiki data for project settings page
:param user: modular odm User object
:param nodes: list of parent project nodes
:return: treebeard-formatted data
"""
WikiPage = apps.get_model('addons_wiki.WikiPage')
items = []
for node in nodes:
assert node, '{} is not a valid Node.'.format(node._id)
can_read = node.has_permission(user, READ)
is_admin = node.has_permission(user, ADMIN)
include_wiki_settings = WikiPage.objects.include_wiki_settings(node)
if not include_wiki_settings:
continue
children = node.get_nodes(**{'is_deleted': False, 'is_node_link': False})
children_tree = []
wiki = node.get_addon('wiki')
if wiki:
children_tree.append({
'select': {
'title': 'permission',
'permission':
'public'
if wiki.is_publicly_editable
else 'private'
},
})
children_tree.extend(serialize_wiki_settings(user, children))
item = {
'node': {
'id': node._id,
'url': node.url if can_read else '',
'title': node.title if can_read else 'Private Project',
'is_public': node.is_public
},
'children': children_tree,
'kind': 'folder' if not node.parent_node or not node.parent_node.has_permission(user, READ) else 'node',
'nodeType': node.project_or_component,
'category': node.category,
'permissions': {
'view': can_read,
'admin': is_admin,
},
}
items.append(item)
return items
def serialize_wiki_widget(node):
from addons.wiki.models import WikiVersion
wiki = node.get_addon('wiki')
wiki_version = WikiVersion.objects.get_for_node(node, 'home')
# Show "Read more" link if there are multiple pages or has > 400 characters
more = node.wikis.filter(deleted__isnull=True).count() >= 2
MAX_DISPLAY_LENGTH = 400
rendered_before_update = False
if wiki_version and wiki_version.content:
if len(wiki_version.content) > MAX_DISPLAY_LENGTH:
more = True
rendered_before_update = wiki_version.rendered_before_update
# Content fetched and rendered by front-end
wiki_html = None
wiki_widget_data = {
'complete': True,
'wiki_content': wiki_html if wiki_html else None,
'wiki_content_url': node.api_url_for('wiki_page_content', wname='home'),
'rendered_before_update': rendered_before_update,
'more': more,
'include': False,
}
wiki_widget_data.update(wiki.config.to_json())
return wiki_widget_data
|
WebCampZg/conference-web
|
cfp/migrations/0005_auto_20150319_0019.py
|
Python
|
bsd-3-clause
| 767 | 0.002608 |
# -*- coding: utf-8 -*-
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('cfp', '0004_paperapplication_duration'),
]
operations = [
migrations.AlterField(
model_name='applicant',
            name='user',
field=models.OneToOneField(related_name='applicant', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE),
preserve_default=True,
),
migrations.AlterField(
model_name='paperapplication',
name='applicant',
            field=models.ForeignKey(related_name='applications', to='cfp.Applicant', on_delete=models.CASCADE),
preserve_default=True,
),
]
|
simleo/pydoop
|
examples/self_contained/vowelcount/__init__.py
|
Python
|
apache-2.0
| 850 | 0 |
# BEGIN_COPYRIGHT
#
# Copyright 2009-2021 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
"""
A trivial MapReduce application that counts the occurrence of each
vowel in a text input stream. It is more structured than would be
necessary because we want to test automatic distribution of a package
rather than a single module.
"""
|
catapult-project/catapult
|
third_party/gsutil/third_party/pyasn1/pyasn1/debug.py
|
Python
|
bsd-3-clause
| 3,361 | 0.000595 |
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pyasn1/license.html
#
import logging
from pyasn1 import __version__
from pyasn1 import error
from pyasn1.compat.octets import octs2ints
__all__ = ['Debug', 'setLogger', 'hexdump']
flagNone = 0x0000
flagEncoder = 0x0001
flagDecoder = 0x0002
flagAll = 0xffff
flagMap = {
'none': flagNone,
'encoder': flagEncoder,
'decoder': flagDecoder,
'all': flagAll
}
class Printer(object):
# noinspection PyShadowingNames
def __init__(self, logger=None, handler=None, formatter=None):
if logger is None:
logger = logging.getLogger('pyasn1')
logger.setLevel(logging.DEBUG)
if handler is None:
handler = logging.StreamHandler()
if formatter is None:
formatter = logging.Formatter('%(asctime)s %(name)s: %(message)s')
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
self.__logger = logger
def __call__(self, msg):
self.__logger.debug(msg)
def __str__(self):
return '<python logging>'
if hasattr(logging, 'NullHandler'):
NullHandler = logging.NullHandler
else:
# Python 2.6 and older
    class NullHandler(logging.Handler):
def emit(self, record):
pass
class Debug(object):
defaultPrinter = Printer()
def __init__(self, *flags, **options):
self._flags = flagNone
if 'loggerName' in options:
# route our logs to parent logger
self._printer = Printer(
logger=logging.getLogger(options['loggerName']),
handler=NullHandler()
)
elif 'printer' in options:
self._printer = options.get('printer')
else:
self._printer = self.defaultPrinter
self._printer('running pyasn1 %s, debug flags %s' % (__version__, ', '.join(flags)))
for flag in flags:
inverse = flag and flag[0] in ('!', '~')
if inverse:
flag = flag[1:]
try:
if inverse:
self._flags &= ~flagMap[flag]
else:
self._flags |= flagMap[flag]
except KeyError:
raise error.PyAsn1Error('bad debug flag %s' % flag)
self._printer("debug category '%s' %s" % (flag, inverse and 'disabled' or 'enabled'))
def __str__(self):
return 'logger %s, flags %x' % (self._printer, self._flags)
def __call__(self, msg):
self._printer(msg)
def __and__(self, flag):
return self._flags & flag
def __rand__(self, flag):
return flag & self._flags
logger = 0
def setLogger(userLogger):
global logger
if userLogger:
logger = userLogger
else:
logger = 0
def hexdump(octets):
return ' '.join(
['%s%.2X' % (n % 16 == 0 and ('\n%.5d: ' % n) or '', x)
for n, x in zip(range(len(octets)), octs2ints(octets))]
)
class Scope(object):
def __init__(self):
self._list = []
def __str__(self): return '.'.join(self._list)
def push(self, token):
self._list.append(token)
def pop(self):
return self._list.pop()
scope = Scope()
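# --- Illustrative usage (not part of the original module) ---
# Applications typically enable tracing by installing a Debug instance and turn
# it back off by passing a falsy value; hexdump() formats raw octets for the
# resulting log lines (16 bytes per row):
#
#     setLogger(Debug('decoder'))                          # decoder events only
#     setLogger(Debug('all', loggerName='myapp.pyasn1'))   # route into app logger
#     setLogger(0)                                         # disable again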
|
bobmyhill/burnman
|
examples/example_perplex.py
|
Python
|
gpl-2.0
| 2,092 | 0.000478 |
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for
# the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
example_perplex
---------------
This minimal example demonstrates how burnman can be used
to read and interrogate a PerpleX tab file
(as produced by burnman/misc/create_burnman_readable_perplex_table.py
It also demonstrates how we can smooth a given property on a given P-T grid.
*Uses:*
* :doc:`PerplexMaterial`
* :func:`burnman.Material.evaluate`
* :func:`burnman.tools.math.smooth_array`
*Demonstrates:*
* Use of PerplexMaterial
* Smoothing gridded properties
"""
import numpy as np
import matplotlib.pyplot as plt
import burnman
from burnman.tools.math import smooth_array
if __name__ == "__main__":
rock = burnman.PerplexMaterial('../burnman/data/input_perplex/in23_1.tab')
P = 1.e9
T = 1650.
rock.set_state(P, T)
print('P: {0:.1f} GPa, T: {1:.1f} K, density: {2:.1f} kg/m^3'.format(P/1.e9, T, rock.rho))
pressures = np.linspace(10.e9, 25.e9, 151)
temperatures = [T] * len(pressures)
densities = rock.evaluate(['rho'], pressures, temperatures)[0]
plt.plot(pressures/1.e9, densities)
plt.xlabel('Pressure (GPa)')
plt.ylabel('Density (kg/m^3)')
plt.show()
    pressures = np.linspace(10.e9, 25.e9, 151)
temperatures = np.linspace(1600., 1800., 3)
T = 1650.
entropies = rock.evaluate(['S'], pressures,
np.array([T] * len(pressures)))[0]
smoothed_entropies = smooth_array(array=entropies,
                                      grid_spacing=np.array([pressures[1]
- pressures[0]]),
gaussian_rms_widths=np.array([5.e8]))
plt.plot(pressures/1.e9, entropies, label='entropies')
plt.plot(pressures/1.e9, smoothed_entropies, label='smoothed entropies')
plt.xlabel('Pressure (GPa)')
plt.ylabel('Entropy (J/K/mol)')
plt.legend(loc='upper right')
plt.show()
|
FrancoisRheaultUS/dipy
|
dipy/reconst/tests/test_ivim.py
|
Python
|
bsd-3-clause
| 19,029 | 0 |
"""
Testing the Intravoxel incoherent motion module
The values of the various parameters used in the tests are inspired by
the study of the IVIM model applied to MR images of the brain by
Federau, Christian, et al. [1].
References
----------
.. [1] Federau, Christian, et al. "Quantitative measurement
of brain perfusion with intravoxel incoherent motion
MR imaging." Radiology 265.3 (2012): 874-881.
"""
import warnings
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises, assert_array_less, run_module_suite,
assert_, assert_equal)
from dipy.testing import assert_greater_equal
import pytest
from dipy.reconst.ivim import ivim_prediction, IvimModel
from dipy.core.gradients import gradient_table, generate_bvecs
from dipy.sims.voxel import multi_tensor
from dipy.utils.optpkg import optional_package
cvxpy, have_cvxpy, _ = optional_package("cvxpy")
needs_cvxpy = pytest.mark.skipif(not have_cvxpy, reason="REQUIRES CVXPY")
def setup_module():
global gtab, ivim_fit_single, ivim_model_trr, data_single, params_trr, \
data_multi, ivim_params_trr, D_star, D, f, S0, gtab_with_multiple_b0, \
noisy_single, mevals, gtab_no_b0, ivim_fit_multi, ivim_model_VP, \
f_VP, D_star_VP, D_VP, params_VP
# Let us generate some data for testing.
bvals = np.array([0., 10., 20., 30., 40., 60., 80., 100.,
120., 140., 160., 180., 200., 300., 400.,
500., 600., 700., 800., 900., 1000.])
N = len(bvals)
bvecs = generate_bvecs(N)
gtab = gradient_table(bvals, bvecs.T, b0_threshold=0)
S0, f, D_star, D = 1000.0, 0.132, 0.00885, 0.000921
# params for a single voxel
params_trr = np.array([S0, f, D_star, D])
mevals = np.array(([D_star, D_star, D_star], [D, D, D]))
# This gives an isotropic signal.
signal = multi_tensor(gtab, mevals, snr=None, S0=S0,
fractions=[f * 100, 100 * (1 - f)])
# Single voxel data
data_single = signal[0]
data_multi = np.zeros((2, 2, 1, len(gtab.bvals)))
data_multi[0, 0, 0] = data_multi[0, 1, 0] = data_multi[
1, 0, 0] = data_multi[1, 1, 0] = data_single
ivim_params_trr = np.zeros((2, 2, 1, 4))
ivim_params_trr[0, 0, 0] = ivim_params_trr[0, 1, 0] = params_trr
ivim_params_trr[1, 0, 0] = ivim_params_trr[1, 1, 0] = params_trr
ivim_model_trr = IvimModel(gtab, fit_method='trr')
ivim_model_one_stage = IvimModel(gtab, fit_method='trr')
ivim_fit_single = ivim_model_trr.fit(data_single)
ivim_fit_multi = ivim_model_trr.fit(data_multi)
ivim_model_one_stage.fit(data_single)
ivim_model_one_stage.fit(data_multi)
bvals_no_b0 = np.array([5., 10., 20., 30., 40., 60., 80., 100.,
120., 140., 160., 180., 200., 300., 400.,
500., 600., 700., 800., 900., 1000.])
_ = generate_bvecs(N) # bvecs_no_b0
gtab_no_b0 = gradient_table(bvals_no_b0, bvecs.T, b0_threshold=0)
bvals_with_multiple_b0 = np.array([0., 0., 0., 0., 40., 60., 80., 100.,
120., 140., 160., 180., 200., 300.,
400., 500., 600., 700., 800., 900.,
1000.])
bvecs_with_multiple_b0 = generate_bvecs(N)
gtab_with_multiple_b0 = gradient_table(bvals_with_multiple_b0,
bvecs_with_multiple_b0.T,
b0_threshold=0)
noisy_single = np.array([4243.71728516, 4317.81298828, 4244.35693359,
4439.36816406, 4420.06201172, 4152.30078125,
4114.34912109, 4104.59375, 4151.61914062,
4003.58374023, 4013.68408203, 3906.39428711,
3909.06079102, 3495.27197266, 3402.57006836,
3163.10180664, 2896.04003906, 2663.7253418,
2614.87695312, 2316.55371094, 2267.7722168])
noisy_multi = np.zeros((2, 2, 1, len(gtab.bvals)))
noisy_multi[0, 1, 0] = noisy_multi[
1, 0, 0] = noisy_multi[1, 1, 0] = noisy_single
noisy_multi[0, 0, 0] = data_single
ivim_model_VP = IvimModel(gtab, fit_method='VarPro')
f_VP, D_star_VP, D_VP = 0.13, 0.0088, 0.000921
# params for a single voxel
params_VP = np.array([f, D_star, D])
ivim_params_VP = np.zeros((2, 2, 1, 3))
ivim_params_VP[0, 0, 0] = ivim_params_VP[0, 1, 0] = params_VP
ivim_params_VP[1, 0, 0] = ivim_params_VP[1, 1, 0] = params_VP
def single_exponential(S0, D, bvals):
return S0 * np.exp(-bvals * D)
def test_single_voxel_fit():
"""
Test the implementation of the fitting for a single voxel.
Here, we will use the multi_tensor function to generate a
bi-exponential signal. The multi_tensor generates a multi
tensor signal and expects eigenvalues of each tensor in mevals.
    Our basic test requires a scalar isotropic signal and
hence we set the same eigenvalue in all three directions to
generate the required signal.
The bvals, f, D_star and D are inspired from the paper by
Federau, Christian, et al. We use the function "generate_bvecs"
to simulate bvectors corresponding to the bvalues.
In the two stage fitting routine, initially we fit the signal
values for bvals less than the specified split_b using the
    TensorModel and get an initial guess for f and D. Then, using
these parameters we fit the entire data for all bvalues.
"""
est_signal = ivim_prediction(ivim_fit_single.model_params, gtab)
assert_array_equal(est_signal.shape, data_single.shape)
assert_array_almost_equal(ivim_fit_single.model_params, params_trr)
assert_array_almost_equal(est_signal, data_single)
# Test predict function for single voxel
p = ivim_fit_single.predict(gtab)
assert_array_equal(p.shape, data_single.shape)
assert_array_almost_equal(p, data_single)
def test_multivoxel():
"""Test fitting with multivoxel data.
    We generate a multivoxel signal to test the fitting for multivoxel data.
This is to ensure that the fitting routine takes care of signals packed as
1D, 2D or 3D arrays.
"""
ivim_fit_multi = ivim_model_trr.fit(data_multi)
est_signal = ivim_fit_multi.predict(gtab, S0=1.)
assert_array_equal(est_signal.shape, data_multi.shape)
assert_array_almost_equal(ivim_fit_multi.model_params, ivim_params_trr)
assert_array_almost_equal(est_signal, data_multi)
def test_ivim_errors():
"""
Test if errors raised in the module are working correctly.
    Scipy introduced bounded least-squares fitting in version 0.17;
    it is not supported by older versions. Initializing an IvimModel
with bounds for older Scipy versions should raise an error.
"""
ivim_model_trr = IvimModel(gtab, bounds=([0., 0., 0., 0.],
[np.inf, 1., 1., 1.]),
fit_method='trr')
ivim_fit = ivim_model_trr.fit(data_multi)
est_signal = ivim_fit.predict(gtab, S0=1.)
assert_array_equal(est_signal.shape, data_multi.shape)
assert_array_almost_equal(ivim_fit.model_params, ivim_params_trr)
assert_array_almost_equal(est_signal, data_multi)
def test_mask():
"""
    Test whether setting an incorrect mask raises an error
"""
mask_correct = data_multi[..., 0] > 0.2
mask_not_correct = np.array([[False, True, False], [True, False]],
dtype=np.bool)
ivim_fit = ivim_model_trr.fit(data_multi, mask_correct)
est_signal = ivim_fit.predict(gtab, S0=1.)
assert_array_equal(est_signal.shape, data_multi.shape)
assert_array_almost_equal(est_signal, data_multi)
assert_array_almost_equal(ivim_fit.model_params, ivim_params_trr)
assert_raises(ValueError, ivim_model_trr.fit, data_multi,
mask=mask_not_correct)
def test_with_higher_S0():
"""
Test whether fitting works for S0 > 1.
"""
# params for a single voxel
S0_2 = 1000.
pa
|
SalesforceFoundation/CumulusCI
|
cumulusci/core/source/github.py
|
Python
|
bsd-3-clause
| 4,708 | 0.000637 |
import os
import shutil
from cumulusci.core.exceptions import DependencyResolutionError
from cumulusci.core.github import get_github_api_for_repo
from cumulusci.core.github import find_latest_release
from cumulusci.core.github import find_previous_release
from cumulusci.utils import download_extract_github
class GitHubSource:
def __init__(self, project_config, spec):
self.project_config = project_config
self.spec = spec
self.url = spec["github"]
if self.url.endswith(".git"):
self.url = self.url[:-4]
repo_owner, repo_name = self.url.split("/")[-2:]
self.repo_owner = repo_owner
self.repo_name = repo_name
self.gh = get_github_api_for_repo(
project_config.keychain, repo_owner, repo_name
)
self.repo = self.gh.repository(self.repo_owner, self.repo_name)
self.resolve()
def __repr__(self):
return f"<GitHubSource {str(self)}>"
def __str__(self):
s = f"GitHub: {self.repo_owner}/{self.repo_name}"
if self.description:
s += f" @ {self.description}"
if self.commit != self.description:
s += f" ({self.commit})"
return s
def __hash__(self):
return hash((self.url, self.commit))
def resolve(self):
"""Resolve a github source into a specific commit.
The spec must include:
- github: the URL of the github repository
        The spec may include one of:
- commit: a commit hash
- ref: a git ref
- branch: a git branch
- tag: a git tag
- release: "latest" | "previous" | "latest_beta"
If none of these are specified, CumulusCI will look for the latest release.
If there is no release, it will use the default branch.
"""
ref = None
if "commit" in self.spec:
self.commit = self.description = self.spec["commit"]
return
elif "ref" in self.spec:
ref = self.spec["ref"]
elif "tag" in self.spec:
ref = "tags/" + self.spec["tag"]
elif "branch" in self.spec:
ref = "heads/" + self.spec["branch"]
elif "release" in self.spec:
release_spec = self.spec["release"]
if release_spec == "latest":
release = find_latest_release(self.repo, include_beta=False)
elif release_spec == "latest_beta":
release = find_latest_release(self.repo, include_beta=True)
elif release_spec == "previous":
release = find_previous_release(self.repo)
else:
raise DependencyResolutionError(f"Unknown release: {release_spec}")
if release is None:
raise DependencyResolutionError(
f"Could not find release: {release_spec}"
)
ref = "tags/" + release.tag_name
if ref is None:
release = find_latest_release(self.repo, include_beta=False)
if release:
ref = "tags/" + release.tag_name
else:
ref = "heads/" + self.repo.default_branch
self.description = ref[6:] if ref.startswith("heads/") else ref
self.commit = self.repo.ref(ref).object.sha
def fetch(self, path=None):
"""Fetch the archive of the specified commit and construct its project config."""
# To do: copy this from a shared cache
if path is None:
path = (
self.project_config.cache_dir
/ "projects"
/ self.repo_name
/ self.commit
)
if not path.exists():
path.mkdir(parents=True)
zf = download_extract_github(
self.gh, self.repo_owner, self.repo_name, ref=self.commit
)
try:
zf.extractall(path)
except Exception:
# make sure we don't leave an incomplete cache
shutil.rmtree(path)
raise
assert path.is_dir()
project_config = self.project_config.construct_subproject_config(
repo_info={
"root": os.path.realpath(path),
"owner": self.repo_owner,
"name": self.repo_name,
"url": self.url,
"commit": self.commit,
}
)
return project_config
@property
def frozenspec(self):
"""Return a spec to reconstruct this source at the current commit"""
return {
"github": self.url,
"commit": self.commit,
"description": self.description,
}
|
doerlbh/Indie-nextflu
|
augur/scratch/fitness_nonepitope.py
|
Python
|
agpl-3.0
| 2,409 | 0.027397 |
# assign epitope fitness to each node in the phylogeny
import time
from io_util import *
from tree_util import *
from date_util import *
from seq_util import *
import numpy as np
from itertools import izip
from collections import defaultdict
def append_nonepitope_sites(viruses):
for virus in viruses:
sites_ne = nonepitope_sites(virus['seq'])
virus['sites_ne'] = sites_ne
def remove_nonepitope_sites(viruses):
for virus in viruses:
virus.pop("sites_ne", None)
def remove_nonepitope_distances(viruses):
for virus in viruses:
virus.pop("distance_ne", None)
def most_frequent(char_list):
d = defaultdict(int)
for i in char_list:
d[i] += 1
return sorted(d.iteritems(), key=lambda x: x[1], reverse=True)[0][0]
def consensus_nonepitope(viruses):
"""Return consensus non-epitope sequence"""
consensus = ""
length = len(viruses[0]['sites_ne'])
for i in range(0, length):
column = [v['sites_ne'][i] for v in viruses]
consensus += most_frequent(column)
return consensus
def distance_to_consensus(virus, consensus_ne):
"""Return distance of virusA to virusB by comparing non-epitope sites"""
virus_ne = virus['sites_ne']
ne_distance = sum(a != b for a, b in izip(virus_ne, consensus_ne))
return ne_distance
def compute(viruses):
"""Append non-epitope distances to each virus"""
print "Computing epitope distances"
consensus = consensus_nonepitope(viruses)
for virus in viruses:
distance = distance_to_consensus(virus, consensus)
virus['distance_ne'] = distance
print virus['strain'] + ": " + str(virus['distance_ne'])
def normalize(viruses):
"""Normalizing non-epitope distances to give non-epitope fitness"""
print "Normalizing non-epitope distances"
distances = [v['distance_ne'] for v in viruses]
mean = np.mean(distances)
sd = np.std(distances)
for virus in viruses:
virus['fitness_ne'] = -1 * ( ( virus['distance_ne'] - mean) / sd )
print virus['strain'] + ": " + str(virus['fitness_ne'])
def main(in_fname = None):
print "--- Non-epitope fitness at " + time.strftime("%H:%M:%S") + " ---"
if in_fname is None: in_fname='data/virus_epitope.json'
viruses = read_json(in_fname)
append_nonepitope_sites(viruses)
compute(viruses)
# normalize(viruses)
remove_nonepitope_sites(viruses)
# remove_nonepitope_distances(viruses)
out_fname = "data/virus_nonepitope.json"
write_json(viruses, out_fname)
return out_fname
if __name__ == "__main__":
main()
|
diego-d5000/MisValesMd
|
env/lib/python2.7/site-packages/django/contrib/gis/geos/base.py
|
Python
|
mit
| 1,714 | 0.000583 |
from ctypes import c_void_p
from django.contrib.gis.geos.error import GEOSException
# Trying to import GDAL libraries, if available. Have to place in
# try/except since this package may be used outside GeoDjango.
try:
from django.contrib.gis import gdal
except ImportError:
# A 'dummy' gdal module.
class GDALInfo(object):
HAS_GDAL = False
gdal = GDALInfo()
# NumPy supported?
try:
import numpy
except ImportError:
numpy = False
class GEOSBase(object):
"""
Base object for GEOS objects that has a pointer access property
that controls access to the underlying C pointer.
"""
# Initially the pointer is NULL.
_ptr = None
# Default allowed pointer type.
ptr_type = c_void_p
# Pointer access property.
def _get_ptr(self):
        # Raise an exception if the pointer isn't valid; we don't
        # want to be passing NULL pointers to routines --
        # that's very bad.
if self._ptr:
return self._ptr
else:
raise GEOSException('NULL GEOS %s pointer encountered.' % self.__class__.__name__)
def _set_ptr(self, ptr):
# Only allow the pointer to be set with pointers of the
# compatible type or None (NULL).
if ptr is None or isinstance(ptr, self.ptr_type):
self._ptr = ptr
else:
raise TypeError('Incompatible pointer type')
# Property for controlling access to the GEOS object pointers. Using
# this raises an exception when the pointer is NULL, thus preventing
# the C library from attempting to access an invalid memory location.
ptr = property(_get_ptr, _set_ptr)
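# Illustrative note (not part of the original module): a concrete GEOS wrapper
# would subclass GEOSBase and assign the C pointer obtained from the GEOS C API
# to `self.ptr`; any later read of `self.ptr` raises GEOSException if the
# underlying pointer was never set (i.e. is still NULL).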
|
lanpa/tensorboardX
|
tests/test_record_writer.py
|
Python
|
mit
| 1,257 | 0.000796 |
from tensorboardX import SummaryWriter
import unittest
from tensorboardX.record_writer import S3RecordWriter, make_valid_tf_name, GCSRecordWriter
import os
import boto3
from moto import mock_s3
os.environ.setdefault("AWS_ACCESS_KEY_ID", "foobar_key")
os.environ.setdefault("AWS_SECRET_ACCESS_KEY", "foobar_secret")
class RecordWriterTest(unittest.TestCase):
@mock_s3
def test_record_writer_s3(self):
client = boto3.client('s3', region_name='us-east-1')
client.create_bucket(Bucket='this')
writer = S3RecordWriter('s3://this/is/apen')
bucket, path = writer.bucket_and_path()
assert bucket == 'this'
assert path == 'is/apen'
writer.write(bytes(42))
writer.flush()
def test_make_valid_tf_name(self):
        newname = make_valid_tf_name('$ave/&sound')
assert newname == '._ave/_sound'
    def test_record_writer_gcs(self):
pass
# we don't have mock test, so expect error here. However,
# Travis CI env won't raise exception for the following code,
# so I commented it out.
# with self.assertRaises(Exception):
# writer = GCSRecordWriter('gs://this/is/apen')
# writer.write(bytes(42))
# writer.flush()
|
hpleva/pyemto
|
pyemto/system.py
|
Python
|
mit
| 148,431 | 0.003099 |
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 3 14:25:06 2014
@author: Matti Ropo
@author: Henrik Levämäki
"""
from __future__ import print_function
import time
import os
import sys
import numpy as np
import pyemto.common.common as common
class System:
"""The main class which provides the basis for the pyEMTO scripts.
Somewhere in the beginning of a pyEMTO script a new instance of
the system class should be created. All subsequent communication
with the newly created system should be through the class methods,
which are described below.
:param folder: Main folder where the input and output files will
be stored. Use of absolute paths is recommended
(Default value = current working directory)
:type folder: str
:param EMTOdir: Path to the folder of the EMTO installation.
This entry can and should be modified by the user
inside the System.__init__ function
(Default value = /home/user/EMTO5.8)
:type EMTOdir: str
:param xc: Choice for the xc-functional can be set here.
(Default value = PBE)
:type xc: str
:returns: None
:rtype: None
"""
def __init__(self, folder=None, EMTOdir=None, xc=None):
# Import necessary packages
from pyemto.latticeinputs.latticeinputs import Latticeinputs
from pyemto.emtoinputs.emtoinputs import Emtoinputs
# Check input arguments
if folder is None:
self.folder = os.getcwd() # Use current folder
else:
self.folder = folder
if EMTOdir is None:
self.EMTOdir = "/home/hpleva/EMTO5.8"
else:
self.EMTOdir = EMTOdir
# Initialize default parameters
self.ca_range_default = np.linspace(1.50, 1.70, 7)
self.elastic_constants_points = 6
self.elastic_constants_deltas = np.linspace(0.0, 0.05,
self.elastic_constants_points)
self.RyBohr3_to_GPa = 14710.5065722
self.kappaw_default = [0.0, -20.0]
self.hcpo_relax_points = 5
self.hcpm_relax_points = 5
if xc is None:
self.xc = 'PBE'
else:
self.xc = xc
# Create working folders
common.check_folders(self.folder + '/kgrn', self.folder + '/kgrn/tmp',
self.folder + '/kfcd', self.folder + '/fit')
# BMDL, KSTR, SHAPE, KGRN and KFCD class instances
self.lattice = Latticeinputs()
self.emto = Emtoinputs()
return
def bulk(self, jobname=None, lat=None, atoms=None, concs=None, splts=None, sws=None,
latname=None, latpath=None, emtopath=None, ibz=None, bmod=None, xc=None, ca=None,
**kwargs):
"""Initializes the basic parameters for bulk systems.
Basic information concerning the system,
such as the types of atoms and the crystal structure should be given to this function and
it should be called right after the class instance has been created.
:param jobname: Name of the system (Default value = None)
:type jobname:
:param lat: The type of lattice structure (Default value = None)
:type lat:
:param atoms: List of atoms in the system (Default value = None)
:type atoms:
:param concs: List of concentrations of the elements in the
'atoms' list. This information is only used in CPA
calculations (Default value = None)
:type concs:
:param splts: List of initial magnetic moments of the elements in the
'atoms' list (Default value = None)
:type splts:
:param sws: The Wigner-Seitz radius of the system (Default value = None)
:type sws: float
:param latname: The 'jobname' of the BMDL, KSTR and SHAPE output files. These
structure output files have to be located in the 'latpath'
directory and they have to be named jobname.extention
(Default value = None)
:type latname:
:param latpath: The absolute path to the folder where the 'bmdl', 'kstr' and 'shape'
folders are located, which in turn contain the output files of the
structure calculation (Default value = None)
:type latpath:
:param emtopath: The absolute path to the folder where the EMTO installation is
located (Default value = None)
:type emtopath:
:param ibz: The code number indicating the Bravais lattice that the crystal
structure of the system has. For a list of possible values, please consult the
EMTO manual (Default value = None)
:type ibz:
:param bmod: The bulk modulus can be inputed here and if it is given,
it will be used by the elastic modulus routines (Default value = None)
:type bmod:
:param xc: The choice of the xc-functional. If None, PBE will be used as default
(Default value = None)
:type xc:
:param ca: The c/a ratio of hcp structures can be inputed here and if it is given,
it will be used by the elastic modulus routines (Default value = None)
:type ca:
:param **kwargs: Arbitrary other KGRN and KFCD input parameters can be given here
as keyword arguments. They will be passed down to the
                         self.emto.set_values() function
:type **kwargs: str,int,float,list(str),list(int),list(float)
:returns: None
:rtype: None
"""
if lat is None:
sys.exit('System.bulk(): \'lat\' has to be given!')
else:
self.lat = lat
if latname is None:
self.latname = self.lat
else:
self.latname = latname
if latpath is None:
self.latpath = "./"
else:
self.latpath = latpath
if emtopath is None:
self.emtopath = self.folder
else:
self.emtopath = emtopath
if atoms is None:
sys.exit('System.bulk(): \'atoms\' has to be given!')
else:
self.atoms = atoms
if concs is None:
# Assume equal concentrations for each element
self.concs = np.zeros(len(atoms))
self.concs[:] = 1.0 / float(len(atoms))
else:
self.concs = concs
if splts is None:
self.splts = np.zeros(len(atoms))
else:
self.splts = np.asarray(splts)
if sws is None:
self.sws = 0.0
#sys.exit('System.bulk(): \'sws\' has to be given!')
else:
self.sws = sws
if jobname is None:
self.jobname, self.fulljobname = self.create_jobname()
else:
self.jobname = jobname
self.fulljobname = self.create_jobname(jobname)
if ibz is None:
self.ibz = common.lat_to_ibz(self.lat)
else:
self.ibz = ibz
# Knowledge of the c/a lattice parameter for hcp systems
if ca is not None:
self.ca = ca
else:
self.ca = None
# Knowledge of the xc-functional we want to use
if xc is None:
self.xc = 'PBE'
else:
self.xc = xc
# Knowledge of the value of the bulk modulus, which
# is mainly needed in the elastic constant functions
self.bmod = bmod
# hcp requires that we "double" the atoms array and
# create a non-trivial iqs-array because hcp has a
# non-trivial two-atom basis.
if self.lat == 'hcp':
self.atoms = np.array([self.atoms, self.atoms]).flatten()
if concs is None:
self.concs = np.zeros(len(self.atoms))
self.concs[:] = 2.0 / float(len(self.atoms))
else:
self.concs = np.array([self.concs, self.concs]).flatten()
self.iqs = n
|
jefftc/changlab
|
Betsy/Betsy/modules/plot_prediction.py
|
Python
|
mit
| 2,613 | 0.003827 |
from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, antecedents, out_attributes, user_options, num_cores,
outfile):
from genomicode import mplgraph
from genomicode import filelib
in_data = antecedents
matrix = [x for x in filelib.read_cols(in_data.identifier)]
header = matrix[0]
index = header.index('Confidence')
matrix = matrix[1:]
confidence = [float(i[index]) for i in matrix]
sample = [i[0] for i in matrix]
if confidence == [''] * len(matrix) or 'Correct?' in header:
            index = header.index('Predicted_class')
class_value = [i[index] for i in matrix]
label_dict = dict()
label_list = []
i = -1
for label in class_value:
                if label not in label_dict.keys():
i = i + 1
label_dict[label] = i
label_list.append(label_dict[label])
yticks = label_dict.keys()
ytick_pos = [label_dict[i] for i in label_dict.keys()]
fig = mplgraph.barplot(label_list,
box_label=sample,
ylim=(-0.5, 1.5),
ytick_pos=ytick_pos,
yticks=yticks,
xtick_rotation='vertical',
ylabel='Prediction',
xlabel='Sample')
fig.savefig(outfile)
else:
fig = mplgraph.barplot(confidence,
box_label=sample,
ylim=(-1.5, 1.5),
xtick_rotation='vertical',
ylabel='Prediction',
xlabel='Sample')
fig.savefig(outfile)
assert filelib.exists_nz(outfile), (
'the output file %s for plot_prediction_bar fails' % outfile
)
def name_outfile(self, antecedents, user_options):
from Betsy import module_utils
original_file = module_utils.get_inputid(antecedents.identifier)
loocv = ''
if antecedents.data.attributes['loocv'] == 'yes':
loocv = 'loocv'
filename = ('prediction_' + original_file + '_' +
antecedents.data.attributes['classify_alg'] + loocv + '.png')
return filename
|
iulian787/spack
|
var/spack/repos/builtin/packages/perl-config-general/package.py
|
Python
|
lgpl-2.1
| 557 | 0.005386 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlConfigGeneral(PerlPackage):
"""Config::General - Generic Config Module"""
homepage = "https://metacpan.org/pod/Config::General"
url = "https://cpan.metacpan.org/authors/id/T/TL/TLINDEN/Config-General-2.63.tar.gz"
    version('2.63', sha256='0a9bf977b8aabe76343e88095d2296c8a422410fd2a05a1901f2b20e2e1f6fad')
|
david2777/DavidsTools
|
Standalone/openPathTool/UI_openPathTool.py
|
Python
|
gpl-2.0
| 3,656 | 0.003282 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'openPathTool.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(457, 95)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.formLayout = QtGui.QFormLayout(self.centralwidget)
self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout.setObjectName(_fromUtf8("formLayout"))
self.pathInLineEdit = QtGui.QLineEdit(self.centralwidget)
        self.pathInLineEdit.setObjectName(_fromUtf8("pathInLineEdit"))
self.formLayout.setWidget(0, QtGui.QFormLayout.SpanningRole, self.pathInLineEdit)
self.pathOutLineEdit = QtGui.QLineEdit(self.centralwidget)
        self.pathOutLineEdit.setReadOnly(True)
self.pathOutLineEdit.setObjectName(_fromUtf8("pathOutLineEdit"))
self.formLayout.setWidget(1, QtGui.QFormLayout.SpanningRole, self.pathOutLineEdit)
self.buttonLayout = QtGui.QHBoxLayout()
self.buttonLayout.setObjectName(_fromUtf8("buttonLayout"))
self.explorerButton = QtGui.QPushButton(self.centralwidget)
self.explorerButton.setObjectName(_fromUtf8("explorerButton"))
self.buttonLayout.addWidget(self.explorerButton)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.buttonLayout.addItem(spacerItem)
self.convertButton = QtGui.QPushButton(self.centralwidget)
self.convertButton.setObjectName(_fromUtf8("convertButton"))
self.buttonLayout.addWidget(self.convertButton)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.buttonLayout.addItem(spacerItem1)
self.closeButton = QtGui.QPushButton(self.centralwidget)
self.closeButton.setObjectName(_fromUtf8("closeButton"))
self.buttonLayout.addWidget(self.closeButton)
self.formLayout.setLayout(2, QtGui.QFormLayout.SpanningRole, self.buttonLayout)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
self.pathInLineEdit.setPlaceholderText(_translate("MainWindow", "Input Path", None))
self.pathOutLineEdit.setPlaceholderText(_translate("MainWindow", "Output Path", None))
self.explorerButton.setText(_translate("MainWindow", "Open In Explorer", None))
self.convertButton.setText(_translate("MainWindow", "Convert", None))
self.closeButton.setText(_translate("MainWindow", "Close", None))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
MainWindow = QtGui.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
tensorflow/probability
|
tensorflow_probability/python/sts/components/autoregressive_test.py
|
Python
|
apache-2.0
| 6,809 | 0.004112 |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Autoregressive State Space Model Tests."""
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.sts import AutoregressiveStateSpaceModel
from tensorflow_probability.python.sts import LocalLevelStateSpaceModel
def ar_explicit_logp(y, coefs, level_scale):
"""Manual log-prob computation for an autoregressive process."""
num_coefs = len(coefs)
lp = 0.
# For the first few steps of y, where previous values
# are not available, we model them as zero-mean with
# stddev `prior_scale`.
for i in range(num_coefs):
zero_padded_y = np.zeros([num_coefs])
zero_padded_y[num_coefs - i:num_coefs] = y[:i]
pred_y = np.dot(zero_padded_y, coefs[::-1])
lp += tfd.Normal(pred_y, level_scale).log_prob(y[i])
for i in range(num_coefs, len(y)):
pred_y = np.dot(y[i - num_coefs:i], coefs[::-1])
lp += tfd.Normal(pred_y, level_scale).log_prob(y[i])
return lp
class _AutoregressiveStateSpaceModelTest(test_util.TestCase):
def testEqualsLocalLevel(self):
# An AR1 process with coef 1 is just a random walk, equivalent to a local
# level model. Test that both models define the same distribution
# (log-prob).
num_timesteps = 10
observed_time_series = self._build_placeholder(
np.random.randn(num_timesteps, 1))
level_scale = self._build_placeholder(0.1)
# We'll test an AR1 process, and also (just for kicks) that the trivial
# embedding as an AR2 process gives the same model.
coefficients_order1 = np.array([1.]).astype(self.dtype)
coefficients_order2 = np.array([1., 0.]).astype(self.dtype)
    ar1_ssm = AutoregressiveStateSpaceModel(
num_timesteps=num_timesteps,
coefficients=coefficients_order1,
level_scale=level_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=[level_scale]))
ar2_ssm = AutoregressiveStateSpaceModel(
num_timesteps=num_timesteps,
coefficients=coefficients_order2,
level_scale=level_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=[level_scale, 1.]))
local_level_ssm = LocalLevelStateSpaceModel(
num_timesteps=num_timesteps,
level_scale=level_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=[level_scale]))
ar1_lp, ar2_lp, ll_lp = self.evaluate(
(ar1_ssm.log_prob(observed_time_series),
ar2_ssm.log_prob(observed_time_series),
local_level_ssm.log_prob(observed_time_series)))
self.assertAllClose(ar1_lp, ll_lp)
self.assertAllClose(ar2_lp, ll_lp)
def testLogprobCorrectness(self):
# Compare the state-space model's log-prob to an explicit implementation.
num_timesteps = 10
observed_time_series_ = np.random.randn(num_timesteps)
coefficients_ = np.array([.7, -.1]).astype(self.dtype)
level_scale_ = 1.0
observed_time_series = self._build_placeholder(observed_time_series_)
level_scale = self._build_placeholder(level_scale_)
expected_logp = ar_explicit_logp(
observed_time_series_, coefficients_, level_scale_)
ssm = AutoregressiveStateSpaceModel(
num_timesteps=num_timesteps,
coefficients=coefficients_,
level_scale=level_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=[level_scale, 0.]))
lp = ssm.log_prob(observed_time_series[..., tf.newaxis])
self.assertAllClose(self.evaluate(lp), expected_logp)
def testBatchShape(self):
seed = test_util.test_seed(sampler_type='stateless')
# Check that the model builds with batches of parameters.
order = 3
batch_shape = [4, 2]
# No `_build_placeholder`, because coefficients must have static shape.
coefficients = np.random.randn(*(batch_shape + [order])).astype(self.dtype)
level_scale = self._build_placeholder(
np.exp(np.random.randn(*batch_shape)))
ssm = AutoregressiveStateSpaceModel(
num_timesteps=10,
coefficients=coefficients,
level_scale=level_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=self._build_placeholder(np.ones([order]))))
if self.use_static_shape:
self.assertAllEqual(
tensorshape_util.as_list(ssm.batch_shape), batch_shape)
else:
self.assertAllEqual(self.evaluate(ssm.batch_shape_tensor()), batch_shape)
y = ssm.sample(seed=seed)
if self.use_static_shape:
self.assertAllEqual(tensorshape_util.as_list(y.shape)[:-2], batch_shape)
else:
self.assertAllEqual(self.evaluate(tf.shape(y))[:-2], batch_shape)
def _build_placeholder(self, ndarray):
"""Convert a numpy array to a TF placeholder.
Args:
ndarray: any object convertible to a numpy array via `np.asarray()`.
Returns:
placeholder: a TensorFlow `placeholder` with default value given by the
provided `ndarray`, dtype given by `self.dtype`, and shape specified
statically only if `self.use_static_shape` is `True`.
"""
ndarray = np.asarray(ndarray).astype(self.dtype)
return tf1.placeholder_with_default(
ndarray, shape=ndarray.shape if self.use_static_shape else None)
@test_util.test_all_tf_execution_regimes
class AutoregressiveStateSpaceModelTestStaticShape32(
_AutoregressiveStateSpaceModelTest):
dtype = np.float32
use_static_shape = True
@test_util.test_all_tf_execution_regimes
class AutoregressiveStateSpaceModelTestDynamicShape32(
_AutoregressiveStateSpaceModelTest):
dtype = np.float32
use_static_shape = False
@test_util.test_all_tf_execution_regimes
class AutoregressiveStateSpaceModelTestStaticShape64(
_AutoregressiveStateSpaceModelTest):
dtype = np.float64
use_static_shape = True
del _AutoregressiveStateSpaceModelTest # Don't run tests for the base class.
if __name__ == '__main__':
test_util.main()
|
aseciwa/independent-study
|
scripts/stopWords.py
|
Python
|
mit
| 971 | 0.008239 |
# Removing stop words
# What to do with the Retweets (RT)?
# Adjust so that the # and @ are attached to their associated word (i.e. #GOP, @twitter)
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import sys
def remove_stopwords(tweets):
with open(tweets, 'r', buffering=1028) as read_tweet:
for tweet in read_tweet:
#Use stop word method
stop_words = set(stopwords.words('english'))
word_tokens = word_tokenize(tweet)
filtered_tweet = []
for word in word_tokens:
if word not in stop_words:
# Capture only words not listed in stop_word txt
filtered_tweet.append(word)
print(filtered_tweet)
def main():
    tweets = "/Users/alanseciwa/Desktop/Independent_Study/Sep16-GOP-TweetsONLY/clean_data-TWEETONLY.csv"
remove_stopwords(tweets)
if __name__ == '__main__':
main()
sys.exit()
|
TexasLAN/texaslan.org
|
config/wsgi.py
|
Python
|
mit
| 1,450 | 0 |
"""
WSGI config for Texas LAN Web project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
bminchew/PySAR
|
pysar/polsar/decomp.py
|
Python
|
gpl-3.0
| 6,649 | 0.025417 |
"""
PySAR
Polarimetric SAR decomposition
Contents
--------
decomp_fd(hhhh,vvvv,hvhv,hhvv,numthrd=None) : Freeman-Durden 3-component decomposition
"""
from __future__ import print_function, division
import sys,os
import numpy as np
###===========================================================================================
def decomp_fd(hhhh,vvvv,hvhv,hhvv,null=None,numthrd=None,maxthrd=8):
"""
Freeman-Durden 3-component decomposition
Parameters
----------
hhhh : ndarray
horizontally polarized power
vvvv : ndarray
vertically polarized power
hvhv : ndarray
cross-polarized power
hhvv : ndarray
co-polarized cross product (complex-valued)
null : float or None
null value to exclude from decomposition
numthrd : int or None
number of pthreads; None sets numthrd based on the data array size [None]
maxthrd : int or None
maximum allowable numthrd [8]
Returns
-------
ps : ndarray
surface-scattered power
pd : ndarray
double-bounce power
pv : ndarray
volume-scattered power
Notes
-----
* arrays are returned with the same type as hhhh data
Reference
---------
1. Freeman, A. and Durden, S., "A three-component scattering model for polarimetric SAR data", *IEEE Trans. Geosci. Remote Sensing*, vol. 36, no. 3, pp. 963-973, May 1998.
"""
from pysar.polsar._decomp_modc import free_durden
if not numthrd:
numthrd = np.max([len(hhhh)//1e5, 1])
if numthrd > maxthrd: numthrd = maxthrd
elif numthrd < 1:
raise ValueError('numthrd must be >= 1')
if null:
nullmask = np.abs(hhhh-null) < 1.e-7
nullmask += np.abs(vvvv-null) < 1.e-7
nullmask += np.abs(hvhv-null) < 1.e-7
nullmask += np.abs(hhvv-null) < 1.e-7
hhvv[nullmask] = 0.
hhhhtype = None
if hhhh.dtype != np.float32:
hhhhtype = hhhh.dtype
hhhh = hhhh.astype(np.float32)
vvvv = vvvv.astype(np.float32)
hvhv = hvhv.astype(np.float32)
hhvv = hhvv.astype(np.complex64)
if not all({2-x for x in [hhhh.ndim, vvvv.ndim, hvhv.ndim, hhvv.ndim]}):
hhhh, vvvv = hhhh.flatten(), vvvv.flatten()
hvhv, hhvv = hvhv.flatten(), hhvv.flatten()
P = free_durden(hhhh, vvvv, hvhv, hhvv, numthrd)
if hhhhtype: P = P.astype(hhhhtype)
P = P.reshape(3,-1)
if null: P[0,nullmask], P[1,nullmask], P[2,nullmask] = null, null, null
return P[0,:], P[1,:], P[2,:]
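# Hypothetical usage sketch (shapes and values invented for illustration only):
#   ps, pd, pv = decomp_fd(hhhh, vvvv, hvhv, hhvv, null=-9999., numthrd=4)
# where hhhh, vvvv, hvhv are real-valued and hhvv is a complex-valued array of equal size.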
###---------------------------------------------------------------------------------
def decomp_haa(hhhh,vvvv,hvhv,hhhv,hhvv,hvvv,matform='C',null=None,numthrd=None,maxthrd=8):
"""
Cloude-Pottier H/A/alpha polarimetric decomposition
Parameters
----------
hhhh : ndarray
horizontal co-polarized power (or 0.5|HH + VV|^2 if matform = 'T')
vvvv : ndarray
        vertical co-polarized power (or 0.5|HH - VV|^2 if matform = 'T')
hvhv : ndarray
cross-polarized power (2|HV|^2 for matform = 'T')
hhhv : ndarray
HH.HV* cross-product (or 0.5(HH+VV)(HH-VV)* for matform = 'T')
hhvv : ndarray
        HH.VV* cross-product (or HV(HH+VV)* for matform = 'T')
hvvv : ndarray
HV.VV* cross-product (or HV(HH-VV)* for matform = 'T')
matform : str {'C' or 'T'}
form of input matrix entries: 'C' for covariance matrix and
'T' for coherency matrix ['C'] (see ref. 1)
null : float or None
null value to exclude from decomposition
numthrd : int or None
number of pthreads; None sets numthrd based on the data array size [None]
maxthrd : int or None
maximum allowable numthrd [8]
Returns
-------
H : ndarray
entropy (H = -(p1*log_3(p1) + p2*log_3(p2) + p3*log_3(p3))
where pi = lam_i/(hhhh+vvvv+hvhv)) and lam is an eigenvalue
A : ndarray
anisotropy (A = (lam_2-lam_3)/(lam_2+lam_3) --> lam_1 >= lam_2 >= lam_3
alpha : ndarray
alpha angle in degrees (see ref. 1)
Notes
-----
* arrays are returned with the same type as hhhh data
* if covariance matrix form is used, do not multiply entries by any constants
Reference
---------
1. Cloude, S. and Pottier, E., "An entropy based classification scheme for land applications of polarimetric SAR", *IEEE Trans. Geosci. Remote Sensing*, vol. 35, no. 1, pp. 68-78, Jan. 1997.
"""
from pysar.polsar._decomp_modc import cloude_pot
if matform == 'C' or matform == 'c':
mtf = 1
elif matform == 'T' or matform == 't':
mtf = 0
else:
raise ValueError("matform must be 'C' or 'T'")
if not numthrd:
numthrd = np.max([len(hhhh)//1e5, 1])
if numthrd > maxthrd: numthrd = maxthrd
elif numthrd < 1:
raise ValueError('numthrd must be >= 1')
if null:
nullmask = np.abs(hhhh-null) < 1.e-7
nullmask += np.abs(vvvv-null) < 1.e-7
nullmask += np.abs(hvhv-null) < 1.e-7
nullmask += np.abs(hhhv-null) < 1.e-7
nullmask += np.abs(hhvv-null) < 1.e-7
nullmask += np.abs(hvvv-null) < 1.e-7
hhhh[nullmask], vvvv[nullmask] = 0., 0.
hvhv[nullmask] = 0.
hhhhtype = None
if hhhh.dtype != np.float32:
hhhhtype = hhhh.dtype
hhhh = hhhh.astype(np.float32)
vvvv = vvvv.astype(np.float32)
hvhv = hvhv.astype(np.float32)
hhhv = hhhv.astype(np.complex64)
hhvv = hhvv.astype(np.complex64)
hvvv = hvvv.astype(np.complex64)
if not all({2-x for x in [hhhh.ndim, vvvv.ndim, hvhv.ndim, hhhv.ndim, hhvv.ndim, hvvv.ndim]}):
hhhh, vvvv = hhhh.flatten(), vvvv.flatten()
hvhv, hhvv = hvhv.flatten(), hhvv.flatten()
hhhv, hvvv = hhhv.flatten(), hvvv.flatten()
P = cloude_pot(hhhh, vvvv, hvhv, hhhv, hhvv, hvvv, mtf, numthrd)
if hhhhtype: P = P.astype(hhhhtype)
P = P.reshape(3,-1)
if null: P[0,nullmask], P[1,nullmask], P[2,nullmask] = null, null, null
return P[0,:], P[1,:], P[2,:]
def decomp_cp(hhhh,vvvv,hvhv,hhhv,hhvv,hvvv,matform='C',null=None,numthrd=None,maxthrd=8):
__doc__ = decomp_haa.__doc__
return decomp_haa(hhhh=hhhh,vvvv=vvvv,hvhv=hvhv,hhhv=hhhv,hhvv=hhvv,hvvv=hvvv,
matform=matform,null=null,numthrd=numthrd,maxthrd=maxthrd)
|
edsuom/sAsync
|
sasync/test/test_items.py
|
Python
|
apache-2.0
| 12,595 | 0.006431 |
# sAsync:
# An enhancement to the SQLAlchemy package that provides persistent
# item-value stores, arrays, and dictionaries, and an access broker for
# conveniently managing database access, table setup, and
# transactions. Everything can be run in an asynchronous fashion using
# the Twisted framework and its deferred processing capabilities.
#
# Copyright (C) 2006, 2015 by Edwin A. Suominen, http://edsuom.com
#
# See edsuom.com for API documentation as well as information about
# Ed's background and other projects, software and otherwise.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
Unit tests for sasync.items.py.
"""
from twisted.internet.defer import Deferred, DeferredList
from sqlalchemy import *
from sasync.database import transact, AccessBroker
import sasync.items as items
from sasync.test.testbase import MockThing, TestCase
GROUP_ID = 123
VERBOSE = False
db = 'items.db'
class TestableItemsTransactor(items.Transactor):
@transact
def pre(self):
# Group 123
self.sasync_items.insert().execute(
group_id=123, name='foo', value='OK')
# Set up an experienced MockThing to have pickled
thing = MockThing()
thing.method(1)
self.sasync_items.insert().execute(
group_id=123, name='bar', value=thing)
# Group 124
self.sasync_items.insert().execute(
group_id=124, name='foo', value='bogus')
self.sasync_items.insert().execute(
group_id=124, name='invalid', value='bogus')
@transact
def post(self):
self.sasync_items.delete().execute()
class ItemsMixin:
def tearDown(self):
def _tearDown():
si = self.i.t.sasync_items
si.delete(si.c.group_id == GROUP_ID).execute()
d = self.i.t.deferToQueue(_tearDown, niceness=20)
d.addCallback(lambda _: self.i.shutdown())
return d
class TestItemsTransactor(ItemsMixin, TestCase):
def setUp(self):
url = "sqlite:///%s" % db
self.i = items.Items(GROUP_ID, url)
self.i.t = TestableItemsTransactor(self.i.groupID, url)
return self.i.t.pre()
def tearDown(self):
return self.i.t.post()
def test_load(self):
def gotValue(value, name):
if name == 'foo':
self.failUnlessEqual(value, 'OK')
else:
self.failUnless(
isinstance(value, MockThing),
"Item 'bar' is a '%s', not an instance of 'MockThing'" \
% value)
self.failUnless(
value.beenThereDoneThat,
"Class instance wasn't properly persisted with its state")
                self.failUnlessEqual(
value.method(2.5), 5.0,
"Class instance wasn't properly persisted with its method")
dList = []
for name in ('foo', 'bar'):
dList.append(self.i.t.load(name).addCallback(gotValue, name))
return DeferredList(dList)
def test_load(self):
def gotValue(value, name):
if name == 'foo':
self.failUnlessEqual(value, 'OK')
else:
self.failUnless(
isinstance(value, MockThing),
"Item 'bar' is a '%s', not an instance of 'MockThing'" \
% value)
self.failUnless(
value.beenThereDoneThat,
"Class instance wasn't properly persisted with its state")
self.failUnlessEqual(
value.method(2.5), 5.0,
"Class instance wasn't properly persisted with its method")
dList = []
for name in ('foo', 'bar'):
dList.append(self.i.t.load(name).addCallback(gotValue, name))
return DeferredList(dList)
def test_loadAbsent(self):
def gotValue(value):
self.failUnless(
isinstance(value, items.Missing),
"Should have returned 'Missing' object, not '%s'!" % \
str(value))
def gotExpectedError(failure):
self.fail("Shouldn't have raised error on missing value")
return self.i.t.load('invalid').addCallbacks(
gotValue, gotExpectedError)
def test_loadAll(self):
def loaded(items):
itemKeys = items.keys()
itemKeys.sort()
self.failUnlessEqual(itemKeys, ['bar', 'foo'])
return self.i.t.loadAll().addCallback(loaded)
def insertLots(self, callback):
noviceThing = MockThing()
experiencedThing = MockThing()
experiencedThing.method(0)
self.whatToInsert = {
'alpha':5937341,
'bravo':'abc',
'charlie':-3.1415,
'delta':(1,2,3),
'echo':True,
'foxtrot':False,
'golf':noviceThing,
'hotel':experiencedThing,
'india':MockThing
}
dList = []
for name, value in self.whatToInsert.iteritems():
dList.append(self.i.t.insert(name, value))
return DeferredList(dList).addCallback(
callback, self.whatToInsert.copy())
def test_insert(self):
def done(null, items):
def check():
table = self.i.t.sasync_items
for name, inserted in items.iteritems():
value = table.select(
and_(table.c.group_id == 123,
table.c.name == name)
).execute().fetchone()['value']
msg = "Inserted '{}:{}' ".format(name, inserted) +\
"but read '{}' back from the database!".format(value)
self.failUnlessEqual(value, inserted, msg)
for otherName, otherValue in items.iteritems():
if otherName != name and value == otherValue:
self.fail(
"Inserted item '%s' is equal to item '%s'" % \
(name, otherName))
return self.i.t.deferToQueue(check)
return self.insertLots(done)
def test_deleteOne(self):
def gotOriginal(value):
self.failUnlessEqual(value, 'OK')
return self.i.t.delete('foo').addCallback(getAfterDeleted)
def getAfterDeleted(null):
return self.i.t.load('foo').addCallback(checkIfDeleted)
def checkIfDeleted(value):
self.failUnless(isinstance(value, items.Missing))
return self.i.t.load('foo').addCallback(gotOriginal)
def test_deleteMultiple(self):
def getAfterDeleted(null):
return self.i.t.loadAll().addCallback(checkIfDeleted)
def checkIfDeleted(values):
self.failUnlessEqual(values, {})
return self.i.t.delete('foo', 'bar').addCallback(getAfterDeleted)
def test_namesFew(self):
def got(names):
names.sort()
self.failUnlessEqual(names, ['bar', 'foo'])
return self.i.t.names().addCallback(got)
def test_namesMany(self):
def get(null, items):
return self.i.t.names().addCallback(got, items.keys())
def got(names, shouldHave):
shouldHave += ['foo', 'bar']
names.sort()
shouldHave.sort()
self.failUnlessEqual(names, shouldHave)
return self.insertLots(get)
def test_update(self):
def update(null, items):
return DeferredList([
self.i.t.update('alpha', 1),
self.i.t.update('bravo', 2),
|
SimeonRolev/RolevPlayerQT
|
RolevPlayer/Scrobbler.py
|
Python
|
gpl-3.0
| 1,642 | 0.003654 |
import json
import time
from _md5 import md5
import requests
import RolevPlayer as r
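# Note added for clarity (based on the Last.fm API signing convention, assumed
# here rather than taken from this file): api_sig is the md5 hex digest of the
# request parameters concatenated as name+value pairs in alphabetical order,
# followed by the shared secret; API_KEY, SK and SECRET are expected to be
# defined in the RolevPlayer module imported above.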
def now_playing_last_fm(artist, track):
update_now_playing_sig = md5(("api_key" + r.API_KEY +
"artist" + artist +
"method" + "track.updateNowPlaying" +
"sk" + r.SK +
"track" + track +
r.SECRET).encode('utf-8')).hexdigest()
url = "htt
|
p://ws.audioscrobbler.com/2.0/?method=track.updateNowPlaying" + \
"&api_key=" + r.API_KEY + \
"&api_sig=" + update_now_playing_sig + \
"&artist=" + artist + \
"&format=json" + \
"&sk=" + r.SK + \
"&track=" + track
req = requests.post(url).text
json_obj = json.loads(req)
def scrobble(artist, track):
# this gives us a timestamp, casted to integer
    ts = int(time.time())
scrobbling_sig = md5(("api_key" + r.API_KEY +
"artist" + artist +
"method" + "track.scrobble" +
"sk" + r.SK +
"timestamp" + str(ts) +
"track" + track +
r.SECRET).encode('utf-8')).hexdigest()
req = requests.post(
"http://ws.audioscrobbler.com/2.0/?method=track.scrobble" +
"&api_key=" + r.API_KEY +
"&api_sig=" + scrobbling_sig +
"&artist=" + artist +
"&format=json" +
"&sk=" + r.SK +
"×tamp=" + str(ts) +
"&track=" + track).text
json_obj = json.loads(req)
|
tanji/replication-manager
|
share/opensvc/compliance/com.replication-manager/remove_files.py
|
Python
|
gpl-3.0
| 2,367 | 0.005492 |
#!/usr/bin/env python
data = {
"default_prefix": "OSVC_COMP_REMOVE_FILES_",
"example_value": """
[
"/tmp/foo",
"/bar/to/delete"
]
""",
"description": """* Verify files and file trees are uninstalled
""",
"form_definition": """
Desc: |
A rule defining a set of files to remove, fed to the 'remove_files' compliance object.
Css: comp48
Outputs:
-
Dest: compliance variable
Class: remove_files
Type: json
Format: list
Inputs:
-
Id: path
Label: File path
DisplayModeLabel: ""
LabelCss: edit16
Mandatory: Yes
Help: You must set paths in fully qualified form.
Type: string
""",
}
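# Hypothetical example of how a rule reaches this object (assumption for
# illustration; the exact mechanism lives in comp.py): the compliance framework
# exports the JSON list under the prefix above, e.g.
#   OSVC_COMP_REMOVE_FILES_RULE_1='["/tmp/foo", "/bar/to/delete"]'
# and self.get_rules() collects and decodes all such variables.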
import os
import sys
import re
import json
from glob import glob
import shutil
sys.path.append(os.path.dirname(__file__))
from comp import *
blacklist = [
"/",
"/root"
]
class CompRemoveFiles(CompObject):
def __init__(self, prefix=None):
CompObject.__init__(self, prefix=prefix, data=data)
def init(self):
patterns = self.get_rules()
patterns = sorted(list(set(patterns)))
self.files = self.expand_patterns(patterns)
if len(self.files) == 0:
pinfo("no files matching patterns")
raise NotApplicable
def expand_patterns(self, patterns):
l = []
for pattern in patterns:
            l += glob(pattern)
return l
def fixable(self):
return RET_NA
def check_file(self, _file):
if not os.path.exists(_file):
pinfo(_file, "does not exist. on target.")
return RET_OK
perror(_file, "exists. shouldn't")
return RET_ERR
def fix_file(self, _file):
if not os.path.exists(_file):
return RET_OK
try:
            if os.path.isdir(_file) and not os.path.islink(_file):
shutil.rmtree(_file)
else:
os.unlink(_file)
pinfo(_file, "deleted")
except Exception as e:
perror("failed to delete", _file, "(%s)"%str(e))
return RET_ERR
return RET_OK
def check(self):
r = 0
for _file in self.files:
r |= self.check_file(_file)
return r
def fix(self):
r = 0
for _file in self.files:
r |= self.fix_file(_file)
return r
if __name__ == "__main__":
main(CompRemoveFiles)
|
AversivePlusPlus/AversivePlusPlus
|
tools/conan/conans/client/new.py
|
Python
|
bsd-3-clause
| 3,503 | 0.001427 |
conanfile = """from conans import ConanFile, CMake, tools
import os
class {package_name}Conan(ConanFile):
name = "{name}"
version = "{version}"
license = "<Put the package license here>"
url = "<Package recipe repository url here, for issues about the package>"
settings = "os", "compiler", "build_type", "arch"
options = {{"shared": [True, False]}}
default_options = "shared=False"
generators = "cmake"
def source(self):
self.run("git clone https://github.com/memsharded/hello.git")
self.run("cd hello && git checkout static_shared")
# This small hack might be useful to guarantee proper /MT /MD linkage in MSVC
# if the packaged project doesn't have variables to set it properly
tools.replace_in_file("hello/CMakeLists.txt", "PROJECT(MyHello)", '''PROJECT(MyHello)
include(${{CMAKE_BINARY_DIR}}/conanbuildinfo.cmake)
conan_basic_setup()''')
def build(self):
cmake = CMake(self.settings)
shared = "-DBUILD_SHARED_LIBS=ON" if self.options.shared else ""
self.run('cmake hello %s %s' % (cmake.command_line, shared))
self.run("cmake --build . %s" % cmake.build_config)
def package(self):
self.copy("*.h", dst="include", src="hello")
self.copy("*hello.lib", dst="lib", keep_path=False)
self.copy("*.dll", dst="bin", keep_path=False)
self.copy("*.so", dst="lib", keep_path=False)
self.copy("*.a", dst="lib", keep_path=False)
def package_info(self):
self.cpp_info.libs = ["hello"]
"""
conanfile_header = """from conans import ConanFile, tools
import os
class {package_name}Conan(ConanFile):
name = "{name}"
version = "{version}"
license = "<Put the package license here>"
url = "<Package recipe repository url here, for issues about the package>"
# No settings/options are necessary, this is header only
def source(self):
'''retrieval of the source code here. Remember you can also put the
|
code in the folder and
use exports instead of retrieving it with this source() method
'''
#self.run("git clone ...") or
#tools.download("url", "file.zip")
#tools.unzip("file.zip" )
def package(self):
self.copy("*.h", "includ
|
e")
"""
test_conanfile = """from conans import ConanFile, CMake
import os
channel = os.getenv("CONAN_CHANNEL", "{channel}")
username = os.getenv("CONAN_USERNAME", "{user}")
class {package_name}TestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
requires = "{name}/{version}@%s/%s" % (username, channel)
generators = "cmake"
def build(self):
cmake = CMake(self.settings)
self.run('cmake "%s" %s' % (self.conanfile_directory, cmake.command_line))
self.run("cmake --build . %s" % cmake.build_config)
def imports(self):
self.copy("*.dll", "bin", "bin")
self.copy("*.dylib", "bin", "bin")
def test(self):
os.chdir("bin")
self.run(".%sexample" % os.sep)
"""
test_cmake = """PROJECT(PackageTest)
cmake_minimum_required(VERSION 2.8.12)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()
ADD_EXECUTABLE(example example.cpp)
TARGET_LINK_LIBRARIES(example ${CONAN_LIBS})
"""
test_main = """#include <iostream>
#include "hello.h"
int main() {
hello();
std::cout<<"*** Running example, will fail by default, implement yours! ***\\n";
return -1; // fail by default, remember to implement your test
}
"""
|
nkgilley/home-assistant
|
tests/components/linky/test_config_flow.py
|
Python
|
apache-2.0
| 6,904 | 0.000145 |
"""Tests for the Linky config flow."""
from pylinky.exceptions import (
PyLinkyAccessException,
PyLinkyEnedisException,
PyLinkyException,
PyLinkyWrongLoginException,
)
import pytest
from homeassistant import data_entry_flow
from homeassistant.components.linky.const import DEFAULT_TIMEOUT, DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT, SOURCE_USER
from homeassistant.const import CONF_PASSWORD, CONF_TIMEOUT, CONF_USERNAME
from homeassistant.helpers.typing import HomeAssistantType
from tests.async_mock import Mock, patch
from tests.common import MockConfigEntry
USERNAME = "username@hotmail.fr"
USERNAME_2 = "username@free.fr"
PASSWORD = "password"
TIMEOUT = 20
@pytest.fixture(name="login")
def mock_controller_login():
"""Mock a successful login."""
with patch(
"homeassistant.components.linky.config_flow.LinkyClient"
) as service_mock:
service_mock.return_value.login = Mock(return_value=True)
service_mock.return_value.close_session = Mock(return_value=None)
yield service_mock
@pytest.fixture(name="fetch_data")
def mock_controller_fetch_data():
"""Mock a successful get data."""
with patch(
"homeassistant.components.linky.config_flow.LinkyClient"
) as service_mock:
service_mock.return_value.fetch_data = Mock(return_value={})
        service_mock.return_value.close_session = Mock(return_value=None)
yield service_mock
async def test_user(hass: HomeAssistantType, login, fetch_data):
"""Test user config."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=None
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
# test with all provided
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == USERNAME
assert result["title"] == USERNAME
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_TIMEOUT] == DEFAULT_TIMEOUT
async def test_import(hass: HomeAssistantType, login, fetch_data):
"""Test import step."""
# import with username and password
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == USERNAME
assert result["title"] == USERNAME
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_TIMEOUT] == DEFAULT_TIMEOUT
# import with all
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_USERNAME: USERNAME_2,
CONF_PASSWORD: PASSWORD,
CONF_TIMEOUT: TIMEOUT,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == USERNAME_2
assert result["title"] == USERNAME_2
assert result["data"][CONF_USERNAME] == USERNAME_2
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_TIMEOUT] == TIMEOUT
async def test_abort_if_already_setup(hass: HomeAssistantType, login, fetch_data):
"""Test we abort if Linky is already setup."""
MockConfigEntry(
domain=DOMAIN,
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
unique_id=USERNAME,
).add_to_hass(hass)
# Should fail, same USERNAME (import)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
# Should fail, same USERNAME (flow)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_login_failed(hass: HomeAssistantType, login):
"""Test when we have errors during login."""
login.return_value.login.side_effect = PyLinkyAccessException()
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "access"}
hass.config_entries.flow.async_abort(result["flow_id"])
login.return_value.login.side_effect = PyLinkyWrongLoginException()
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "wrong_login"}
hass.config_entries.flow.async_abort(result["flow_id"])
async def test_fetch_failed(hass: HomeAssistantType, login):
"""Test when we have errors during fetch."""
login.return_value.fetch_data.side_effect = PyLinkyAccessException()
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "access"}
hass.config_entries.flow.async_abort(result["flow_id"])
login.return_value.fetch_data.side_effect = PyLinkyEnedisException()
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "enedis"}
hass.config_entries.flow.async_abort(result["flow_id"])
login.return_value.fetch_data.side_effect = PyLinkyException()
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "unknown"}
hass.config_entries.flow.async_abort(result["flow_id"])
|
branden/dcos
|
gen/__init__.py
|
Python
|
apache-2.0
| 29,065 | 0.002408 |
"""Helps build config packages for installer-specific templates.
Takes in a bunch of configuration files, as well as functions to calculate the values/strings which
need to be put into the configuration.
Operates strictly:
- All paramaters are strings. All things calculated / derived are strings.
- Every given parameter must map to some real config option.
- Every config option must be given only once.
- Defaults can be overridden. If no default is given, the parameter must be specified
- empty string is not the same as "not specified"
"""
import importlib.machinery
import json
import logging as log
import os
import os.path
import pprint
import textwrap
from copy import copy, deepcopy
from typing import List
import yaml
import gen.calc
import gen.internals
import gen.template
import gen.util
from gen.exceptions import ValidationError
from pkgpanda import PackageId
from pkgpanda.util import (
hash_checkout,
json_prettyprint,
load_string,
split_by_token,
write_json,
write_string,
write_yaml,
)
# List of all roles all templates should have.
role_names = {"master", "slave", "slave_public"}
role_template = '/etc/mesosphere/roles/{}'
CLOUDCONFIG_KEYS = {'coreos', 'runcmd', 'apt_sources', 'root', 'mounts', 'disk_setup', 'fs_setup', 'bootcmd'}
PACKAGE_KEYS = {'package', 'root'}
# Allow overriding calculators with a `gen_extra/calc.py` if it exists
gen_extra_calc = None
if os.path.exists('gen_extra/calc.py'):
gen_extra_calc = importlib.machinery.SourceFileLoader('gen_extra.calc', 'gen_extra/calc.py').load_module()
def stringify_configuration(configuration: dict):
"""Create a stringified version of the complete installer configuration
to send to gen.generate()"""
gen_config = {}
for key, value in configuration.items():
if isinstance(value, list) or isinstance(value, dict):
log.debug("Caught %s for genconf configuration, transforming to JSON string: %s", type(value), value)
value = json.dumps(value)
elif isinstance(value, bool):
if value:
value = 'true'
else:
value = 'false'
elif isinstance(value, int):
log.debug("Caught int for genconf configuration, transforming to string: %s", value)
value = str(value)
elif isinstance(value, str):
pass
else:
log.error("Invalid type for value of %s in config. Got %s, only can handle list, dict, "
"int, bool, and str", key, type(value))
raise Exception()
gen_config[key] = value
log.debug('Stringified configuration: \n{}'.format(gen_config))
return gen_config
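# Illustrative sketch (not part of the original module): the stringification
# rules described in the docstring above, applied to a small hypothetical
# configuration. Assumes stringify_configuration() as defined above.
def _stringify_configuration_example():
    config = {'masters': ['10.0.0.1'], 'oauth_enabled': False, 'num_agents': 3}
    expected = {'masters': '["10.0.0.1"]',
                'oauth_enabled': 'false',
                'num_agents': '3'}
    assert stringify_configuration(config) == expected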
def add_roles(cloudconfig, roles):
for role in roles:
cloudconfig['write_files'].append({
"path": role_template.format(role),
"content": ""})
return cloudconfig
def add_units(cloudconfig, services, cloud_init_implementation='coreos'):
'''
Takes a services dict in the format of CoreOS cloud-init 'units' and
injects into cloudconfig a transformed version appropriate for the
cloud_init_implementation. See:
https://coreos.com/os/docs/latest/cloud-config.html for the CoreOS 'units'
specification. See: https://cloudinit.readthedocs.io/en/latest/index.html
for the Canonical implementation.
Parameters:
* cloudconfig is a dict
* services is a list of dict's
* cloud_init_implementation is a string: 'coreos' or 'canonical'
'''
if cloud_init_implementation == 'canonical':
cloudconfig.setdefault('write_files', [])
cloudconfig.setdefault('runcmd', [])
for unit in services:
unit_name = unit['name']
if 'content' in unit:
write_files_entry = {'path': '/etc/systemd/system/{}'.format(unit_name),
'content': unit['content'],
'permissions': '0644'}
cloudconfig['write_files'].append(write_files_entry)
if 'enable' in unit and unit['enable']:
runcmd_entry = ['systemctl', 'enable', unit_name]
cloudconfig['runcmd'].append(runcmd_entry)
if 'command' in unit:
opts = []
if 'no_block' in unit and unit['no_block']:
opts.append('--no-block')
if unit['command'] in ['start', 'stop', 'reload', 'restart', 'try-restart', 'reload-or-restart',
'reload-or-try-restart']:
runcmd_entry = ['systemctl'] + opts + [unit['command'], unit_name]
else:
raise Exception("Unsupported unit command: {}".format(unit['command']))
cloudconfig['runcmd'].append(runcmd_entry)
elif cloud_init_implementation == 'coreos':
cloudconfig.setdefault('coreos', {}).setdefault('units', [])
cloudconfig['coreos']['units'] += services
else:
raise Exception("Parameter value '{}' is invalid for cloud_init_implementation".format(
cloud_init_implementation))
return cloudconfig
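# Illustrative sketch (not part of the original module): the 'canonical'
# branch described in the docstring above turns a CoreOS-style unit into
# write_files/runcmd entries. The unit definition below is hypothetical.
def _add_units_example():
    cloudconfig = {'write_files': []}
    services = [{'name': 'demo.service',
                 'content': '[Service]\nExecStart=/bin/true\n',
                 'enable': True,
                 'command': 'start'}]
    # Appends a write_files entry for the unit file plus
    # 'systemctl enable' and 'systemctl start' runcmd entries.
    return add_units(cloudconfig, services, cloud_init_implementation='canonical')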
# For converting util -> a namespace only.
class Bunch(object):
def __init__(self, adict):
self.__dict__.update(adict)
def render_cloudconfig(data):
return "#cloud-config\n" + render_yaml(data)
utils = Bunch({
"role_template": role_template,
"add_roles": add_roles,
"role_names": role_names,
"add_services": None,
"add_stable_artifact": None,
"add_channel_artifact": None,
"add_units": add_units,
"render_cloudconfig": render_cloudconfig
})
def render_yaml(data):
return yaml.dump(data, default_style='|', default_flow_style=False)
# Recursively merge to python dictionaries.
# If both base and addition contain the same key, that key's value will be
# merged if it is a dictionary.
# This is unlike the python dict.update() method which just overwrites matching
# keys.
def merge_dictionaries(base, additions):
base_copy = base.copy()
for k, v in additions.items():
try:
if k not in base:
base_copy[k] = v
continue
if isinstance(v, dict) and isinstance(base_copy[k], dict):
base_copy[k] = merge_dictionaries(base_copy.get(k, dict()), v)
continue
# Append arrays
if isinstance(v, list) and isinstance(base_copy[k], list):
base_copy[k].extend(v)
continue
# Merge sets
if isinstance(v, set) and isinstance(base_copy[k], set):
base_copy[k] |= v
continue
# Unknown types
raise ValueError("Can't merge type {} into type {}".format(type(v), type(base_copy[k])))
except ValueError as ex:
raise ValueError("{} inside key {}".format(ex, k)) from ex
return base_copy
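# Illustrative sketch (not part of the original module): the merge semantics
# described in the comment above; nested dicts are merged, lists appended and
# sets unioned, unlike dict.update(), which replaces whole values.
def _merge_dictionaries_example():
    base = {'cloud': {'units': [1]}, 'roles': {'master'}}
    additions = {'cloud': {'units': [2], 'files': []}, 'roles': {'slave'}}
    merged = merge_dictionaries(base, additions)
    assert merged == {'cloud': {'units': [1, 2], 'files': []},
                      'roles': {'master', 'slave'}}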
def load_templates(template_dict):
result = dict()
for name, template_list in template_dict.items():
result_list = list()
for template_name in template_list:
result_list.append(gen.template.parse_resources(template_name))
extra_filename = "gen_extra/" + template_name
if os.path.exists(extra_filename):
                result_list.append(gen.template.parse_str(
load_string(extra_filename)))
result[name] = result_list
return result
# Render the Jinja/YAML into YAML, then load the YAML and merge it to make the
# final configuration files.
def render_templates(template_dict, arguments):
rendered_templates = dict()
templates = load_templates(template_dict)
    for name, templates in templates.items():
full_template = None
for template in templates:
rendered_template = template.render(arguments)
# If not yaml, just treat opaquely.
if not name.endswith('.yaml'):
# No merging support currently.
assert len(templates) == 1
full_template = rendered_template
continue
tem
|
KanoComputing/kano-toolset
|
kano/utils/disk.py
|
Python
|
gpl-2.0
| 1,133 | 0 |
# disk.py
#
# Copyright (C) 2014-2016 Kano Computing Ltd.
# License: http://www.gnu.org/licenses/gpl-2.0.txt GNU GPL v2
#
# Utilities relating to disk manangement
from kano.utils.shell import run_cmd
def get_free_space(path="/"):
"""
Returns the amount of free space in certain location in MB
:param path: The location to measure the free space at.
:type path: str
:return: Number of free megabytes.
:rtype: int
"""
out, dummy_err, dummy_rv = run_cmd("df {}".format(path))
    dummy_device, dummy_size, dummy_used, free, dummy_percent, dummy_mp = \
out.split('\n')[1].split()
return int(free) / 1024
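# Illustrative sketch (not part of the original module): the df output parsed
# above carries the free space as the fourth whitespace-separated field,
# reported in 1K blocks. The values below are hypothetical.
def _free_space_example():
    df_line = "/dev/root 15022928 4628948 9730218 33% /"
    dummy_device, dummy_size, dummy_used, free, dummy_percent, dummy_mp = \
        df_line.split()
    return int(free) / 1024  # megabytes, as returned by get_free_space()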
def get_partition_info():
device = '/dev/mmcblk0'
try:
cmd = 'lsblk -n -b {} -o SIZE'.format(device)
        stdout, dummy_stderr, returncode = run_cmd(cmd)
if returncode != 0:
from kano.logging import logger
logger.warning("error running lsblk")
return []
lines = stdout.strip().split('\n')
sizes = map(int, lines)
return sizes
except Exception:
return []
|
swarmer/tester
|
core/migrations/0009_auto_20150821_0243.py
|
Python
|
mit
| 376 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0008_auto_20150819_0050'),
]
operations = [
migrations.AlterUniqueTogether(
name='test',
            unique_together=set([('owner', 'name')]),
),
]
|
koparasy/faultinjection-gem5
|
src/arch/x86/isa/insts/general_purpose/data_transfer/xchg.py
|
Python
|
bsd-3-clause
| 3,156 | 0 |
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
# All the memory versions need to use LOCK, regardless of if it was set
def macroop XCHG_R_R
{
# Use the xor trick instead of moves to reduce register pressure.
# This probably doesn't make much of a difference, but it's easy.
xor reg, reg, regm
xor regm, regm, reg
xor reg, reg, regm
};
def macroop XCHG_R_M
{
ldstl t1, seg, sib, disp
stul reg, seg, sib, disp
mov reg, reg, t1
};
def macroop XCHG_R_P
{
rdip t7
ldstl t1, seg, riprel, disp
stul reg, seg, riprel, disp
mov reg, reg, t1
};
def macroop XCHG_M_R
{
ldstl t1, seg, sib, disp
stul reg, seg, sib, disp
mov reg, reg, t1
};
def macroop XCHG_P_R
{
rdip t7
ldstl t1, seg, riprel, disp
stul reg, seg, riprel, disp
mov reg, reg, t1
};
def macroop XCHG_LOCKED_M_R
{
ldstl t1, seg, sib, disp
stul reg, seg, sib, disp
mov reg, reg, t1
};
def macroop XCHG_LOCKED_P_R
{
rdip t7
ldstl t1, seg, riprel, disp
stul reg, seg, riprel, disp
mov reg, reg, t1
};
'''
|
vaibhavi-r/CSE-415
|
Assignment3/puzzle0.py
|
Python
|
mit
| 129 | 0.015504 |
import EightPuzzleWithHeuristics as Problem
# puzzle0:
CREATE_INITIAL_STATE = lambda: Problem.State([0, 1, 2, 3, 4, 5, 6, 7, 8])
|
wwj718/edx-video
|
common/djangoapps/student/views.py
|
Python
|
agpl-3.0
| 50,206 | 0.002769 |
import datetime
import feedparser
import json
import logging
import random
import re
import string # pylint: disable=W0402
import urllib
import uuid
import time
from django.conf import settings
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import password_reset_confirm
from django.core.cache import cache
from django.core.context_processors import csrf
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.core.validators import validate_email, validate_slug, ValidationError
from django.core.exceptions import ObjectDoesNotExist
from django.db import IntegrityError, transaction
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden, HttpResponseNotAllowed, Http404
from django.shortcuts import redirect
from django_future.csrf import ensure_csrf_cookie
from django.utils.http import cookie_date
from django.utils.http import base36_to_int
from django.utils.translation import ugettext as _
from ratelimitbackend.exceptions import RateLimitException
from mitxmako.shortcuts import render_to_response, render_to_string
from bs4 import BeautifulSoup
from student.models import (Registration, UserProfile, TestCenterUser, TestCenterUserForm,
TestCenterRegistration, TestCenterRegistrationForm,
PendingNameChange, PendingEmailChange,
CourseEnrollment, unique_id_for_user,
get_testcenter_registration, CourseEnrollmentAllowed)
from student.forms import PasswordResetFormNoActive
from certificates.models import CertificateStatuses, certificate_status_for_student
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.django import modulestore
from collections import namedtuple
from courseware.courses import get_courses, sort_by_announcement
from courseware.access import has_access
from external_auth.models import ExternalAuthMap
from statsd import statsd
from pytz import UTC
log = logging.getLogger("mitx.student")
AUDIT_LOG = logging.getLogger("audit")
Article = namedtuple('Article', 'title url author image deck publication publish_date')
def csrf_token(context):
''' A csrf token that can be included in a form.
'''
csrf_token = context.get('csrf_token', '')
if csrf_token == 'NOTPROVIDED':
return ''
return (u'<div style="display:none"><input type="hidden"'
' name="csrfmiddlewaretoken" value="%s" /></div>' % (csrf_token))
# NOTE: This view is not linked to directly--it is called from
# branding/views.py:index(), which is cached for anonymous users.
# This means that it should always return the same thing for anon
# users. (in particular, no switching based on query params allowed)
def index(request, extra_context={}, user=None):
'''
Render the edX main page.
extra_context is used to allow immediate display of certain modal windows, eg signup,
as used by external_auth.
'''
# The course selection work is done in courseware.courses.
domain = settings.MITX_FEATURES.get('FORCE_UNIVERSITY_DOMAIN') # normally False
# do explicit check, because domain=None is valid
if domain == False:
domain = request.META.get('HTTP_HOST')
courses = get_courses(None, domain=domain)
courses = sort_by_announcement(courses)
context = {'courses': courses}
context.update(extra_context)
return render_to_response('index.html', context)
def course_from_id(course_id):
"""Return the CourseDescriptor corresponding to this course_id"""
course_loc = CourseDescriptor.id_to_location(course_id)
return modulestore().get_instance(course_id, course_loc)
day_pattern = re.compile(r'\s\d+,\s')
multimonth_pattern = re.compile(r'\s?\-\s?\S+\s')
def _get_date_for_press(publish_date):
# strip off extra months, and just use the first:
date = re.sub(multimonth_pattern, ", ", publish_date)
if re.search(day_pattern, date):
date = datetime.datetime.strptime(date, "%B %d, %Y").replace(tzinfo=UTC)
else:
date = datetime.datetime.strptime(date, "%B, %Y").replace(tzinfo=UTC)
return date
def press(request):
json_articles = cache.get("student_press_json_articles")
if json_articles is None:
if hasattr(settings, 'RSS_URL'):
content = urllib.urlopen(settings.PRESS_URL).read()
json_articles = json.loads(content)
else:
content = open(settings.PROJECT_ROOT / "templates" / "press.json").read()
json_articles = json.loads(content)
cache.set("student_press_json_articles", json_articles)
articles = [Article(**article) for article in json_articles]
articles.sort(key=lambda item: _get_date_for_press(item.publish_date), reverse=True)
return render_to_response('static_templates/press.html', {'articles': articles})
def process_survey_link(survey_link, user):
"""
If {UNIQUE_ID} appears in the link, replace it with a unique id for the user.
Currently, this is sha1(user.username). Otherwise, return survey_link.
"""
return survey_link.format(UNIQUE_ID=unique_id_for_user(user))
def cert_info(user, course):
"""
Get the certificate info needed to render the dashboard section for the given
student and course. Returns a dictionary with keys:
'status': one of 'generating', 'ready', 'notpassing', 'processing', 'restricted'
'show_download_url': bool
'download_url': url, only present if show_download_url is True
'show_disabled_download_button': bool -- true if state is 'generating'
'show_survey_button': bool
'survey_url': url, only if show_survey_button is True
'grade': if status is not 'processing'
"""
if not course.has_ended():
return {}
return _cert_info(user, course, certificate_status_for_student(user, course.id))
def _cert_info(user, course, cert_status):
"""
Implements the logic for cert_info -- split out for testing.
"""
default_status = 'processing'
default_info = {'status': default_status,
'show_disabled_download_button': False,
'show_download_url': False,
'show_survey_button': False}
if cert_status is None:
return default_info
# simplify the status for the template using this lookup table
template_state = {
        CertificateStatuses.generating: 'generating',
CertificateStatuses.regenerating: 'generating',
CertificateStatuses.downloadable: 'ready',
CertificateStatuses.notpassing: 'notpassing',
        CertificateStatuses.restricted: 'restricted',
}
status = template_state.get(cert_status['status'], default_status)
d = {'status': status,
'show_download_url': status == 'ready',
'show_disabled_download_button': status == 'generating', }
if (status in ('generating', 'ready', 'notpassing', 'restricted') and
course.end_of_course_survey_url is not None):
d.update({
'show_survey_button': True,
'survey_url': process_survey_link(course.end_of_course_survey_url, user)})
else:
d['show_survey_button'] = False
if status == 'ready':
if 'download_url' not in cert_status:
log.warning("User %s has a downloadable cert for %s, but no download url",
user.username, course.id)
return default_info
else:
d['download_url'] = cert_status['download_url']
if status in ('generating', 'ready', 'notpassing', 'restricted'):
if 'grade' not in cert_status:
# Note: as of 11/20/2012, we know there are students in this state-- cs169.1x,
# who need to be regraded (we weren't tracking 'notpassing' at first).
# We can add a log.warning here once we think it shouldn't happen.
return default_info
else:
d['grade'] = cert_status['gra
|
noirhat/bin
|
games/doom/doom-1.py
|
Python
|
gpl-2.0
| 755 | 0.003974 |
#!/usr/bin/env python3
from os import environ, system
from subprocess import Popen
print('\nUltimate Doom (Classic)')
print('Link: https://store.steampowered.com/app/2280/Ultimate_Doom/\n')
home = environ['HOME']
core = home + '/bin/games/steam-connect/steam-connect-core.py'
logo = home + '/bin/games/steam-connect/doom-logo.txt'
game = 'doom-1'
stid = '2280'
proc = 'gzdoom'
flag = ' +set dmflags 4521984'
conf = ' -config ' + home + '/.config/gzdoom/gzdoom-classic.ini'
save = ' -savedir ' + home + '/.config/gzdoom/saves/' + game
iwad = ' -iwad DOOM.WAD'
mods = ' -file music-doom.zip sprite-fix-6-d1.zip doom-sfx-high.zip speed-weapons.zip'
args = proc + flag + conf + save + iwad + mods
system('cat ' + logo)
Popen([core, stid, args]).wait()
|
fusionbox/django-importcsvadmin
|
importcsvadmin/forms.py
|
Python
|
bsd-2-clause
| 3,694 | 0.000812 |
import csv
from django.db import transaction
from django import forms
from django.forms.forms import NON_FIELD_ERRORS
from django.utils import six
from django.utils.translation import ugettext_lazy as _
class CSVImportError(Exception):
pass
class ImportCSVForm(forms.Form):
csv_file = forms.FileField(required=True, label=_('CSV File'))
has_headers = forms.BooleanField(
label=_('Has headers'),
help_text=_('Check this if your CSV file '
'has a row with column headers.'),
initial=True,
required=False,
)
def __init__(self, *args, **kwargs):
self.importer_class = kwargs.pop('importer_class')
self.dialect = kwargs.pop('dialect')
super(ImportCSVForm, self).__init__(*args, **kwargs)
self.fields['csv_file'].help_text = "Expected fields: {}".format(self.expected_fields)
def clean_csv_file(self):
if six.PY3:
# DictReader expects a str, not bytes in Python 3.
csv_text = self.cleaned_data['csv_file'].read()
csv_decoded = six.StringIO(csv_text.decode('utf-8'))
return csv_decoded
else:
return self.cleaned_data['csv_file']
@property
def expected_fields(self):
fields = self.importer_class._meta.fields
return ', '.join(fields)
@transaction.atomic
def import_csv(self):
try:
reader = csv.DictReader(
self.cleaned_data['csv_file'],
fieldnames=self.importer_class._meta.fields,
dialect=self.dialect,
)
reader_iter = enumerate(reader, 1)
if self.cleaned_data['has_headers']:
six.advance_iterator(reader_iter)
self.process_csv(reader_iter)
if not self.is_valid():
raise CSVImportError() # Abort the transaction
except csv.Error:
self.append_import_error(_("Bad CSV format"))
raise CSVImportError()
def process_csv(self, reader):
for i, row in reader:
            self.process_row(i, row)
def append_import_error(self, error, rownumber=None, column_name=None):
if rownumber is not None:
if column_name is not None:
# Translators: "{row}", "{column}" and "{error}"
                # should not be translated
fmt = _("Could not import row #{row}: {column} - {error}")
else:
# Translators: "{row}" and "{error}" should not be translated
fmt = _("Could not import row #{row}: {error}")
else:
if column_name is not None:
raise ValueError("Cannot raise a CSV import error on a specific "
"column with no row number.")
else:
# Translators: "{error}" should not be translated
fmt = _("Could not import the CSV document: {error}")
if NON_FIELD_ERRORS not in self._errors:
self._errors[NON_FIELD_ERRORS] = self.error_class()
self._errors[NON_FIELD_ERRORS].append(
fmt.format(error=error, row=rownumber, column=column_name))
def process_row(self, i, row):
importer = self.importer_class(data=row)
if importer.is_valid():
importer.save()
else:
for error in importer.non_field_errors():
self.append_import_error(rownumber=i, error=error)
for field in importer:
for error in field.errors:
self.append_import_error(rownumber=i, column_name=field.label,
error=error)
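# Illustrative usage sketch (not part of the original app): ImportCSVForm is
# constructed with an ``importer_class`` that behaves like a ModelForm
# (exposes ``_meta.fields``, ``is_valid()`` and ``save()``) plus a csv
# ``dialect``. The view, model and importer names below are hypothetical.
#
#   class ContactImporter(forms.ModelForm):
#       class Meta:
#           model = Contact              # hypothetical model
#           fields = ('name', 'email')
#
#   form = ImportCSVForm(data=request.POST, files=request.FILES,
#                        importer_class=ContactImporter, dialect=csv.excel)
#   if form.is_valid():
#       form.import_csv()                # aborts the transaction on bad rows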
|
XeCycle/indico
|
indico/MaKaC/services/interface/rpc/offline.py
|
Python
|
gpl-3.0
| 3,627 | 0.001103 |
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from indico.util.json import dumps
import MaKaC
def jsonDescriptor(object):
# TODO: Merge with locators?
if isinstance(object, MaKaC.conference.Conference):
return {'conference': object.getId()}
    elif isinstance(object, MaKaC.conference.Contribution):
return {'conference': object.getConference().getId(),
                'contribution': object.getId()}
elif isinstance(object, MaKaC.conference.Session):
return {'conference': object.getConference().getId(),
'session': object.getId()}
elif isinstance(object, MaKaC.conference.SessionSlot):
return {'conference': object.getConference().getId(),
'session': object.getSession().getId(),
'slot': object.getId()}
elif isinstance(object, MaKaC.schedule.BreakTimeSchEntry):
info = {'conference': object.getOwner().getConference().getId(),
'break': object.getId()}
if isinstance(object.getOwner(), MaKaC.conference.SessionSlot):
info['slot'] = object.getOwner().getId()
info['session'] = object.getOwner().getSession().getId()
return info
return None
def jsonDescriptorType(descriptor):
if 'break' in descriptor:
return MaKaC.schedule.BreakTimeSchEntry
elif 'slot' in descriptor:
return MaKaC.conference.SessionSlot
elif 'contribution' in descriptor:
return MaKaC.conference.Contribution
elif 'session' in descriptor:
return MaKaC.conference.Session
elif 'conference' in descriptor:
return MaKaC.conference.Conference
else:
return None
def decideInheritanceText(event):
if isinstance(event, MaKaC.conference.SessionSlot):
text = _("Inherit from parent slot")
elif isinstance(event, MaKaC.conference.Session):
text = _("Inherit from parent session")
elif isinstance(event, MaKaC.conference.Conference):
text = _("Inherit from parent event")
else:
text = str(repr(parent))
return text
def roomInfo(event, level='real'):
# gets inherited/real/own location/room properties
if level == 'inherited':
room = event.getInheritedRoom()
location = event.getInheritedLocation()
text = decideInheritanceText(event.getLocationParent())
elif level == 'real':
room = event.getRoom()
location = event.getLocation()
text = decideInheritanceText(event)
elif level == 'own':
room = event.getOwnRoom()
location = event.getOwnLocation()
text = ''
locationName, roomName, address = None, None, None
if location:
locationName = location.getName()
address = location.getAddress()
if room:
roomName = room.getName()
return {'location': locationName,
'room': roomName,
'address': address,
'text': text}
|
mholtrop/Phys605
|
Python/Getting_Started/CSV_Plot.py
|
Python
|
gpl-3.0
| 1,762 | 0.009081 |
#
# Here is a more complicated example that loads a .csv file and
# then creates a plot from the x,y data in it.
# The data file is the saved curve from partsim.com of the low pass filter.
# It was saved as xls file and then opened in Excel and exported to csv
#
# First import the csv parser, the numeric tools and plotting tools
import csv
import numpy as np # This gives numpy the shorthand np
import matplotlib.pyplot as plt
#
# Open the file
#
f = open("low_pass
|
_filter.csv")
#
# Pass the file to the csv parser
#
data = csv.reader(f)
headers = data.next()
units = data.next()
#
# Here is a "wicked" way in Python that does quicker what the
# the more verbose code does below. It is "Matlab" like.
# dat = np.array([ [float(z) for z in x] for x in data ]) # put the data in dat as floats.
# x_ar = dat[:,0] # select the first column
# y1_ar = dat[:,1] # select the second column
# y2_ar = dat[:,2] # select the third column
x_ar = [] # Create a new list (array) called dat to hold the data.
y1_ar = []
y2_ar = []
for (x,y1,y2) in data: # Unpack the csv data into x,y1,y2 variables.
x_ar.append( float(x))
y1_ar.append(float(y1))
y2_ar.append(float(y2)) # Convert the variable from string to float and add to dat
#
# Now plot the data. plt.plot returns a tuple (plot, )
#
(p1,) = plt.plot(x_ar,y1_ar,color='green',label=headers[1])
(p2,) = plt.plot(x_ar,y2_ar,color='blue',label=headers[2])
plt.legend(handles=[p1,p2]) # make sure the legend is drawn
plt.xscale('log') # plot with a log x axis
plt.yscale('log')
plt.grid(True) # and a grid.
plt.title('Low pass filter')
plt.xlabel('F[Hz]',position=(0.9,1))
plt.ylabel('Amplitude [Volt]')
plt.show() # show the plot.
|
pymir3/pymir3
|
scripts/ismir2016/birdclef_tza_bands.py
|
Python
|
mit
| 13,712 | 0.004959 |
import numpy as np
import logging
import glob
import bandwise_features as BF
import time
import mir3.modules.features.stats as feat_stats
import mir3.modules.tool.to_texture_window as texture_window
import remove_random_noise as rrn
from multiprocessing import Pool
logger = logging.getLogger("birdclef_tza_bands")
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
class BandJob:
"""
:type filename: string
:type band_iterator: string
:type band_step: int
:type lnf_use: bool
:type lnf_compensation: string
:type lnf_passes: int
"""
def __init__(self, filename, band_iterator='mel', band_step=500, band_nbands=None, also_one_band=False, lnf_use=False, lnf_compensation='log10', lnf_passes=1):
self.filename = filename
self.band_iterator = band_iterator
self.band_step = band_step
self.band_nbands = band_nbands
self.also_one_band=also_one_band
self.lnf_use = lnf_use
self.lnf_compensation = lnf_compensation
self.lnf_passes = lnf_passes
class BandExperiment:
def __init__(self, mirex_list_file, mirex_scratch_folder,
output_file,
band_iterator='mel',
band_step=500,
band_nbands=None,
also_one_band=False,
lnf_use=False,
lnf_compensation='log10',
lnf_passes=1,
mean=True, variance=True, slope=False, limits=False, csv=False, normalize=True):
self.mirex_list_file=mirex_list_file
self.mirex_scratch_folder=mirex_scratch_folder
self.output_file=output_file
self.band_iterator=band_iterator
self.band_step=band_step
self.band_nbands=band_nbands
self.also_one_band=also_one_band
self.lnf_use=lnf_use
self.lnf_compensation=lnf_compensation
self.lnf_passes=lnf_passes
self.mean=mean
self.variance=variance
self.slope=slope
self.limits=limits
self.csv=csv
self.normalize=normalize
# def tza_sep_bands_parallel(experiment, n_processes = 1):
# """
# :type experiment: BandExperiment
# :type n_processes: int
# """
#
# files = sorted(glob.glob(experiment.wav_path + "*.wav"))
# jobs = []
# for f in files:
# jobs.append(BandJob(f, experiment.band_iterator, experiment.band_step, experiment.band_nbands,
# lnf_use=experiment.lnf_use,
# lnf_compensation=experiment.lnf_compensation,
# lnf_passes=experiment.lnf_passes))
#
# pool = Pool(processes=n_processes)
#
# features = pool.map(tza_sep_bands, jobs)
#
# pool.close()
# pool.join()
#
# n_bands = (len(features[0]) - 2) / 6
#
# print "number of bands: ", n_bands, len(features[0])
#
# bands = dict()
#
# for band in features:
# for i in range(0, len(band)-2, 6):
# track_feats = []
# for k in range(6):
# track_feats.append(band[i+k])
# key = band[i].metadata.feature.split("_")[1]
# if not bands.has_key(key):
# bands[key] = []
# bands[key].append(track_feats)
#
# for band in bands:
# print band
# for track in bands[band]:
# print track[0].metadata.filename
# for feature in track:
# print feature.metadata.feature
#
# #TODO: tenho que fazer o feats.join.... pra fazer o join precisa de um objeto Bandwise features
# #for band in bands:
#
#
# #
# # stats = feat_stats.Stats()
# # m = stats.stats(features,
# # mean=experiment.mean,
# # variance=experiment.variance,
# # slope=experiment.slope,
# # limits=experiment.limits,
# # csv=experiment.csv,
# # normalize=experiment.normalize)
# #
# # f = open(experiment.output_file, "wb")
# #
# # m.save(f)
# #
# # f.close()
#
# def tza_sep_bands(job):
# """
# :type job: BandJob
# """
#
# if job.lnf_use:
# feats = BF.BandwiseFeatures(job.filename, db_spec=False)
# rrn.remove_random_noise(feats.spectrogram, filter_compensation=job.lnf_compensation, passes=job.lnf_passes)
# feats.spec_to_db()
# else:
# feats = BF.BandwiseFeatures(job.filename)
#
# if job.band_iterator == 'one':
# a = BF.OneBand(low=int(feats.spectrogram.metadata.min_freq),
# high=int(feats.spectrogram.metadata.max_freq))
#
# if job.band_iterator == 'linear':
# a = BF.LinearBand(low=int(feats.spectrogram.metadata.min_freq),
# high=int(feats.spectrogram.metadata.max_freq),
# step=job.band_step,
# nbands=job.band_nbands)
# if job.band_iterator == 'mel':
# a = BF.MelBand(low=int(feats.spectrogram.metadata.min_freq),
# high=int(feats.spectrogram.metadata.max_freq),
# step=job.band_step,
# nbands=job.band_nbands)
#
# logger.debug("Extracting features for %s", job.filename)
# T0 = time.time()
# feats.calculate_features_per_band(a)
# T1 = time.time()
# logger.debug("Feature extraction took %f seconds", T1 - T0)
#
# return feats.band_features
def tza_bands_parallel(experiment, n_processes = 1):
"""
:type experiment: BandExperiment
:type n_processes: int
"""
jobs = []
with open(experiment.mirex_list_file) as f:
files = f.read().splitlines()
for f in files:
jobs.append(BandJob(f, experiment.band_iterator, experiment.band_step, experiment.band_nbands,
also_one_band=experiment.also_one_band,
lnf_use=experiment.lnf_use,
lnf_compensation=experiment.lnf_compensation,
lnf_passes=experiment.lnf_passes))
#calculate features
pool = Pool(processes=n_processes)
features = pool.map(tza_bands, jobs)
pool.close()
pool.join()
jobs = []
for f in features:
jobs.append((f, 100))
#calculate texture windows
pool = Pool(processes=n_processes)
textures = pool.map(tza_calc_textures, jobs)
pool.close()
pool.join()
stats = feat_stats.Stats()
m = stats.stats(textures,
mean=experiment.mean,
variance=experiment.variance,
slope=experiment.slope,
limits=experiment.limits,
csv=experiment.csv,
                    normalize=experiment.normalize)
f = open(experiment.mirex_scratch_folder + "/" + experiment.output_file, "wb")
m.save(f, restore_state=True)
f.close()
return m
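# Illustrative usage sketch (not part of the original script): wiring up the
# pipeline described above (per-band features, texture windows, statistics).
# The list file, scratch folder, output name and band settings are
# hypothetical.
def _band_experiment_example():
    experiment = BandExperiment('lists/train.txt', 'scratch',
                                'features_mel500.fm',
                                band_iterator='mel', band_step=500)
    return tza_bands_parallel(experiment, n_processes=4)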
def tza_calc_textures(args):
tw = texture_window.ToTextureWindow()
feature = args[0]
logger.debug("calculating textures for %s", feature.metadata.filename)
T0 = time.time()
results = tw.to_texture(feature, args[1])
T1 = time.time()
logger.debug("texture calculation took %f seconds", T1-T0)
return results
def tza_bands(job):
"""
:type job: BandJob
"""
if job.lnf_use:
feats = BF.BandwiseFeatures(job.filename, db_spec=False)
rrn.remove_random_noise(feats.spectrogram, filter_compensation=job.lnf_compensation, passes=job.lnf_passes)
feats.spec_to_db()
else:
feats = BF.BandwiseFeatures(job.filename)
if job.band_iterator == 'one':
a = BF.OneBand(low=int(feats.spectrogram.metadata.min_freq),
high=int(feats.spectrogram.metadata.max_freq))
if job.band_iterator == 'linear':
a = BF.LinearBand(low=int(feats.spectrogram.metadata.min_freq),
high=int(feats.spectrogram.metadata.max_freq),
step=job.band_step,
nbands=job.band_nbands)
if job.band_iterator == 'mel':
a = B
|
Kami/libcloud
|
libcloud/test/storage/test_oss.py
|
Python
|
apache-2.0
| 31,715 | 0.000126 |
# -*- coding=utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import os
import sys
import unittest
try:
import mock
except ImportError:
from unittest import mock
from libcloud.utils.py3 import b
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import parse_qs
from libcloud.common.types import InvalidCredsError
from libcloud.storage.base import Container, Object
from libcloud.storage.types import ContainerDoesNotExistError
from libcloud.storage.types import ContainerError
from libcloud.storage.types import ContainerIsNotEmptyError
from libcloud.storage.types import InvalidContainerNameError
from libcloud.storage.types import ObjectDoesNotExistError
from libcloud.storage.types import ObjectHashMismatchError
from libcloud.storage.drivers.oss import OSSConnection
from libcloud.storage.drivers.oss import OSSStorageDriver
from libcloud.storage.drivers.oss import CHUNK_SIZE
from libcloud.storage.drivers.dummy import DummyIterator
from libcloud.test import MockHttp, generate_random_data, make_response # pylint: disable-msg=E0611
from libcloud.test.file_fixtures import StorageFileFixtures # pylint: disable-msg=E0611
from libcloud.test.secrets import STORAGE_OSS_PARAMS
class OSSConnectionTestCase(unittest.TestCase):
def setUp(self):
self.conn = OSSConnection('44CF9590006BF252F707',
'OtxrzxIsfpFjA7SwPzILwy8Bw21TLhquhboDYROV')
def test_signature(self):
expected = b('26NBxoKdsyly4EDv6inkoDft/yA=')
headers = {
'Content-MD5': 'ODBGOERFMDMzQTczRUY3NUE3NzA5QzdFNUYzMDQxNEM=',
'Content-Type': 'text/html',
'Expires': 'Thu, 17 Nov 2005 18:49:58 GMT',
'X-OSS-Meta-Author': 'foo@bar.com',
'X-OSS-Magic': 'abracadabra',
'Host': 'oss-example.oss-cn-hangzhou.aliyuncs.com'
}
action = '/oss-example/nelson'
actual = OSSConnection._get_auth_signature('PUT', headers, {},
headers['Expires'],
self.conn.key,
action,
'x-oss-')
self.assertEqual(expected, actual)
class ObjectTestCase(unittest.TestCase):
def test_object_with_chinese_name(self):
driver = OSSStorageDriver(*STORAGE_OSS_PARAMS)
obj = Object(name='中文', size=0, hash=None, extra=None,
meta_data=None, container=None, driver=driver)
self.assertTrue(obj.__repr__() is not None)
class OSSMockHttp(MockHttp, unittest.TestCase):
fixtures = StorageFileFixtures('oss')
base_headers = {}
def _unauthorized(self, method, url, body, headers):
return (httplib.UNAUTHORIZED,
'',
self.base_headers,
httplib.responses[httplib.OK])
def _list_containers_empty(self, method, url, body, headers):
body = self.fixtures.load('list_containers_empty.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _list_containers(self, method, url, body, headers):
body = self.fixtures.load('list_containers.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _list_container_objects_empty(self, method, url, body, headers):
body = self.fixtures.load('list_container_objects_empty.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _list_container_objects(self, method, url, body, headers):
body = self.fixtures.load('list_container_objects.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _list_container_objects_chinese(self, method, url, body, headers):
body = self.fixtures.load('list_container_objects_chinese.xml')
        return (httplib.OK,
body,
self.base_headers,
                httplib.responses[httplib.OK])
def _list_container_objects_prefix(self, method, url, body, headers):
params = {'prefix': self.test.prefix}
self.assertUrlContainsQueryParams(url, params)
body = self.fixtures.load('list_container_objects_prefix.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _get_container(self, method, url, body, headers):
return self._list_containers(method, url, body, headers)
def _get_object(self, method, url, body, headers):
return self._list_containers(method, url, body, headers)
def _notexisted_get_object(self, method, url, body, headers):
return (httplib.NOT_FOUND,
body,
self.base_headers,
httplib.responses[httplib.NOT_FOUND])
def _test_get_object(self, method, url, body, headers):
self.base_headers.update(
{'accept-ranges': 'bytes',
'connection': 'keep-alive',
'content-length': '0',
'content-type': 'application/octet-stream',
'date': 'Sat, 16 Jan 2016 15:38:14 GMT',
'etag': '"D41D8CD98F00B204E9800998ECF8427E"',
'last-modified': 'Fri, 15 Jan 2016 14:43:15 GMT',
'server': 'AliyunOSS',
'x-oss-object-type': 'Normal',
'x-oss-request-id': '569A63E6257784731E3D877F',
'x-oss-meta-rabbits': 'monkeys'})
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _invalid_name(self, method, url, body, headers):
# test_create_container_bad_request
return (httplib.BAD_REQUEST,
body,
headers,
httplib.responses[httplib.OK])
def _already_exists(self, method, url, body, headers):
# test_create_container_already_existed
return (httplib.CONFLICT,
body,
headers,
httplib.responses[httplib.OK])
def _create_container(self, method, url, body, headers):
# test_create_container_success
self.assertEqual('PUT', method)
self.assertEqual('', body)
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _create_container_location(self, method, url, body, headers):
# test_create_container_success
self.assertEqual('PUT', method)
location_constraint = ('<CreateBucketConfiguration>'
'<LocationConstraint>%s</LocationConstraint>'
'</CreateBucketConfiguration>' %
self.test.ex_location)
self.assertEqual(location_constraint, body)
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _delete_container_doesnt_exist(self, method, url, body, headers):
# test_delete_container_doesnt_exist
return (httplib.NOT_FOUND,
body
|
pybursa/homeworks
|
s_shybkoy/hw5/hw5_task1.py
|
Python
|
gpl-2.0
| 2,975 | 0.00041 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
u"""
Задание 1: классный Человек.
УСЛОВИЕ:
Реализовать класс Person, который отображает запись в книге контактов.
Класс имеет 4 атрибута:
- surname - строка - фамилия контакта (обязательный)
- first_name - строка - имя контакта (обязательный)
- nickname - строка - псевдоним (опциональный)
- birth_date - объект datetime.date (обязательный)
Каждый вызов класса должен создавать экземпляр (инстанс) класса с указанными
атрибутами.
Также класс имеет 2 метода:
- get_age() - считает возраст контакта в полных годах на дату вызова и
возвращает строку вида: "27";
- get_fullname() - возвращает строку, отражающую полное имя (фамилия + имя)
контакта;
"""
__author__ = "Sergei Shybkoi"
__copyright__ = "Copyright 2014, The Homework Project"
__email__ = "heap_@mail.ru"
__status__ = "Production"
__date__ = "2014-11-18"
import datetime
class Person(object):
u"""Класс Person"""
def __init__(self, surname, first_name, birth_date, nickname=None):
u"""Инишн класса"""
try:
var_date = datetime.datetime.strptime(birth_date, "%Y-%m-%d")
res_date = datetime.date(var_date.year,
var_date.month, var_date.day)
except TypeError:
print "Incorrect type of birthday date!"
res_date = None
except ValueError:
print "Wrong value of birthday date!"
res_date = None
        self.surname = surname
self.first_name = first_name
self.birth_date = res_date
if nickname is not None:
self.nickname = nickname
def get_age(self):
u"""Метод класса подсчитывает и выводит количество полных лет"""
if self.birth_date is not None:
            today_date = datetime.date.today()
delta = today_date.year - self.birth_date.year
if today_date.month <= self.birth_date.month \
and today_date.day < self.birth_date.day:
delta -= 1
print "Age:", delta
return str(delta)
else:
print "No correct data about person's birthday."
return "0"
def get_fullname(self):
u"""Метод выводит и возвращаем полное имя экземпляра класса Person"""
print self.surname, self.first_name
return self.surname + " " + self.first_name
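# Illustrative usage sketch (not part of the original homework file), assuming
# the Person class defined above; the contact data is made up.
def _person_example():
    person = Person("Ivanov", "Ivan", "1990-05-17", nickname="vanya")
    full_name = person.get_fullname()  # "Ivanov Ivan"
    age = person.get_age()             # e.g. "34", depending on today's date
    return full_name, age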
|
ShaolongHu/lpts
|
tests/glxgears.py
|
Python
|
gpl-2.0
| 3,938 | 0.01248 |
# -*- coding:utf-8 -*-
'''
Execution script for the x11perf test tool
'''
import os, shutil, re
from test import BaseTest
from lpt.lib.error import *
from lpt.lib import lptxml
from lpt.lib import lptlog
from lpt.lib.share import utils
from lpt.lib import lptreport
import glob
glxgears_keys = ["gears"]
class TestControl(BaseTest):
'''
    Inherits attributes and methods from BaseTest
'''
def __init__(self, jobs_xml, job_node, tool, tarball='UnixBench5.1.3-1.tar.bz2'):
super(TestControl, self).__init__(jobs_xml, job_node, tool, tarball)
def check_deps(self):
        '''Building ubgears requires the libx11-devel and libGL-devel packages
'''
utils.has_gcc()
utils.has_file("libX11-devel", "/usr/include/X11/Xlib.h")
utils.has_file("libGL-devel", "/usr/include/GL/gl.h")
utils.has_file("libXext-devel","/usr/include/X11/extensions/Xext.h")
def setup(self):
        '''Compile the sources and set up the program
'''
if not self.check_bin(self.processBin):
self.tar_src_dir = self.extract_bar()
os.chdir(self.tar_src_dir)
utils.make(extra='clean', make='make')
            # Modify the Makefile
lptlog.info("修改Makefile, 取消#GRAPHIC_TESTS = defined注释")
cmd = '''sed -i "s/^#GRAPHIC_TESTS/GRAPHIC_TESTS/g" Makefile '''
utils.system(cmd)
self.compile(make_status=True)
os.chdir(self.lpt_root)
def run(self):
tool_node = self.check_tool_result_node()
lptlog.info("----------开始获取测试参数")
self.times = self.get_config_value(tool_node, "times", 10, valueType=int)
lptlog.info("测试次数: %d" % self.times)
self.parallels = [1]
cmd = "./Run"
args_list = ["ubgears", "-i", "%d" % self.times]
self.mainParameters["parameters"] = " ".join([cmd]+args_list)
        # Run the unixbench program; change into the unixbench root directory
os.chdir(self.tar_src_dir)
utils.system("rm -rf results/*")
lptlog.info("---------运行测试脚本")
utils.run_shell2(cmd, args_list=args_list, file=os.devnull)
os.chdir(self.lpt_root)
def create_result(self):
        # Data processing
#
os.chdir(self.tar_src_dir)
temp_result_list = glob.glob("./results/*[0-9]")
if not temp_result_list:
raise NameError, "% result data not found.." % self.tool
else:
temp_result_file = temp_result_list[0]
self.__match_index(temp_result_file)
        # Return to the root directory
os.chdir(self.lpt_root)
def __match_index(self, file):
        '''Capture the unixbench screen output
'''
self.parallels = [1]
self.times = 3
        result_dic = {}.fromkeys(glxgears_keys, 0)
result_lines = utils.read_all_lines(file)
for parallel in self.parallels:
            re_match = "[\d]+ CPUs in system; running %d parallel copy of tests" % parallel
parallel_result_dic = result_dic.copy()
for line in result_lines:
if re.search(re_match, line, re.I):
parallel_index = result_lines.index(line)
paralell_result_list = [ self.__get_value(result_lines, parallel_index+index) for index in (5,) ]
for l,v in zip(tuple(glxgears_keys), tuple([utils.change_type(i) for i in paralell_result_list])):
parallel_result_dic[l] = "%.1f" % v
parallel_result_attrib = self.create_result_node_attrib("Average", self.times, parallel, self.parallels)
self.result_list.append([parallel_result_attrib, parallel_result_dic])
def __get_value(self, lines, index):
return lines[index].split()[-2]
|
auduny/home-assistant
|
tests/components/stream/test_recorder.py
|
Python
|
apache-2.0
| 2,235 | 0 |
"""The tests for hls streams."""
from datetime import timedelta
from io import BytesIO
from unittest.mock import patch
from homeassistant.setup import async_setup_component
from homeassistant.components.stream.core import Segment
from homeassistant.components.stream.recorder import recorder_save_worker
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed
from tests.components.stream.common import (
generate_h264_video, preload_stream)
async def test_record_stream(hass, hass_client):
"""
Test record stream.
Purposefully not mocking anything here to test full
integration with the stream component.
"""
await async_setup_component(hass, 'stream', {
'stream': {}
})
with patch(
'homeassistant.components.stream.recorder.recorder_save_worker'):
# Setup demo track
source = generate_h264_video()
stream = preload_stream(hass, source)
recorder = stream.add_provider('recorder')
stream.start()
segments = 0
while True:
segment = await recorder.recv()
if not segment:
break
segments += 1
stream.stop()
assert segments > 1
async def test_recorder_timeout(hass, hass_client):
"""Test recorder timeout."""
await async_setup_component(hass, 'stream', {
'stream': {}
})
with patch(
'homeassistant.components.stream.recorder.RecorderOutput.cleanup'
) as mock_cleanup:
        # Setup demo track
source = generate_h264_video()
stream = preload_stream(hass, source)
recorder = stream.add_provider('recorder')
stream.start()
await recorder.recv()
# Wait a minute
future = dt_util.utcnow() + timedelta(minutes=1)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert mock_cleanup.called
async def test_recorder_save():
"""Test recorder save."""
# Setup
    source = generate_h264_video()
output = BytesIO()
output.name = 'test.mp4'
# Run
recorder_save_worker(output, [Segment(1, source, 4)])
# Assert
assert output.getvalue()
|
kyclark/metagenomics-book
|
python/hello/hello_arg3.py
|
Python
|
gpl-3.0
| 251 | 0 |
#!/usr/bin/env python3
"""hello with args"""
import sys
import os
args = sys.argv
if len(args) != 2:
script = os.path.basename(args[0])
    print('Usage: {} NAME'.format(script))
sys.exit(1)
name = args[1]
print('Hello, {}!'.format(name))
|
egancatriona1/python-jumpstart
|
apps/06_lolcat_factory/you_try/program.py
|
Python
|
mit
| 1,553 | 0.000644 |
import os
import platform
import subprocess
import cat_service
from apps.general import headers
def main():
headers.print_header('LOLCAT FACTORY')
# look for a directory if not there create it
dir_path = get_or_create_output_folder()
n_cats = get_number_cats()
# contact the lol cat api, get binary
download_cats(dir_path, n_cats)
# launch explorer
display_cats(dir_path)
def get_or_create_output_folder():
dir_path = os.path.join('C:\\Users', 'Catriona', 'Desktop', 'Lolcats')
if not os.path.exists(dir_path) or not os.path.isdir(dir_path):
os.mkdir(dir_path)
return dir_path
def get_number_cats():
n_cats = 0
while True:
        number_files = input('On a scale of 1 to 10 how much cheering up do you need?')
if number_files.isnumeric():
n_cats = int(number_files)
return n_cats
print('That was not a valid number please try again')
def download_cats(dir_path, n_cats):
for i in range(n_cats):
cat_service.get_cat(dir_path, 'lol_cat{}.jpg'.format(i))
def display_cats(folder):
print('Opening folder: {}'.format(folder))
    if platform.system() == 'Darwin':
subprocess.call(['open', folder])
elif platform.system() == 'Windows':
print('with windows')
subprocess.call(['explorer', folder])
elif platform.system() == 'Linux':
subprocess.call(['xdg-open', folder])
else:
print('Do not support your os "{}"'.format(platform.system()))
if __name__ == '__main__':
main()
|
arunchaganty/presidential-debates
|
django/twit/migrations/0007_retweet.py
|
Python
|
mit
| 697 | 0.002869 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('twit', '0006_auto_20160419_0248'),
]
operations = [
migrations.CreateModel(
name='Retweet',
fields=[
('id', models.BigIntegerField(serialize=False, help_text='Unique id that comes from Twitter', primary_key=True)),
('created_at', models.DateTimeField(help_text='Time tweet was created')),
('tweet', models.ForeignKey(to='twit.Tweet')),
('user', models.ForeignKey(to='twit.User')),
],
),
]
|
Hironsan/uecda-pyclient
|
src/connection.py
|
Python
|
mit
| 1,996 | 0 |
# -*- coding: utf-8 -*-
import socket
import struct
import signal
signal.signal(signal.SIGPIPE, signal.SIG_IGN)
class Connection(object):
"""
    Class for communicating with the server
"""
BINARY_INT = '!1I'
BINARY_TABLE = '!120I'
def __init__(self, addr='127.0.0.1', port=42485):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
self.sock.connect((addr, port))
def __enter__(self):
return self
def __exit__(self, *exc):
self.sock.close()
    def recv_int(self):
unpacked_value = self._recv_msg(byte_length=4)
s = struct.Struct(self.BINARY_INT)
integer = s.unpack(unpacked_value)
return integer[0]
def recv_table(self):
unpacked_value = self._recv_msg(byte_length=480)
s = struct.Struct(self.BINARY_TABLE)
ls = s.unpack(unpacked_value)
        table = [ls[15 * i: 15 * (i + 1)][:] for i in range(8)]  # convert to an 8x15 list
return table
def _recv_msg(self, byte_length):
unpacked_data = b''
while len(unpacked_data) < byte_length:
chunk = self.sock.recv(byte_length - len(unpacked_data), 0)
if chunk == b'':
raise RuntimeError('socket connection broken')
unpacked_data += chunk
return unpacked_data
    # The table should not be built inside this method; the construction should be factored out and the array passed to send_table
def send_name(self, name, protocol=20070):
table = [[0] * 15 for i in range(8)]
table[0][0] = protocol
for i, ch in enumerate(name):
table[1][i] = ord(ch)
self.send_table(table)
def send_table(self, table):
        ls = [item for inner in table for item in inner]  # flatten the 2D list into 1D
s = struct.Struct(self.BINARY_TABLE)
packed_value = s.pack(*ls)
self._send_msg(packed_value)
def _send_msg(self, msg):
self.sock.sendall(msg)
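# Illustrative usage sketch (not part of the original client), assuming a
# UECda server is listening on the default address and port used above; the
# player name is made up.
def _connection_example():
    with Connection(addr='127.0.0.1', port=42485) as conn:
        conn.send_name('my-client')      # register a (hypothetical) player name
        player_number = conn.recv_int()  # integer assigned by the server
        table = conn.recv_table()        # 8x15 table of unsigned ints
    return player_number, table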
|
CenterForOpenScience/SHARE
|
share/transform/chain/__init__.py
|
Python
|
apache-2.0
| 484 | 0 |
from share.transform.chain.exceptions import * # noqa
from share.transform.chain.links import * # noqa
from share.transform.chain.parsers import * # noqa
from share.transform.chain.transformer import ChainTransformer # noqa
from share.transform.chain.links import Context
# Context singleton to be used for parser definitions
# Class SHOULD be thread safe
# Accessing subattribtues will result in a new copy of the context
# to avoid leaking data between chains
ctx = Context()
| |
beni55/sentry
|
src/sentry/migrations/0015_auto__add_field_message_project__add_field_messagecountbyminute_projec.py
|
Python
|
bsd-3-clause
| 15,392 | 0.007926 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'GroupedMessage', fields ['checksum', 'logger', 'view']
db.delete_unique('sentry_groupedmessage', ['checksum', 'logger', 'view'])
# Removing unique constraint on 'MessageFilterValue', fields ['group', 'value', 'key']
db.delete_unique('sentry_messagefiltervalue', ['group_id', 'value', 'key'])
# Removing unique constraint on 'FilterValue', fields ['key', 'value']
db.delete_unique('sentry_filtervalue', ['key', 'value'])
# Removing unique constraint on 'MessageCountByMinute', fields ['date', 'group']
db.delete_unique('sentry_messagecountbyminute', ['date', 'group_id'])
# Adding field 'Message.project'
db.add_column('sentry_message', 'project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sentry.Project'], null=True), keep_default=False)
# Adding field 'MessageCountByMinute.project'
db.add_column('sentry_messagecountbyminute', 'project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sentry.Project'], null=True), keep_default=False)
# Adding unique constraint on 'MessageCountByMinute', fields ['project', 'date', 'group']
db.create_unique('sentry_messagecountbyminute', ['project_id', 'date', 'group_id'])
# Adding field 'FilterValue.project'
db.add_column('sentry_filtervalue', 'project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sentry.Project'], null=True), keep_default=False)
# Adding unique constraint on 'FilterValue', fields ['project', 'value', 'key']
        db.create_unique('sentry_filtervalue', ['project_id', 'value', 'key'])
# Adding field 'MessageFilterValue.project'
db.add_column('sentry_messagefiltervalue', 'project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sentry.Project'], null=True), keep_default=False)
# Adding unique constraint on 'MessageFilterValue', fields ['project', 'group', 'value', 'key']
        db.create_unique('sentry_messagefiltervalue', ['project_id', 'group_id', 'value', 'key'])
# Adding field 'GroupedMessage.project'
db.add_column('sentry_groupedmessage', 'project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sentry.Project'], null=True), keep_default=False)
# Adding unique constraint on 'GroupedMessage', fields ['project', 'checksum', 'logger', 'view']
db.create_unique('sentry_groupedmessage', ['project_id', 'checksum', 'logger', 'view'])
def backwards(self, orm):
# Removing unique constraint on 'GroupedMessage', fields ['project', 'checksum', 'logger', 'view']
db.delete_unique('sentry_groupedmessage', ['project_id', 'checksum', 'logger', 'view'])
# Removing unique constraint on 'MessageFilterValue', fields ['project', 'group', 'value', 'key']
db.delete_unique('sentry_messagefiltervalue', ['project_id', 'group_id', 'value', 'key'])
# Removing unique constraint on 'FilterValue', fields ['project', 'value', 'key']
db.delete_unique('sentry_filtervalue', ['project_id', 'value', 'key'])
# Removing unique constraint on 'MessageCountByMinute', fields ['project', 'date', 'group']
db.delete_unique('sentry_messagecountbyminute', ['project_id', 'date', 'group_id'])
# Deleting field 'Message.project'
db.delete_column('sentry_message', 'project_id')
# Deleting field 'MessageCountByMinute.project'
db.delete_column('sentry_messagecountbyminute', 'project_id')
# Adding unique constraint on 'MessageCountByMinute', fields ['date', 'group']
db.create_unique('sentry_messagecountbyminute', ['date', 'group_id'])
# Deleting field 'FilterValue.project'
db.delete_column('sentry_filtervalue', 'project_id')
# Adding unique constraint on 'FilterValue', fields ['key', 'value']
db.create_unique('sentry_filtervalue', ['key', 'value'])
# Deleting field 'MessageFilterValue.project'
db.delete_column('sentry_messagefiltervalue', 'project_id')
# Adding unique constraint on 'MessageFilterValue', fields ['group', 'value', 'key']
db.create_unique('sentry_messagefiltervalue', ['group_id', 'value', 'key'])
# Deleting field 'GroupedMessage.project'
db.delete_column('sentry_groupedmessage', 'project_id')
# Adding unique constraint on 'GroupedMessage', fields ['checksum', 'logger', 'view']
db.create_unique('sentry_groupedmessage', ['checksum', 'logger', 'view'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_leng
|
conejoninja/xbmc-seriesly
|
servers/wupload.py
|
Python
|
gpl-3.0
| 11,622 | 0.015835 |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# seriesly - XBMC Plugin
# Conector para Wupload
# http://blog.tvalacarta.info/plugin-xbmc/seriesly/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def test_video_exists( page_url ):
logger.info("[wupload.py] test_video_exists(page_url='%s')" % page_url)
    # Exists: http://www.wupload.com/file/2666595132
    # Does not exist: http://www.wupload.es/file/2668162342
location = scrapertools.get_header_from_response(page_url,header_to_get="location")
logger.info("location="+location)
if location!="":
page_url = location
data = scrapertools.downloadpageWithoutCookies(page_url)
logger.info("data="+data)
patron = '<p class="fileInfo filename"><span>Filename: </span> <strong>([^<]+)</strong></p>'
matches = re.compile(patron,re.DOTALL).findall(data)
if len(matches)>0:
return True,""
else:
patron = '<p class="deletedFile">(Sorry, this file has been removed.)</p>'
matches = re.compile(patron,re.DOTALL).findall(data)
if len(matches)>0:
return False,matches[0]
patron = '<div class="section CL3 regDownloadMessage"> <h3>(File does not exist)</h3> </div>'
matches = re.compile(patron,re.DOTALL).findall(data)
if len(matches)>0:
return False,matches[0]
return True,""
# Returns an array of possible video URLs from the page_url
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
logger.info("[wupload.py] get_video_url( page_url='%s' , user='%s' , password='%s', video_password=%s)" % (page_url , user , "**************************"[0:len(password)] , video_password) )
if not premium:
#return get_free_url(page_url)
logger.info("[wupload.py] free no soportado")
else:
        # Log in and obtain the session cookie
#login_url = "http://www.wupload.es/account/login"
login_url = "http://www.wupload.com/account/login"
post = "email="+user.replace("@","%40")+"&redirect=%2F&password="+password+"&rememberMe=1"
location = scrapertools.get_header_from_response( url=login_url, header_to_get="location", post=post)
logger.info("location="+location)
if location!="":
login_url = location
data = scrapertools.cache_page(url=login_url, post=post)
        # Get the final URL
headers = scrapertools.get_headers_from_response(page_url)
location1 = ""
for header in headers:
logger.info("header1="+str(header))
if header[0]=="location":
location1 = header[1]
logger.info("location1="+str(header))
        # Get the final URL
headers = scrapertools.get_headers_from_response(location1)
location2 = ""
content_disposition = ""
for header in headers:
logger.info("header2="+str(header))
if header[0]=="location":
location2 = header[1]
location = location2
if location=="":
location = location1
return [ ["(Premium) [wupload]",location + "|" + "User-Agent="+urllib.quote("Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12") ] ]
return []
def get_free_url(page_url):
location = scrapertools.get_header_from_response(page_url,header_to_get="location")
if location!="":
page_url = location
logger.info("[wupload.py] location=%s" % page_url)
video_id = extract_id(page_url)
logger.info("[wupload.py] video_id=%s" % video_id)
data = scrapertools.cache_page(url=page_url)
patron = 'href="(.*?start=1.*?)"'
matches = re.compile(patron).findall(data)
scrapertools.printMatches(matches)
if len(matches)==0:
logger.error("[wupload.py] No encuentra el enlace Free")
return []
    # Get the free download link
download_link = matches[0]
if not download_link.startswith("http://"):
download_link = urlparse.urljoin(page_url,download_link)
logger.info("[wupload.py] Link descarga: "+ download_link)
    # Fetch the download link
headers = []
headers.append( ["X-Requested-With", "XMLHttpRequest"] )
headers.append( ["Referer" , page_url ])
headers.append( ["User-Agent" , "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12" ])
headers.append( ["Content-Type" , "application/x-www-form-urlencoded; charset=UTF-8"])
headers.append( ["Accept-Encoding" , "gzip, deflate"])
headers.append( ["Accept","*/*"])
headers.append( ["Accept-Language","es-es,es;q=0.8,en-us;q=0.5,en;q=0.3"])
headers.append( ["Accept-Charset","ISO-8859-1,utf-8;q=0.7,*;q=0.7"])
headers.append( ["Connection","keep-alive"])
headers.append( ["Pragma","no-cache"])
headers.append( ["Cache-Control","no-cache"])
data = scrapertools.cache_page( download_link , headers=headers, post="" )
logger.info(data)
while True:
        # Detect the wait time
patron = "countDownDelay = (\d+)"
matches = re.compile(patron).findall(data)
if len(matches)>0:
tiempo_espera = int(matches[0])
logger.info("[wupload.py] tiempo de espera %d segundos" % tiempo_espera)
#import time
#time.sleep(tiempo_espera)
from platformcode.xbmc import xbmctools
resultado = xbmctools.handle_wait(tiempo_espera+5,"Progreso","Conectando con servidor Wupload (Free)")
if resultado == False:
break
tm = get_match(data,"name='tm' value='([^']+)'")
tm_hash = get_match(data,"name='tm_hash' value='([^']+)'")
post = "tm=" + tm + "&tm_hash=" + tm_hash
data = scrapertools.cache_page( download_link , headers=headers, post=post )
logger.info(data)
else:
logger.info("[wupload.py] no encontrado tiempo de espera")
        # Detect the captcha
patron = "Recaptcha\.create"
matches = re.compile(patron).findall(data)
if len(matches)>0:
            logger.info("[wupload.py] está pidiendo el captcha")
recaptcha_key = get_match( data , 'Recaptcha\.create\("([^"]+)"')
logger.info("[wupload.py] recaptcha_key="+recaptcha_key)
data_recaptcha = scrapertools.cache_page("http://www.google.com/recaptcha/api/challenge?k="+recaptcha_key)
patron="challenge.*?'([^']+)'"
challenges = re.compile(patron, re.S).findall(data_recaptcha)
if(len(challenges)>0):
challenge = challenges[0]
image = "http://www.google.com/recaptcha/api/image?c="+challenge
#CAPTCHA
exec "import seriesly.captcha as plugin"
tbd = plugin.Keyboard("","",image)
tbd.doModal()
confirmed = tbd.isConfirmed()
if (confirmed):
tecleado = tbd.getText()
#logger.info("")
#tecleado = raw_input('Grab ' + image + ' : ')
post = "recaptcha_challenge_field=%s&recaptcha_response_field=%s" % (challenge,tecleado.replace(" ","+"))
data = scrapertools.cache_page( download_link , headers=headers, post=post )
logger.info(data)
else:
logger.info("[wupload.py] no encontrado captcha")
        # Detect the final download link
patron = '<p><a href="(http\:\/\/.*?wupload[^"]+)">'
matches = re.compile(patron).findall(data)
if len(matches)>0:
final_url = matches[0]
'''
'GET /download/2616019677/4f0391ba/9bed4add/0/1/580dec58/3317afa30905a31794733c6a32da1987719292ff
HTTP/1.1
Accept-Language: es-es,es;q=0.8,en-us;q=0.5,en;q=0.3
Acce
|
AIESECGermany/gis-hubspot-sync
|
gis_token_generator.py
|
Python
|
gpl-2.0
| 825 | 0.004848 |
import urllib
import urllib2
import cookielib
import logging
class GISTokenGenerator:
def __init__(self, email, password):
self.cj = cookielib.CookieJar()
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
self.email = email
        self.login_data = urllib.urlencode({'user[email]': email, 'user[password]': password})
def generate_token(self):
logging.info('Generating a token for {0}...'.format(self.email))
self.opener.open('https://auth.aiesec.org/users/sign_in', self.login_data)
        token = None
for cookie in self.cj:
if cookie.name == 'expa_token':
token = cookie.value
if token is None:
raise Exception('Unable to generate a token for {0}!'.format(self.email))
return token
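A minimal usage sketch; the e-mail address and password below are placeholders, not part of the original module.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    generator = GISTokenGenerator('user@example.com', 'secret')  # hypothetical credentials
    expa_token = generator.generate_token()
    print(expa_token)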
|
JQIamo/artiq
|
artiq/compiler/testbench/perf_embedding.py
|
Python
|
lgpl-3.0
| 2,231 | 0.001793 |
import sys, os
from pythonparser import diagnostic
from ...language.environment import ProcessArgumentManager
from ...master.databases import DeviceDB, DatasetDB
from ...master.worker_db import DeviceManager, DatasetManager
from ..module import Module
from ..embedding import Stitcher
from ..targets import OR1KTarget
from . import benchmark
def main():
if not len(sys.argv) == 2:
print("Expected exactly one module filename", file=sys.stderr)
exit(1)
def process_diagnostic(diag):
print("\n".join(diag.render()), file=sys.stderr)
if diag.level in ("fatal", "error"):
exit(1)
engine = diagnostic.Engine()
engine.process = process_diagnostic
with open(sys.argv[1]) as f:
        testcase_code = compile(f.read(), f.name, "exec")
testcase_vars = {'__name__': 'testbench'}
exec(testcase_code, testcase_vars)
device_db_path = os.path.join(os.path.dirname(sys.argv[1]), "device_db.py")
    device_mgr = DeviceManager(DeviceDB(device_db_path))
dataset_db_path = os.path.join(os.path.dirname(sys.argv[1]), "dataset_db.pyon")
dataset_mgr = DatasetManager(DatasetDB(dataset_db_path))
argument_mgr = ProcessArgumentManager({})
def embed():
experiment = testcase_vars["Benchmark"]((device_mgr, dataset_mgr, argument_mgr))
stitcher = Stitcher(core=experiment.core, dmgr=device_mgr)
stitcher.stitch_call(experiment.run, (), {})
stitcher.finalize()
return stitcher
stitcher = embed()
module = Module(stitcher)
target = OR1KTarget()
llvm_ir = target.compile(module)
elf_obj = target.assemble(llvm_ir)
elf_shlib = target.link([elf_obj])
benchmark(lambda: embed(),
"ARTIQ embedding")
benchmark(lambda: Module(stitcher),
"ARTIQ transforms and validators")
benchmark(lambda: target.compile(module),
"LLVM optimizations")
benchmark(lambda: target.assemble(llvm_ir),
"LLVM machine code emission")
benchmark(lambda: target.link([elf_obj]),
"Linking")
benchmark(lambda: target.strip(elf_shlib),
"Stripping debug information")
if __name__ == "__main__":
main()
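A sketch of a programmatic invocation; the benchmark module path is hypothetical, and the import path is inferred from this file's location in the repository. The testbench only requires sys.argv[1] to name a file that defines a "Benchmark" experiment, with device_db.py and dataset_db.pyon located next to that file.
import sys
from artiq.compiler.testbench.perf_embedding import main  # import path inferred from the file's location

sys.argv = ["perf_embedding", "path/to/benchmark_module.py"]  # hypothetical benchmark module
main()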
|
listamilton/supermilton.repository
|
plugin.audio.soundcloud/resources/lib/nightcrawler/core/abstract_context.py
|
Python
|
gpl-2.0
| 10,518 | 0.001236 |
import json
import os
import urllib
from ..storage import WatchLaterList, FunctionCache, SearchHistory, FavoriteList, AccessManager
from .abstract_settings import AbstractSettings
from .. import utils
class AbstractContext(object):
CACHE_ONE_MINUTE = 60
CACHE_ONE_HOUR = 60 * CACHE_ONE_MINUTE
CACHE_ONE_DAY = 24 * CACHE_ONE_HOUR
CACHE_ONE_WEEK = 7 * CACHE_ONE_DAY
CACHE_ONE_MONTH = 4 * CACHE_ONE_WEEK
SORT_METHOD_ALBUM = 'album'
SORT_METHOD_ALBUM_IGNORE_THE = 'album_ignore_the'
SORT_METHOD_ARTIST = 'artist'
SORT_METHOD_ARTIST_IGNORE_THE = 'artist_ignore_the'
SORT_METHOD_BIT_RATE = 'bit_rate'
SORT_METHOD_CHANNEL = 'channel'
SORT_METHOD_COUNTRY = 'country'
SORT_METHOD_DATE = 'date'
SORT_METHOD_DATE_ADDED = 'date_added'
SORT_METHOD_DATE_TAKEN = 'date_taken'
SORT_METHOD_DRIVE_TYPE = 'drive_type'
SORT_METHOD_DURATION = 'duration'
SORT_METHOD_EPISODE = 'episode'
SORT_METHOD_FILE = 'file'
SORT_METHOD_FULL_PATH = 'full_path'
SORT_METHOD_GENRE = 'genre'
SORT_METHOD_LABEL = 'label'
SORT_METHOD_LABEL_IGNORE_FOLDERS = 'label_ignore_folders'
SORT_METHOD_LABEL_IGNORE_THE = 'label_ignore_the'
SORT_METHOD_LAST_PLAYED = 'last_played'
SORT_METHOD_LISTENERS = 'listeners'
SORT_METHOD_MPAA_RATING = 'mpaa_rating'
SORT_METHOD_NONE = 'none'
SORT_METHOD_PLAY_COUNT = 'play_count'
SORT_METHOD_PLAYLIST_ORDER = 'playlist_order'
SORT_METHOD_PRODUCTION_CODE = 'production_code'
SORT_METHOD_PROGRAM_COUNT = 'program_count'
SORT_METHOD_SIZE = 'size'
SORT_METHOD_SONG_RATING = 'song_rating'
SORT_METHOD_STUDIO = 'studio'
SORT_METHOD_STUDIO_IGNORE_THE = 'studio_ignore_the'
SORT_METHOD_TITLE = 'title'
SORT_METHOD_TITLE_IGNORE_THE = 'title_ignore_the'
SORT_METHOD_TRACK_NUMBER = 'track_number'
SORT_METHOD_UNSORTED = 'unsorted'
SORT_METHOD_VIDEO_RATING = 'video_rating'
SORT_METHOD_VIDEO_RUNTIME = 'video_runtime'
SORT_METHOD_VIDEO_SORT_TITLE = 'video_sort_title'
SORT_METHOD_VIDEO_SORT_TITLE_IGNORE_THE = 'video_sort_title_ignore_the'
SORT_METHOD_VIDEO_TITLE = 'video_title'
SORT_METHOD_VIDEO_YEAR = 'video_year'
CONTENT_TYPE_FILES = 'files'
CONTENT_TYPE_SONGS = 'songs'
CONTENT_TYPE_ARTISTS = 'artists'
CONTENT_TYPE_ALBUMS = 'albums'
    CONTENT_TYPE_MOVIES = 'movies'
CONTENT_TYPE_TV_SHOWS = 'tvshows'
CONTENT_TYPE_EPISODES = 'episodes'
CONTENT_TYPE_MUSIC_VIDEOS = 'musicvideos'
LOG_DEBUG = 0
LOG_INFO = 1
LOG_WARNING = 2
LOG_ERROR = 3
def __init__(self, path=u'/', params=None, plugin_name=u'', plugin_id=u''):
if not params:
params = {}
pass
self._path_match = None
self._python_version = None
self._cache_path = None
self._function_cache = None
self._search_history = None
self._favorite_list = None
self._watch_later_list = None
self._access_manager = None
self._plugin_name = unicode(plugin_name)
self._version = 'UNKNOWN'
self._plugin_id = plugin_id
self._path = path
self._params = params
self._utils = None
self._view_mode = None
# create valid uri
self._uri = self.create_uri(self._path, self._params)
pass
def set_path_match(self, path_match):
"""
Sets the current regular expression match for a navigated path
:param path_match: regular expression match
"""
self._path_match = path_match
pass
def get_path_match(self):
"""
Returns the current path match of regular expression
:return: match of regular expression
"""
return self._path_match
def format_date_short(self, date_obj):
raise NotImplementedError()
def format_time(self, time_obj):
raise NotImplementedError()
def get_language(self):
raise NotImplementedError()
def _get_cache_path(self):
if not self._cache_path:
self._cache_path = os.path.join(self.get_data_path(), 'kodion')
pass
return self._cache_path
def get_function_cache(self):
if not self._function_cache:
settings = self.get_settings()
max_cache_size_mb = settings.get_int(AbstractSettings.ADDON_CACHE_SIZE, 5)
self._function_cache = FunctionCache(os.path.join(self._get_cache_path(), 'cache'),
max_file_size_kb=max_cache_size_mb * 1024)
if settings.is_clear_cache_enabled():
self.log_info('Clearing cache...')
settings.disable_clear_cache()
self._function_cache.remove_file()
self.log_info('Clearing cache done')
pass
pass
return self._function_cache
def cache_function(self, seconds, func, *args, **keywords):
return self.get_function_cache().get(seconds, func, *args, **keywords)
def get_search_history(self):
if not self._search_history:
max_search_history_items = self.get_settings().get_int(AbstractSettings.ADDON_SEARCH_SIZE, 50,
lambda x: x * 10)
self._search_history = SearchHistory(os.path.join(self._get_cache_path(), 'search'),
max_search_history_items)
pass
return self._search_history
def get_favorite_list(self):
if not self._favorite_list:
self._favorite_list = FavoriteList(os.path.join(self._get_cache_path(), 'favorites'))
pass
return self._favorite_list
def get_watch_later_list(self):
if not self._watch_later_list:
self._watch_later_list = WatchLaterList(os.path.join(self._get_cache_path(), 'watch_later'))
pass
return self._watch_later_list
def get_access_manager(self):
if not self._access_manager:
self._access_manager = AccessManager(self.get_settings())
pass
return self._access_manager
def get_video_playlist(self):
raise NotImplementedError()
def get_audio_playlist(self):
raise NotImplementedError()
def get_video_player(self):
raise NotImplementedError()
def get_audio_player(self):
raise NotImplementedError()
def get_ui(self):
raise NotImplementedError()
def get_system_version(self):
raise NotImplementedError()
def get_system_name(self):
raise NotImplementedError()
def get_python_version(self):
if not self._python_version:
try:
import platform
python_version = str(platform.python_version())
python_version = python_version.split('.')
self._python_version = tuple(map(lambda x: int(x), python_version))
except Exception, ex:
self.log_error('Unable to get the version of python')
self.log_error(ex.__str__())
self._python_version = [0, 0]
pass
pass
return self._python_version
def create_uri(self, path=u'/', params=None):
if not params:
params = {}
pass
uri_path = utils.path.to_uri(path)
if uri_path:
uri = "%s://%s%s" % ('plugin', utils.strings.to_utf8(self._plugin_id), uri_path)
else:
uri = "%s://%s/" % ('plugin', utils.strings.to_utf8(self._plugin_id))
pass
if len(params) > 0:
# make a copy of the map
uri_params = {}
uri_params.update(params)
# encode in utf-8
for key in uri_params:
param = params[key]
# convert dict to string via json
if isinstance(param, dict):
param = json.dumps(param)
pass
uri_params[key] = utils.strings.to_utf8(param)
pass
uri += '?' + urllib.urlencode(uri_params)
pass
|