repository_name (string, 7-107 chars) | function_path (string, 4-190 chars) | function_identifier (string, 1-236 chars) | language (1 class: python) | function (string, 9-647k chars) | docstring (string, 5-488k chars) | function_url (string, 71-285 chars) | context (string, 0-2.51M chars) | license (5 classes)
---|---|---|---|---|---|---|---|---
andrenarchy/krypy
|
krypy/utils.py
|
Projection.matrix
|
python
|
def matrix(self):
    return self.apply(numpy.eye(self.V.shape[0]))
|
Builds matrix representation of projection.
Builds the matrix representation
:math:`P = X \langle Y,X\rangle^{-1} \langle Y, I_N\rangle`.
**Use with care!** This routine may be helpful for testing purposes but
should not be used in production codes for high dimensions since
the resulting matrix is dense.
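A minimal usage sketch (my own illustration, not from the krypy docs; the random basis is a placeholder):
import numpy
from krypy.utils import Projection

X = numpy.random.rand(5, 2)          # basis of a 2-dimensional subspace of R^5
P = Projection(X)                    # orthogonal projection onto span(X)
M = P.matrix()                       # dense 5x5 matrix; fine here, costly for large N
print(numpy.allclose(M.dot(M), M))   # a projection is idempotent, so this prints True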
|
https://github.com/andrenarchy/krypy/blob/56f25817194edbe98b30e144986703a2a3137ff9/krypy/utils.py#L667-L677
|
import time
import warnings
from collections import defaultdict
import numpy
import scipy.linalg
import scipy.linalg.blas as blas
from scipy.sparse import isspmatrix
from scipy.sparse.sputils import isintlike
__all__ = [
"ArgumentError",
"AssumptionError",
"ConvergenceError",
"LinearOperatorError",
"InnerProductError",
"RuntimeError",
"Arnoldi",
"BoundCG",
"BoundMinres",
"ConvergenceError",
"Givens",
"House",
"IdentityLinearOperator",
"LinearOperator",
"MatrixLinearOperator",
"NormalizedRootsPolynomial",
"Projection",
"Timer",
"angles",
"arnoldi",
"arnoldi_res",
"arnoldi_projected",
"bound_perturbed_gmres",
"gap",
"get_linearoperator",
"hegedus",
"inner",
"ip_euclid",
"norm",
"norm_MMlr",
"norm_squared",
"orthonormality",
"qr",
"ritz",
"shape_vec",
"shape_vecs",
"strakos",
]
class ArgumentError(Exception):
class AssumptionError(Exception):
pass
class ConvergenceError(Exception):
def __init__(self, msg, solver):
super(ConvergenceError, self).__init__(msg)
self.solver = solver
class LinearOperatorError(Exception):
class InnerProductError(Exception):
class RuntimeError(Exception):
def find_common_dtype(*args):
dtypes = []
for arg in args:
if (
type(arg) is numpy.ndarray
or isspmatrix(arg)
or isinstance(arg, LinearOperator)
):
if hasattr(arg, "dtype"):
dtypes.append(arg.dtype)
else:
warnings.warn("object %s does not have a dtype." % arg.__repr__)
return numpy.find_common_type(dtypes, [])
def shape_vec(x):
return numpy.reshape(x, (x.shape[0], 1))
def shape_vecs(*args):
ret_args = []
flat_vecs = True
for arg in args:
if type(arg) is numpy.ndarray:
if len(arg.shape) == 1:
arg = shape_vec(arg)
else:
flat_vecs = False
ret_args.append(arg)
return flat_vecs, ret_args
def ip_euclid(X, Y):
return numpy.dot(X.T.conj(), Y)
def inner(X, Y, ip_B=None):
if ip_B is None or isinstance(ip_B, IdentityLinearOperator):
return numpy.dot(X.T.conj(), Y)
(N, m) = X.shape
(_, n) = Y.shape
try:
B = get_linearoperator((N, N), ip_B)
except TypeError:
return ip_B(X, Y)
if m > n:
return numpy.dot((B * X).T.conj(), Y)
else:
return numpy.dot(X.T.conj(), B * Y)
def norm_squared(x, Mx=None, inner_product=ip_euclid):
assert len(x.shape) == 2
if Mx is None:
rho = inner_product(x, x)
else:
assert len(Mx.shape) == 2
rho = inner_product(x, Mx)
if rho.shape == (1, 1):
if abs(rho[0, 0].imag) > abs(rho[0, 0]) * 1e-10 or rho[0, 0].real < 0.0:
raise InnerProductError(
("<x,Mx> = %g. Is the inner product " "indefinite?") % rho[0, 0]
)
return numpy.linalg.norm(rho, 2)
def norm(x, y=None, ip_B=None):
if y is None and (ip_B is None or isinstance(ip_B, IdentityLinearOperator)):
return numpy.linalg.norm(x, 2)
if y is None:
y = x
ip = inner(x, y, ip_B=ip_B)
nrm_diag = numpy.linalg.norm(numpy.diag(ip), 2)
nrm_diag_imag = numpy.linalg.norm(numpy.imag(numpy.diag(ip)), 2)
if nrm_diag_imag > nrm_diag * 1e-10:
raise InnerProductError(
"inner product defined by ip_B not positive "
"definite? ||diag(ip).imag||/||diag(ip)||="
f"{nrm_diag_imag/nrm_diag}"
)
return numpy.sqrt(numpy.linalg.norm(ip, 2))
def get_linearoperator(shape, A, timer=None):
ret = None
import scipy.sparse.linalg as scipylinalg
if isinstance(A, LinearOperator):
ret = A
elif A is None:
ret = IdentityLinearOperator(shape)
elif isinstance(A, numpy.ndarray) or isspmatrix(A):
ret = MatrixLinearOperator(A)
elif isinstance(A, numpy.matrix):
ret = MatrixLinearOperator(numpy.atleast_2d(numpy.asarray(A)))
elif isinstance(A, scipylinalg.LinearOperator):
if not hasattr(A, "dtype"):
raise ArgumentError("scipy LinearOperator has no dtype.")
ret = LinearOperator(A.shape, dot=A.matvec, dot_adj=A.rmatvec, dtype=A.dtype)
else:
raise TypeError("type not understood")
if (
A is not None
and not isinstance(A, IdentityLinearOperator)
and timer is not None
):
ret = TimedLinearOperator(ret, timer)
if shape != ret.shape:
raise LinearOperatorError("shape mismatch")
return ret
def norm_MMlr(M, Ml, A, Mr, b, x0, yk, inner_product=ip_euclid):
xk = x0 + Mr * yk
r = b - A * xk
Mlr = Ml * r
norm_Mlr = norm(Mlr)
if norm_Mlr == 0:
MMlr = numpy.zeros(Mlr.shape)
norm_MMlr = 0
else:
nMlr = Mlr / norm_Mlr
nMMlr = M * nMlr
MMlr = nMMlr * norm_Mlr
norm_MMlr = norm(Mlr, MMlr, inner_product=inner_product)
return xk, Mlr, MMlr, norm_MMlr
def orthonormality(V, ip_B=None):
return norm(numpy.eye(V.shape[1]) - inner(V, V, ip_B=ip_B))
def arnoldi_res(A, V, H, ip_B=None):
N = V.shape[0]
invariant = H.shape[0] == H.shape[1]
A = get_linearoperator((N, N), A)
if invariant:
res = A * V - numpy.dot(V, H)
else:
res = A * V[:, :-1] - numpy.dot(V, H)
return norm(res, ip_B=ip_B)
class House:
def __init__(self, x):
if len(x.shape) != 2 or x.shape[1] != 1:
raise ArgumentError("x is not a vector of dim (N,1)")
v = x.copy()
gamma = v[0].item()
v[0] = 1
if x.shape[0] == 1:
sigma = 0
xnorm = numpy.abs(gamma)
beta = 0
alpha = 1 if gamma == 0 else gamma / xnorm
else:
sigma = numpy.linalg.norm(v[1:], 2)
xnorm = numpy.sqrt(numpy.abs(gamma) ** 2 + sigma ** 2)
if sigma == 0:
beta = 0
xnorm = numpy.abs(gamma)
alpha = 1 if gamma == 0 else gamma / xnorm
else:
beta = 2
if gamma == 0:
v[0] = -sigma
alpha = 1
else:
v[0] = gamma + gamma / numpy.abs(gamma) * xnorm
alpha = -gamma / numpy.abs(gamma)
self.xnorm = xnorm
self.v = v / numpy.sqrt(numpy.abs(v[0]) ** 2 + sigma ** 2)
self.alpha = alpha
self.beta = beta
def apply(self, x):
if len(x.shape) != 2:
raise ArgumentError("x is not a matrix of shape (N,*)")
if self.beta == 0:
return x
return x - self.beta * self.v * numpy.dot(self.v.T.conj(), x)
def matrix(self):
n = self.v.shape[0]
return numpy.eye(n, n) - self.beta * numpy.dot(self.v, self.v.T.conj())
class Givens:
def __init__(self, x):
if x.shape != (2, 1):
raise ArgumentError("x is not a vector of shape (2,1)")
a = x[0].item()
b = x[1].item()
if numpy.isreal(x).all():
a = numpy.real(a)
b = numpy.real(b)
c, s = blas.drotg(a, b)
else:
c, s = blas.zrotg(a, b)
self.c = c
self.s = s
self.r = c * a + s * b
self.G = numpy.array([[c, s], [-numpy.conj(s), c]])
def apply(self, x):
return numpy.dot(self.G, x)
class Projection(object):
def __init__(self, X, Y=None, ip_B=None, orthogonalize=True, iterations=2):
self.ip_B = ip_B
if iterations < 1:
raise ArgumentError("iterations < 1 not allowed")
self.orthogonalize = orthogonalize
self.iterations = iterations
Y = X if Y is None else Y
if len(X.shape) != 2:
raise ArgumentError("X does not have shape==(N,k)")
if X.shape != Y.shape:
raise ArgumentError("X and Y have different shapes")
if X.shape[1] == 0:
self.V = self.W = numpy.zeros(X.shape)
self.VR = self.WR = self.Q = self.R = None
return
if orthogonalize:
self.V, self.VR = qr(X, ip_B=ip_B)
else:
self.V = X
self.VR = None
if Y is X and orthogonalize:
self.W, self.WR = self.V, self.VR
self.Q, self.R = None, None
else:
if orthogonalize:
self.W, self.WR = qr(Y, ip_B=ip_B)
else:
self.W = Y
self.WR = None
M = inner(self.W, self.V, ip_B=ip_B)
self.Q, self.R = scipy.linalg.qr(M)
def _apply(self, a, return_Ya=False):
if self.V.shape[1] == 0:
Pa = numpy.zeros(a.shape)
if return_Ya:
return Pa, numpy.zeros((0, a.shape[1]))
return Pa
c = inner(self.W, a, ip_B=self.ip_B)
if return_Ya:
Ya = c.copy()
if self.WR is not None:
Ya = self.WR.T.conj().dot(Ya)
if self.Q is not None and self.R is not None:
c = scipy.linalg.solve_triangular(self.R, self.Q.T.conj().dot(c))
Pa = self.V.dot(c)
if return_Ya:
return Pa, Ya
return Pa
def _apply_adj(self, a):
if self.V.shape[1] == 0:
return numpy.zeros(a.shape)
"""Single application of the adjoint projection."""
c = inner(self.V, a, ip_B=self.ip_B)
if self.Q is not None and self.R is not None:
c = self.Q.dot(
scipy.linalg.solve_triangular(self.R.T.conj(), c, lower=True)
)
return self.W.dot(c)
def apply(self, a, return_Ya=False):
if self.V.shape[1] == 0:
Pa = numpy.zeros(a.shape)
if return_Ya:
return Pa, numpy.zeros((0, a.shape[1]))
return Pa
if return_Ya:
x, Ya = self._apply(a, return_Ya=return_Ya)
else:
x = self._apply(a)
for i in range(self.iterations - 1):
z = a - x
w = self._apply(z)
x = x + w
if return_Ya:
return x, Ya
return x
def apply_adj(self, a):
if self.V.shape[1] == 0:
return numpy.zeros(a.shape)
x = self._apply_adj(a)
for i in range(self.iterations - 1):
z = a - x
w = self._apply_adj(z)
x = x + w
return x
def apply_complement(self, a, return_Ya=False):
if self.V.shape[1] == 0:
if return_Ya:
return a.copy(), numpy.zeros((0, a.shape[1]))
return a.copy()
if return_Ya:
x, Ya = self._apply(a, return_Ya=True)
else:
x = self._apply(a)
z = a - x
for i in range(self.iterations - 1):
w = self._apply(z)
z = z - w
if return_Ya:
return z, Ya
return z
def apply_complement_adj(self, a):
if self.V.shape[1] == 0:
return a.copy()
x = self._apply_adj(a)
z = a - x
for i in range(self.iterations - 1):
w = self._apply_adj(z)
z = z - w
return z
def _get_operator(self, fun, fun_adj):
N = self.V.shape[0]
t = numpy.find_common_type([self.V.dtype, self.W.dtype], [])
return LinearOperator((N, N), t, fun, fun_adj)
def operator(self):
if self.V.shape[1] == 0:
N = self.V.shape[0]
return ZeroLinearOperator((N, N))
return self._get_operator(self.apply, self.apply_adj)
def operator_complement(self):
if self.V.shape[1] == 0:
N = self.V.shape[0]
return IdentityLinearOperator((N, N))
return self._get_operator(self.apply_complement, self.apply_complement_adj)
|
MIT License
|
fluidinfo/tickery
|
tickery/adder.py
|
AdderCache._reportCancelled
|
python
|
def _reportCancelled(self, fail, screenname):
    fail.trap(defer.CancelledError)
    log.msg('Addition of user %r cancelled.' % screenname)
|
A user addition was cancelled. Log it and absorb the failure.
|
https://github.com/fluidinfo/tickery/blob/e64673ff1a5286b5ebec3f15f877682ed824f653/tickery/adder.py#L255-L260
|
from operator import attrgetter
from twisted.internet import defer
from twisted.python import log
from txrdq.rdq import ResizableDispatchQueue
from tickery.cacheutils import DumpingCache
from tickery import ftwitter
class User(object):
_states = ('queued', 'underway', 'added', 'canceled', 'failed')
_illegalTransitions = {
'queued': ('added', 'failed'),
'underway': ('queued',),
'added': ('queued', 'underway', 'canceled', 'failed'),
'canceled': ('underway', 'added', 'failed'),
'failed': ('underway', 'added'),
}
def __init__(self, screenname, nFriends, priority):
self.screenname = screenname
self.nFriends = nFriends
self.priority = priority
self.reset()
def reset(self):
self.state = 'queued'
def setState(self, newState):
currentState = self.state
screenname = self.screenname
if newState in self._states:
if newState in self._illegalTransitions[currentState]:
log.msg("Adder logic error? Can't transition %r from state "
'%r to %r.' % (screenname, currentState, newState))
else:
if newState == currentState:
log.msg('Logic error? %r is already in state %r.' %
(screenname, newState))
else:
log.msg('Adder: %r state change: %r -> %r.' %
(screenname, currentState, newState))
self.state = newState
else:
log.msg('Error: Unknown state %r.' % newState)
def canceled(self):
return self.state == 'canceled'
def __str__(self):
return ('%-16s state=%s priority=%d nFriends=%d'
% (self.screenname, self.state, self.priority, self.nFriends))
def __repr__(self):
return '<%s screenname=%r state=%r priority=%d nFriends=%d>' % (
self.__class__.__name__, self.screenname, self.state,
self.priority, self.nFriends)
class AdderCache(DumpingCache):
def __init__(self, cache, queueWidth, endpoint):
super(AdderCache, self).__init__()
self.cache = cache
self.queueWidth = queueWidth
self.endpoint = endpoint
def load(self, cacheFile):
self.rdq = ResizableDispatchQueue(self._addUser, width=self.queueWidth)
self.users = super(AdderCache, self).load(cacheFile)
if self.users is None:
self.users = {}
self.setCache(self.users)
else:
added = [u for u in self.users.values() if u.state == 'added']
notAdded = [u for u in self.users.values() if u.state != 'added']
log.msg('Loaded adder cache: found %d added, %d unadded users' %
(len(added), len(notAdded)))
if self.cache.restoreAddQueue:
log.msg('Restoring add queue.')
for user in sorted(notAdded, key=attrgetter('queuedAt')):
log.msg('Restoring %r (previous state %r)' %
(user.screenname, user.state))
user.reset()
d = self.rdq.put(user, user.priority)
d.addErrback(self._reportCancelled, user.screenname)
self.clean = False
else:
log.msg('Not restoring formerly queued names.')
for user in notAdded:
log.msg('Dropping user %r (in state %r)' %
(user.screenname, user.state))
del self.users[user.screenname.lower()]
self.clean = False
def __str__(self):
s = ['%d users in adder cache' % len(self.users)]
for key in sorted(self.users.keys()):
s.append(str(self.users[key]))
return '\n'.join(s)
def put(self, screenname, nFriends, priority):
screennameLower = screenname.lower()
user = self.users.get(screennameLower)
if user:
user.nFriends = nFriends
user.setState('queued')
else:
user = User(screenname, nFriends, priority)
self.users[screennameLower] = user
log.msg('Adding screenname %r to request queue.' % screenname)
self.clean = False
d = self.rdq.put(user, priority)
d.addErrback(self._reportCancelled, screenname)
def _addUser(self, user):
def _added(result):
user.setState('added')
self.clean = False
return result
def _failed(fail):
self.clean = False
if fail.check(ftwitter.Canceled):
assert user.canceled()
log.msg('Addition of user %r canceled.' % user.screenname)
else:
user.setState('failed')
log.msg('Failed to add %r: %s' % (user.screenname, fail))
if hasattr(fail.value, 'response_headers'):
for header in fail.value.response_headers:
if header.startswith('x-fluiddb-'):
print '\t%s: %s' % (
header, fail.value.response_headers[header][0])
log.msg('User %r received from request queue.' % user.screenname)
user.setState('underway')
d = ftwitter.addUserByScreenname(self.cache, self.endpoint, user)
d.addCallbacks(_added, _failed)
d.addErrback(log.err)
return d
def cancel(self, screenname):
log.msg('Attempting cancel of %r addition.' % screenname)
try:
user = self.users[screenname.lower()]
except KeyError:
raise Exception('Cannot cancel unknown user %r.' % screenname)
else:
if user.state == 'underway' or user.state == 'queued':
for job in self.rdq.underway() + set(self.rdq.pending()):
if job.jobarg.screenname == screenname:
log.msg('Cancelling %s %r addition.' %
(user.state, screenname))
job.cancel()
user.setState('canceled')
break
else:
raise Exception('Could not find %r in underway '
'or pending lists.' % screenname)
else:
user.setState('canceled')
def added(self, screenname):
try:
user = self.users[screenname.lower()]
except KeyError:
return False
else:
return user.state == 'added'
def known(self, screenname):
return screenname.lower() in self.users
def statusSummary(self, screennames):
position = {}
for i, user in enumerate(
[job.jobarg for job in self.rdq.pending()]):
position[user.screenname.lower()] = i
queued = []
underway = []
added = []
canceled = []
failed = []
unknown = []
for screenname in screennames:
try:
user = self.users[screenname.lower()]
except KeyError:
unknown.append(screenname)
else:
log.msg('user: %s' % user)
state = user.state
if state == 'queued':
try:
pos = position[screenname.lower()]
except KeyError:
log.msg('ERROR: User %r has no queue position.' %
screenname)
pos = -1
queued.append([screenname, user.nFriends, pos])
elif state == 'underway':
underway.append(
[screenname, user.nFriends,
float(user.workDone) / float(user.workToDo)])
elif state == 'added':
added.append(screenname)
elif state == 'canceled':
canceled.append(screenname)
elif state == 'failed':
failed.append(screenname)
else:
log.msg('ERROR: User %r is in an unknown state: %r' %
(screenname, state))
return {
'queued': queued,
'underway': underway,
'added': added,
'failed': failed,
'canceled': canceled,
'unknown': unknown,
}
@defer.inlineCallbacks
def close(self):
pending = yield self.rdq.stop()
if pending:
log.msg('Pending user additions canceled: %r' %
[job.jobarg.screenname for job in pending])
super(AdderCache, self).close()
|
Apache License 2.0
|
apache/bloodhound
|
bloodhound_theme/bhtheme/theme.py
|
BloodhoundTheme.post_process_request
|
python
|
def post_process_request(self, req, template, data, content_type):
    if template is None and data is None and sys.exc_info() == (None, None, None):
        return template, data, content_type

    def is_active_theme():
        is_active = False
        active_theme = ThemeEngineSystem(self.env).theme
        if active_theme is not None:
            this_theme_name = self.get_theme_names().next()
            is_active = active_theme['name'] == this_theme_name
        return is_active

    req.chrome['labels'] = self._get_whitelabelling()
    if data is not None:
        data['product_list'] = ProductModule.get_product_list(self.env, req)
    links = req.chrome.get('links', {})
    if self.env.project_icon == 'common/trac.ico':
        bh_icon = 'theme/img/bh.ico'
        new_icon = {'href': req.href.chrome(bh_icon),
                    'type': get_mimetype(bh_icon)}
        if links.get('icon'):
            links.get('icon')[0].update(new_icon)
        if links.get('shortcut icon'):
            links.get('shortcut icon')[0].update(new_icon)
    is_active_theme = is_active_theme()
    if self.disable_all_trac_css and is_active_theme:
        for i, entry in enumerate(req.chrome['nav'].get('mainnav', [])):
            if entry['name'] == 'admin':
                req.chrome['nav'].setdefault('metanav', []) \
                    .append(req.chrome['nav']['mainnav'].pop(i))
    if self.disable_all_trac_css:
        stylesheets = links.get('stylesheet', [])
        if stylesheets:
            path = '/chrome/common/css/'
            _iter = ([ss, ss.get('href', '')] for ss in stylesheets)
            links['stylesheet'] = \
                [ss for ss, href in _iter if not path in href or
                 href.rsplit('/', 1)[-1] in self.BLOODHOUND_KEEP_CSS]
    template, modifier = self.BLOODHOUND_TEMPLATE_MAP.get(template,
                                                          (template, None))
    if modifier is not None:
        modifier = getattr(self, modifier)
        modifier(req, template, data, content_type, is_active_theme)
    if is_active_theme and data is not None:
        data['responsive_layout'] = self.env.config.getbool(
            'bloodhound', 'responsive_layout', 'true')
        data['bhrelations'] = self.env.config.getbool(
            'components', 'bhrelations.*', 'false')
    if req.locale is not None:
        add_script(req, 'theme/bloodhound/%s.js' % req.locale)
    return template, data, content_type
|
Post process request filter.
Removes all trac provided css if required
|
https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_theme/bhtheme/theme.py#L264-L329
|
import sys
from genshi.builder import tag
from genshi.core import TEXT
from genshi.filters.transform import Transformer
from genshi.output import DocType
from trac.config import ListOption, Option
from trac.core import Component, TracError, implements
from trac.mimeview.api import get_mimetype
from trac.resource import get_resource_url, Neighborhood, Resource
from trac.ticket.api import TicketSystem
from trac.ticket.model import Ticket, Milestone
from trac.ticket.notification import TicketNotifyEmail
from trac.ticket.web_ui import TicketModule
from trac.util.compat import set
from trac.util.presentation import to_json
from trac.versioncontrol.web_ui.browser import BrowserModule
from trac.web.api import IRequestFilter, IRequestHandler, ITemplateStreamFilter
from trac.web.chrome import (add_stylesheet, add_warning, INavigationContributor,
ITemplateProvider, prevnext_nav, Chrome, add_script)
from trac.wiki.admin import WikiAdmin
from trac.wiki.formatter import format_to_html
from themeengine.api import ThemeBase, ThemeEngineSystem
from bhdashboard.util import dummy_request
from bhdashboard.web_ui import DashboardModule
from bhdashboard import wiki
from multiproduct.env import ProductEnvironment
from multiproduct.web_ui import PRODUCT_RE, ProductModule
from bhtheme.translation import _, add_domain
try:
from multiproduct.ticket.web_ui import ProductTicketModule
except ImportError:
ProductTicketModule = None
class BloodhoundTheme(ThemeBase):
template = htdocs = css = screenshot = disable_trac_css = True
disable_all_trac_css = True
BLOODHOUND_KEEP_CSS = set(
(
'diff.css', 'code.css'
)
)
BLOODHOUND_TEMPLATE_MAP = {
'admin_accountsconfig.html': ('bh_admin_accountsconfig.html', '_modify_admin_breadcrumb'),
'admin_accountsnotification.html': ('bh_admin_accountsnotification.html', '_modify_admin_breadcrumb'),
'admin_basics.html': ('bh_admin_basics.html', '_modify_admin_breadcrumb'),
'admin_components.html': ('bh_admin_components.html', '_modify_admin_breadcrumb'),
'admin_enums.html': ('bh_admin_enums.html', '_modify_admin_breadcrumb'),
'admin_logging.html': ('bh_admin_logging.html', '_modify_admin_breadcrumb'),
'admin_milestones.html': ('bh_admin_milestones.html', '_modify_admin_breadcrumb'),
'admin_perms.html': ('bh_admin_perms.html', '_modify_admin_breadcrumb'),
'admin_plugins.html': ('bh_admin_plugins.html', '_modify_admin_breadcrumb'),
'admin_products.html': ('bh_admin_products.html', '_modify_admin_breadcrumb'),
'admin_repositories.html': ('bh_admin_repositories.html', '_modify_admin_breadcrumb'),
'admin_users.html': ('bh_admin_users.html', '_modify_admin_breadcrumb'),
'admin_versions.html': ('bh_admin_versions.html', '_modify_admin_breadcrumb'),
'repository_links.html': ('repository_links.html', '_modify_admin_breadcrumb'),
'prefs.html': ('bh_prefs.html', None),
'prefs_account.html': ('bh_prefs_account.html', None),
'prefs_advanced.html': ('bh_prefs_advanced.html', None),
'prefs_datetime.html': ('bh_prefs_datetime.html', None),
'prefs_general.html': ('bh_prefs_general.html', None),
'prefs_keybindings.html': ('bh_prefs_keybindings.html', None),
'prefs_language.html': ('bh_prefs_language.html', None),
'prefs_pygments.html': ('bh_prefs_pygments.html', None),
'prefs_userinterface.html': ('bh_prefs_userinterface.html', None),
'search.html': ('bh_search.html', '_modify_search_data'),
'wiki_delete.html': ('bh_wiki_delete.html', None),
'wiki_diff.html': ('bh_wiki_diff.html', None),
'wiki_edit.html': ('bh_wiki_edit.html', None),
'wiki_rename.html': ('bh_wiki_rename.html', None),
'wiki_view.html': ('bh_wiki_view.html', '_modify_wiki_page_path'),
'diff_view.html': ('bh_diff_view.html', None),
'manage.html': ('manage.html', '_modify_resource_breadcrumb'),
'milestone_edit.html': ('bh_milestone_edit.html', '_modify_roadmap_page'),
'milestone_delete.html': ('bh_milestone_delete.html', '_modify_roadmap_page'),
'milestone_view.html': ('bh_milestone_view.html', '_modify_roadmap_page'),
'query.html': ('bh_query.html', '_add_products_general_breadcrumb'),
'report_delete.html': ('bh_report_delete.html', '_add_products_general_breadcrumb'),
'report_edit.html': ('bh_report_edit.html', '_add_products_general_breadcrumb'),
'report_list.html': ('bh_report_list.html', '_add_products_general_breadcrumb'),
'report_view.html': ('bh_report_view.html', '_add_products_general_breadcrumb'),
'roadmap.html': ('bh_roadmap.html', '_modify_roadmap_page'),
'ticket.html': ('bh_ticket.html', '_modify_ticket'),
'ticket_delete.html': ('bh_ticket_delete.html', None),
'ticket_preview.html': ('bh_ticket_preview.html', None),
'attachment.html': ('bh_attachment.html', None),
'preview_file.html': ('bh_preview_file.html', None),
'browser.html': ('bh_browser.html', '_modify_browser'),
'changeset.html': ('bh_changeset.html', None),
'diff_form.html': ('bh_diff_form.html', None),
'dir_entries.html': ('bh_dir_entries.html', None),
'revisionlog.html': ('bh_revisionlog.html', '_modify_browser'),
'product_view.html': ('bh_product_view.html', '_add_products_general_breadcrumb'),
'product_list.html': ('bh_product_list.html', '_modify_product_list'),
'product_edit.html': ('bh_product_edit.html', '_add_products_general_breadcrumb'),
'about.html': ('bh_about.html', None),
'history_view.html': ('bh_history_view.html', None),
'timeline.html': ('bh_timeline.html', None),
'account_details.html': ('bh_account_details.html', None),
'login.html': ('bh_login.html', None),
'register.html': ('bh_register.html', None),
'reset_password.html': ('bh_reset_password.html', None),
'user_table.html': ('bh_user_table.html', None),
'verify_email.html': ('bh_verify_email.html', None),
}
BOOTSTRAP_CSS_DEFAULTS = (
("body//table[not(contains(@class, 'table'))]",
['table', 'table-condensed']),
)
labels_application_short = Option('labels', 'application_short',
'Bloodhound', """A short version of application name most commonly
displayed in text, titles and labels""", doc_domain='bhtheme')
labels_application_full = Option('labels', 'application_full',
'Apache Bloodhound', """This is full name with trade mark and
everything, it is currently used in footers and about page only""",
doc_domain='bhtheme')
labels_footer_left_prefix = Option('labels', 'footer_left_prefix', '',
"""Text to display before full application name in footers""",
doc_domain='bhtheme')
labels_footer_left_postfix = Option('labels', 'footer_left_postfix', '',
"""Text to display after full application name in footers""",
doc_domain='bhtheme')
labels_footer_right = Option('labels', 'footer_right', '',
"""Text to use as the right aligned footer""", doc_domain='bhtheme')
_wiki_pages = None
Chrome.default_html_doctype = DocType.HTML5
implements(IRequestFilter, INavigationContributor, ITemplateProvider,
ITemplateStreamFilter)
from trac.web import main
main.default_tracker = 'http://issues.apache.org/bloodhound'
def _get_whitelabelling(self):
return {
'application_short': self.labels_application_short,
'application_full': self.labels_application_full,
'footer_left_prefix': self.labels_footer_left_prefix,
'footer_left_postfix': self.labels_footer_left_postfix,
'footer_right': self.labels_footer_right,
'application_version': application_version
}
def filter_stream(self, req, method, filename, stream, data):
tx = Transformer('body')
def add_classes(classes):
def attr_modifier(name, event):
attrs = event[1][1]
class_list = attrs.get(name, '').split()
self.log.debug('BH Theme : Element classes ' + str(class_list))
out_classes = ' '.join(set(class_list + classes))
self.log.debug('BH Theme : Inserting class ' + out_classes)
return out_classes
return attr_modifier
for xpath, classes in self.BOOTSTRAP_CSS_DEFAULTS:
tx = tx.end().select(xpath) .attr('class', add_classes(classes))
tx = tx.end() .select("body//a[contains(@href,'/wiki/%s')]" % wiki.GUIDE_NAME) .map(lambda text: wiki.new_name(text), TEXT)
app_short = self.labels_application_short
tx = tx.end() .select("body//div[@class='error']/h1") .map(lambda text: text.replace("Trac", app_short), TEXT)
return stream | tx
def pre_process_request(self, req, handler):
def hwiki(*args, **kw):
def new_name(name):
new_name = wiki.new_name(name)
if new_name != name:
if not self._wiki_pages:
wiki_admin = WikiAdmin(self.env)
self._wiki_pages = wiki_admin.get_wiki_list()
if new_name in self._wiki_pages:
return new_name
return name
a = tuple([new_name(x) for x in args])
return req.href.__call__("wiki", *a, **kw)
req.href.wiki = hwiki
return handler
|
Apache License 2.0
|
luiszeni/boosted-oicr
|
code/utils/minibatch.py
|
get_minibatch
|
python
|
def get_minibatch(roidb, num_classes):
    blobs = {k: [] for k in get_minibatch_blob_names()}
    im_blob, im_scales = _get_image_blob(roidb)
    assert len(im_scales) == 1, "Single batch only"
    assert len(roidb) == 1, "Single batch only"
    blobs['data'] = im_blob
    rois_blob = np.zeros((0, 5), dtype=np.float32)
    labels_blob = np.zeros((0, num_classes), dtype=np.float32)
    num_images = len(roidb)
    for im_i in range(num_images):
        labels, im_rois = _sample_rois(roidb[im_i], num_classes)
        rois = _project_im_rois(im_rois, im_scales[im_i])
        batch_ind = im_i * np.ones((rois.shape[0], 1))
        rois_blob_this_image = np.hstack((batch_ind, rois))
        if cfg.DEDUP_BOXES > 0:
            v = np.array([1, 1e3, 1e6, 1e9, 1e12])
            hashes = np.round(rois_blob_this_image * cfg.DEDUP_BOXES).dot(v)
            _, index, inv_index = np.unique(hashes, return_index=True,
                                            return_inverse=True)
            rois_blob_this_image = rois_blob_this_image[index, :]
        rois_blob = np.vstack((rois_blob, rois_blob_this_image))
        labels_blob = np.vstack((labels_blob, labels))
    blobs['rois'] = rois_blob
    blobs['labels'] = labels_blob
    return blobs, True
|
Given a roidb, construct a minibatch sampled from it.
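A hedged call sketch (the roidb record and the class count are placeholders; a real entry must carry whatever keys `_get_image_blob` and `_sample_rois` expect, which are defined elsewhere in the repo):
blobs, valid = get_minibatch([roidb_entry], num_classes=21)   # single-image batch, as the asserts require
print(blobs['data'].shape, blobs['rois'].shape, blobs['labels'].shape)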
|
https://github.com/luiszeni/boosted-oicr/blob/9c787808a3a0e5a2610cde7562eb04bc2ce050b9/code/utils/minibatch.py#L17-L57
|
import numpy as np
import numpy.random as npr
import cv2
from tasks.config import cfg
import utils.blob as blob_utils
def get_minibatch_blob_names(is_training=True):
    blob_names = ['data', 'rois', 'labels']
    return blob_names
|
MIT License
|
tmancal74/quantarhei
|
quantarhei/builders/pdb.py
|
PDBFile.get_Molecules
|
python
|
def get_Molecules(self, model=None):
    if model is None:
        return self.molecules
    else:
        molecules = []
        res_name = model.pdbname
        mollines = self._match_lines(by_recName="HETATM",
                                     by_resName=res_name)
        for mols in mollines:
            rseq = int(line_resSeq(mols))
            if rseq not in self._resSeqs:
                self._resSeqs.append(rseq)
                self._res_lines[rseq] = []
            self._res_lines[rseq].append(mols)
        count = -1
        for mols in mollines:
            rseq = int(line_resSeq(mols))
            chainId = line_chainId(mols)
            comb = chainId + str(rseq)
            if comb not in self._uniqueIds:
                count += 1
                self._uniqueIds.append(comb)
                self._unique_lines[comb] = []
            self._unique_lines[comb].append(mols)
        for rseq in self._uniqueIds:
            m = Molecule(name=str(rseq), elenergies=model.default_energies)
            r = model.position_of_center(data_type="PDB",
                                         data=self._unique_lines[rseq])
            m.position = r
            d = model.transition_dipole(data_type="PDB",
                                        data=self._unique_lines[rseq])
            m.set_dipole(0, 1, d)
            m.model = self
            m.data = self._unique_lines[rseq]
            molecules.append(m)
        self._reset_helpers()
        self.molecules = molecules
        return molecules
|
Returns all molecules corresponding to a given model
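A hypothetical usage sketch (the file name and model object are placeholders; any quantarhei model exposing `pdbname`, `default_energies`, `position_of_center` and `transition_dipole` fits the interface used above):
from quantarhei.builders.pdb import PDBFile

pdb = PDBFile("complex.pdb")                    # placeholder PDB file on disk
mols = pdb.get_Molecules(model=pigment_model)   # pigment_model: a model object as described above
for m in mols:
    print(m.name, m.position)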
|
https://github.com/tmancal74/quantarhei/blob/54a40cc55cdedf86bf04a5d705227fe69461d408/quantarhei/builders/pdb.py#L57-L135
|
import numpy
from .molecules import Molecule
_resSeq_min = 22
_resSeq_max = 26
_chainId_min = 21
_chainId_max = 22
class PDBFile:
def __init__(self, fname=None):
self.lines = []
self.linecount = 0
self.molecules = []
self._resSeqs = []
self._uniqueIds = []
self._res_lines = dict()
self._unique_lines = dict()
if fname is not None:
self.linecount = self.load_file(fname)
else:
return
def _reset_helpers(self):
self._resSeqs = []
self._uniqueIds = []
self._res_lines = dict()
self._unique_lines = dict()
def load_file(self, fname):
with open(fname) as file:
k = 0
for line in file:
self.lines.append(line)
k += 1
return k
|
MIT License
|
guildai/guildai
|
guild/commands/help.py
|
help
|
python
|
def help(ctx, args):
    from . import help_impl
    help_impl.main(args, ctx)
|
Show help for a path or package.
By default shows information about the models defined in the
project.
To display the description for distributions generated using the
package command, specify the `--package-description` option.
|
https://github.com/guildai/guildai/blob/79d39402201168b7e94007d8e66ecf504e7aa71c/guild/commands/help.py#L50-L62
|
from __future__ import absolute_import
from __future__ import division
import click
from guild import click_util
def _ac_path_or_package(incomplete, **_kw):
from . import packages_impl
packages = [pkg.project_name for pkg in packages_impl.packages(False)]
return sorted([pkg for pkg in packages if pkg.startswith(incomplete)]) + ["!!dir"]
@click.command()
@click.argument("path-or-package", required=False, autocompletion=_ac_path_or_package)
@click.option(
"--package-description", help="Show the package description.", is_flag=True
)
@click.option("--markdown", help="Show help using Markdown format", is_flag=True)
@click.option(
"--base-heading-level",
type=int,
default=1,
help="Base heading level for generated markdown (default is 1)",
)
@click.option(
"--title", default="Guild AI Help", help="Page title used for generating markdown"
)
@click.option(
"-n", "--no-pager", help="Do not use a pager when showing help.", is_flag=True
)
@click.pass_context
@click_util.use_args
|
Apache License 2.0
|
ansible/galaxy
|
galaxy/main/celerytasks/user_notifications.py
|
collection_new_survey
|
python
|
def collection_new_survey(collection_pk):
    collection = models.Collection.objects.get(pk=collection_pk)
    owners = _get_preferences(collection.namespace.owners.all())
    full_name = '{}.{}'.format(collection.namespace.name, collection.name)
    notification = NotificationManger(
        email_template=new_survey_template,
        preferences_name='notify_survey',
        preferences_list=owners,
        subject='Ansible Galaxy: new survey for {}'.format(full_name),
        db_message='New survey for {}'.format(full_name),
        collection=collection,
    )
    path = '/{}/{}/'.format(collection.namespace.name, collection.name)
    ctx = {
        'content_score': collection.community_score,
        'type': 'collection',
        'content_name': full_name,
        'content_url': '{}{}'.format(notification.url, path),
    }
    notification.notify(ctx)
|
Send new survey notification to collection namespace owners.
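A hypothetical dispatch sketch: the context further down ends with a bare `@celery.task` decorator immediately before this function in the source, which suggests it is queued like the other notification tasks; the collection object here is a placeholder:
collection_new_survey.delay(collection.pk)   # enqueue the notification for a models.Collection row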
|
https://github.com/ansible/galaxy/blob/e0e4b909171ddc6ca40f0ef2f7d4dce5680777ce/galaxy/main/celerytasks/user_notifications.py#L326-L350
|
import logging
import celery
from django.contrib.sites.models import Site
from django.conf import settings
from django.core import mail
from allauth.account.models import EmailAddress
from galaxy.main import models
LOG = logging.getLogger(__name__)
class NotificationManger(object):
def __init__(self, email_template, preferences_name, preferences_list,
subject, db_message=None, repo=None, collection=None):
self.email_template = email_template
self.preferences_name = preferences_name
self.preferences_list = preferences_list
self.subject = subject
self.url = settings.GALAXY_URL.format(
site=Site.objects.get_current().domain
)
self.repo = repo
self.collection = collection
if db_message is None:
self.db_message = subject
else:
self.db_message = db_message
def render_email(self, context):
text = self.email_template.format(**context)
footer = email_footer_template.format(
preferences_link='{}/me/preferences/'.format(self.url)
)
return text + footer
def send(self, email_message):
for user in self.preferences_list:
try:
if user.preferences['ui_' + self.preferences_name]:
models.UserNotification.objects.create(
user=user.user,
type=self.preferences_name,
message=self.db_message,
repository=self.repo,
collection=self.collection,
)
except Exception as e:
LOG.error(e)
try:
if user.preferences[self.preferences_name]:
email = EmailAddress.objects.filter(
primary=True,
user=user.user,
)
mail.send_mail(
self.subject,
email_message,
settings.GALAXY_NOTIFICATION_EMAIL,
[email[0].email],
fail_silently=False
)
except Exception as e:
LOG.error(e)
def notify(self, context):
email = self.render_email(context)
self.send(email)
def email_verification(email, code, username):
url = settings.GALAXY_URL.format(
site=Site.objects.get_current().domain
)
url += '/me/preferences/?verify=' + code
message = email_verification_template.format(
username=username,
url=url
)
mail.send_mail(
'Ansible Galaxy Please Confirm Your E-mail Address',
message,
settings.GALAXY_NOTIFICATION_EMAIL,
[email],
fail_silently=True
)
@celery.task
def collection_import(task_id, has_failed=False):
task = models.CollectionImport.objects.get(id=task_id)
if has_failed:
status = 'failed'
preference_name = 'notify_import_fail'
collection = None
else:
status = 'completed'
preference_name = 'notify_import_success'
collection = task.imported_version.collection
owners = _get_preferences(task.namespace.owners.all())
subject = f'Ansible Galaxy: import of {task.name} has {status}'
webui_title = f'Import {status}: {task.name} {task.version}'
notification = NotificationManger(
email_template=import_status_template,
preferences_name=preference_name,
preferences_list=owners,
subject=subject,
db_message=webui_title,
collection=collection,
)
ctx = {
'status': status,
'content_name': '{}.{}'.format(task.namespace.name, task.name),
'import_url': '{}/my-imports'.format(notification.url),
}
notification.notify(ctx)
@celery.task
def repo_import(task_id, user_initiated, has_failed=False):
task = models.ImportTask.objects.get(id=task_id)
repo = task.repository
owners = repo.provider_namespace.namespace.owners.all()
author = repo.provider_namespace.namespace.name
owners = _get_preferences(owners)
if has_failed:
preference = 'notify_import_fail'
status = 'failed'
else:
preference = 'notify_import_success'
status = 'succeeded'
subject = 'Ansible Galaxy: import of {} has {}'.format(repo.name, status)
db_message = 'Import {}: {}'.format(status, repo.name)
log_path = '/my-imports'
notification = NotificationManger(
email_template=import_status_template,
preferences_name=preference,
preferences_list=owners,
subject=subject,
db_message=db_message,
repo=repo
)
ctx = {
'status': status,
'content_name': '{}.{}'.format(author, repo.name),
'import_url': notification.url + log_path
}
notification.notify(ctx)
@celery.task
def collection_new_version(version_pk):
version = models.CollectionVersion.objects.get(pk=version_pk)
collection_followers = models.UserPreferences.objects.filter(
collections_followed__pk=version.collection.pk,
)
collection = version.collection
namespace_name = collection.namespace.name
full_name = '{}.{}'.format(namespace_name, collection.name)
version_number = version.version
notification = NotificationManger(
email_template=collection_new_version_template,
preferences_name='notify_content_release',
preferences_list=collection_followers,
subject=f'Ansible Galaxy: New version of {full_name}',
db_message=f'New version of {full_name}: {version_number}',
collection=collection,
)
path = '/{}/{}'.format(namespace_name, collection.name)
ctx = {
'namespace_name': namespace_name,
'content_name': full_name,
'version': version_number,
'content_url': '{}{}'.format(notification.url, path),
}
notification.notify(ctx)
@celery.task
def repo_update(repo_id):
followers = models.UserPreferences.objects.filter(
repositories_followed__pk=repo_id
)
repo = models.Repository.objects.get(id=repo_id)
author = repo.provider_namespace.namespace.name
notification = NotificationManger(
email_template=repo_update_template,
preferences_name='notify_content_release',
preferences_list=followers,
subject='Ansible Galaxy: New version of ' + repo.name,
db_message='New version of: {}'.format(repo.name),
repo=repo
)
path = '/{}/{}/'.format(repo.provider_namespace.namespace.name, repo.name)
ctx = {
'namespace_name': author,
'content_name': repo.name,
'content_url': notification.url + path
}
notification.notify(ctx)
@celery.task
def coll_author_release(version_pk):
version = models.CollectionVersion.objects.get(pk=version_pk)
author_followers = models.UserPreferences.objects.filter(
namespaces_followed=version.collection.namespace,
)
author = version.collection.namespace.name
full_name = '{}.{}'.format(author, version.collection.name)
notification = NotificationManger(
email_template=author_release_template,
preferences_name='notify_author_release',
preferences_list=author_followers,
subject=f'Ansible Galaxy: {author} has released a new collection',
db_message=f'New collection from {author}: {full_name}',
collection=version.collection,
)
path = '/{}/{}'.format(author, version.collection.name)
ctx = {
'author_name': author,
'type': 'collection',
'content_name': full_name,
'content_url': '{}{}'.format(notification.url, path),
}
notification.notify(ctx)
@celery.task
def repo_author_release(repo_id):
repo = models.Repository.objects.get(id=repo_id)
namespace = repo.provider_namespace.namespace
followers = models.UserPreferences.objects.filter(
namespaces_followed=namespace
)
author = repo.provider_namespace.namespace.name
notification = NotificationManger(
email_template=author_release_template,
preferences_name='notify_author_release',
preferences_list=followers,
subject='Ansible Galaxy: {} has released a new role'.format(
author
),
db_message='New release from {}: {}'.format(
author, repo.name
),
repo=repo
)
path = '/{}/{}/'.format(author, repo.name)
ctx = {
'author_name': author,
'type': 'role',
'content_name': repo.name,
'content_url': notification.url + path,
}
notification.notify(ctx)
@celery.task
|
Apache License 2.0
|
rustychris/stompy
|
stompy/grid/live_dt.py
|
LiveDtGridBase.refresh_metadata
|
python
|
def refresh_metadata(self):
    super(LiveDtGridBase, self).refresh_metadata()
    self.populate_dt()
|
Should be called when all internal state is changed outside
the mechanisms of add_X, delete_X, move_X, etc.
|
https://github.com/rustychris/stompy/blob/ef04d8b3ee9c9af827c87c72c7b50d365e5e567d/stompy/grid/live_dt.py#L139-L145
|
from __future__ import print_function
from collections import defaultdict, Iterable
import logging
log = logging.getLogger('stompy.live_dt')
import pdb
import numpy as np
from numpy.linalg import norm,solve
from matplotlib import collections
import matplotlib.pyplot as plt
from .. import utils
from ..utils import array_append
from ..spatial import field,robust_predicates
from . import orthomaker,trigrid,exact_delaunay
def ray_intersection(p0,vec,pA,pB):
d1a = np.array([pA[0]-p0[0],pA[1]-p0[1]])
A = np.array( [[vec[0], pB[0] - pA[0]],
[vec[1], pB[1] - pA[1]]] )
alpha_beta = solve(A,d1a)
return p0 + alpha_beta[0]*np.asarray(vec)
class MissingConstraint(Exception):
pass
def distance_left_of_line(pnt, qp1, qp2):
vec = qp2 - qp1
left_vec = np.array( [-vec[1],vec[0]] )
return (pnt[0] - qp1[0])*left_vec[0] + (pnt[1]-qp1[1])*left_vec[1]
class LiveDtGridNull(orthomaker.OrthoMaker):
has_dt = 0
pending_conflicts = []
def hold(self):
pass
def release(self):
pass
def delaunay_neighbors(self,n):
return []
LiveDtGrid=LiveDtGridNull
class LiveDtGridBase(orthomaker.OrthoMaker):
has_dt = 1
freeze=0
holding = 0
pending_conflicts = []
edges_to_release = None
scale_ratio_for_cutoff = 1.0
vh_dtype='object'
def __init__(self,*args,**kwargs):
super(LiveDtGridBase,self).__init__(*args,**kwargs)
self.populate_dt()
check_i = 0
def check(self):
return
print(" --checkplot %05i--"%self.check_i)
plt.figure(10)
plt.clf()
self.plot_dt()
if self.default_clip is not None:
self.plot_nodes()
plt.axis(self.default_clip)
plt.title("--checkplot %05i--"%self.check_i)
plt.savefig('tmp/dtframe%05i.png'%self.check_i)
self.check_i += 1
plt.close(10)
|
MIT License
|
storj/storj-python-sdk
|
storj/__init__.py
|
get_client
|
python
|
def get_client():
    return Client(*read_config())
|
Returns a pre-configured Storj HTTP client.
Returns:
(:py:class:`storj.http.Client`): Storj HTTP client.
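A minimal usage sketch (assumes Storj credentials are already available to `read_config()`):
from storj import get_client

client = get_client()   # storj.http.Client built from the local configuration
print(client)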
|
https://github.com/storj/storj-python-sdk/blob/440add68f153a6d5a8e05ea577774559f2ada6ba/storj/__init__.py#L20-L26
|
import io
import logging
from abc import ABCMeta
from .api import ecdsa_to_hex
from .configuration import read_config
from .http import Client
from .metadata import __version__
from .model import Bucket, File, Token
logging.getLogger(__name__).addHandler(logging.NullHandler())
|
MIT License
|
pyasi/pybuildkite
|
pybuildkite/agents.py
|
Agents.stop_agent
|
python
|
def stop_agent(self, organization, agent_id, force=True):
    body = {"force": force}
    stop = "/stop"
    return self.client.put(
        self.path.format(organization) + agent_id + stop, body=body
    )
|
Stop an agent
:param organization: Organization slug
:param agent_id: Agent id
:param force: Whether or not to force the agent to stop if processing a job
:return: no content
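A hypothetical call sketch (the `Buildkite` entry point and token handling are assumptions about the wider pybuildkite API; the org slug, agent id and token are placeholders):
from pybuildkite.buildkite import Buildkite

buildkite = Buildkite()
buildkite.set_access_token("API_TOKEN")                            # placeholder token
buildkite.agents().stop_agent("my-org", "agent-id", force=False)   # let a running job finish first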
|
https://github.com/pyasi/pybuildkite/blob/75e378b6bdfcb0e57a77fb88e802495f2d9d510d/pybuildkite/agents.py#L62-L75
|
from posixpath import join as urljoin
from pybuildkite.client import Client
class Agents(Client):
def __init__(self, client, base_url):
self.client = client
self.path = urljoin(base_url, "organizations/{}/agents/")
def list_all(
self,
organization,
name=None,
hostname=None,
version=None,
page=0,
with_pagination=False,
):
query_params = {
"name": name,
"hostname": hostname,
"version": version,
"page": page,
}
return self.client.get(
self.path.format(organization),
query_params,
with_pagination=with_pagination,
)
def get_agent(self, organization, agent_id):
return self.client.get(self.path.format(organization) + agent_id)
|
BSD 2-Clause Simplified License
|
google/gazoo-device
|
gazoo_device/tests/unit_tests/package_registrar_test.py
|
BadPrimaryDeviceSignatureOverride.reboot
|
python
|
def reboot(self):
|
This method is missing required "no_wait", "method" arguments.
|
https://github.com/google/gazoo-device/blob/f333b386f5993c8d4c9e12c89ebb620a0c4f5506/gazoo_device/tests/unit_tests/package_registrar_test.py#L197-L198
|
import abc
import builtins
import importlib
import json
import os.path
import re
from unittest import mock
from gazoo_device import data_types
from gazoo_device import decorators
from gazoo_device import detect_criteria
from gazoo_device import errors
from gazoo_device import extensions
from gazoo_device import fire_manager
from gazoo_device import gdm_logger
from gazoo_device import package_registrar
from gazoo_device.base_classes import auxiliary_device
from gazoo_device.base_classes import gazoo_device_base
from gazoo_device.capabilities import event_parser_default
from gazoo_device.capabilities.interfaces import capability_base
from gazoo_device.switchboard import communication_types
from gazoo_device.tests.unit_tests.utils import fake_devices
from gazoo_device.tests.unit_tests.utils import unit_test_case
import immutabledict
logger = gdm_logger.get_logger()
_TEST_PACKAGE_NAME = "foo_package"
class GoodCapabilityBase(capability_base.CapabilityBase):
class AbstractAuxiliaryDevice(auxiliary_device.AuxiliaryDevice):
@abc.abstractmethod
def some_abstract_method(self):
class AbstractPrimaryDevice(gazoo_device_base.GazooDeviceBase):
@abc.abstractmethod
def some_abstract_method(self):
class AbstractVirtualDevice(gazoo_device_base.GazooDeviceBase):
@abc.abstractmethod
def some_abstract_method(self):
class AbstractCommunicationType(communication_types.CommunicationType):
@abc.abstractmethod
def some_abstract_method(self):
class AbstractCapabilityFlavorDefault(GoodCapabilityBase):
@abc.abstractmethod
def some_abstract_method(self):
class ClassNotInheritingFromInterface:
class GoodCommunicationType(communication_types.CommunicationType):
@classmethod
def get_comms_addresses(cls):
return []
def get_transport_list(self):
del self
return []
class GoodQueryKey(detect_criteria.QueryEnum):
some_valid_query = "some_valid_query"
another_valid_query = "another_valid_query"
class GoodPrimaryDevice(fake_devices.FakeGazooDeviceBase):
DEVICE_TYPE = "some_primary_device"
COMMUNICATION_TYPE = "GoodCommunicationType"
DETECT_MATCH_CRITERIA = immutabledict.immutabledict(
{GoodQueryKey.some_valid_query: "some_value"})
_OWNER_EMAIL = "gdm-authors@google.com"
class GoodVirtualDevice(fake_devices.FakeGazooDeviceBase):
DEVICE_TYPE = "some_virtual_device"
COMMUNICATION_TYPE = "GoodCommunicationType"
DETECT_MATCH_CRITERIA = immutabledict.immutabledict(
{GoodQueryKey.some_valid_query: "some_value"})
_OWNER_EMAIL = "gdm-authors@google.com"
class GoodAuxiliaryDevice(auxiliary_device.AuxiliaryDevice):
DEVICE_TYPE = "some_auxiliary_device"
COMMUNICATION_TYPE = "GoodCommunicationType"
DETECT_MATCH_CRITERIA = immutabledict.immutabledict(
{GoodQueryKey.some_valid_query: "some_value"})
_OWNER_EMAIL = "gdm-authors@google.com"
@decorators.LogDecorator(logger)
def get_console_configuration(self):
del self
return None
@classmethod
def is_connected(cls, device_config):
return True
@decorators.LogDecorator(logger)
def recover(self, error):
del self
@decorators.LogDecorator(logger)
def get_detection_info(self):
del self
return {}, {}
class GoodCapabilityDefault(GoodCapabilityBase):
class CapabilityWithSameNameBase(capability_base.CapabilityBase):
@classmethod
def get_capability_name(cls):
return GoodCapabilityBase.get_capability_name()
def good_detection_query(address, detect_logger, create_switchboard_func):
del address, detect_logger, create_switchboard_func
return True
def another_good_detection_query(address, detect_logger,
create_switchboard_func):
del address, detect_logger, create_switchboard_func
return "foobar"
class BadPrimaryDeviceNoLogDecorator(GoodPrimaryDevice):
def factory_reset(self):
class BadPrimaryDeviceSignatureOverride(GoodPrimaryDevice):
@decorators.LogDecorator(logger)
|
Apache License 2.0
|
aryanc403/remind
|
remind/cogs/meta.py
|
Meta.ping
|
python
|
async def ping(self, ctx):
    start = time.perf_counter()
    message = await ctx.send(':ping_pong: Pong!')
    end = time.perf_counter()
    duration = (end - start) * 1000
    content = f'REST API latency: {int(duration)}ms\n'
    await message.edit(content=content)
|
Replies to a ping.
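A hypothetical wiring sketch (standard discord.py cog registration; the `bot` object is whatever remind's entry point constructs, and the trailing `@meta.command` decorator in the context further down suggests ping is a subcommand of the `meta` group):
bot.add_cog(Meta(bot))   # after this, the command is reachable from chat, e.g. "<prefix>meta ping"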
|
https://github.com/aryanc403/remind/blob/c4be96a237b4d00e4f74519da6aa54a41f5422cb/remind/cogs/meta.py#L80-L88
|
import os
import subprocess
import sys
import time
import textwrap
from discord.ext import commands
from remind.util.discord_common import pretty_time_format
from remind.util import clist_api
from remind import constants
RESTART = 42
def git_history():
def _minimal_ext_cmd(cmd):
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
branch = out.strip().decode('ascii')
out = _minimal_ext_cmd(['git', 'log', '--oneline', '-5'])
history = out.strip().decode('ascii')
return (
'Branch:\n' +
textwrap.indent(branch, ' ') +
'\nCommits:\n' +
textwrap.indent(history, ' ')
)
except OSError:
return "Fetching git info failed"
def check_if_superuser(ctx):
return ctx.author.id in constants.SUPER_USERS
class Meta(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.start_time = time.time()
@commands.group(brief='Bot control', invoke_without_command=True)
async def meta(self, ctx):
await ctx.send_help(ctx.command)
@meta.command(brief='Restarts Remind')
@commands.check(check_if_superuser)
async def restart(self, ctx):
await ctx.send('Restarting...')
os._exit(RESTART)
@meta.command(brief='Kill Remind')
@commands.check(check_if_superuser)
async def kill(self, ctx):
await ctx.send('Dying...')
os._exit(0)
@meta.command(brief='Is Remind up?')
|
MIT License
|
maxbbraun/accent
|
server/weather.py
|
Weather.is_partly_cloudy
|
python
|
def is_partly_cloudy(self, user):
    return self._icon(user) in ['02d', '02n']
|
Checks if the current weather is partly cloudy.
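A hypothetical sketch (the geocoder mapping and user record are placeholders; `_home_location` above only needs `user.get('home')` to resolve through the geocoder):
weather = Weather(geocoder)           # geocoder: maps a user's 'home' string to a location
if weather.is_partly_cloudy(user):    # user: Firestore-style record with a 'home' entry
    print('OpenWeather reported a 02d/02n icon')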
|
https://github.com/maxbbraun/accent/blob/9fbb543e5261598a586e427afb68e86b29839f00/server/weather.py#L69-L72
|
from astral import AstralError
from cachetools import cached
from cachetools import TTLCache
from json.decoder import JSONDecodeError
from logging import info
from requests import get
from requests import RequestException
from firestore import DataError
from firestore import Firestore
OPEN_WEATHER_URL = ('https://api.openweathermap.org/data/2.5/onecall'
'?lat=%f&lon=%f&exclude=minutely,hourly,daily&appid=%s')
MAX_CACHE_SIZE = 100
CACHE_TTL_S = 60 * 60
class Weather(object):
def __init__(self, geocoder):
self._open_weather_api_key = Firestore().open_weather_api_key()
self._geocoder = geocoder
def _icon(self, user):
location = self._home_location(user)
return self._request_icon(location)
def _home_location(self, user):
try:
home = user.get('home')
return self._geocoder[home]
except (AstralError, KeyError) as e:
raise DataError(e)
@cached(cache=TTLCache(maxsize=MAX_CACHE_SIZE, ttl=CACHE_TTL_S))
def _request_icon(self, location):
request_url = OPEN_WEATHER_URL % (location.latitude,
location.longitude,
self._open_weather_api_key)
try:
response_json = get(request_url).json()
icon = response_json['current']['weather'][0]['icon']
except (RequestException, JSONDecodeError, KeyError) as e:
raise DataError(e)
info('Weather: %s' % icon)
return icon
def is_clear(self, user):
return self._icon(user) in ['01d', '01n']
|
MIT License
|
rapid7/vm-console-client-python
|
rapid7vmconsole/models/alert.py
|
Alert.maximum_alerts
|
python
|
def maximum_alerts(self):
    return self._maximum_alerts
|
Gets the maximum_alerts of this Alert. # noqa: E501
The maximum number of alerts that will be issued. To disable maximum alerts, omit the property in the request or specify the property with a value of `null`. # noqa: E501
:return: The maximum_alerts of this Alert. # noqa: E501
:rtype: int
|
https://github.com/rapid7/vm-console-client-python/blob/55e1f573967bce27cc9a2d10c12a949b1142c2b3/rapid7vmconsole/models/alert.py#L198-L206
|
import pprint
import re
import six
class Alert(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'enabled': 'bool',
'enabled_scan_events': 'ScanEvents',
'enabled_vulnerability_events': 'VulnerabilityEvents',
'id': 'int',
'links': 'list[Link]',
'maximum_alerts': 'int',
'name': 'str',
'notification': 'str'
}
attribute_map = {
'enabled': 'enabled',
'enabled_scan_events': 'enabledScanEvents',
'enabled_vulnerability_events': 'enabledVulnerabilityEvents',
'id': 'id',
'links': 'links',
'maximum_alerts': 'maximumAlerts',
'name': 'name',
'notification': 'notification'
}
def __init__(self, enabled=None, enabled_scan_events=None, enabled_vulnerability_events=None, id=None, links=None, maximum_alerts=None, name=None, notification=None):
self._enabled = None
self._enabled_scan_events = None
self._enabled_vulnerability_events = None
self._id = None
self._links = None
self._maximum_alerts = None
self._name = None
self._notification = None
self.discriminator = None
self.enabled = enabled
if enabled_scan_events is not None:
self.enabled_scan_events = enabled_scan_events
if enabled_vulnerability_events is not None:
self.enabled_vulnerability_events = enabled_vulnerability_events
if id is not None:
self.id = id
if links is not None:
self.links = links
if maximum_alerts is not None:
self.maximum_alerts = maximum_alerts
self.name = name
self.notification = notification
@property
def enabled(self):
return self._enabled
@enabled.setter
def enabled(self, enabled):
if enabled is None:
raise ValueError("Invalid value for `enabled`, must not be `None`")
self._enabled = enabled
@property
def enabled_scan_events(self):
return self._enabled_scan_events
@enabled_scan_events.setter
def enabled_scan_events(self, enabled_scan_events):
self._enabled_scan_events = enabled_scan_events
@property
def enabled_vulnerability_events(self):
return self._enabled_vulnerability_events
@enabled_vulnerability_events.setter
def enabled_vulnerability_events(self, enabled_vulnerability_events):
self._enabled_vulnerability_events = enabled_vulnerability_events
@property
def id(self):
return self._id
@id.setter
def id(self, id):
self._id = id
@property
def links(self):
return self._links
@links.setter
def links(self, links):
self._links = links
@property
|
MIT License
|
markvdw/gpflow-inter-domain
|
GPflow/model.py
|
Model.compute_log_likelihood
|
python
|
def compute_log_likelihood(self):
    return self.build_likelihood()
|
Compute the log likelihood of the model (uses AutoFlow on ``self.build_likelihood()``)
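A hypothetical sketch against the classic GPflow 0.x API that this fork follows (the GPR model and RBF kernel are assumptions about the wider package, not shown in this context):
import numpy as np
import GPflow

X = np.random.rand(10, 1)
Y = np.sin(3 * X) + 0.1 * np.random.randn(10, 1)
m = GPflow.gpr.GPR(X, Y, kern=GPflow.kernels.RBF(1))
print(m.compute_log_likelihood())   # AutoFlow-compiled call into build_likelihood()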
|
https://github.com/markvdw/gpflow-inter-domain/blob/0cf621e1896a3e1996f863b586c6cd2f795dd9f0/GPflow/model.py#L198-L200
|
from __future__ import print_function, absolute_import
import sys
import numpy as np
import tensorflow as tf
from scipy.optimize import minimize, OptimizeResult
from . import hmc, session
from ._settings import settings
from .param import Parameterized, AutoFlow, DataHolder
float_type = settings.dtypes.float_type
class ObjectiveWrapper(object):
def __init__(self, objective):
self._objective = objective
self._previous_x = None
def __call__(self, x):
f, g = self._objective(x)
g_is_fin = np.isfinite(g)
if np.all(g_is_fin):
self._previous_x = x
return f, g
else:
print("Warning: inf or nan in gradient: replacing with zeros")
return f, np.where(g_is_fin, g, 0.)
class Model(Parameterized):
def __init__(self, name='model'):
Parameterized.__init__(self)
self.scoped_keys.extend(['build_likelihood', 'build_prior'])
self._name = name
self._needs_recompile = True
self.num_fevals = 0
self._tf_optimizer_var_init = None
@property
def name(self):
return self._name
def __getstate__(self):
d = Parameterized.__getstate__(self)
for key in ['_graph', '_session', '_free_vars', '_objective', '_minusF', '_minusG', '_feed_dict_keys']:
d.pop(key, None)
return d
def __setstate__(self, d):
Parameterized.__setstate__(self, d)
self._needs_recompile = True
def set_optimizer_variables_value(self, val_dict):
self._tf_optimizer_var_init = val_dict
def _compile(self, optimizer=None):
self._optimizer = optimizer
self._graph = tf.Graph()
self._session = session.get_session(graph=self._graph,
output_file_name=settings.profiling.output_file_name + "_objective",
output_directory=settings.profiling.output_directory,
each_time=settings.profiling.each_time)
with self._graph.as_default():
self._free_vars = tf.Variable(self.get_free_state())
self.make_tf_array(self._free_vars)
with self.tf_mode():
f = self.build_likelihood() + self.build_prior()
g, = tf.gradients(f, self._free_vars)
self._minusF = tf.negative(f, name='objective')
self._minusG = tf.negative(g, name='grad_objective')
if optimizer is None:
opt_step = None
else:
with tf.variable_scope("gpflow-opt"):
opt_step = optimizer.minimize(self._minusF, var_list=[self._free_vars])
init = tf.global_variables_initializer()
self._session.run(init)
if optimizer is not None and self._tf_optimizer_var_init is not None:
_, var_dict = self.get_optimizer_variables()
for var_name, val in self._tf_optimizer_var_init.items():
self._session.run(var_dict[var_name].assign(val))
if settings.verbosity.tf_compile_verb:
print("compiling tensorflow function...")
sys.stdout.flush()
self._feed_dict_keys = self.get_feed_dict_keys()
def obj(x):
self.num_fevals += 1
feed_dict = {self._free_vars: x}
self.update_feed_dict(self._feed_dict_keys, feed_dict)
f, g = self._session.run([self._minusF, self._minusG],
feed_dict=feed_dict)
return f.astype(np.float64), g.astype(np.float64)
self._objective = obj
if settings.verbosity.tf_compile_verb:
print("done")
sys.stdout.flush()
self._needs_recompile = False
return opt_step
def get_optimizer_variables(self):
val_dict = {}
var_dict = {}
opt_vars = self._graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="gpflow-opt")
val_dict.update([("model." + v.name, self._session.run(v)) for v in opt_vars])
var_dict.update([("model." + v.name, v) for v in opt_vars])
return val_dict, var_dict
@AutoFlow()
def compute_log_prior(self):
return self.build_prior()
@AutoFlow()
|
Apache License 2.0
|
grappa-py/grappa
|
grappa/test.py
|
Test.__or__
|
python
|
def __or__(self, value):
return self.__overload__(value)
|
Overloads ``|`` as a left-to-right operator precedence expression.
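In practice this lets the test object sit on the left of ``|`` and the subject on the right. A tiny self-contained sketch of the same ``__or__`` dispatch (the ``Check`` class is hypothetical, not grappa's API):

class Check:
    def __init__(self, predicate):
        self.predicate = predicate
    def __or__(self, subject):          # evaluated left-to-right: Check(...) | subject
        return self.predicate(subject)

be_positive = Check(lambda x: x > 0)
print(be_positive | 3)    # True
print(be_positive | -1)   # False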
|
https://github.com/grappa-py/grappa/blob/f1861e1572e68f031977e86a5d9eba1957bd164e/grappa/test.py#L198-L202
|
from .log import log
from .empty import empty
from .base import BaseTest
from .engine import Engine
from .context import Context
from .resolver import OperatorResolver
class Test(BaseTest):
_context = 0
_context_subject = empty
_global = False
def __init__(self, subject=empty):
self._engine = Engine()
self._ctx = Context()
self._ctx.subjects = []
self._ctx.subject = subject
self._ctx.chained = False
self._ctx.style = 'should'
@property
def should(self):
return self
@property
def expect(self):
return self
@property
def _root(self):
return test
def __call__(self, subject, overload=False):
self._ctx.subject = subject
return self._trigger() if overload else Test(subject)
def __getattr__(self, name):
if self._global:
subject = self._context_subject if self._context else empty
return Test(subject).__getattr__(name)
return OperatorResolver(self).resolve(name)
def _trigger(self):
log.debug('[test] trigger with context: {}'.format(self._ctx))
try:
err = self._engine.run(self._ctx)
except Exception as _err:
err = _err
finally:
self._engine.reset()
self._root._engine.reset()
if err:
raise err
return self
def _clone(self):
test = Test(self._ctx.subject)
test._ctx = self._ctx.clone()
test._engine = self._engine.clone()
return test
def _flush(self):
self.__init__()
def all(self, *tests):
def run_tests(subject):
for test in tests:
try:
test(subject, overload=True)
except Exception as err:
return err
return True
self._engine.add_assertion(run_tests)
return self
def any(self, *tests):
def run_tests(subject):
err = None
for test in tests:
try:
test(subject, overload=True)
except Exception as _err:
err = _err
else:
return True
return err
self._engine.add_assertion(run_tests)
return self
def __overload__(self, subject):
if isinstance(subject, Test):
fork = subject._clone()
fork._ctx.chained = True
fork._ctx.subject = self._ctx.subject
return fork._trigger()
return self.__call__(subject, overload=True)
|
MIT License
|
google/upvote
|
upvote/gae/lib/bit9/model.py
|
Model.delete
|
python
|
def delete(cls, id_, context):
logging.info('DELETE %s object with ID %s', cls.__name__, id_)
route = '{}/{}'.format(cls.ROUTE, id_)
response = context.ExecuteRequest(constants.METHOD.DELETE, api_route=route)
return cls.from_dict(response)
|
Deletes a model instance by ID.
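A hedged usage sketch: with a concrete ``Model`` subclass and an API context object (both invented here for illustration), the call issues a DELETE against ``<ROUTE>/<id_>`` and parses the response with ``from_dict``. It assumes the ``Model`` and ``Int64Property`` classes from the context below are importable:

class FakeContext(object):
    # Stand-in for the Bit9 REST context; records the request instead of sending it.
    def ExecuteRequest(self, method, api_route=None, query_args=None, data=None):
        print(method, api_route)        # e.g. DELETE computer/42
        return {'id': 42}

class Computer(Model):                  # hypothetical subclass for illustration
    ROUTE = 'computer'
    id = Int64Property('id')

deleted = Computer.delete(42, FakeContext())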
|
https://github.com/google/upvote/blob/0b4477d40676a46ad58aaa7e14f9b13770d55c0c/upvote/gae/lib/bit9/model.py#L391-L397
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import six
from upvote.gae.lib.bit9 import constants
from upvote.gae.lib.bit9 import exceptions as excs
from upvote.gae.lib.bit9 import query
from upvote.gae.lib.bit9 import query_nodes
from absl import logging
class Property(object):
_PYTHON_TYPE = None
def __init__(self,
api_name,
repeated=False,
allow_update=False,
expands_to=None):
self._name = api_name
self._updatable = allow_update
self._expands_to = expands_to
self._repeated = repeated
self.model_cls_name = None
@property
def repeated(self):
return self._repeated
@property
def updatable(self):
return self._updatable
@property
def expandable(self):
return self._expands_to is not None
@property
def expands_to(self):
return self._expands_to
@property
def name(self):
return self._name
@classmethod
def _is_valid_value(cls, val):
return isinstance(val, cls._PYTHON_TYPE)
@classmethod
def raw_to_value(cls, raw):
return raw
@classmethod
def value_to_raw(cls, val):
if val is not None and not cls._is_valid_value(val):
raise ValueError('Invalid {} value: {}'.format(cls.__name__, val))
return val
@classmethod
def _value_to_query(cls, val):
if val is None:
return ''
elif not cls._is_valid_value(val):
raise ValueError('Invalid {} value: {}'.format(cls.__name__, val))
else:
return str(val)
def __eq__(self, other):
return query_nodes.FilterNode(self, ':', self._value_to_query(other))
def __ne__(self, other):
return query_nodes.FilterNode(self, '!', self._value_to_query(other))
def __gt__(self, other):
return query_nodes.FilterNode(self, '>', self._value_to_query(other))
def __lt__(self, other):
return query_nodes.FilterNode(self, '<', self._value_to_query(other))
def __neg__(self):
return query_nodes.OrderNode(self, ascending=False)
def __repr__(self):
return '{}.{}'.format(self.model_cls_name, self.name)
class StringProperty(Property):
_PYTHON_TYPE = six.string_types
class _IntegerProperty(Property):
_PYTHON_TYPE = int
_BIT_LENGTH = None
@classmethod
def _is_valid_value(cls, val):
try:
return val.bit_length() <= cls._BIT_LENGTH
except:
return False
class Int16Property(_IntegerProperty):
_BIT_LENGTH = 16
class Int32Property(_IntegerProperty):
_BIT_LENGTH = 32
class Int64Property(_IntegerProperty):
_BIT_LENGTH = 64
class DecimalProperty(Property):
_PYTHON_TYPE = float
class DoubleProperty(Property):
_PYTHON_TYPE = float
class BooleanProperty(Property):
_PYTHON_TYPE = bool
@classmethod
def _value_to_query(cls, val):
val = super(BooleanProperty, cls)._value_to_query(val)
return str(val).lower()
def __gt__(self, unused_other):
raise excs.QueryError('Unsupported operation for boolean properties')
def __lt__(self, unused_other):
raise excs.QueryError('Unsupported operation for boolean properties')
class DateTimeProperty(Property):
_PYTHON_TYPE = datetime.datetime
_DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
_DATETIME_FORMAT_USEC = '%Y-%m-%dT%H:%M:%S.%fZ'
@classmethod
def raw_to_value(cls, raw):
if not raw:
return None
for format_ in (cls._DATETIME_FORMAT_USEC, cls._DATETIME_FORMAT):
try:
return datetime.datetime.strptime(raw, format_)
except ValueError:
pass
raise ValueError("Invalid DateTime value: '{}'".format(raw))
@classmethod
def value_to_raw(cls, val):
val = super(DateTimeProperty, cls).value_to_raw(val)
return val.strftime(cls._DATETIME_FORMAT_USEC)
@classmethod
def _value_to_query(cls, val):
if val is None:
return ''
super(DateTimeProperty, cls)._value_to_query(val)
return cls.value_to_raw(val)
class _MetaModel(type):
def __new__(mcs, name, parents, dct):
assert len(parents) == 1
if name != 'Model':
if 'ROUTE' in dct:
if not dct['ROUTE']:
raise excs.Error('Models must define ROUTE property')
elif not parents[0].ROUTE:
raise excs.Error('Models must define ROUTE property')
all_properties = {}
for attr_name, attr in six.iteritems(dct):
if isinstance(attr, Property):
attr.model_cls_name = name
all_properties[attr_name] = attr
cls = super(_MetaModel, mcs).__new__(mcs, name, parents, dct)
cls._KIND_MAP[name] = cls
cls._PROPERTIES = all_properties
return cls
class Model(six.with_metaclass(_MetaModel, object)):
ROUTE = None
_KIND_MAP = {}
_PROPERTIES = None
def __init__(self, **kwargs):
self._obj_dict = {}
self._prefix = None
for key, val in six.iteritems(kwargs):
prop = self._get_and_validate_property(key)
self._obj_dict[prop.name] = val
@classmethod
def from_dict(cls, obj_dict, prefix=None):
inst = cls()
if not isinstance(obj_dict, dict):
raise ValueError('Invalid object dict: %s' % (obj_dict,))
inst._obj_dict = obj_dict
inst._prefix = prefix
return inst
@classmethod
def _get_and_validate_property(cls,
prop_or_name,
require_updatable=False,
require_expandable=False):
if isinstance(prop_or_name, Property):
prop = prop_or_name
else:
prop = cls._PROPERTIES.get(prop_or_name)
if prop is None:
raise excs.PropertyError('Unknown property: {}'.format(prop_or_name))
if not cls.is_valid_property(prop):
raise excs.PropertyError(
'{} cannot be used with {}'.format(prop, cls.__name__))
elif require_updatable and not prop.updatable:
raise excs.PropertyError(
'Property {} may not be updated'.format(prop))
elif require_expandable and not prop.expandable:
raise excs.PropertyError(
'Property {} may not be expanded'.format(prop))
return prop
@classmethod
def is_valid_property(cls, prop):
return isinstance(prop, Property) and prop.model_cls_name == cls.__name__
@classmethod
def update(cls, id_, updated_properties, context):
prop_map = {
cls._get_and_validate_property(prop, require_updatable=True): val
for prop, val in six.iteritems(updated_properties)
}
update_str = ', '.join(
'{}="{}"'.format(prop, val) for prop, val in six.iteritems(prop_map))
logging.info(
'Updating %s object (id=%s): %s', cls.__name__, id_, update_str)
obj = cls.get(id_, context)
for prop, val in six.iteritems(prop_map):
setattr(obj, prop.name, val)
return obj.put(context)
def put(self, context, extra_query_args=None):
if extra_query_args:
args = [
'{}={}'.format(key, val)
for key, val in six.iteritems(extra_query_args)
]
else:
args = None
response = context.ExecuteRequest(
constants.METHOD.POST, api_route=self.ROUTE, query_args=args,
data=self.to_raw_dict())
return self.from_dict(response)
@classmethod
def get(cls, id_, context):
logging.info('GET %s object with ID %s', cls.__name__, id_)
route = '{}/{}'.format(cls.ROUTE, id_)
response = context.ExecuteRequest(constants.METHOD.GET, api_route=route)
return cls.from_dict(response)
@classmethod
|
Apache License 2.0
|
breakingbytes/simkit
|
simkit/core/data_sources.py
|
DataSource.saveas_json
|
python
|
def saveas_json(self, save_name):
meta = getattr(self, DataSourceBase._meta_attr)
param_file = getattr(self, DataSourceBase._param_file)
utc_mod_time = list(time.gmtime(os.path.getmtime(save_name)))
json_data = {'data': self.data, 'utc_mod_time': utc_mod_time,
'param_file': param_file,
'data_reader': meta.data_reader.__name__,
'data_source': self.__class__.__name__}
if not save_name.endswith('.json'):
save_name += '.json'
with open(save_name, 'w') as fp:
json.dump(json_data, fp, cls=SimKitJSONEncoder)
self._is_saved = True
|
Save :attr:`data`, :attr:`param_file`, the original :attr:`data_reader`
and the UTC modification time as keys in a JSON file. If the data is edited,
it should be saved using this method. Non-JSON data files are also
saved using this method.
:param save_name: Name to save JSON file as, ".json" is appended.
:type save_name: str
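For reference, the cache file written by this method carries the keys shown below. This is a plain-Python sketch of the payload shape only (the values and file names are placeholders, and the real call serialises ``self.data`` with ``SimKitJSONEncoder``):

import json
import time

payload = {
    'data': {'latitude': 38.0, 'longitude': -122.0},   # whatever self.data held
    'utc_mod_time': list(time.gmtime(0)),              # UTC mtime of the source file
    'param_file': 'met_data.json',                     # placeholder parameter file name
    'data_reader': 'JSONReader',
    'data_source': 'MetData',                          # placeholder DataSource subclass name
}
with open('met_data_cache.json', 'w') as fp:
    json.dump(payload, fp)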
|
https://github.com/breakingbytes/simkit/blob/c247b6ecf46d727703c03cb0d987e35fd054eaa6/simkit/core/data_sources.py#L260-L286
|
from simkit.core import (
UREG, Registry, SimKitJSONEncoder, CommonBase, Parameter
)
from simkit.core.data_readers import JSONReader
from simkit.core.exceptions import (
UncertaintyPercentUnitsError, UncertaintyVarianceError
)
import json
import os
import time
from copy import copy
import numpy as np
DFLT_UNC = 1.0 * UREG('percent')
class DataParameter(Parameter):
_attrs = ['units', 'uncertainty', 'isconstant', 'timeseries']
class DataRegistry(Registry):
meta_names = ['uncertainty', 'variance', 'isconstant', 'timeseries',
'data_source']
def register(self, newdata, *args, **kwargs):
kwargs.update(zip(self.meta_names, args))
uncertainty = kwargs['uncertainty']
variance = kwargs['variance']
isconstant = kwargs['isconstant']
if uncertainty:
for k0, d in uncertainty.items():
for k1, v01 in d.items():
units = v01.units
if units != UREG('percent'):
keys = '%s-%s' % (k0, k1)
raise UncertaintyPercentUnitsError(keys, units)
if variance and uncertainty:
for k0, d in variance.items():
for k1, v01 in d.items():
keys = '%s-%s' % (k0, k1)
missing = k1 not in uncertainty[k0]
v2 = np.asarray(uncertainty[k0][k1].to('fraction').m) ** 2.0
if missing or not np.allclose(np.asarray(v01), v2):
raise UncertaintyVarianceError(keys, v01)
if isconstant:
for k, v in isconstant.items():
if not isinstance(v, bool):
classname = self.__class__.__name__
error_msg = ['%s meta "isconstant" should be' % classname,
'boolean, but it was "%s" for "%s".' % (v, k)]
raise TypeError(' '.join(error_msg))
super(DataRegistry, self).register(newdata, **kwargs)
class DataSourceBase(CommonBase):
_path_attr = 'data_path'
_file_attr = 'data_file'
_param_cls = DataParameter
_reader_attr = 'data_reader'
_enable_cache_attr = 'data_cache_enabled'
_attr_default = {_reader_attr: JSONReader, _enable_cache_attr: True}
def __new__(mcs, name, bases, attr):
if not CommonBase.get_parents(bases, DataSourceBase):
return super(DataSourceBase, mcs).__new__(mcs, name, bases, attr)
attr = mcs.set_meta(bases, attr)
meta = attr[mcs._meta_attr]
for ma, dflt in mcs._attr_default.items():
a = getattr(meta, ma, None)
if a is None:
setattr(meta, ma, dflt)
attr = mcs.set_param_file_or_parameters(attr)
return super(DataSourceBase, mcs).__new__(mcs, name, bases, attr)
class DataSource(metaclass=DataSourceBase):
__metaclass__ = DataSourceBase
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
meta = getattr(self, DataSourceBase._meta_attr)
parameters = getattr(self, DataSourceBase._param_attr)
filename = None
if meta.data_reader.is_file_reader:
if args:
filename = args[0]
elif kwargs:
filename = kwargs.get('filename')
self.filename = filename
self._is_saved = True
if meta.data_cache_enabled and self._is_cached():
data_reader_instance = JSONReader(parameters, meta)
else:
data_reader_instance = meta.data_reader(parameters, meta)
self.data = data_reader_instance.load_data(*args, **kwargs)
if meta.data_cache_enabled and not self._is_cached():
self.saveas_json(self.filename)
self.uncertainty = {}
self.variance = {}
self.isconstant = {}
self.timeseries = {}
self.data_source = dict.fromkeys(self.data, self.__class__.__name__)
self._raw_data = copy(self.data)
self.__prepare_data__()
for k0, d in self.uncertainty.items():
for k1, v01 in d.items():
self.variance[k0] = {k1: v01.to('fraction').m ** 2.0}
def __prepare_data__(self):
raise NotImplementedError('Data preparation not implemented. ' +
'Use ``pass`` if not required.')
def _is_cached(self, ext='.json'):
if not ext.startswith('.'):
ext = '.%s' % ext
cache_file = '%s%s' % (self.filename, ext)
return self.filename.endswith(ext) or os.path.exists(cache_file)
@property
def issaved(self):
return self._is_saved
|
BSD 3-Clause New or Revised License
|
iglpdc/dmrg101
|
dmrg101/utils/tridiagonal_solver/tridiagonal_solver.py
|
tridiagonal_solver
|
python
|
def tridiagonal_solver(d, e, eigenvectors = True):
if (d.size != e.size + 1):
raise TridiagonalException("d, and e have different sizes")
num_evals = d.size
evals = np.empty(num_evals)
evecs = np.empty((num_evals, num_evals))
r = lamRange(d, e, num_evals)
assert(len(r) == num_evals +1)
evals = eigenvals3(d, e, num_evals)
if eigenvectors:
for i in range(num_evals):
evals[i], evecs[:, i] = inversePower3(d, e, 1.00000001*evals[i])
return evals, evecs
|
Calculates the eigenvalues and eigenvectors of a tridiagonal and
symmetric matrix.
Parameters
----------
d : a numpy array with ndim = 1.
The elements of the diagonal of the tridiagonal matrix.
e : a numpy array with ndim = 1.
The off-diagonal elements of the tridiagonal matrix.
eigenvectors : a bool (optional).
Whether you want to calculate the eigenvectors.
Returns
-------
evals : a numpy array with ndim = 1.
The eigenvalues.
evecs : a numpy array with ndim = 2.
The eigenvectors.
Raises
------
TridiagonalException
if `d` and `e` have different sizes.
|
https://github.com/iglpdc/dmrg101/blob/aaf3913f5a616dc84c4100efbeb819648973c582/dmrg101/utils/tridiagonal_solver/tridiagonal_solver.py#L7-L56
|
import numpy as np
from tridiagonal_exceptions import TridiagonalException
from lamRange import *
from inversePower3 import *
from eigenvals3 import *
|
MIT License
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/universal_transformer_util.py
|
universal_transformer_skip
|
python
|
def universal_transformer_skip(layer_inputs,
step,
hparams,
ffn_unit,
attention_unit,
pad_remover=None):
state, inputs, memory = layer_inputs
new_state = step_preprocess(state, step, hparams)
for i in range(hparams.num_inrecurrence_layers):
with tf.variable_scope("rec_layer_%d" % i):
new_state = ffn_unit(attention_unit(new_state))
transformed_state = new_state
inputs.get_shape().assert_is_compatible_with(state.get_shape())
gate_inputs = []
if "s" in hparams.gates_inputs:
gate_inputs.append(state)
if "t" in hparams.gates_inputs:
gate_inputs.append(transformed_state)
if "i" in hparams.gates_inputs:
gate_inputs.append(inputs)
gate_ffn_layer = hparams.gate_ffn_layer
transform_gate = _ffn_layer_multi_inputs(
gate_inputs,
hparams,
ffn_layer_type=gate_ffn_layer,
name="transform",
bias_initializer=tf.constant_initializer(hparams.transform_bias_init),
activation=tf.sigmoid,
pad_remover=pad_remover,
preprocess=True)
if hparams.couple_carry_transform_gates:
carry_gate = tf.subtract(1.0, transform_gate, name="carry")
else:
carry_gate = _ffn_layer_multi_inputs(
gate_inputs,
hparams,
ffn_layer_type=gate_ffn_layer,
name="carry",
bias_initializer=tf.constant_initializer(-hparams.transform_bias_init),
activation=tf.sigmoid,
pad_remover=pad_remover,
preprocess=True)
contrib.summary().scalar("skip_transform_gate_layer",
tf.reduce_mean(transform_gate))
contrib.summary().scalar("skip_carry_gate_layer", tf.reduce_mean(carry_gate))
new_state = inputs * carry_gate + transformed_state * transform_gate
return new_state, inputs, memory
|
Universal Transformer with a highway-style skip connection.
It transforms the state using attention and ffn and wraps this transformation
with a skip-all connection: the new state is a combination of the transformed
state and the original inputs, mixed via carry/transform gates.
Observation:
Controlling the carry/transform gates with the original inputs usually works
better (i.e. hparams.gates_inputs="i").
Args:
layer_inputs:
- state: state
- inputs: the original embedded inputs (= inputs to the first step)
step: indicates number of steps taken so far
hparams: model hyper-parameters.
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
pad_remover: to mask out padding in convolutional layers (efficiency).
Returns:
layer_output:
new_state: new state
inputs: the original embedded inputs (= inputs to the first step)
|
https://github.com/tensorflow/tensor2tensor/blob/c22a226704e5887862bf9edd9f269892c9016ad4/tensor2tensor/models/research/universal_transformer_util.py#L684-L771
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import functools
from six.moves import range
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_layers
from tensor2tensor.models import transformer
from tensor2tensor.utils import contrib
from tensor2tensor.utils import expert_utils
import tensorflow.compat.v1 as tf
def universal_transformer_encoder(encoder_input,
encoder_self_attention_bias,
hparams,
name="encoder",
nonpadding=None,
save_weights_to=None,
make_image_summary=True):
x = encoder_input
attention_dropout_broadcast_dims = (
common_layers.comma_separated_string_to_integer_list(
getattr(hparams, "attention_dropout_broadcast_dims", "")))
with tf.variable_scope(name):
if nonpadding is not None:
padding = 1.0 - nonpadding
else:
padding = common_attention.attention_bias_to_padding(
encoder_self_attention_bias)
nonpadding = 1.0 - padding
pad_remover = None
if hparams.use_pad_remover and not common_layers.is_xla_compiled():
pad_remover = expert_utils.PadRemover(padding)
ffn_unit = functools.partial(
transformer_encoder_ffn_unit,
hparams=hparams,
nonpadding_mask=nonpadding,
pad_remover=pad_remover)
attention_unit = functools.partial(
transformer_encoder_attention_unit,
hparams=hparams,
encoder_self_attention_bias=encoder_self_attention_bias,
attention_dropout_broadcast_dims=attention_dropout_broadcast_dims,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary)
x, extra_output = universal_transformer_layer(
x, hparams, ffn_unit, attention_unit, pad_remover=pad_remover)
return common_layers.layer_preprocess(x, hparams), extra_output
def universal_transformer_decoder(decoder_input,
encoder_output,
decoder_self_attention_bias,
encoder_decoder_attention_bias,
hparams,
name="decoder",
nonpadding=None,
save_weights_to=None,
make_image_summary=True):
x = decoder_input
attention_dropout_broadcast_dims = (
common_layers.comma_separated_string_to_integer_list(
getattr(hparams, "attention_dropout_broadcast_dims", "")))
with tf.variable_scope(name):
ffn_unit = functools.partial(
transformer_decoder_ffn_unit,
hparams=hparams,
nonpadding_mask=nonpadding)
attention_unit = functools.partial(
transformer_decoder_attention_unit,
hparams=hparams,
encoder_output=encoder_output,
decoder_self_attention_bias=decoder_self_attention_bias,
encoder_decoder_attention_bias=encoder_decoder_attention_bias,
attention_dropout_broadcast_dims=attention_dropout_broadcast_dims,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary)
x, extra_output = universal_transformer_layer(
x, hparams, ffn_unit, attention_unit)
return common_layers.layer_preprocess(x, hparams), extra_output
def universal_transformer_layer(x,
hparams,
ffn_unit,
attention_unit,
pad_remover=None):
def add_vanilla_transformer_layer(x, num_layers, name):
if hparams.add_position_timing_signal:
x = common_attention.add_timing_signal_1d(x)
for layer in range(num_layers):
with tf.variable_scope(name + "layer_%d" % layer):
x = ffn_unit(attention_unit(x))
return x
with tf.variable_scope("universal_transformer_%s" % hparams.recurrence_type):
if (hparams.mix_with_transformer and
"before_ut" in hparams.mix_with_transformer):
x = add_vanilla_transformer_layer(x, hparams.num_mixedin_layers,
"before_ut_")
if hparams.recurrence_type == "act":
output, extra_output = universal_transformer_act(
x, hparams, ffn_unit, attention_unit)
else:
ut_function, initializer = get_ut_layer(x, hparams, ffn_unit,
attention_unit, pad_remover)
output, _, extra_output = tf.foldl(
ut_function, tf.range(hparams.num_rec_steps),
initializer=initializer)
if (hparams.recurrence_type == "lstm" and
hparams.get("use_memory_as_final_state", False)):
output = extra_output
if (hparams.mix_with_transformer and
"after_ut" in hparams.mix_with_transformer):
output = add_vanilla_transformer_layer(output, hparams.num_mixedin_layers,
"after_ut_")
return output, extra_output
def get_ut_layer(x,
hparams,
ffn_unit,
attention_unit,
pad_remover=None):
if hparams.recurrence_type == "basic":
ut_initializer = (x, x, x)
ut_function = functools.partial(
universal_transformer_basic,
hparams=hparams,
ffn_unit=ffn_unit,
attention_unit=attention_unit)
elif hparams.recurrence_type == "highway":
ut_initializer = (x, x, x)
ut_function = functools.partial(
universal_transformer_highway,
hparams=hparams,
ffn_unit=ffn_unit,
attention_unit=attention_unit,
pad_remover=pad_remover)
elif hparams.recurrence_type == "skip":
ut_initializer = (x, x, x)
ut_function = functools.partial(
universal_transformer_skip,
hparams=hparams,
ffn_unit=ffn_unit,
attention_unit=attention_unit,
pad_remover=pad_remover)
elif hparams.recurrence_type == "dwa":
memory_size = hparams.num_rec_steps + 1
memory_empty = tf.zeros([memory_size] + common_layers.shape_list(x))
memory = fill_memory_slot(memory_empty, x, 0)
ut_initializer = (x, x, memory)
ut_function = functools.partial(
universal_transformer_depthwise_attention,
hparams=hparams,
ffn_unit=ffn_unit,
attention_unit=attention_unit)
elif hparams.recurrence_type == "gru":
ut_initializer = (x, x, x)
ut_function = functools.partial(
universal_transformer_with_gru_as_transition_function,
hparams=hparams,
ffn_unit=ffn_unit,
attention_unit=attention_unit,
pad_remover=pad_remover)
elif hparams.recurrence_type == "lstm":
memory = tf.zeros(common_layers.shape_list(x))
ut_initializer = (x, x, memory)
ut_function = functools.partial(
universal_transformer_with_lstm_as_transition_function,
hparams=hparams,
ffn_unit=ffn_unit,
attention_unit=attention_unit,
pad_remover=pad_remover)
else:
raise ValueError("Unknown recurrence type: %s" % hparams.recurrence_type)
return ut_function, ut_initializer
def transformer_encoder_ffn_unit(x,
hparams,
nonpadding_mask=None,
pad_remover=None):
with tf.variable_scope("ffn"):
if hparams.transformer_ffn_type == "fc":
y = transformer.transformer_ffn_layer(
common_layers.layer_preprocess(x, hparams),
hparams,
pad_remover,
conv_padding="SAME",
nonpadding_mask=nonpadding_mask)
if hparams.transformer_ffn_type == "sepconv":
assert nonpadding_mask is not None, (
"The nonpadding_mask should be provided, otherwise the model uses "
"the leaked padding information to estimate the length!")
y = common_layers.sepconv_relu_sepconv(
common_layers.layer_preprocess(x, hparams),
filter_size=hparams.filter_size,
output_size=hparams.hidden_size,
first_kernel_size=(3, 1),
second_kernel_size=(5, 1),
padding="SAME",
nonpadding_mask=nonpadding_mask,
dropout=hparams.relu_dropout)
x = common_layers.layer_postprocess(x, y, hparams)
return x
def transformer_encoder_attention_unit(x,
hparams,
encoder_self_attention_bias,
attention_dropout_broadcast_dims,
save_weights_to=None,
make_image_summary=True):
with tf.variable_scope("self_attention"):
y = common_attention.multihead_attention(
common_layers.layer_preprocess(x, hparams),
None,
encoder_self_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=hparams.self_attention_type,
save_weights_to=save_weights_to,
max_relative_position=hparams.max_relative_position,
make_image_summary=make_image_summary,
dropout_broadcast_dims=attention_dropout_broadcast_dims,
hard_attention_k=hparams.hard_attention_k)
x = common_layers.layer_postprocess(x, y, hparams)
return x
def transformer_decoder_ffn_unit(x,
hparams,
nonpadding_mask=None):
with tf.variable_scope("ffn"):
if hparams.transformer_ffn_type == "fc":
y = transformer.transformer_ffn_layer(
common_layers.layer_preprocess(x, hparams),
hparams,
conv_padding="LEFT",
nonpadding_mask=nonpadding_mask)
if hparams.transformer_ffn_type == "sepconv":
y = common_layers.sepconv_relu_sepconv(
common_layers.layer_preprocess(x, hparams),
filter_size=hparams.filter_size,
output_size=hparams.hidden_size,
first_kernel_size=(3, 1),
second_kernel_size=(5, 1),
padding="LEFT",
nonpadding_mask=nonpadding_mask,
dropout=hparams.relu_dropout)
x = common_layers.layer_postprocess(x, y, hparams)
return x
def transformer_decoder_attention_unit(x,
hparams,
encoder_output,
decoder_self_attention_bias,
encoder_decoder_attention_bias,
attention_dropout_broadcast_dims,
save_weights_to=None,
make_image_summary=True):
with tf.variable_scope("self_attention"):
y = common_attention.multihead_attention(
common_layers.layer_preprocess(x, hparams),
None,
decoder_self_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=hparams.self_attention_type,
save_weights_to=save_weights_to,
max_relative_position=hparams.max_relative_position,
cache=None,
make_image_summary=make_image_summary,
dropout_broadcast_dims=attention_dropout_broadcast_dims,
hard_attention_k=hparams.hard_attention_k)
x = common_layers.layer_postprocess(x, y, hparams)
if encoder_output is not None:
with tf.variable_scope("encdec_attention"):
y = common_attention.multihead_attention(
common_layers.layer_preprocess(x, hparams),
encoder_output,
encoder_decoder_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary,
dropout_broadcast_dims=attention_dropout_broadcast_dims,
hard_attention_k=hparams.hard_attention_k)
x = common_layers.layer_postprocess(x, y, hparams)
return x
def universal_transformer_basic(layer_inputs,
step, hparams,
ffn_unit,
attention_unit):
state, inputs, memory = tf.unstack(layer_inputs, num=None, axis=0,
name="unstack")
new_state = step_preprocess(state, step, hparams)
for i in range(hparams.num_inrecurrence_layers):
with tf.variable_scope("rec_layer_%d" % i):
new_state = ffn_unit(attention_unit(new_state))
return new_state, inputs, memory
def universal_transformer_highway(layer_inputs,
step, hparams,
ffn_unit,
attention_unit,
pad_remover=None):
state, inputs, memory = layer_inputs
new_state = step_preprocess(state, step, hparams)
for i in range(hparams.num_inrecurrence_layers):
with tf.variable_scope("rec_layer_%d" % i):
new_state = ffn_unit(attention_unit(new_state))
transformed_state = new_state
gate_inputs = []
if "s" in hparams.gates_inputs:
gate_inputs.append(state)
if "t" in hparams.gates_inputs:
gate_inputs.append(transformed_state)
if "i" in hparams.gates_inputs:
gate_inputs.append(inputs)
gate_ffn_layer = hparams.gate_ffn_layer
transform_gate = _ffn_layer_multi_inputs(
gate_inputs,
hparams,
ffn_layer_type=gate_ffn_layer,
name="transform",
bias_initializer=tf.constant_initializer(hparams.transform_bias_init),
activation=tf.sigmoid,
pad_remover=pad_remover,
preprocess=True)
if hparams.couple_carry_transform_gates:
carry_gate = tf.subtract(1.0, transform_gate, name="carry")
else:
carry_gate = _ffn_layer_multi_inputs(
gate_inputs,
hparams,
ffn_layer_type=gate_ffn_layer,
name="carry",
bias_initializer=tf.constant_initializer(-hparams.transform_bias_init),
activation=tf.sigmoid,
pad_remover=pad_remover,
preprocess=True)
new_state = state * carry_gate + transformed_state * transform_gate
contrib.summary().scalar("highway_transform_gate_layer",
tf.reduce_mean(transform_gate))
contrib.summary().scalar("highway_carry_gate_layer",
tf.reduce_mean(carry_gate))
return new_state, inputs, memory
|
Apache License 2.0
|
altairengineering/pkr
|
pkr/ext/__init__.py
|
timeout_handler
|
python
|
def timeout_handler(*_):
raise TimeoutError()
|
Simple function for raising a timeout Exception
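This handler is meant to be installed on SIGALRM so a slow extension hook is aborted after a deadline. A minimal Unix-only usage sketch (the 2-second budget and ``slow_step`` are illustrative, not pkr API):

import signal
import time

def timeout_handler(*_):
    raise TimeoutError()

def slow_step():
    time.sleep(10)

signal.signal(signal.SIGALRM, timeout_handler)
signal.alarm(2)                  # deliver SIGALRM in ~2 seconds
try:
    slow_step()
except TimeoutError:
    print("step timed out")
finally:
    signal.alarm(0)              # always cancel any pending alarm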
|
https://github.com/altairengineering/pkr/blob/64d94dff8c0dd2dd547a75d451c37bd36f170330/pkr/ext/__init__.py#L166-L168
|
import abc
from builtins import object
from builtins import str
import signal
import pkgutil
try:
from importlib.metadata import entry_points
except ModuleNotFoundError:
from importlib_metadata import entry_points
from pkr.cli.log import write
from pkr.utils import PkrException, get_pkr_path
class ExtMixin(metaclass=abc.ABCMeta):
    @staticmethod
    def setup(args, kard):
        pass
    @staticmethod
    def get_context_template_data():
        pass
    @staticmethod
    def post_up(effective_modules):
        pass
    @staticmethod
    def populate_kard():
        pass
    @staticmethod
    def configure_parser(parser):
        pass
class Extensions(object):
def __init__(self, features=None):
super(Extensions, self).__init__()
self.features = features
self._extensions = None
@property
def extensions(self):
if self._extensions is None:
self._extensions = self.list_all()
if self.features is not None:
return {
name: self._extensions[name] for name in self.features if name in self._extensions
}
return self._extensions
def __getattr__(self, attribute):
if not hasattr(ExtMixin, attribute):
return super(Extensions, self).__getattribute__(attribute)
if not self.features:
return lambda *args, **kw: ()
def wrapper(*args, **kwargs):
return [
output[1]
for output in map(
                    lambda ext: self._wrap_call(ext[0], ext[1], attribute, *args, **kwargs),
self.extensions.items(),
)
if output is not None
]
return wrapper
@staticmethod
def _wrap_call(name, extension, method_name, *args, **kwargs):
method = getattr(extension, method_name, None)
base_method = getattr(ExtMixin, method_name, None)
if method is None or method is base_method:
return
try:
return (name, method(*args, **kwargs))
except TimeoutError:
write(
'Extension "{}" raise timeout error, step "{}"'.format(extension.name, method_name)
)
raise
except PkrException:
raise
except Exception as exc:
write(
'Extension "{}" raise an unknown exception, step "{}": {}'.format(
extension.name, method_name, str(exc)
)
)
raise
def list(self):
return self.extensions.keys()
@classmethod
def _get_extension_class(cls, module):
for attr in dir(module):
ext_cls = getattr(module, attr)
try:
if issubclass(ext_cls, ExtMixin) and ext_cls != ExtMixin:
return ext_cls
except TypeError:
pass
@classmethod
def list_all(cls):
extensions = {}
for importer, package_name, _ in pkgutil.iter_modules(
[str(get_pkr_path() / "extensions")]
):
module = importer.find_module(package_name).load_module(package_name)
extensions[package_name] = cls._get_extension_class(module)
for entry in entry_points().get("pkr_extensions", ()) + entry_points().get(
"extensions", ()
):
if entry.name not in extensions:
extensions[entry.name] = entry.load()
return extensions
def __contains__(self, ext_name):
return ext_name in self.features
class TimeoutError(Exception):
    pass
|
Apache License 2.0
|
googleads/google-ads-python
|
google/ads/googleads/v7/services/services/customer_label_service/client.py
|
CustomerLabelServiceClient.__init__
|
python
|
def __init__(
self,
*,
credentials: Optional[credentials.Credentials] = None,
transport: Union[str, CustomerLabelServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = bool(
util.strtobool(
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
)
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
if isinstance(transport, CustomerLabelServiceTransport):
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = CustomerLabelServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
|
Instantiate the customer label service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.CustomerLabelServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
|
https://github.com/googleads/google-ads-python/blob/6794993e146abcfe21292677144c66cb546446bc/google/ads/googleads/v7/services/services/customer_label_service/client.py#L262-L377
|
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials
from google.auth.transport import mtls
from google.auth.transport.grpc import SslCredentials
from google.auth.exceptions import MutualTLSChannelError
from google.oauth2 import service_account
from google.ads.googleads.v7.resources.types import customer_label
from google.ads.googleads.v7.services.types import customer_label_service
from google.rpc import status_pb2 as status
from .transports.base import CustomerLabelServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import CustomerLabelServiceGrpcTransport
class CustomerLabelServiceClientMeta(type):
_transport_registry = (
OrderedDict()
)
_transport_registry["grpc"] = CustomerLabelServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[CustomerLabelServiceTransport]:
if label:
return cls._transport_registry[label]
return next(iter(cls._transport_registry.values()))
class CustomerLabelServiceClient(metaclass=CustomerLabelServiceClientMeta):
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> CustomerLabelServiceTransport:
return self._transport
@staticmethod
def customer_path(customer_id: str,) -> str:
return "customers/{customer_id}".format(customer_id=customer_id,)
@staticmethod
def parse_customer_path(path: str) -> Dict[str, str]:
m = re.match(r"^customers/(?P<customer_id>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def customer_label_path(customer_id: str, label_id: str,) -> str:
return "customers/{customer_id}/customerLabels/{label_id}".format(
customer_id=customer_id, label_id=label_id,
)
@staticmethod
def parse_customer_label_path(path: str) -> Dict[str, str]:
m = re.match(
r"^customers/(?P<customer_id>.+?)/customerLabels/(?P<label_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def label_path(customer_id: str, label_id: str,) -> str:
return "customers/{customer_id}/labels/{label_id}".format(
customer_id=customer_id, label_id=label_id,
)
@staticmethod
def parse_label_path(path: str) -> Dict[str, str]:
m = re.match(
r"^customers/(?P<customer_id>.+?)/labels/(?P<label_id>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
|
Apache License 2.0
|
tuturto/pyherc
|
src/pyherc/test/builders/effect.py
|
EffectBuilder.with_tick
|
python
|
def with_tick(self, tick):
self.tick = tick
return self
|
Set internal clock of the effect
:param tick: internal clock in ticks
:type tick: int
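Like the other ``with_*`` setters, this returns ``self``, so an effect can be configured with one fluent chain (sketch below; only the setters visible in this file are used):

builder = (EffectBuilder()
           .with_duration(12)
           .with_frequency(3)
           .with_tick(3))
# Each call mutates the builder and returns it, so the chain is equivalent to
# calling with_duration(12), with_frequency(3) and with_tick(3) one by one.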
|
https://github.com/tuturto/pyherc/blob/4e7c72a4d80d335f7d3c48cecac96cd7105acac4/src/pyherc/test/builders/effect.py#L135-L143
|
from pyherc.data.effects import Effect, EffectHandle
class EffectHandleBuilder():
def __init__(self):
super().__init__()
self.trigger = 'on drink'
self.effect = 'heal'
self.parameters = None
self.charges = 1
self.icon = 0
self.title = 'title'
self.description = 'description'
def with_trigger(self, trigger):
self.trigger = trigger
return self
def with_effect(self, effect):
self.effect = effect
return self
def with_parameters(self, parameters):
self.parameters = parameters
return self
def with_charges(self, charges):
self.charges = charges
return self
def build(self):
effect = EffectHandle(trigger=self.trigger,
effect=self.effect,
parameters=self.parameters,
charges=self.charges)
return effect
class EffectBuilder():
def __init__(self):
super().__init__()
self.duration = 0
self.frequency = 0
self.tick = 0
self.effect_name = 'proto'
self.multiple_allowed = False
self.icon = 101
self.title = 'effect'
self.description = 'description'
def with_duration(self, duration):
self.duration = duration
return self
def with_frequency(self, frequency):
self.frequency = frequency
return self
|
MIT License
|
python-bonobo/bonobo
|
bonobo/util/resolvers.py
|
_parse_option
|
python
|
def _parse_option(option):
try:
key, val = option.split("=", 1)
except ValueError:
return option, True
try:
val = json.loads(val)
except json.JSONDecodeError:
pass
return key, val
|
Parse a 'key=val' option string into a python (key, val) pair
:param option: str
:return: tuple
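Expected behaviour, demonstrated with a self-contained copy of the same parsing logic (renamed ``parse_option`` here so the snippet stands on its own):

import json

def parse_option(option):
    try:
        key, val = option.split("=", 1)
    except ValueError:
        return option, True              # bare flag without '='
    try:
        val = json.loads(val)            # numbers, booleans, lists, quoted strings ...
    except json.JSONDecodeError:
        pass                             # not JSON: keep the raw string
    return key, val

assert parse_option("retries=3") == ("retries", 3)
assert parse_option("debug=true") == ("debug", True)
assert parse_option("name=web-1") == ("name", "web-1")   # not valid JSON, kept as str
assert parse_option("verbose") == ("verbose", True)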
|
https://github.com/python-bonobo/bonobo/blob/feb7ec850566ca3c2ccc139610201dbd237d6083/bonobo/util/resolvers.py#L32-L49
|
import json
import os
import runpy
import bonobo
from bonobo.util import cast
class _RequiredModule:
def __init__(self, dct):
self.__dict__ = dct
class _ModulesRegistry(dict):
@property
def pathname(self):
return os.getcwd()
def require(self, name):
if name not in self:
bits = name.split(".")
filename = os.path.join(self.pathname, *bits[:-1], bits[-1] + ".py")
self[name] = _RequiredModule(runpy.run_path(filename, run_name=name))
return self[name]
|
Apache License 2.0
|
yuxixie/rl-for-question-generation
|
discriminators/src/answerability/pretraining/fairseq/models/fairseq_model.py
|
BaseFairseqModel.prepare_for_onnx_export_
|
python
|
def prepare_for_onnx_export_(self, **kwargs):
def apply_prepare_for_onnx_export_(module):
if module != self and hasattr(module, 'prepare_for_onnx_export_'):
module.prepare_for_onnx_export_(**kwargs)
self.apply(apply_prepare_for_onnx_export_)
|
Make model exportable via ONNX trace.
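The method simply broadcasts the call to every submodule that implements the same hook, via ``nn.Module.apply``. A small self-contained PyTorch sketch of that broadcast pattern (toy modules, not fairseq classes):

import torch.nn as nn

class ExportableLayer(nn.Module):
    def prepare_for_onnx_export_(self, **kwargs):
        print("preparing", self.__class__.__name__, kwargs)

class ToyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.layer = ExportableLayer()
        self.head = nn.Linear(4, 2)        # no hook, so it is skipped

    def prepare_for_onnx_export_(self, **kwargs):
        def _apply(module):
            if module is not self and hasattr(module, 'prepare_for_onnx_export_'):
                module.prepare_for_onnx_export_(**kwargs)
        self.apply(_apply)

ToyModel().prepare_for_onnx_export_(opset=11)   # prints once, for ExportableLayer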
|
https://github.com/yuxixie/rl-for-question-generation/blob/188cd7b04528e4f192023a596a072b3245c62838/discriminators/src/answerability/pretraining/fairseq/models/fairseq_model.py#L116-L122
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import FairseqDecoder, FairseqEncoder
class BaseFairseqModel(nn.Module):
def __init__(self):
super().__init__()
self._is_generation_fast = False
@staticmethod
def add_args(parser):
pass
@classmethod
def build_model(cls, args, task):
raise NotImplementedError
def get_targets(self, sample, net_output):
return sample['target']
def get_normalized_probs(self, net_output, log_probs, sample=None):
if hasattr(self, 'decoder'):
return self.decoder.get_normalized_probs(net_output, log_probs, sample)
elif torch.is_tensor(net_output):
logits = net_output.float()
if log_probs:
return F.log_softmax(logits, dim=-1)
else:
return F.softmax(logits, dim=-1)
raise NotImplementedError
def max_positions(self):
return None
def max_decoder_positions(self):
return self.decoder.max_positions()
def load_state_dict(self, state_dict, strict=True):
self.upgrade_state_dict(state_dict)
super().load_state_dict(state_dict, strict)
def upgrade_state_dict(self, state_dict):
self.upgrade_state_dict_named(state_dict, '')
def upgrade_state_dict_named(self, state_dict, name):
assert state_dict is not None
def do_upgrade(m, prefix):
if len(prefix) > 0:
prefix += '.'
for n, c in m.named_children():
name = prefix + n
if hasattr(c, 'upgrade_state_dict_named'):
c.upgrade_state_dict_named(state_dict, name)
elif hasattr(c, 'upgrade_state_dict'):
c.upgrade_state_dict(state_dict)
do_upgrade(c, name)
do_upgrade(self, name)
def make_generation_fast_(self, **kwargs):
if self._is_generation_fast:
return
self._is_generation_fast = True
def apply_remove_weight_norm(module):
try:
nn.utils.remove_weight_norm(module)
except ValueError:
return
self.apply(apply_remove_weight_norm)
def apply_make_generation_fast_(module):
if module != self and hasattr(module, 'make_generation_fast_'):
module.make_generation_fast_(**kwargs)
self.apply(apply_make_generation_fast_)
def train(mode):
if mode:
raise RuntimeError('cannot train after make_generation_fast')
self.eval()
self.train = train
|
MIT License
|
borda/birl
|
birl/utilities/data_io.py
|
load_landmarks
|
python
|
def load_landmarks(path_file):
if not os.path.isfile(path_file):
logging.warning('missing landmarks "%s"', path_file)
return
_, ext = os.path.splitext(path_file)
if ext == '.csv':
return load_landmarks_csv(path_file)
if ext == '.pts':
return load_landmarks_pts(path_file)
logging.error('not supported landmarks file: %s', os.path.basename(path_file))
|
load landmarks in csv and pts formats
:param str path_file: path to the input file
:return ndarray: np.array<np_points, dim> of landmarks points
>>> points = np.array([[1, 2], [3, 4], [5, 6]])
>>> save_landmarks('./sample_landmarks.csv', points)
>>> pts1 = load_landmarks('./sample_landmarks.csv')
>>> pts2 = load_landmarks('./sample_landmarks.pts')
>>> np.array_equal(pts1, pts2)
True
>>> os.remove('./sample_landmarks.csv')
>>> os.remove('./sample_landmarks.pts')
>>> # Wrong loading
>>> load_landmarks('./sample_landmarks.file')
>>> open('./sample_landmarks.file', 'w').close()
>>> load_landmarks('./sample_landmarks.file')
>>> os.remove('./sample_landmarks.file')
|
https://github.com/borda/birl/blob/cae694f52434e74386a9f1fd2007a218e3a3d670/birl/utilities/data_io.py#L58-L87
|
import logging
import os
import warnings
from functools import wraps
import cv2 as cv
import nibabel
import numpy as np
import pandas as pd
import SimpleITK as sitk
import yaml
from PIL import Image
from skimage.color import gray2rgb, rgb2gray
LANDMARK_COORDS = ['X', 'Y']
Image.MAX_IMAGE_PIXELS = None
def create_folder(path_folder, ok_existing=True):
path_folder = os.path.abspath(path_folder)
if not os.path.isdir(path_folder):
try:
os.makedirs(path_folder, mode=0o775)
except Exception:
logging.exception(
'Something went wrong (probably parallel access), the status of "%s" is %s',
path_folder,
os.path.isdir(path_folder),
)
path_folder = None
elif not ok_existing:
logging.warning('Folder already exists: %s', path_folder)
path_folder = False
return path_folder
|
BSD 3-Clause New or Revised License
|
stackstorm/st2contrib
|
packs/trello/sensors/list_actions_sensor.py
|
TrelloListSensor.cleanup
|
python
|
def cleanup(self):
pass
|
Run the sensor cleanup code (if any).
|
https://github.com/stackstorm/st2contrib/blob/095b021a31ba134728deb7c707240196d016e729/packs/trello/sensors/list_actions_sensor.py#L86-L90
|
import dateutil.parser
from trello import TrelloClient
from st2reactor.sensor.base import PollingSensor
class TrelloListSensor(PollingSensor):
TRIGGER = 'trello.new_action'
def __init__(self, sensor_service, config=None, poll_interval=None):
super(TrelloListSensor, self).__init__(sensor_service, config, poll_interval)
self._logger = self._sensor_service.get_logger(__name__)
list_actions_sensor = self._config.get('list_actions_sensor')
if not list_actions_sensor:
raise ValueError('[TrelloListSensor]: "list_sensor" config value is required!')
self._lists = list_actions_sensor.get('lists', [])
if not self._lists:
raise ValueError('[TrelloListSensor]'
'"lists" config value should have at least one entry!')
def setup(self):
pass
def poll(self):
self._logger.debug('[TrelloListSensor]: Entering into listen mode ...')
for trello_list_config in self._lists:
self._update_credentials_by_precedence(trello_list_config)
l = TrelloList(**trello_list_config)
self._logger.debug("[TrelloListSensor]: Processing queue for Trello list: '%s'"
% l.list_id)
actions = l.fetch_actions(
filter=trello_list_config.get('filter') or None,
since=self._sensor_service.get_value(l.key_name)
)
for action in reversed(actions):
self._logger.debug("[TrelloListSensor]: Found new action for Trello list: '%r'"
% action)
self._sensor_service.dispatch(trigger=self.TRIGGER, payload=action)
if is_date(action.get('date')):
self._sensor_service.set_value(l.key_name, action.get('date'))
def _update_credentials_by_precedence(self, trello_list_config):
if not trello_list_config.get('api_key'):
found_credentials = self._config['list_actions_sensor'] if self._config['list_actions_sensor'].get('api_key') else self._config
trello_list_config['api_key'] = found_credentials.get('api_key')
trello_list_config['token'] = found_credentials.get('token')
|
Apache License 2.0
|
thingsboard/python_tb_rest_client
|
tb_rest_client/models/models_pe/scheduler_event_info.py
|
SchedulerEventInfo.__init__
|
python
|
def __init__(self, additional_info=None, created_time=None, customer_id=None, id=None, name=None, owner_id=None, schedule=None, tenant_id=None, type=None):
self._additional_info = None
self._created_time = None
self._customer_id = None
self._id = None
self._name = None
self._owner_id = None
self._schedule = None
self._tenant_id = None
self._type = None
self.discriminator = None
if additional_info is not None:
self.additional_info = additional_info
if created_time is not None:
self.created_time = created_time
if customer_id is not None:
self.customer_id = customer_id
if id is not None:
self.id = id
if name is not None:
self.name = name
if owner_id is not None:
self.owner_id = owner_id
if schedule is not None:
self.schedule = schedule
if tenant_id is not None:
self.tenant_id = tenant_id
if type is not None:
self.type = type
|
SchedulerEventInfo - a model defined in Swagger
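A hedged construction sketch: every field is an optional keyword argument, and ``attribute_map`` records the JSON names used on the wire. The schedule payload below is a placeholder, and the snippet assumes the full generated class (with its property setters) is importable from tb_rest_client:

event = SchedulerEventInfo(
    name="nightly-report",
    type="generateReport",                                            # placeholder event type
    schedule='{"timezone": "UTC", "cronExpression": "0 0 2 * * ?"}',  # placeholder schedule JSON
)
print(event.name)                                            # nightly-report
print(SchedulerEventInfo.attribute_map["created_time"])      # createdTime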
|
https://github.com/thingsboard/python_tb_rest_client/blob/87c6a3703974fc8a86e4c72c444168ee2b758ecb/tb_rest_client/models/models_pe/scheduler_event_info.py#L54-L83
|
import pprint
import re
import six
class SchedulerEventInfo(object):
swagger_types = {
'additional_info': 'str',
'created_time': 'int',
'customer_id': 'CustomerId',
'id': 'SchedulerEventId',
'name': 'str',
'owner_id': 'EntityId',
'schedule': 'str',
'tenant_id': 'TenantId',
'type': 'str'
}
attribute_map = {
'additional_info': 'additionalInfo',
'created_time': 'createdTime',
'customer_id': 'customerId',
'id': 'id',
'name': 'name',
'owner_id': 'ownerId',
'schedule': 'schedule',
'tenant_id': 'tenantId',
'type': 'type'
}
|
Apache License 2.0
|
ithinksw/philo
|
philo/contrib/sobol/search.py
|
get_search_instance
|
python
|
def get_search_instance(slug, search_arg):
search = registry[slug]
search_arg = search_arg.lower()
if USE_CACHE:
key = _make_cache_key(search, search_arg)
cached = cache.get(key)
if cached:
return cached
instance = search(search_arg)
instance.slug = slug
return instance
|
Returns a search instance for the given slug, either from the cache or newly-instantiated.
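The cache key used here is a SHA-1 over a fixed seed, the search slug and the lower-cased query. A standalone Python 3 sketch of that keying scheme (hashlib only; the slug and query are examples, and the original code hashes byte strings directly under Python 2):

from hashlib import sha1

SEARCH_CACHE_SEED = 'philo_sobol_search_results'

def make_cache_key(slug, search_arg):
    raw = SEARCH_CACHE_SEED + slug + search_arg.lower()
    return sha1(raw.encode('utf-8')).hexdigest()

key = make_cache_key('google', 'Django CMS')
print(key)        # stable key suitable for cache.get / cache.set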
|
https://github.com/ithinksw/philo/blob/8a772dd4761e3a4b926358d6ebf87c9fc7033ba5/philo/contrib/sobol/search.py#L45-L56
|
import datetime
from hashlib import sha1
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.db.models.options import get_verbose_name as convert_camelcase
from django.utils import simplejson as json
from django.utils.http import urlquote_plus
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.template import loader, Context, Template, TemplateDoesNotExist
from philo.contrib.sobol.utils import make_tracking_querydict
from philo.utils.registry import Registry
if getattr(settings, 'SOBOL_USE_EVENTLET', False):
try:
from eventlet.green import urllib2
except:
import urllib2
else:
import urllib2
__all__ = (
'Result', 'BaseSearch', 'DatabaseSearch', 'URLSearch', 'JSONSearch', 'GoogleSearch', 'registry', 'get_search_instance'
)
SEARCH_CACHE_SEED = 'philo_sobol_search_results'
USE_CACHE = getattr(settings, 'SOBOL_USE_CACHE', True)
registry = Registry()
def _make_cache_key(search, search_arg):
return sha1(SEARCH_CACHE_SEED + search.slug + search_arg).hexdigest()
|
ISC License
|
indeedsecurity/wes
|
wes/framework_plugins/common.py
|
JavaProcessor.filter_on_path
|
python
|
def filter_on_path(self, search_node, filter_type, tree=None):
if type(search_node) is list:
raise AttributeError("search_node shouldn't be a list. It should be a javalang node.")
base_path = tuple()
if tree:
base_path = self.find_path_to_element(tree, search_node)
if base_path is None:
raise LookupError("Error: Couldn't find node within tree, chances are we were given the incorrect tree.")
base_path = tuple(base_path[:-1])
filtered_results = []
for path, node in search_node:
if type(node) is filter_type:
filtered_results.append((base_path + path, node))
return filtered_results
|
This method is like the javalang filter method but it allows you to specify a path to search under. To do this
we use javalang's walk and then check the type of the nodes it encounters.
:param search_node: The node you would like to search under; this is the node that would be returned within the
tuple by the javalang filter method
:param filter_type: The javalang type you would like to filter on
:param tree: The javalang compilationUnit, this should be provided if
you want absolute paths to the node
:return: List of tuples (path, node) of the filtered results
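A usage sketch of the same idea with javalang directly (assumes the javalang package is installed; the Java snippet is made up). javalang nodes iterate as (path, node) pairs, and filter_on_path simply restricts that walk to a subtree and, when a tree is supplied, prefixes each path so it is absolute within the compilation unit.
import javalang
source = """
class Demo {
    void run() { helper(); other(1); }
    void helper() {}
}
"""
tree = javalang.parse.parse(source)
# Pick the `run` method declaration as the subtree to search under.
run_decl = next(m for _, m in tree.filter(javalang.tree.MethodDeclaration) if m.name == "run")
# Roughly what filter_on_path(run_decl, javalang.tree.MethodInvocation) returns,
# except that without `tree` the paths are relative to run_decl, not absolute.
calls = [(path, node) for path, node in run_decl if isinstance(node, javalang.tree.MethodInvocation)]
print([node.member for _, node in calls])   # ['helper', 'other']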
|
https://github.com/indeedsecurity/wes/blob/a5d707b3ba15855d66563ab41ac95eaecd0db444/wes/framework_plugins/common.py#L350-L378
|
import javalang
import glob
import os
import codecs
import re
from typed_ast import ast27, ast3, conversions, _ast3
import pickle
import logging
JAVA_PRIMITIVES = ["boolean", "byte", "char", "double", "int", "float", "long", "short"]
JAVA_DOT_LANG_IMPORTS = [
"java.lang.Boolean", "java.lang.Byte", "java.lang.Character", "java.lang.Class", "java.lang.ClassLoader",
"java.lang.ClassValue", "java.lang.Compiler", "java.lang.Double", "java.lang.Enum", "java.lang.Float",
"java.lang.InheritableThreadLocal", "java.lang.Integer", "java.lang.Long", "java.lang.Math", "java.lang.Number",
"java.lang.Object", "java.lang.Package", "java.lang.Process", "java.lang.ProcessBuilder", "java.lang.Runtime",
"java.lang.RuntimePermission", "java.lang.SecurityManager", "java.lang.Short", "java.lang.StackTraceElement",
"java.lang.StrictMath", "java.lang.String", "java.lang.StringBuffer", "java.lang.StringBuilder", "java.lang.System",
"java.lang.Thread", "java.lang.ThreadGroup", "java.lang.ThreadLocal", "java.lang.Throwable", "java.lang.Void"
]
logger = logging.getLogger(__name__)
class Framework:
def __init__(self):
pass
class JavaProcessor:
def __init__(self, working_dir):
self.working_dir = working_dir
self.web_context_dir = self._find_java_web_context()
self.java_compilation_units = {}
self.variable_lookup_table = {}
self.class_lookup_table = {}
self.method_invocation_lookup_table = {}
def load_project(self):
glob_path = os.path.join(self.working_dir, '**', '*.java')
project_files = glob.glob(glob_path, recursive=True)
project_files = list(filter(lambda x: os.path.isfile(x), project_files))
project_files = list(filter(lambda x: os.path.join(self.working_dir, 'test') not in x, project_files))
for f in project_files:
with codecs.open(f, 'r', 'utf-8', 'ignore') as fh:
code = fh.read()
try:
tree = javalang.parse.parse(code)
self.java_compilation_units[self.strip_work_dir(f)] = tree
except javalang.parser.JavaSyntaxError as e:
logger.warning("There was an error parsing '%s' with javalang: %s", self.strip_work_dir(f), e)
for filepath, tree in self.java_compilation_units.items():
self.variable_lookup_table.update(self._preprocess_java_literals(tree))
for filepath, tree in self.java_compilation_units.items():
self.variable_lookup_table.update(self._preprocess_java_variables(tree))
self.class_lookup_table.update(self._preprocess_java_classes(tree, filepath))
mis = self._preprocess_java_method_invocations(tree)
for k, v in mis.items():
if k in self.method_invocation_lookup_table:
self.method_invocation_lookup_table[k] += v
else:
self.method_invocation_lookup_table[k] = v
def strip_work_dir(self, path):
return path.split(self.working_dir, 1)[1][1:]
def resolve_node_fqn(self, path, member, qualifier=None):
compilation_unit = self.get_compilation_unit(path)
for java_import in compilation_unit.imports:
import_pkg = java_import.path.rsplit('.', 1)
if qualifier == "" or qualifier is None:
if import_pkg[1] == member:
return java_import.path
else:
if import_pkg[1] == qualifier:
return ".".join([java_import.path, member])
classes = list(filter(lambda x: type(x) is javalang.tree.ClassDeclaration, path))
class_names = list(map(lambda x: x.name, classes))
qualifier = ".".join(class_names) if len(classes) > 0 else None
if qualifier is not None:
if compilation_unit.package is None:
return ".".join([qualifier, member])
else:
return ".".join([compilation_unit.package.name, qualifier, member])
else:
if compilation_unit.package is None:
return member
else:
return ".".join([compilation_unit.package.name, member])
def _resolve_binary_operation(self, var):
element = var[1]
if hasattr(element, 'initializer'):
operandl = element.initializer.operandl
operandr = element.initializer.operandr
operator = element.initializer.operator
else:
operandl = element.operandl
operandr = element.operandr
operator = element.operator
if (type(operandl) in [javalang.tree.MemberReference, javalang.tree.Literal] and
type(operandr) in [javalang.tree.MemberReference, javalang.tree.Literal]):
left = None
right = None
if type(operandl) is javalang.tree.Literal:
left = operandl.value.strip('"\'')
elif type(operandl) is javalang.tree.MemberReference:
fqn = self.resolve_node_fqn(var[0], operandl.member, operandl.qualifier)
if fqn in self.variable_lookup_table:
left = self.variable_lookup_table[fqn]
if type(operandr) is javalang.tree.Literal:
right = operandr.value.strip('"\'')
elif type(operandr) is javalang.tree.MemberReference:
fqn = self.resolve_node_fqn(var[0], operandr.member, operandr.qualifier)
if fqn in self.variable_lookup_table:
right = self.variable_lookup_table[fqn]
if left and right:
                if operator == "+":
return left.strip('"\'') + right.strip('"\'')
def _preprocess_java_literals(self, tree):
results = {}
variables = tree.filter(javalang.tree.VariableDeclarator)
for var in variables:
element = var[1]
if type(element.initializer) is javalang.tree.Literal:
value = element.initializer.value
if value[0] in ['"', "'"] and value[-1] in ['"', "'"] and len(value) > 2:
var_fqn = self.resolve_node_fqn(var[0], element.name)
results[var_fqn] = element.initializer.value.strip('"\'')
return results
def _preprocess_java_variables(self, tree):
results = {}
variables = tree.filter(javalang.tree.VariableDeclarator)
for var in variables:
element = var[1]
if type(element.initializer) is javalang.tree.MemberReference:
member_fdn = self.resolve_node_fqn(var[0], var[1].initializer.member, var[1].initializer.qualifier)
if member_fdn in self.variable_lookup_table:
var_fqn = self.resolve_node_fqn(var[0], element.name)
results[var_fqn] = self.variable_lookup_table[member_fdn]
elif type(element.initializer) is javalang.tree.BinaryOperation:
value = self._resolve_binary_operation(var)
if value:
var_fqn = self.resolve_node_fqn(var[0], element.name)
results[var_fqn] = value
return results
def _preprocess_java_method_invocations(self, tree):
method_invocations = {}
for path, mi in tree.filter(javalang.tree.MethodInvocation):
fqn = self.resolve_method_fqn(mi, path)
if fqn:
fqn = "{}({})".format(fqn, len(mi.arguments))
if fqn in method_invocations:
method_invocations[fqn] += [(path, mi)]
else:
method_invocations[fqn] = [(path, mi)]
return method_invocations
def resolve_method_fqn(self, node, path):
if type(node) is not javalang.tree.MethodInvocation:
return None
compilation_unit = self.get_compilation_unit(path)
imports = self.get_imports(path)
var_decl = self.get_variable_declaration(node, path)
if var_decl:
if type(var_decl) is javalang.tree.Import:
fqn = ".".join([var_decl.path, node.member])
elif type(var_decl) is javalang.tree.MethodDeclaration:
if compilation_unit.package is not None:
fqn = ".".join([compilation_unit.package.name, self.get_class_declaration(path).name, var_decl.name])
else:
fqn = ".".join([self.get_class_declaration(path).name, var_decl.name])
else:
fqn = self.resolve_type(var_decl.type.name, compilation_unit.package, imports)
fqn = ".".join([fqn, node.member])
else:
return None
return fqn
def _preprocess_java_classes(self, tree, filepath):
classes = {}
for path, cd in tree.filter(javalang.tree.ClassDeclaration):
classes_in_path = list(filter(lambda x: type(x) is javalang.tree.ClassDeclaration, path))
if classes_in_path:
if tree.package is not None:
fqn = ".".join([tree.package.name] + list(map(lambda x: x.name, classes_in_path)) + [cd.name])
else:
fqn = ".".join(list(map(lambda x: x.name, classes_in_path)) + [cd.name])
else:
if tree.package is not None:
fqn = ".".join([tree.package.name, cd.name])
else:
fqn = cd.name
classes[fqn] = (path, cd, filepath)
return classes
def find_path_to_element(self, tree, node):
node_pickle = pickle.dumps(node)
paths = []
for path, node in tree.filter(type(node)):
if pickle.dumps(node) == node_pickle:
paths.append(path)
if not paths:
return None
return paths[0] if len(paths) == 1 else paths
|
Apache License 2.0
|
icpc-jag/rime
|
rime/plugins/plus/basic_patch.py
|
Testset._TestSolutionWithChallengeCasesOne
|
python
|
def _TestSolutionWithChallengeCasesOne(self, solution, testcase, result,
ui):
case_result = yield self._TestOneCase(solution, testcase, ui)
result.results[testcase] = case_result
if (solution.expected_verdicts is None and
case_result.verdict == test.TestCaseResult.AC):
ui.console.PrintAction('TEST', solution,
'%s: Unexpectedly accepted'
% os.path.basename(testcase.infile),
progress=True)
yield False
elif (solution.expected_verdicts is not None and
case_result.verdict not in solution.expected_verdicts):
result.Finalize(False,
'%s: Unexpected Verdict (%s)' %
(os.path.basename(testcase.infile),
case_result.verdict),
notable_testcase=testcase)
ui.errors.Error(solution, result.detail)
if ui.options.keep_going:
yield False
else:
raise taskgraph.Bailout([False])
elif case_result.verdict not in (test.TestCaseResult.WA,
test.TestCaseResult.TLE,
test.TestCaseResult.RE):
result.Finalize(False,
'%s: Judge Error' % os.path.basename(
testcase.infile),
notable_testcase=testcase)
ui.errors.Error(solution, result.detail)
if ui.options.keep_going:
yield False
else:
raise taskgraph.Bailout([False])
ui.console.PrintAction('TEST', solution,
'%s: PASSED' % os.path.basename(
testcase.infile),
progress=True)
result.Finalize(True,
'%s: %s' % (os.path.basename(testcase.infile),
case_result.verdict),
notable_testcase=testcase)
yield True
|
Test a wrong solution which has specified challenge cases.
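The decision logic above, restated as a small synchronous helper for clarity (hypothetical names and plain string verdicts; the real method is a taskgraph coroutine that also reports to the UI and honours keep_going):
ACCEPTED, WRONG_ANSWER, TIME_LIMIT, RUNTIME_ERROR = "AC", "WA", "TLE", "RE"
def classify_challenge_case(verdict, expected_verdicts):
    # A challenge case only "passes" when the wrong solution fails on it in an allowed way.
    if expected_verdicts is None and verdict == ACCEPTED:
        return "unexpectedly accepted"
    if expected_verdicts is not None and verdict not in expected_verdicts:
        return "unexpected verdict"
    if verdict not in (WRONG_ANSWER, TIME_LIMIT, RUNTIME_ERROR):
        return "judge error"
    return "passed"
assert classify_challenge_case("WA", None) == "passed"
assert classify_challenge_case("AC", None) == "unexpectedly accepted"
assert classify_challenge_case("TLE", ["WA"]) == "unexpected verdict"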
|
https://github.com/icpc-jag/rime/blob/bfad761387e22a308ead372ae49b624e6bfecbb3/rime/plugins/plus/basic_patch.py#L133-L177
|
import fnmatch
import hashlib
import json
import os.path
import signal
import subprocess
from rime.basic import codes as basic_codes
from rime.basic import consts
import rime.basic.targets.problem
import rime.basic.targets.project
import rime.basic.targets.solution
import rime.basic.targets.testset
from rime.basic import test
from rime.basic.util import test_summary
from rime.core import codes
from rime.core import targets
from rime.core import taskgraph
from rime.plugins.plus import rime_plus_version
from rime.util import files
libdir = None
def parseVersion(v):
return [int(d) for d in v.split('.')]
class Project(targets.registry.Project):
def __init__(self, *args, **kwargs):
super(Project, self).__init__(*args, **kwargs)
def PreLoad(self, ui):
super(Project, self).PreLoad(ui)
self.library_dir = None
self.project_defined = False
def _project(library_dir=None,
required_rime_plus_version=rime_plus_version):
if self.project_defined:
raise RuntimeError('project() is already defined.')
if (parseVersion(rime_plus_version) <
parseVersion(required_rime_plus_version)):
raise RuntimeError('installed rime-plus is too old.')
global libdir
libdir = os.path.join(
self.base_dir,
library_dir)
self.library_dir = libdir
self.project_defined = True
self.exports['project'] = _project
class Testset(targets.registry.Testset):
def __init__(self, *args, **kwargs):
super(Testset, self).__init__(*args, **kwargs)
def PreLoad(self, ui):
super(Testset, self).PreLoad(ui)
self.reactives = []
self.exports.update(
codes.CreateDictionary('%s_generator', self.generators,
src_dir=self.src_dir,
out_dir=self.out_dir,
wrapper=self._WrapDependency))
self.exports.update(
codes.CreateDictionary('%s_validator', self.validators,
src_dir=self.src_dir,
out_dir=self.out_dir,
wrapper=self._WrapDependency))
self.exports.update(
codes.CreateDictionary('%s_judge', self.judges,
src_dir=self.src_dir,
out_dir=self.out_dir,
wrapper=self._WrapDependency))
self.exports.update(
codes.CreateDictionary('%s_reactive', self.reactives,
src_dir=self.src_dir,
out_dir=self.out_dir,
wrapper=self._WrapDependency))
def _WrapDependency(self, code_class):
def Wrapped(src_name, src_dir, out_dir, dependency=[], variant=None,
*args, **kwargs):
code = code_class(src_name, src_dir, out_dir, *args, **kwargs)
code.dependency = dependency
code.variant = variant
return code
return Wrapped
@taskgraph.task_method
def _TestSolutionWithChallengeCases(self, solution, ui):
all_testcases = self.ListTestCases()
challenge_infiles = solution.challenge_cases
testcases = []
for infile in challenge_infiles:
matched_testcases = [
testcase for testcase in all_testcases
if fnmatch.fnmatch(os.path.basename(testcase.infile), infile)]
if not matched_testcases:
ui.errors.Error(solution,
'Challenge case not found: %s' % infile)
result = test.TestsetResult(self, solution, [])
result.Finalize(False,
'Challenge case not found: %s' % infile)
yield result
testcases.extend(
[t for t in matched_testcases if t.infile not in testcases])
result = test.TestsetResult(self, solution, testcases)
yield taskgraph.TaskBranch([
self._TestSolutionWithChallengeCasesOne(
solution, testcase, result, ui)
for testcase in testcases],
unsafe_interrupt=True)
if not result.IsFinalized():
result.Finalize(False,
'Unexpectedly accepted all challenge cases')
ui.errors.Error(solution, result.detail)
yield result
@taskgraph.task_method
|
MIT License
|
w1ll1am23/pyeconet
|
src/pyeconet/equipment/water_heater.py
|
WaterHeater.leak_installed
|
python
|
def leak_installed(self) -> bool:
leak = self._equipment_info.get("@LEAKINSTALLED")
if leak is not None:
return leak["value"] == 1
else:
return False
|
Return whether the heater has leak detection installed.
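A minimal sketch of the payload shape this property inspects; the field name mirrors the one used above, and the sample values are made up.
equipment_info = {"@LEAKINSTALLED": {"value": 1}}
def leak_installed(info):
    leak = info.get("@LEAKINSTALLED")
    return leak is not None and leak["value"] == 1
print(leak_installed(equipment_info))   # True
print(leak_installed({}))               # False: field absent, so no leak detection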
|
https://github.com/w1ll1am23/pyeconet/blob/4727a5d219f8557a0b63768b8f727c7e270bdb63/src/pyeconet/equipment/water_heater.py#L74-L80
|
from datetime import datetime
import logging
from enum import Enum
from typing import List, Union
from pyeconet.errors import InvalidResponseFormat
from pyeconet.equipment import Equipment
_LOGGER = logging.getLogger(__name__)
class WaterHeaterOperationMode(Enum):
OFF = 1
ELECTRIC_MODE = 2
ENERGY_SAVING = 3
HEAT_PUMP_ONLY = 4
HIGH_DEMAND = 5
GAS = 6
ENERGY_SAVER = 7
PERFORMANCE = 8
VACATION = 9
ELECTRIC = 10
HEAT_PUMP = 11
UNKNOWN = 99
@staticmethod
def by_string(str_value: str):
_cleaned_string = str_value.rstrip().replace(" ", "_").upper()
if _cleaned_string == WaterHeaterOperationMode.OFF.name.upper():
return WaterHeaterOperationMode.OFF
elif _cleaned_string == WaterHeaterOperationMode.ELECTRIC_MODE.name.upper():
return WaterHeaterOperationMode.ELECTRIC_MODE
elif _cleaned_string == WaterHeaterOperationMode.ENERGY_SAVING.name.upper():
return WaterHeaterOperationMode.ENERGY_SAVING
elif _cleaned_string == WaterHeaterOperationMode.HEAT_PUMP_ONLY.name.upper():
return WaterHeaterOperationMode.HEAT_PUMP_ONLY
elif _cleaned_string == WaterHeaterOperationMode.HIGH_DEMAND.name.upper():
return WaterHeaterOperationMode.HIGH_DEMAND
elif _cleaned_string == WaterHeaterOperationMode.GAS.name.upper():
return WaterHeaterOperationMode.GAS
elif _cleaned_string == WaterHeaterOperationMode.ENERGY_SAVER.name.upper():
return WaterHeaterOperationMode.ENERGY_SAVING
elif _cleaned_string == WaterHeaterOperationMode.PERFORMANCE.name.upper():
return WaterHeaterOperationMode.PERFORMANCE
elif _cleaned_string == WaterHeaterOperationMode.VACATION.name.upper():
return WaterHeaterOperationMode.VACATION
elif _cleaned_string == WaterHeaterOperationMode.ELECTRIC.name.upper():
return WaterHeaterOperationMode.ELECTRIC_MODE
elif _cleaned_string == WaterHeaterOperationMode.HEAT_PUMP.name.upper():
return WaterHeaterOperationMode.HEAT_PUMP_ONLY
else:
_LOGGER.error("Unknown mode: [%s]", str_value)
return WaterHeaterOperationMode.UNKNOWN
class WaterHeater(Equipment):
def __init__(self, equipment_info: dict, api_interface) -> None:
super().__init__(equipment_info, api_interface)
self.energy_usage = None
self._energy_type = None
self.water_usage = None
@property
|
MIT License
|
gumyr/cq_warehouse
|
src/cq_warehouse/fastener.py
|
evaluate_parameter_dict
|
python
|
def evaluate_parameter_dict(
parameters: dict, is_metric: Optional[bool] = True,
) -> dict:
measurements = {}
for params, value in parameters.items():
if is_metric:
measurements[params] = metric_str_to_float(value)
else:
measurements[params] = imperial_str_to_float(value)
return measurements
|
Convert the strings in a parameter dictionary into dimensions
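Usage sketch, assuming the helpers above are in scope (values are made up, not taken from the bundled CSV data): imperial strings may mix whole and fractional inches, metric strings are plain millimetre expressions, and both come back as floats in millimetres.
print(evaluate_parameter_dict({"width": "1 1/4", "depth": "1/2"}, is_metric=False))
# {'width': 31.75, 'depth': 12.7}   # 1.25 in and 0.5 in converted via IN = 25.4 mm
print(evaluate_parameter_dict({"pitch": "0.5"}, is_metric=True))
# {'pitch': 0.5}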
|
https://github.com/gumyr/cq_warehouse/blob/e27bdc32f2f8b8b33b6d6f736d03d15984795673/src/cq_warehouse/fastener.py#L144-L154
|
from abc import ABC, abstractmethod
from typing import Literal, Tuple, Optional, List, TypeVar, Union
from math import sin, cos, tan, radians, pi, degrees, sqrt
from functools import cache
import csv
import importlib.resources as pkg_resources
import cadquery as cq
import cq_warehouse
MM = 1
IN = 25.4 * MM
def polygon_diagonal(width: float, num_sides: Optional[int] = 6) -> float:
return width / cos(pi / num_sides)
def read_fastener_parameters_from_csv(filename: str) -> dict:
parameters = {}
with pkg_resources.open_text(cq_warehouse, filename) as csvfile:
reader = csv.DictReader(csvfile)
fieldnames = reader.fieldnames
for row in reader:
key = row[fieldnames[0]]
row.pop(fieldnames[0])
parameters[key] = row
return parameters
def is_safe(value: str) -> bool:
return len(value) <= 10 and all(c in "0123456789./ " for c in set(value))
def imperial_str_to_float(measure: str) -> float:
if is_safe(measure):
result = eval(measure.strip().replace(" ", "+")) * IN
else:
result = measure
return result
def decode_imperial_size(size: str) -> Tuple[float, float]:
imperial_numbered_sizes = {
"#0000": 0.0210 * IN,
"#000": 0.0340 * IN,
"#00": 0.0470 * IN,
"#0": 0.0600 * IN,
"#1": 0.0730 * IN,
"#2": 0.0860 * IN,
"#3": 0.0990 * IN,
"#4": 0.1120 * IN,
"#5": 0.1250 * IN,
"#6": 0.1380 * IN,
"#8": 0.1640 * IN,
"#10": 0.1900 * IN,
"#12": 0.2160 * IN,
}
sizes = size.split("-")
if size[0] == "#":
major_diameter = imperial_numbered_sizes[sizes[0]]
else:
major_diameter = imperial_str_to_float(sizes[0])
pitch = IN / (imperial_str_to_float(sizes[1]) / IN)
return (major_diameter, pitch)
def metric_str_to_float(measure: str) -> float:
if is_safe(measure):
result = eval(measure)
else:
result = measure
return result
def evaluate_parameter_dict_of_dict(
parameters: dict, is_metric: Optional[bool] = True,
) -> dict:
measurements = {}
for key, value in parameters.items():
measurements[key] = evaluate_parameter_dict(
parameters=value, is_metric=is_metric
)
return measurements
|
Apache License 2.0
|
joeyhendricks/quickpotato
|
QuickPotato/statistical/data.py
|
RawData.percentile_65th
|
python
|
def percentile_65th(self):
return np.percentile(np.array(self._response_times), 65)
|
Returns
-------
float
    The 65th percentile of the raw response times.
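The method simply delegates to NumPy's interpolated percentile; a standalone check with made-up response times:
import numpy as np
response_times = [12, 15, 18, 20, 22, 30, 45, 60, 90, 120]
print(np.percentile(np.array(response_times), 65))   # 42.75 (interpolated between 30 and 45)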
|
https://github.com/joeyhendricks/quickpotato/blob/5e33e64d77997b00a43f5573353138436b1f1a34/QuickPotato/statistical/data.py#L168-L175
|
from QuickPotato.database.queries import Crud
import numpy as np
class RawData(Crud):
def __init__(self, test_id, database_name):
super(RawData, self).__init__()
self.test_id = test_id
self.database_name = database_name
self._response_times = self.select_response_times(self.database_name, self.test_id)
def response_times(self):
return self._response_times
def normalized_response_times(self):
measurements = np.array(self._response_times)
return measurements[abs(measurements - np.mean(measurements)) < 2 * np.std(measurements)]
def average_response_time(self):
return sum(self._response_times) / len(self._response_times)
def maximum_outlier_in_response_times(self):
return max(self._response_times)
def minimum_outlier_in_response_times(self):
return min(self._response_times)
def percentile_5th(self):
return np.percentile(np.array(self._response_times), 5)
def percentile_10th(self):
return np.percentile(np.array(self._response_times), 10)
def percentile_15th(self):
return np.percentile(np.array(self._response_times), 15)
def percentile_20th(self):
return np.percentile(np.array(self._response_times), 20)
def percentile_25th(self):
return np.percentile(np.array(self._response_times), 25)
def percentile_30th(self):
return np.percentile(np.array(self._response_times), 30)
def percentile_35th(self):
return np.percentile(np.array(self._response_times), 35)
def percentile_40th(self):
return np.percentile(np.array(self._response_times), 40)
def percentile_45th(self):
return np.percentile(np.array(self._response_times), 45)
def percentile_50th(self):
return np.percentile(np.array(self._response_times), 50)
def percentile_55th(self):
return np.percentile(np.array(self._response_times), 55)
def percentile_60th(self):
return np.percentile(np.array(self._response_times), 60)
|
MIT License
|
charanpald/apgl
|
apgl/util/Util.py
|
Util.matrixPower
|
python
|
def matrixPower(A, n):
Parameter.checkClass(A, numpy.ndarray)
tol = 10**-10
lmbda, V = scipy.linalg.eig(A)
lmbda[numpy.abs(lmbda) <= tol] = 0
lmbda[numpy.abs(lmbda) > tol] = lmbda[numpy.abs(lmbda) > tol]**n
if n >= 0:
return (V*lmbda).dot(numpy.linalg.inv(V))
else:
A = scipy.linalg.pinv(A)
n = numpy.abs(n)
lmbda, V = scipy.linalg.eig(A)
lmbda[numpy.abs(lmbda) > tol] = lmbda[numpy.abs(lmbda) > tol]**n
return (V*lmbda).dot(numpy.linalg.inv(V))
|
Compute the matrix power of A using the exponent n. The computation simply
evaluates the eigendecomposition of A and then raises the eigenvalue
matrix to the corresponding power.
Warning: if at least one eigenvalue is negative, n should be an integer.
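A quick sanity check of the eigendecomposition approach for a diagonalizable matrix (values made up): rebuild A**3 from V diag(lambda**3) V^-1 and compare with numpy.linalg.matrix_power.
import numpy
import scipy.linalg
A = numpy.array([[2.0, 1.0],
                 [0.0, 3.0]])
lmbda, V = scipy.linalg.eig(A)
powered = (V * lmbda**3).dot(numpy.linalg.inv(V))
print(numpy.allclose(powered.real, numpy.linalg.matrix_power(A, 3)))   # True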
|
https://github.com/charanpald/apgl/blob/f8b71be2c49c62ace7ce5691216d2ff041ff978b/apgl/util/Util.py#L576-L598
|
from __future__ import print_function
import sys
import os
import numpy
from contextlib import contextmanager
import numpy.random as rand
import logging
import scipy.linalg
import scipy.sparse as sparse
import scipy.special
import pickle
from apgl.util.Parameter import Parameter
class Util(object):
def __init__(self):
pass
@staticmethod
def histogram(v):
if v.ndim != 1:
raise ValueError("Input must be a dimension 1 vector")
uniqElements = numpy.unique(v)
numElements = uniqElements.shape[0]
hist = numpy.zeros(numElements)
for i in range(0, numElements):
hist[i] = sum(v == uniqElements[i])
return (hist, uniqElements)
@staticmethod
def mode(v):
if v.ndim != 1:
raise ValueError("Input must be a dimension 1 vector")
uniqElements = numpy.unique(v)
freqs = numpy.zeros(uniqElements.shape[0])
for i in range(uniqElements.shape[0]):
freqs[i] = numpy.sum(v == uniqElements[i])
return uniqElements[numpy.argmax(freqs)]
@staticmethod
def sampleWithoutReplacement(sampleSize, totalSize):
perm = rand.permutation(totalSize)
perm = perm[0:sampleSize]
perm = numpy.sort(perm)
return perm
@staticmethod
def randNormalInt(mean, sd, min, max):
        i = round(rand.normal(mean, sd))
        while i < min or i > max:
            i = round(rand.normal(mean, sd))
return i
@staticmethod
def computeMeanVar(X):
mu = numpy.mean(X, 0)
X2 = X - mu
sigma = numpy.dot(X2.T, X2)/X.shape[0]
return (mu, sigma)
@staticmethod
def iterationStr(i, step, maxIter, preStr="Iteration: "):
outputStr = ""
if maxIter == 1:
outputStr = preStr + str(i) + " (1.0)"
elif i % step == 0:
outputStr = preStr + str(i) + " (" + str("%.3f" % (float(i)/(maxIter-1))) + ")"
elif i == maxIter-1:
outputStr = preStr + str(i) + " (" + str("%.3f" % (float(i)/(maxIter-1))) + ")"
else:
raise ValueError("Got invalid input: " + str((i, step, maxIter)))
return outputStr
@staticmethod
def printIteration(i, step, maxIter, preStr="Iteration: "):
if i % step == 0 or i==maxIter-1:
logging.debug(Util.iterationStr(i, step, maxIter, preStr))
@staticmethod
def printConciseIteration(i, step, maxIter, preStr="Iteration: "):
if i==0:
print(Util.iterationStr(i, step, maxIter, preStr), end=""),
elif i!=maxIter-1:
print(Util.iterationStr(i, step, maxIter, " "), end="")
else:
print(Util.iterationStr(i, step, maxIter, " "))
@staticmethod
def abstract():
import inspect
caller = inspect.getouterframes(inspect.currentframe())[1][3]
raise NotImplementedError("Method " + caller + ' must be implemented in subclass')
@staticmethod
def rank(A, tol=1e-8):
s = numpy.linalg.svd(A, compute_uv=False)
return numpy.sum(numpy.where(s>tol, 1, 0))
@staticmethod
def randomChoice(V, n=1):
Parameter.checkClass(V, numpy.ndarray)
if V.shape[0]==0:
return -1
if V.ndim == 1:
cumV = numpy.cumsum(V)
p = numpy.random.rand(n)*cumV[-1]
return numpy.searchsorted(cumV, p)
elif V.ndim == 2:
cumV = numpy.cumsum(V, 1)
P = numpy.random.rand(V.shape[0], n)*numpy.array([cumV[:, -1]]).T
inds = numpy.zeros(P.shape, numpy.int)
for i in range(P.shape[0]):
inds[i, :] = numpy.searchsorted(cumV[i, :], P[i, :])
return inds
else:
raise ValueError("Invalid number of dimensions")
@staticmethod
def fitPowerLaw(x, xmin):
x = x[x >= xmin]
n = x.shape[0]
lnSum = n / numpy.sum(numpy.log(x/xmin))
gamma = lnSum
return gamma
@staticmethod
def fitDiscretePowerLaw(x, xmins = None):
xmax = numpy.max(x)
if xmins == None:
xmin = numpy.max(numpy.array([numpy.min(x), 1]))
xmins = numpy.arange(xmin, xmax)
if xmins.shape[0] == 0:
return -1, -1, numpy.min(x)
alphas = numpy.arange(1.5, 3.5, 0.01)
ksAlpha = numpy.zeros((xmins.shape[0], 2))
for j in range(xmins.shape[0]):
xmin = xmins[j]
z = x[x >= xmin]
n = z.shape[0]
sumLogx = numpy.sum(numpy.log(z))
likelyhoods = numpy.zeros(alphas.shape[0])
for i in range(alphas.shape[0]):
likelyhoods[i] = -n*numpy.log(scipy.special.zeta(alphas[i], xmin)) -alphas[i]*sumLogx
k = numpy.argmax(likelyhoods)
cdf = numpy.cumsum(numpy.bincount(z)[xmin:xmax]/float(n))
fit = numpy.arange(xmin, xmax)**-alphas[k] /scipy.special.zeta(alphas[k], xmin)
fit = numpy.cumsum(fit)
ksAlpha[j, 0] = numpy.max(numpy.abs(cdf - fit))
ksAlpha[j, 1] = alphas[k]
i = numpy.argmin(ksAlpha[:, 0])
return ksAlpha[i, 0], ksAlpha[i, 1], xmins[i]
@staticmethod
def entropy(v):
items = numpy.unique(v)
infEnt = 0
for i in items:
prob = numpy.sum(v==i)/float(v.shape[0])
infEnt -= prob * numpy.log2(prob)
return infEnt
@staticmethod
def expandIntArray(v):
Parameter.checkClass(v, numpy.ndarray)
Parameter.checkList(v, Parameter.checkInt, [0, float('inf')])
w = numpy.zeros(numpy.sum(v), numpy.int)
currentInd = 0
for i in range(v.shape[0]):
w[currentInd:currentInd+v[i]] = i
currentInd += v[i]
return w
@staticmethod
def random2Choice(V, n=1):
Parameter.checkClass(V, numpy.ndarray)
if V.ndim == 1 and V.shape[0] != 2:
raise ValueError("Function only works on binary probabilities")
if V.ndim == 2 and V.shape[1] != 2:
raise ValueError("Function only works on binary probabilities")
if V.ndim == 1:
cumV = numpy.cumsum(V)
p = numpy.random.rand(n)*cumV[-1]
cumV2 = numpy.ones(n)*cumV[0] - p
return numpy.array(cumV2 <= 0, numpy.int)
elif V.ndim == 2:
cumV = numpy.cumsum(V, 1)
P = numpy.random.rand(V.shape[0], n)*numpy.array([cumV[:, -1]]).T
cumV2 = numpy.outer(cumV[:, 0], numpy.ones(n)) - P
return numpy.array(cumV2 <= 0, numpy.int)
else:
raise ValueError("Invalid number of dimensions")
@staticmethod
def loadPickle(filename):
file = open(filename, 'rb')
obj = pickle.load(file)
file.close()
return obj
@staticmethod
def savePickle(obj, filename, overwrite=True, debug=False):
if os.path.isfile(filename) and not overwrite:
raise IOError("File exists: " + filename)
file = open(filename, 'wb')
pickle.dump(obj, file)
file.close()
if debug:
logging.debug("Saved " + filename + " object type " + str(type(obj)))
@staticmethod
def incompleteCholesky(X, k):
if X.shape[0] != X.shape[1]:
raise ValueError("X must be a square matrix")
ell = X.shape[0]
R = numpy.zeros((k, ell))
d = numpy.diag(X)
aInd = numpy.argmax(d)
a = d[aInd]
nu = numpy.zeros(k)
for j in range(k):
nu[j] = numpy.sqrt(a)
for i in range(ell):
R[j, i] = (X[aInd, i] - R[:, i].T.dot(R[:, aInd]))/nu[j]
d[i] = d[i] - R[j, i]**2
aInd = numpy.argmax(d)
a = d[aInd]
return R
@staticmethod
def incompleteCholesky2(X, k):
ell = X.shape[0]
A = numpy.zeros((ell, k))
Xj = X
Xaj = numpy.zeros((ell, k))
for j in range(k):
d = numpy.diag(Xj)
ind = numpy.argmax(d)
A[ind, j] = 1/numpy.sqrt(Xj[ind, ind])
Xaj[:, j] = Xj.dot(A[:, j])
Xj = Xj - numpy.outer(Xaj[:, j], Xaj[:, j])/numpy.dot(A[:, j].T, Xaj[:, j])
return Xaj.T
@staticmethod
def indEig(s, U, inds):
U = U[:, inds]
s = s[inds]
return s, U
@staticmethod
def indSvd(P, s, Q, inds):
if inds.shape[0] != 0:
P = P[:, inds]
s = s[inds]
Q = Q.conj().T
Q = Q[:, inds]
else:
P = numpy.zeros((P.shape[0], 0))
s = numpy.zeros(0)
Q = Q.conj().T
Q = numpy.zeros((Q.shape[0], 0))
return P, s, Q
@staticmethod
def svd(A, eps=10**-8, tol=10**-8):
if A.shape[0] > A.shape[1]:
return Util.svd_from_eigh(A, eps)
else:
P, s, Qh = Util.svd_from_eigh(A.conj().T, eps, tol)
return Qh.conj().T, s.conj(), P.conj().T
@staticmethod
def svd_from_eigh(A, eps=10**-8, tol=10**-8):
AA = A.conj().T.dot(A)
lmbda, Q = scipy.linalg.eigh(AA + eps*numpy.eye(A.shape[1]))
lmbda = lmbda-eps
inds = numpy.arange(lmbda.shape[0])[lmbda>tol]
lmbda, Q = Util.indEig(lmbda, Q, inds)
sigma = lmbda**0.5
P = A.dot(Q) / sigma
Qh = Q.conj().T
if __debug__:
if not scipy.allclose(A, (P*sigma).dot(Qh), atol=tol):
logging.warn(" SVD obtained from EVD is too poor")
Parameter.checkArray(P, softCheck=True, arrayInfo="P in svd_from_eigh()")
if not Parameter.checkOrthogonal(P, tol=tol, softCheck=True, arrayInfo="P in svd_from_eigh()", investigate=True):
print("corresponding sigma: ", sigma)
Parameter.checkArray(sigma, softCheck=True, arrayInfo="sigma in svd_from_eigh()")
Parameter.checkArray(Qh, softCheck=True, arrayInfo="Qh in svd_from_eigh()")
if not Parameter.checkOrthogonal(Qh.conj().T, tol=tol, softCheck=True, arrayInfo="Qh.H in svd_from_eigh()"):
print("corresponding sigma: ", sigma)
return P, sigma, Qh
@staticmethod
def safeSvd(A, eps=10**-8, tol=10**-8):
if __debug__:
if not Parameter.checkArray(A, softCheck = True):
logging.info("... in Util.safeSvd")
try:
try:
P, sigma, Qh = scipy.linalg.svd(A, full_matrices=False)
except scipy.linalg.LinAlgError as e:
logging.warn(str(e))
raise Exception('SVD decomposition has to be computed from EVD decomposition')
inds = numpy.arange(sigma.shape[0])[sigma > tol]
if inds.shape[0] < sigma.shape[0]:
P, sigma, Q = Util.indSvd(P, sigma, Qh, inds)
Qh = Q.conj().T
if not scipy.allclose(A, (P*sigma).dot(Qh)):
logging.warn(" After cleaning singular values from scipy.linalg.svd, the SVD decomposition is too far from the original matrix")
raise Exception('SVD decomposition has to be computed from EVD decomposition')
if __debug__:
badAnswerFromScipySvd = False
if not Parameter.checkArray(P, softCheck=True, arrayInfo="P in Util.safeSvd()"):
badAnswerFromScipySvd = True
if not Parameter.checkArray(sigma, softCheck = True, arrayInfo="sigma in Util.safeSvd()"):
badAnswerFromScipySvd = True
if not Parameter.checkArray(Qh, softCheck = True, arrayInfo="Qh in Util.safeSvd()"):
badAnswerFromScipySvd = True
if badAnswerFromScipySvd:
logging.warn(" After cleaning singular values from scipy.linalg.svd, the SVD decomposition still contains 'NaN', 'inf' or complex values")
raise Exception('SVD decomposition has to be computed from EVD decomposition')
except Exception as inst:
if inst.args != ('SVD decomposition has to be computed from EVD decomposition',):
raise
logging.warn(" Using EVD method to compute the SVD.")
P, sigma, Qh = Util.svd(A, eps, tol)
if __debug__:
badAnswerFromUtilSvd = False
if not Parameter.checkArray(P, softCheck = True):
logging.info("... in P in Util.safeSvd")
badAnswerFromUtilSvd = True
if not Parameter.checkArray(sigma, softCheck = True):
logging.info("... in sigma in Util.safeSvd")
badAnswerFromUtilSvd = True
if not Parameter.checkArray(Qh, softCheck = True):
logging.info("... in Q in Util.safeSvd")
badAnswerFromUtilSvd = True
if badAnswerFromUtilSvd:
                    logging.warn(" SVD decomposition obtained from EVD decomposition contains 'NaN', 'inf' or complex values")
from sandbox.util.ProfileUtils import ProfileUtils
if ProfileUtils.memory() > 10**9:
ProfileUtils.memDisplay(locals())
return P, sigma, Qh
@staticmethod
def safeEigh(a, b=None, lower=True, eigvals_only=False, overwrite_a=False, overwrite_b=False, turbo=True, eigvals=None, type=1):
try:
return scipy.linalg.eigh(a, b=b, lower=lower, eigvals_only=eigvals_only, overwrite_a=overwrite_a, overwrite_b=overwrite_b, turbo=turbo, eigvals=eigvals)
except:
if __debug__:
logging.warning(" scipy.linalg.eigh raised an error, scipy.linalg.eig() is used instead")
lmbda, q = scipy.linalg.eig(a, b=b, overwrite_a=overwrite_a, overwrite_b=overwrite_b)
if eigvals == None:
eigvals = (0, len(lmbda))
if eigvals_only:
return lmbda[eigvals[0]:eigvals[1]]
else :
return lmbda[eigvals[0]:eigvals[1]], q[eigvals[0]:eigvals[1]]
@staticmethod
def powerLawProbs(alpha, zeroVal=0.5, maxInt=100):
p = numpy.arange(0, maxInt, dtype=numpy.float)
p[0] = zeroVal
p = p ** -alpha
p /= p.sum()
return p
@staticmethod
|
BSD 3-Clause New or Revised License
|
gratipay/gratipay.com
|
gratipay/models/team/membership.py
|
Membership.get_memberships
|
python
|
def get_memberships(self, current_participant=None):
takes = self.compute_actual_takes()
members = []
for take in takes.values():
member = {}
member['participant_id'] = take['participant'].id
member['username'] = take['participant'].username
member['take'] = take['nominal_amount']
member['balance'] = take['balance']
member['percentage'] = take['percentage']
member['editing_allowed'] = False
member['is_current_user'] = False
if current_participant:
member['removal_allowed'] = current_participant.username == self.owner
if member['username'] == current_participant.username:
                member['editing_allowed'] = True
member['last_week'] = self.get_take_last_week_for(take['participant'])
members.append(member)
return members
|
Return a list of member dicts.
|
https://github.com/gratipay/gratipay.com/blob/dc4e953a8a5b96908e2f3ea7f8fef779217ba2b6/gratipay/models/team/membership.py#L38-L60
|
from __future__ import absolute_import, division, print_function, unicode_literals
from .takes import ZERO, PENNY
class Membership(object):
def add_member(self, participant, recorder):
self.set_take_for(participant, PENNY, recorder)
def remove_member(self, participant, recorder):
self.set_take_for(participant, ZERO, recorder)
@property
def nmembers(self):
return self.ndistributing_to
|
MIT License
|
i8enn/pydantic-odm
|
pydantic_odm/mixins.py
|
DBPydanticMixin.bulk_create
|
python
|
async def bulk_create(
cls, documents: Union[List[BaseModel], List["DictAny"]],
) -> List[DBPydanticMixin]:
collection = await cls.get_collection()
if not documents:
return []
if isinstance(documents[0], BaseModel):
documents = [
cls._encode_dict_to_mongo(d.dict())
for d in cast(List[BaseModel], documents)
]
documents = cast(List["DictAny"], documents)
await cls.pre_save_validation(documents, many=True)
result = await collection.insert_many(documents)
inserted_ids = result.inserted_ids
inserted_documents = []
for i, document_id in enumerate(inserted_ids):
document = cls.parse_obj(documents[i])
document.id = document_id
document._doc = cls._decode_mongo_documents(documents[i])
inserted_documents.append(document)
return inserted_documents
|
Create many documents
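A hedged usage sketch: the model, database and collection names below are assumptions, and it only runs against a real MongoDB once get_db_manager() has been initialised (see pydantic_odm.db). Note the input list must be homogeneous, either all models or all dicts.
import asyncio
from pydantic_odm.mixins import DBPydanticMixin
class User(DBPydanticMixin):
    name: str
    class Config:
        database = "default"
        collection = "users"
async def main():
    created = await User.bulk_create([User(name="alice"), User(name="bob")])
    print([u.id for u in created])   # ObjectIds assigned by insert_many
# asyncio.run(main())   # uncomment once a MongoDBManager has been configured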
|
https://github.com/i8enn/pydantic-odm/blob/36868717995207301a5f7565a7dfd1893cf8c58a/pydantic_odm/mixins.py#L242-L268
|
from __future__ import annotations
import abc
from bson import ObjectId
from motor import motor_asyncio
from pydantic import BaseModel
from pymongo.collection import Collection, ReturnDocument
from typing import TYPE_CHECKING, Any, List, Optional, Union, cast
from .db import get_db_manager
from .decoders.mongodb import AbstractMongoDBDecoder, BaseMongoDBDecoder
from .encoders.mongodb import AbstractMongoDBEncoder, BaseMongoDBEncoder
from .types import ObjectIdStr
if TYPE_CHECKING:
from pydantic.typing import MappingIntStrAny
from pydantic.typing import AbstractSetIntStr, DictAny, DictIntStrAny, DictStrAny
class BaseDBMixin(BaseModel, abc.ABC):
id: Optional[ObjectIdStr] = None
_doc: "DictAny" = {}
_mongodb_encoder: AbstractMongoDBEncoder = BaseMongoDBEncoder()
_mongo_decoder: AbstractMongoDBDecoder = BaseMongoDBDecoder()
class Config:
allow_population_by_field_name = True
json_encoders: "DictAny" = {ObjectId: lambda v: ObjectIdStr(v)}
def __setattr__(self, key: Any, value: Any) -> Any:
if key not in ["_doc"]:
return super(BaseDBMixin, self).__setattr__(key, value)
self.__dict__[key] = value
return value
@classmethod
def _decode_mongo_documents(cls, document: "DictStrAny") -> "DictStrAny":
return cls._mongo_decoder(document)
@classmethod
def _encode_dict_to_mongo(cls, data: "DictStrAny") -> "DictStrAny":
return cls._mongodb_encoder(data)
def _encode_model_to_mongo(
self,
include: Union["AbstractSetIntStr", "DictIntStrAny"] = None,
exclude: Union["AbstractSetIntStr", "DictIntStrAny"] = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
) -> DictStrAny:
model_as_dict = self.dict(
include=include,
exclude=exclude,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
return self._mongodb_encoder(model_as_dict)
def dict(
self,
*,
include: Union["AbstractSetIntStr", "MappingIntStrAny"] = None,
exclude: Union["AbstractSetIntStr", "MappingIntStrAny"] = None,
by_alias: bool = False,
skip_defaults: bool = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
) -> DictStrAny:
if not exclude:
exclude = {"_doc"}
else:
exclude = {"_doc", *exclude}
return super(BaseDBMixin, self).dict(
include=include,
exclude=exclude,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
def _update_model_from__doc(self) -> BaseDBMixin:
new_obj = self.parse_obj(self._doc)
new_obj.id = self._doc.get("id")
for k, field in new_obj.__fields__.items():
field_default = getattr(field, "default", None)
self.__dict__[k] = getattr(new_obj, k, field_default)
return new_obj
class DBPydanticMixin(BaseDBMixin):
class Config:
collection: Optional[str] = None
database: Optional[str] = None
@classmethod
async def get_collection(cls) -> Collection:
db_name = getattr(cls.Config, "database", None)
collection_name = getattr(cls.Config, "collection", None)
if not db_name or not collection_name:
raise ValueError("Collection or db_name is not configured in Config class")
db_manager = get_db_manager()
if not db_manager:
raise RuntimeError("MongoDBManager not initialized")
db = db_manager[db_name]
if not db:
raise ValueError('"%s" is not found in MongoDBManager.databases' % db_name)
collection = db[collection_name]
if not collection:
collection = await db.create_collection(collection_name)
return collection
@staticmethod
async def pre_save_validation(
data: Union["DictAny", List["DictAny"]], many: bool = False
) -> Union["DictAny", List["DictAny"]]:
return data
@classmethod
async def create(cls, fields: Union["DictAny", BaseModel]) -> DBPydanticMixin:
if isinstance(fields, BaseModel):
fields = fields.dict(exclude_unset=True)
document = cls.parse_obj(fields)
await document.save()
return document
@classmethod
async def count(cls, query: DictStrAny = None) -> int:
if not query:
query = {}
query = cls._encode_dict_to_mongo(query)
collection = await cls.get_collection()
return await collection.count_documents(query)
@classmethod
async def find_one(cls, query: DictStrAny) -> DBPydanticMixin:
collection = await cls.get_collection()
query = cls._encode_dict_to_mongo(query)
result = await collection.find_one(query)
if result:
result = cls._decode_mongo_documents(result)
model = cls.parse_obj(result)
document_id = result.get("id")
model._doc = result
model.id = document_id
return model
return result
@classmethod
async def find_many(
cls, query: "DictStrAny", return_cursor: bool = False
) -> Union[List[DBPydanticMixin], motor_asyncio.AsyncIOMotorCursor]:
collection = await cls.get_collection()
query = cls._encode_dict_to_mongo(query)
cursor = collection.find(query)
if return_cursor:
return cursor
documents = []
async for _doc in cursor:
_doc = cls._decode_mongo_documents(_doc)
document = cls.parse_obj(_doc)
document._doc = _doc
documents.append(document)
return documents
@classmethod
async def update_many(
cls, query: "DictStrAny", fields: "DictAny", return_cursor: bool = False,
) -> Union[List[DBPydanticMixin], motor_asyncio.AsyncIOMotorCursor]:
await cls.pre_save_validation(fields, many=True)
collection = await cls.get_collection()
query = cls._encode_dict_to_mongo(query)
await collection.update_many(query, fields)
return await cls.find_many(query, return_cursor)
@classmethod
|
MIT License
|
vinteo/hass-opensprinkler
|
custom_components/opensprinkler/binary_sensor.py
|
ControllerSensorActive.unique_id
|
python
|
def unique_id(self) -> str:
return slugify(f"{self._entry.unique_id}_{self._entity_type}_{self._attr}")
|
Return a unique, Home Assistant friendly identifier for this entity.
|
https://github.com/vinteo/hass-opensprinkler/blob/92f8cb82daf77cde6cfeb3f9b55c6d91270ef83c/custom_components/opensprinkler/binary_sensor.py#L77-L79
|
import logging
from typing import Callable
from homeassistant.components.binary_sensor import BinarySensorEntity
from homeassistant.const import CONF_NAME
from homeassistant.core import HomeAssistant
from homeassistant.util import slugify
from . import (
OpenSprinklerBinarySensor,
OpenSprinklerControllerEntity,
OpenSprinklerProgramEntity,
OpenSprinklerStationEntity,
)
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant,
entry: dict,
async_add_entities: Callable,
):
entities = _create_entities(hass, entry)
async_add_entities(entities)
def _create_entities(hass: HomeAssistant, entry: dict):
entities = []
controller = hass.data[DOMAIN][entry.entry_id]["controller"]
coordinator = hass.data[DOMAIN][entry.entry_id]["coordinator"]
name = entry.data[CONF_NAME]
entities.append(
ControllerSensorActive(entry, name, "sensor_1", controller, coordinator)
)
entities.append(
ControllerSensorActive(entry, name, "sensor_2", controller, coordinator)
)
entities.append(
ControllerSensorActive(entry, name, "rain_delay", controller, coordinator)
)
for _, program in controller.programs.items():
entities.append(ProgramIsRunningBinarySensor(entry, name, program, coordinator))
for _, station in controller.stations.items():
entities.append(StationIsRunningBinarySensor(entry, name, station, coordinator))
return entities
class ControllerSensorActive(
OpenSprinklerControllerEntity, OpenSprinklerBinarySensor, BinarySensorEntity
):
def __init__(self, entry, name, sensor, controller, coordinator):
self._name = name
self._controller = controller
self._entity_type = "binary_sensor"
self._sensor = sensor
self._attr = sensor + "_active"
super().__init__(entry, name, coordinator)
@property
def name(self) -> str:
return f"{self._name} {self._attr.replace('_', ' ').title()}"
@property
|
MIT License
|
pybrain2/pybrain2
|
pybrain/utilities.py
|
subDict
|
python
|
def subDict(d, allowedkeys, flip=False):
res = {}
for k, v in list(d.items()):
if (k in allowedkeys) ^ flip:
res[k] = v
return res
|
Returns a new dictionary with a subset of the entries of d
that have one of the (dis-)allowed keys.
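Quick illustration, assuming subDict above is in scope (values made up):
d = {'eta': 0.1, 'momentum': 0.9, 'verbose': True}
print(subDict(d, ['eta', 'momentum']))              # {'eta': 0.1, 'momentum': 0.9}
print(subDict(d, ['eta', 'momentum'], flip=True))   # {'verbose': True}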
|
https://github.com/pybrain2/pybrain2/blob/33ead60704d126e58c10d458ddd1e5e5fd17b65d/pybrain/utilities.py#L660-L667
|
from __future__ import print_function
__author__ = 'Tom Schaul, tom@idsia.ch; Justin Bayer, bayerj@in.tum.de'
import gc
import pickle
import logging
import threading
import os
import operator
from itertools import count
from math import sqrt
from random import random, choice
from scipy import where, array, exp, zeros, size, mat, median
from functools import reduce
known_extensions = {
'mat': 'matlab',
'txt': 'ascii',
'svm': 'libsvm',
'pkl': 'pickle',
'nc' : 'netcdf' }
def abstractMethod():
raise NotImplementedError('Method not implemented!')
def drawIndex(probs, tolerant=False):
if not sum(probs) < 1.00001 or not sum(probs) > 0.99999:
if tolerant:
probs /= sum(probs)
else:
print((probs, 1 - sum(probs)))
raise ValueError()
r = random()
s = 0
for i, p in enumerate(probs):
s += p
if s > r:
return i
return choice(list(range(len(probs))))
def drawGibbs(vals, temperature=1.):
if temperature == 0:
m = max(vals)
best = []
for i, v in enumerate(vals):
if v == m:
best.append(i)
return choice(best)
else:
temp = vals / temperature
temp += 20 - max(temp)
if min(temp) < -20:
for i, v in enumerate(temp):
if v < -20:
temp[i] = -20
temp = exp(temp)
temp /= sum(temp)
return drawIndex(temp)
def iterCombinations(tup):
if len(tup) == 1:
for i in range(tup[0]):
yield (i,)
elif len(tup) > 1:
for prefix in iterCombinations(tup[:-1]):
for i in range(tup[-1]):
yield tuple(list(prefix) + [i])
def setAllArgs(obj, argdict):
xmlstore = isinstance(obj, XMLBuildable)
for n in list(argdict.keys()):
if hasattr(obj, n):
setattr(obj, n, argdict[n])
if xmlstore:
obj.argdict[n] = argdict[n]
else:
print(('Warning: parameter name', n, 'not found!'))
if xmlstore:
if not hasattr(obj, '_unknown_argdict'):
obj._unknown_argdict = {}
obj._unknown_argdict[n] = argdict[n]
def linscale(d, lim):
return (d - d.min())*(lim[1] - lim[0]) + lim[0]
def percentError(out, true):
arrout = array(out).flatten()
wrong = where(arrout != array(true).flatten())[0].size
return 100. * float(wrong) / float(arrout.size)
def formatFromExtension(fname):
_base, ext = os.path.splitext(fname)
if not ext:
return None
try:
format = known_extensions[ext.replace('.', '')]
except KeyError:
format = None
return format
class XMLBuildable(object):
argdict = None
def setArgs(self, **argdict):
if not self.argdict:
self.argdict = {}
setAllArgs(self, argdict)
class Serializable(object):
def saveToFileLike(self, flo, format=None, **kwargs):
format = 'pickle' if format is None else format
save = getattr(self, "save_%s" % format, None)
if save is None:
raise ValueError("Unknown format '%s'." % format)
save(flo, **kwargs)
@classmethod
def loadFromFileLike(cls, flo, format=None):
format = 'pickle' if format is None else format
load = getattr(cls, "load_%s" % format, None)
if load is None:
raise ValueError("Unknown format '%s'." % format)
return load(flo)
def saveToFile(self, filename, format=None, **kwargs):
if format is None:
format = formatFromExtension(filename)
        with open(filename, 'wb') as fp:
self.saveToFileLike(fp, format, **kwargs)
@classmethod
def loadFromFile(cls, filename, format=None):
if format is None:
format = formatFromExtension(filename)
        with open(filename, 'rb') as fp:
obj = cls.loadFromFileLike(fp, format)
obj.filename = filename
return obj
def save_pickle(self, flo, protocol=0):
pickle.dump(self, flo, protocol)
@classmethod
def load_pickle(cls, flo):
return pickle.load(flo)
class Named(XMLBuildable):
_nameIds = count(0)
def getName(self):
logging.warning("Deprecated, use .name property instead.")
return self.name
def setName(self, newname):
logging.warning("Deprecated, use .name property instead.")
self.name = newname
def _getName(self):
if self._name is None:
self._name = self._generateName()
return self._name
def _setName(self, newname):
self._name = newname
_name = None
name = property(_getName, _setName)
def _generateName(self):
return "%s-%i" % (self.__class__.__name__, next(self._nameIds))
def __repr__(self):
return "<%s '%s'>" % (self.__class__.__name__, self.name)
def fListToString(a_list, a_precision=3):
from numpy import around
s_list = ", ".join(("%g" % around(x, a_precision)).ljust(a_precision+3)
for x in a_list)
return "[%s]" % s_list
def tupleRemoveItem(tup, index):
l = list(tup)
return tuple(l[:index] + l[index + 1:])
def confidenceIntervalSize(stdev, nbsamples):
return 2 * 1.98 * stdev / sqrt(nbsamples)
def trace(func):
def inner(*args, **kwargs):
print(("%s: %s, %s" % (func.__name__, args, kwargs)))
return func(*args, **kwargs)
return inner
def threaded(callback=lambda * args, **kwargs: None, daemonic=False):
def innerDecorator(func):
def inner(*args, **kwargs):
target = lambda: callback(func(*args, **kwargs))
t = threading.Thread(target=target)
t.setDaemon(daemonic)
t.start()
return inner
return innerDecorator
def garbagecollect(func):
def inner(*args, **kwargs):
result = func(*args, **kwargs)
gc.collect()
return result
return inner
def memoize(func):
cache = {}
def inner(*args, **kwargs):
args = tuple(args)
kwargs_set = frozenset(iter(kwargs.items()))
if (args, kwargs_set) in cache:
result = cache[args, kwargs_set]
else:
result = func(*args, **kwargs)
cache[args, kwargs_set] = result
return result
return inner
def storeCallResults(obj, verbose=False):
results = []
oldcall = obj.__class__.__call__
def newcall(*args, **kwargs):
result = oldcall(*args, **kwargs)
results.append(result)
if verbose:
print(result)
return result
obj.__class__.__call__ = newcall
return results
def multiEvaluate(repeat):
def decorator(func):
def inner(*args, **kwargs):
result = 0.
for dummy in range(repeat):
result += func(*args, **kwargs)
return result / repeat
return inner
return decorator
def _import(name):
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
try:
mod = getattr(mod, comp)
except AttributeError:
raise ImportError("No module named %s" % mod)
return mod
def int2gray(i):
return i ^ (i >> 1)
def gray2int(g, size):
res = 0
for i in reversed(list(range(size))):
gi = (g >> i) % 2
if i == size - 1:
bi = gi
else:
bi = bi ^ gi
res += bi * 2 ** i
return res
def asBinary(i):
if i > 1:
if i % 2 == 1:
return asBinary(i >> 1) + '1'
else:
return asBinary(i >> 1) + '0'
else:
return str(i)
def one_to_n(val, maxval):
a = zeros(maxval, float)
a[val] = 1.
return a
def n_to_one(arr):
return where(arr == 1)[0][0]
def canonicClassString(x):
if isinstance(x, object):
return repr(x.__class__).split("'")[1]
else:
return repr(x.__class__)
def decrementAny(tup):
res = []
for i, x in enumerate(tup):
if x > 0:
res.append(tuple(list(tup[:i]) + [x - 1] + list(tup[i + 1:])))
return res
def reachable(stepFunction, start, destinations, _alreadyseen=None):
if len(start) == 0 or len(destinations) == 0:
return {}
if _alreadyseen is None:
_alreadyseen = []
_alreadyseen.extend(start)
res = {}
for s in start:
if s in destinations:
res[s] = 0
start.remove(s)
new = set()
for s in start:
new.update(stepFunction(s))
new.difference_update(_alreadyseen)
ndestinations = list(destinations)
for s in list(new):
if s in destinations:
res[s] = 1
new.remove(s)
ndestinations.remove(s)
_alreadyseen.append(s)
deeper = reachable(stepFunction, new, ndestinations, _alreadyseen)
for k, val in list(deeper.items()):
res[k] = val + 1
return res
def flood(stepFunction, fullSet, initSet, relevant=None):
if fullSet is None:
flooded = set(initSet)
else:
full = set(fullSet)
flooded = full.intersection(set(initSet))
if relevant is None:
relevant = full.copy()
if relevant:
relevant = set(relevant)
change = flooded.copy()
while len(change)>0:
new = set()
for m in change:
if fullSet is None:
new.update(stepFunction(m))
else:
new.update(full.intersection(stepFunction(m)))
change = new.difference(flooded)
flooded.update(change)
if relevant is not None and relevant.issubset(flooded):
break
return list(flooded)
def crossproduct(ss, row=None, level=0):
if row is None:
row = []
if len(ss) > 1:
return reduce(operator.add,
[crossproduct(ss[1:], row + [i], level + 1) for i in ss[0]])
else:
return [row + [i] for i in ss[0]]
def permute(arr, permutation):
return array([arr[i] for i in permutation])
def permuteToBlocks(arr, blockshape):
if len(blockshape) < 2:
raise ValueError("Need more than one dimension.")
elif len(blockshape) == 2:
blockheight, blockwidth = blockshape
return permuteToBlocks2d(arr, blockheight, blockwidth)
elif len(blockshape) == 3:
blockdepth, blockheight, blockwidth = blockshape
return permuteToBlocks3d(arr, blockdepth, blockheight, blockwidth)
else:
raise NotImplementedError("Only for dimensions 2 and 3.")
def permuteToBlocks3d(arr, blockdepth, blockheight, blockwidth):
depth, height, width = arr.shape
arr_ = arr.reshape(height * depth, width)
arr_ = permuteToBlocks2d(arr_, blockheight, blockwidth)
arr_.shape = depth, height * width
return permuteToBlocks2d(arr_, blockdepth, blockwidth * blockheight)
def permuteToBlocks2d(arr, blockheight, blockwidth):
_height, width = arr.shape
arr = arr.flatten()
new = zeros(size(arr))
for i in range(size(arr)):
        blockx = (i % width) // blockwidth
        blocky = i // width // blockheight
        blockoffset = blocky * width // blockwidth + blockx
        blockoffset *= blockwidth * blockheight
        inblockx = i % blockwidth
        inblocky = (i // width) % blockheight
        j = blockoffset + inblocky * blockwidth + inblockx
new[j] = arr[i]
return new
def triu2flat(m):
dim = m.shape[0]
    res = zeros(dim * (dim + 1) // 2)
index = 0
for row in range(dim):
res[index:index + dim - row] = m[row, row:]
index += dim - row
return res
def flat2triu(a, dim):
res = zeros((dim, dim))
index = 0
for row in range(dim):
res[row, row:] = a[index:index + dim - row]
index += dim - row
return res
def blockList2Matrix(l):
dims = [m.shape[0] for m in l]
s = sum(dims)
res = zeros((s, s))
index = 0
for i in range(len(l)):
d = dims[i]
m = l[i]
res[index:index + d, index:index + d] = m
index += d
return res
def blockCombine(l):
l = [list(map(mat, row)) for row in l]
hdims = [m.shape[1] for m in l[0]]
hs = sum(hdims)
vdims = [row[0].shape[0] for row in l]
vs = sum(vdims)
res = zeros((hs, vs))
vindex = 0
for i, row in enumerate(l):
hindex = 0
for j, m in enumerate(row):
res[vindex:vindex + vdims[i], hindex:hindex + hdims[j]] = m
hindex += hdims[j]
vindex += vdims[i]
return res
def avgFoundAfter(decreasingTargetValues, listsOfActualValues, batchSize=1, useMedian=False):
from scipy import sum
numLists = len(listsOfActualValues)
longest = max(list(map(len, listsOfActualValues)))
res = [[0] for _ in range(numLists)]
for tval in decreasingTargetValues:
for li, l in enumerate(listsOfActualValues):
lres = res[li]
found = False
for i in range(lres[-1], len(l)):
if l[i] <= tval:
lres.append(i)
found = True
break
if not found:
lres.append(longest)
tmp = array(res)
if useMedian:
resx = median(tmp, axis=0)[1:]
else:
resx = sum(tmp, axis=0)[1:] / float(numLists)
return resx * batchSize
class DivergenceError(Exception):
    pass
def matchingDict(d, selection, require_existence=False):
for k, v in list(selection.items()):
if k in d:
if isinstance(v, list):
if d[k] not in v:
return False
else:
if d[k] != v:
return False
elif require_existence:
return False
return True
|
BSD 3-Clause New or Revised License
|
gradientinstitute/aboleth
|
aboleth/layers.py
|
EmbedVariational._build
|
python
|
def _build(self, X):
n_samples, (input_dim,) = self._get_X_dims(X)
W_shape, _ = self._weight_shapes(self.n_categories)
n_batch = tf.shape(X)[1]
self.pstd, self.qstd = initialise_stds(input_dim, self.output_dim,
self.prior_std0,
self.learn_prior, "embed")
self.pW = _make_prior(self.pstd, W_shape)
self.qW = _make_posterior(self.qstd, W_shape, self.full, "embed")
Wsamples = _sample_W(self.qW, n_samples)
features = tf.map_fn(lambda wx: tf.gather(*wx, axis=0), (Wsamples, X),
dtype=Wsamples.dtype)
f_dims = int(np.prod(features.shape[2:]))
Net = tf.reshape(features, [n_samples, n_batch, f_dims])
KL = kl_sum(self.qW, self.pW)
return Net, KL
|
Build the graph of this layer.
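A conceptual NumPy-only sketch (not aboleth's API) of the per-sample embedding lookup that the tf.map_fn(tf.gather, ...) call above performs: each sampled weight matrix gathers rows for the same batch of integer category inputs.
import numpy as np
n_samples, n_batch, n_categories, output_dim = 3, 4, 10, 2
rng = np.random.default_rng(0)
Wsamples = rng.normal(size=(n_samples, n_categories, output_dim))   # draws of the embedding weights
X = rng.integers(0, n_categories, size=(n_samples, n_batch, 1))     # tiled integer inputs
features = np.stack([W[x[:, 0]] for W, x in zip(Wsamples, X)])      # one gather per weight sample
print(features.shape)   # (3, 4, 2) == (n_samples, n_batch, output_dim)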
|
https://github.com/gradientinstitute/aboleth/blob/53a3de23dce4d607ffec92be936e83d2dd7ebb3c/aboleth/layers.py#L691-L717
|
import numpy as np
import tensorflow as tf
from aboleth.kernels import RBF, RBFVariational
from aboleth.random import seedgen
from aboleth.distributions import (norm_prior, norm_posterior, gaus_posterior,
kl_sum)
from aboleth.baselayers import Layer, MultiLayer
from aboleth.util import summary_histogram
from aboleth.initialisers import initialise_weights, initialise_stds
class InputLayer(MultiLayer):
def __init__(self, name, n_samples=1):
self.name = name
self.n_samples = n_samples
def _build(self, **kwargs):
X = kwargs[self.name]
Xs = _tile2samples(self.n_samples, X)
return Xs, 0.0
class SampleLayer(Layer):
def __call__(self, X):
rank = len(X.shape)
        assert rank > 2, ("SampleLayers require rank > 2 input Tensors, with "
                          "the first axis being the random samples of the net.")
Net, KL = self._build(X)
return Net, KL
@staticmethod
def _get_X_dims(X):
n_samples = tf.to_int32(tf.shape(X)[0])
input_shape = X.shape[2:].as_list()
return n_samples, input_shape
class SampleLayer3(SampleLayer):
def __call__(self, X):
rank = len(X.shape)
assert rank == 3
Net, KL = super(SampleLayer3, self).__call__(X)
return Net, KL
class Activation(Layer):
def __init__(self, h=lambda X: X):
self.h = h
def _build(self, X):
Net = self.h(X)
KL = 0.
return Net, KL
class DropOut(Layer):
def __init__(self, keep_prob, independent=True, observation_axis=1,
alpha=False):
self.keep_prob = keep_prob
self.obsax = observation_axis
self.independent = independent
self.dropout = tf.contrib.nn.alpha_dropout if alpha else tf.nn.dropout
def _build(self, X):
noise_shape = None
if not self.independent:
noise_shape = tf.concat([tf.shape(X)[:self.obsax], [1],
tf.shape(X)[(self.obsax + 1):]], axis=0)
Net = self.dropout(X, self.keep_prob, noise_shape, seed=next(seedgen))
KL = 0.
return Net, KL
class MaxPool2D(Layer):
def __init__(self, pool_size, strides, padding='SAME'):
self.ksize = [1] + list(pool_size) + [1]
self.strides = [1] + list(strides) + [1]
self.padding = padding
def _build(self, X):
Net = tf.map_fn(lambda inputs: tf.nn.max_pool(inputs,
ksize=self.ksize,
strides=self.strides,
padding=self.padding), X)
KL = 0.
return Net, KL
class Flatten(Layer):
def _build(self, X):
flat_dim = np.product(X.shape[2:])
new_shape = tf.concat([tf.shape(X)[0:2], [flat_dim]], 0)
Net = tf.reshape(X, new_shape)
KL = 0.
return Net, KL
class RandomFourier(SampleLayer3):
def __init__(self, n_features, kernel):
self.n_features = n_features
self.kernel = kernel
def _build(self, X):
n_samples, (input_dim,) = self._get_X_dims(X)
dtype = X.dtype.as_numpy_dtype
P, KL = self.kernel.weights(input_dim, self.n_features, dtype)
Ps = tf.tile(tf.expand_dims(P, 0), [n_samples, 1, 1])
XP = tf.matmul(X, Ps)
Net = self._transformation(XP)
return Net, KL
def _transformation(self, XP):
real = tf.cos(XP)
imag = tf.sin(XP)
Net = tf.concat([real, imag], axis=-1) / np.sqrt(self.n_features)
return Net
class RandomArcCosine(RandomFourier):
def __init__(self, n_features, lenscale=None, p=1, variational=False,
learn_lenscale=False):
if variational:
kern = RBFVariational(lenscale=lenscale,
learn_lenscale=learn_lenscale)
else:
kern = RBF(lenscale=lenscale, learn_lenscale=learn_lenscale)
super().__init__(n_features=n_features, kernel=kern)
assert isinstance(p, int) and p >= 0
if p == 0:
self.pfunc = tf.sign
elif p == 1:
self.pfunc = lambda x: x
else:
self.pfunc = lambda x: tf.pow(x, p)
def _transformation(self, XP):
Net = np.sqrt(2. / self.n_features) * tf.nn.relu(self.pfunc(XP))
return Net
class Conv2DVariational(SampleLayer):
def __init__(self, filters, kernel_size, strides=(1, 1), padding='SAME',
prior_std='glorot', learn_prior=False, use_bias=True):
self.filters = filters
self.kernel_size = kernel_size
self.strides = [1] + list(strides) + [1]
self.padding = padding
self.use_bias = use_bias
self.prior_std0 = prior_std
self.learn_prior = learn_prior
def _build(self, X):
n_samples, (height, width, channels) = self._get_X_dims(X)
W_shp, b_shp = self._weight_shapes(channels)
receptive_field = np.product(W_shp[:-2])
n_inputs = receptive_field * channels
n_outputs = receptive_field * self.filters
self.pstd, self.qstd = initialise_stds(n_inputs, n_outputs,
self.prior_std0,
self.learn_prior, "conv2d")
self.pW = _make_prior(self.pstd, W_shp)
self.qW = _make_posterior(self.qstd, W_shp, False, "conv")
KL = kl_sum(self.qW, self.pW)
Wsamples = _sample_W(self.qW, n_samples, False)
Net = tf.map_fn(
lambda args: tf.nn.conv2d(*args,
padding=self.padding,
strides=self.strides),
elems=(X, Wsamples), dtype=tf.float32)
if self.use_bias:
self.pb = _make_prior(self.pstd, b_shp)
self.qb = _make_posterior(self.qstd, b_shp, False, "conv_bias")
KL += kl_sum(self.qb, self.pb)
bsamples = tf.reshape(_sample_W(self.qb, n_samples, False),
[n_samples, 1, 1, 1, self.filters])
Net += bsamples
return Net, KL
def _weight_shapes(self, channels):
weight_shape = self.kernel_size + (channels, self.filters)
bias_shape = (self.filters,)
return weight_shape, bias_shape
class DenseVariational(SampleLayer3):
def __init__(self, output_dim, prior_std=1., learn_prior=False, full=False,
use_bias=True):
self.output_dim = output_dim
self.full = full
self.use_bias = use_bias
self.prior_std0 = prior_std
self.learn_prior = learn_prior
def _build(self, X):
n_samples, (input_dim,) = self._get_X_dims(X)
W_shp, b_shp = self._weight_shapes(input_dim)
self.pstd, self.qstd = initialise_stds(input_dim, self.output_dim,
self.prior_std0,
self.learn_prior, "dense")
self.pW = _make_prior(self.pstd, W_shp)
self.qW = _make_posterior(self.qstd, W_shp, self.full, "dense")
KL = kl_sum(self.qW, self.pW)
Wsamples = _sample_W(self.qW, n_samples)
Net = tf.matmul(X, Wsamples)
if self.use_bias:
self.pb = _make_prior(self.pstd, b_shp)
self.qb = _make_posterior(self.qstd, b_shp, False, "dense_bias")
KL += kl_sum(self.qb, self.pb)
bsamples = tf.expand_dims(_sample_W(self.qb, n_samples), 1)
Net += bsamples
return Net, KL
def _weight_shapes(self, input_dim):
weight_shape = (self.output_dim, input_dim)
bias_shape = (self.output_dim,)
return weight_shape, bias_shape
class EmbedVariational(DenseVariational):
def __init__(self, output_dim, n_categories, prior_std=1.,
learn_prior=False, full=False):
assert n_categories >= 2, "Need 2 or more categories for embedding!"
self.output_dim = output_dim
self.n_categories = n_categories
self.full = full
self.prior_std0 = prior_std
self.learn_prior = learn_prior
|
Apache License 2.0
|
geoopt/geoopt
|
geoopt/manifolds/siegel/bounded_domain.py
|
BoundedDomain.origin
|
python
|
def origin(
self,
*size: Union[int, Tuple[int]],
dtype=None,
device=None,
seed: Optional[int] = 42
) -> torch.Tensor:
if dtype and dtype not in {torch.complex32, torch.complex64, torch.complex128}:
raise ValueError(
"dtype must be one of {torch.complex32, torch.complex64, torch.complex128}"
)
if dtype is None:
dtype = torch.complex128
return torch.zeros(*size, dtype=dtype, device=device)
|
Create points at the origin of the manifold in a deterministic way.
For the Bounded domain model, the origin is the zero matrix.
That is, a matrix whose real and imaginary parts are all zeros.
Parameters
----------
size : Union[int, Tuple[int]]
the desired shape
device : torch.device
the desired device
dtype : torch.dtype
the desired dtype
seed : Optional[int]
A parameter controlling deterministic randomness for manifolds that do not provide ``.origin``,
but provide ``.random``. (default: 42)
Returns
-------
torch.Tensor
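A short usage sketch (batch size and matrix dimension are illustrative; the default constructor arguments are assumed to be usable).

import torch
from geoopt.manifolds.siegel.bounded_domain import BoundedDomain

manifold = BoundedDomain()
# A batch of 4 origin points, each a 3x3 complex zero matrix.
zero = manifold.origin(4, 3, 3, dtype=torch.complex128)
assert zero.shape == (4, 3, 3) and zero.abs().sum() == 0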
|
https://github.com/geoopt/geoopt/blob/c0163cde17aa215aa0f34e833364ac918ec5e974/geoopt/manifolds/siegel/bounded_domain.py#L183-L218
|
from typing import Optional, Tuple, Union
import torch
from geoopt import linalg as lalg
from .siegel import SiegelManifold
from .upper_half import UpperHalf
from .vvd_metrics import SiegelMetricType
from ..siegel import csym_math as sm
__all__ = ["BoundedDomain"]
class BoundedDomain(SiegelManifold):
name = "Bounded Domain"
def __init__(
self, metric: SiegelMetricType = SiegelMetricType.RIEMANNIAN, rank: int = None
):
super().__init__(metric=metric, rank=rank)
def dist(
self, z1: torch.Tensor, z2: torch.Tensor, *, keepdim=False
) -> torch.Tensor:
uhsm_z1 = sm.cayley_transform(z1)
uhsm_z2 = sm.cayley_transform(z2)
return super().dist(uhsm_z1, uhsm_z2)
def egrad2rgrad(self, z: torch.Tensor, u: torch.Tensor) -> torch.Tensor:
a = get_id_minus_conjugate_z_times_z(z)
return lalg.sym(a @ u @ a)
def projx(self, z: torch.Tensor) -> torch.Tensor:
z = super().projx(z)
evalues, s = sm.takagi_eig(z)
eps = sm.EPS[evalues.dtype]
evalues_tilde = torch.clamp(evalues, max=1 - eps)
diag_tilde = torch.diag_embed(evalues_tilde).type_as(z)
z_tilde = s.conj() @ diag_tilde @ s.conj().transpose(-1, -2)
batch_wise_mask = torch.all(evalues < 1 - eps, dim=-1, keepdim=True)
already_in_space_mask = batch_wise_mask.unsqueeze(-1).expand_as(z)
return torch.where(already_in_space_mask, z, z_tilde)
def inner(
self, z: torch.Tensor, u: torch.Tensor, v=None, *, keepdim=False
) -> torch.Tensor:
if v is None:
v = u
identity = sm.identity_like(z)
conj_z = z.conj()
inv_id_minus_conjz_z = sm.inverse(identity - (conj_z @ z))
inv_id_minus_z_conjz = sm.inverse(identity - (z @ conj_z))
res = inv_id_minus_conjz_z @ u @ inv_id_minus_z_conjz @ v.conj()
return lalg.trace(res, keepdim=keepdim)
def _check_point_on_manifold(self, x: torch.Tensor, *, atol=1e-5, rtol=1e-5):
if not self._check_matrices_are_symmetric(x, atol=atol, rtol=rtol):
return False, "Matrices are not symmetric"
id_minus_zz = get_id_minus_conjugate_z_times_z(x)
ok = torch.all(sm.eigvalsh(id_minus_zz) > 0)
reason = None if ok else "'Id - overline{Z}Z' is not definite positive"
return ok, reason
def random(self, *size, dtype=None, device=None, **kwargs) -> torch.Tensor:
points = UpperHalf().random(*size, dtype=dtype, device=device, **kwargs)
return sm.inverse_cayley_transform(points)
|
Apache License 2.0
|
olitheolix/aiokubernetes
|
aiokubernetes/models/v1beta2_scale.py
|
V1beta2Scale.status
|
python
|
def status(self, status):
self._status = status
|
Sets the status of this V1beta2Scale.
current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. # noqa: E501
:param status: The status of this V1beta2Scale. # noqa: E501
:type: V1beta2ScaleStatus
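A brief illustration of the setter; field values are made up, and V1beta2ScaleStatus is the generated status model imported at the top of this module.

from aiokubernetes.models.v1beta2_scale import V1beta2Scale
from aiokubernetes.models.v1beta2_scale_status import V1beta2ScaleStatus

scale = V1beta2Scale(api_version="apps/v1beta2", kind="Scale")
scale.status = V1beta2ScaleStatus(replicas=3)  # invokes the setter above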
|
https://github.com/olitheolix/aiokubernetes/blob/266718b210dff2a9b2212183261ea89adf89115e/aiokubernetes/models/v1beta2_scale.py#L177-L186
|
import pprint
import re
from aiokubernetes.models.v1_object_meta import V1ObjectMeta
from aiokubernetes.models.v1beta2_scale_spec import V1beta2ScaleSpec
from aiokubernetes.models.v1beta2_scale_status import V1beta2ScaleStatus
class V1beta2Scale(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1beta2ScaleSpec',
'status': 'V1beta2ScaleStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None):
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
return self._api_version
@api_version.setter
def api_version(self, api_version):
self._api_version = api_version
@property
def kind(self):
return self._kind
@kind.setter
def kind(self, kind):
self._kind = kind
@property
def metadata(self):
return self._metadata
@metadata.setter
def metadata(self, metadata):
self._metadata = metadata
@property
def spec(self):
return self._spec
@spec.setter
def spec(self, spec):
self._spec = spec
@property
def status(self):
return self._status
@status.setter
|
Apache License 2.0
|
jest-community/jest-pytest
|
src/__tests__/integration/home-assistant/homeassistant/components/binary_sensor/hikvision.py
|
HikvisionData.cam_id
|
python
|
def cam_id(self):
return self.camdata.get_id
|
Return device id.
|
https://github.com/jest-community/jest-pytest/blob/b197b0b31e3ca5c411202d97583cbd2d2b0b92e9/src/__tests__/integration/home-assistant/homeassistant/components/binary_sensor/hikvision.py#L156-L158
|
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.helpers.event import track_point_in_utc_time
from homeassistant.util.dt import utcnow
from homeassistant.components.binary_sensor import (
BinarySensorDevice, PLATFORM_SCHEMA)
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
CONF_HOST, CONF_PORT, CONF_NAME, CONF_USERNAME, CONF_PASSWORD,
CONF_SSL, EVENT_HOMEASSISTANT_STOP, EVENT_HOMEASSISTANT_START,
ATTR_LAST_TRIP_TIME, CONF_CUSTOMIZE)
REQUIREMENTS = ['pyhik==0.1.8']
_LOGGER = logging.getLogger(__name__)
CONF_IGNORED = 'ignored'
CONF_DELAY = 'delay'
DEFAULT_PORT = 80
DEFAULT_IGNORED = False
DEFAULT_DELAY = 0
ATTR_DELAY = 'delay'
DEVICE_CLASS_MAP = {
'Motion': 'motion',
'Line Crossing': 'motion',
'Field Detection': 'motion',
'Video Loss': None,
'Tamper Detection': 'motion',
'Shelter Alarm': None,
'Disk Full': None,
'Disk Error': None,
'Net Interface Broken': 'connectivity',
'IP Conflict': 'connectivity',
'Illegal Access': None,
'Video Mismatch': None,
'Bad Video': None,
'PIR Alarm': 'motion',
'Face Detection': 'motion',
'Scene Change Detection': 'motion',
'I/O': None,
'Unattended Baggage': 'motion',
'Attended Baggage': 'motion',
'Recording Failure': None,
}
CUSTOMIZE_SCHEMA = vol.Schema({
vol.Optional(CONF_IGNORED, default=DEFAULT_IGNORED): cv.boolean,
vol.Optional(CONF_DELAY, default=DEFAULT_DELAY): cv.positive_int
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_CUSTOMIZE, default={}):
vol.Schema({cv.string: CUSTOMIZE_SCHEMA}),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
customize = config.get(CONF_CUSTOMIZE)
if config.get(CONF_SSL):
protocol = 'https'
else:
protocol = 'http'
url = '{}://{}'.format(protocol, host)
data = HikvisionData(hass, url, port, name, username, password)
if data.sensors is None:
_LOGGER.error("Hikvision event stream has no data, unable to setup")
return False
entities = []
for sensor, channel_list in data.sensors.items():
for channel in channel_list:
if data.type == 'NVR':
sensor_name = '{}_{}'.format(
sensor.replace(' ', '_'), channel[1])
else:
sensor_name = sensor.replace(' ', '_')
custom = customize.get(sensor_name.lower(), {})
ignore = custom.get(CONF_IGNORED)
delay = custom.get(CONF_DELAY)
_LOGGER.debug("Entity: %s - %s, Options - Ignore: %s, Delay: %s",
data.name, sensor_name, ignore, delay)
if not ignore:
entities.append(HikvisionBinarySensor(
hass, sensor, channel[1], data, delay))
add_entities(entities)
class HikvisionData(object):
def __init__(self, hass, url, port, name, username, password):
from pyhik.hikvision import HikCamera
self._url = url
self._port = port
self._name = name
self._username = username
self._password = password
self.camdata = HikCamera(
self._url, self._port, self._username, self._password)
if self._name is None:
self._name = self.camdata.get_name
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, self.stop_hik)
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, self.start_hik)
def stop_hik(self, event):
self.camdata.disconnect()
def start_hik(self, event):
self.camdata.start_stream()
@property
def sensors(self):
return self.camdata.current_event_states
@property
|
MIT License
|
hetida/hetida-designer
|
runtime/hetdesrun/component/load.py
|
import_func_from_code
|
python
|
def import_func_from_code(
code: str, uuid: UUID, func_name: str, register_module: bool = True
) -> Union[Callable, Coroutine]:
module_path = module_path_from_uuid_and_code(uuid, code)
mod = ModuleType(module_path)
if register_module:
sys.modules[
module_path
] = mod
try:
exec(code, mod.__dict__)
except SyntaxError as e:
logger.info(
"Syntax Error during importing function %s with code module id %s",
func_name,
str(uuid),
)
raise ComponentCodeImportError(
"Could not import code due to Syntax Errors"
) from e
except Exception as e:
logger.info(
"Exception during importing function %s with code module id %s: %s",
func_name,
str(uuid),
str(e),
)
raise ComponentCodeImportError("Could not import code due to Exception") from e
func: Union[Coroutine, Callable] = getattr(mod, func_name)
return func
|
Imports a function from the provided code string.
Allows registering the code as a new module at a module path which is
generated from the uuid and a hash of the code.
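A minimal usage sketch; the code string and UUID are made up for illustration.

from uuid import uuid4
from hetdesrun.component.load import import_func_from_code

code = "def main(*, x):\n    return x + 1\n"
main = import_func_from_code(code, uuid4(), "main")
print(main(x=41))  # 42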
|
https://github.com/hetida/hetida-designer/blob/4a306ea855fb6f009f5180cf8befe09365c71fd8/runtime/hetdesrun/component/load.py#L44-L84
|
from types import ModuleType
from typing import Callable, Coroutine, Union, Tuple, Optional
from uuid import UUID
import sys
import logging
import importlib
class ComponentCodeImportError(Exception):
    pass
logger = logging.getLogger(__name__)
base_module_path = "hetdesrun_loaded_components"
sys.modules[base_module_path] = ModuleType(base_module_path, "base module")
def module_path_from_uuid_and_code(uuid: UUID, code: str) -> str:
return (
base_module_path
+ ".by_uuid_"
+ str(uuid).replace("-", "_")
+ "_hash_"
+ hash_code(code)
)
def hash_code(code: str) -> str:
return hex(hash(code)).replace("-", "_m_")
|
MIT License
|
jjongbloets/julestk
|
julesTk/utils/modals.py
|
Dialog.footer
|
python
|
def footer(self, parent):
return True
|
Build the buttons of the dialog; parent refers to the parent frame.
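A hedged sketch of a subclass that fills the footer with a single button; the widget call is an assumption based on the view.ttk usage elsewhere in this module, and self.hide is assumed to dismiss the window since it is also registered for the APP_CLOSE hook.

from julesTk import view
from julesTk.utils.modals import Dialog

class MessageDialog(Dialog):

    def footer(self, parent):
        # Hypothetical footer with one button that hides the dialog.
        btn = view.ttk.Button(parent, text="OK", command=self.hide)
        btn.pack()
        return True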
|
https://github.com/jjongbloets/julestk/blob/3b9da87633edcfc4a9dd9a91e3382ef105fbfdf4/julesTk/utils/modals.py#L97-L99
|
from julesTk import view, controller
from julesTk.view.window import Window
__author__ = "Joeri Jongbloets <joeri@jongbloets.net>"
class ModalWindow(Window):
STATE_BLOCKED = 4
def __init__(self, parent, ctrl):
super(ModalWindow, self).__init__(parent, ctrl)
self.application.register_hook("APP_CLOSE", self.hide)
def _prepare(self):
raise NotImplementedError
def _show(self):
super(ModalWindow, self)._show()
self.transient(self.parent)
self.grab_set()
self._block()
def block(self):
self._view_state = self.STATE_BLOCKED
return self._block()
def _block(self):
self.update()
self.root.wait_window(self)
def _hide(self):
return False
def _close(self):
self.application.remove_hook("APP_CLOSE", self.hide)
super(ModalWindow, self)._close()
def is_blocked(self):
return self._view_state == self.STATE_BLOCKED
class Dialog(ModalWindow):
def __init__(self, parent, ctrl):
super(Dialog, self).__init__(parent, ctrl)
self._response = None
@property
def response(self):
return self._response
def _prepare(self):
self.grid()
self.configure_column(self, 0)
self.configure_row(self, [0, 1, 2])
fmh = self.add_widget(
"header", view.ttk.Frame(self)
)
self.header(fmh)
self.configure_grid(fmh, row=0, column=0)
fmb = self.add_widget(
"body", view.ttk.Frame(self)
)
self.body(fmb)
self.configure_grid(fmb, row=1, column=0)
fmf = self.add_widget(
"footer", view.ttk.Frame(self)
)
self.footer(fmf)
self.configure_grid(fmf, row=2, column=0)
def header(self, parent):
return True
def body(self, parent):
return True
|
MIT License
|
trevor/calendarserver
|
txdav/base/datastore/file.py
|
FileMetaDataMixin.md5
|
python
|
def md5(self):
try:
return str(self.properties()[PropertyName.fromElement(TwistedGETContentMD5)])
except KeyError:
try:
data = self._path.open().read()
except IOError:
return None
md5 = hashlib.md5(data).hexdigest()
return md5
|
The MD5 hex digest of this object's content.
@rtype: C{str}
|
https://github.com/trevor/calendarserver/blob/c9970b06a70445ca75b62e3d170c26bc897a035e/txdav/base/datastore/file.py#L239-L256
|
from __future__ import print_function
from zope.interface.declarations import implements
from twisted.python import hashlib
from twext.python.log import Logger
from twext.enterprise.ienterprise import AlreadyFinishedError
from txweb2.dav.resource import TwistedGETContentMD5
from txdav.idav import IDataStoreObject
from txdav.base.propertystore.base import PropertyName
from txdav.xml.element import GETContentType
def isValidName(name):
return not name.startswith(".")
def hidden(path):
return path.sibling('.' + path.basename())
def writeOperation(thunk):
def inner(self, *a, **kw):
if self._transaction._termination is not None:
raise RuntimeError(
"%s.%s is a write operation, but transaction already %s"
% (self, thunk.__name__, self._transaction._termination))
return thunk(self, *a, **kw)
return inner
class DataStore(object):
log = Logger()
_transactionClass = None
def __init__(self, path):
self._path = path
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self._path.path)
def newTransaction(self, name='no name'):
return self._transactionClass(self)
class _CommitTracker(object):
def __init__(self, name):
self.name = name
self.done = False
self.info = []
def __del__(self):
if not self.done and self.info:
print("**** UNCOMMITTED TRANSACTION (%s) BEING GARBAGE COLLECTED ****") % (
self.name,
)
for info in self.info:
print(" "), info
print("---- END OF OPERATIONS")
class DataStoreTransaction(object):
log = Logger()
def __init__(self, dataStore, name):
self._dataStore = dataStore
self._termination = None
self._operations = []
self._postCommitOperations = []
self._postAbortOperations = []
self._tracker = _CommitTracker(name)
def store(self):
return self._dataStore
def addOperation(self, operation, name):
self._operations.append(operation)
self._tracker.info.append(name)
def _terminate(self, mode):
if self._termination is not None:
raise AlreadyFinishedError("already %s" % (self._termination,))
self._termination = mode
self._tracker.done = True
def abort(self):
self._terminate("aborted")
for operation in self._postAbortOperations:
operation()
def commit(self):
self._terminate("committed")
self.committed = True
undos = []
for operation in self._operations:
try:
undo = operation()
if undo is not None:
undos.append(undo)
except:
self.log.debug("Undoing DataStoreTransaction")
for undo in undos:
try:
undo()
except:
self.log.error("Cannot undo DataStoreTransaction")
raise
for operation in self._postCommitOperations:
operation()
def postCommit(self, operation):
self._postCommitOperations.append(operation)
def postAbort(self, operation):
self._postAbortOperations.append(operation)
class FileMetaDataMixin(object):
implements(IDataStoreObject)
def name(self):
return self._path.basename()
def contentType(self):
try:
return self.properties()[PropertyName.fromElement(GETContentType)].mimeType()
except KeyError:
return None
|
Apache License 2.0
|
numba/numba
|
numba/np/npyimpl.py
|
_Kernel.cast
|
python
|
def cast(self, val, fromty, toty):
if (isinstance(fromty, types.Complex) and
not isinstance(toty, types.Complex)):
newty = fromty.underlying_float
attr = self.context.get_getattr(fromty, 'real')
val = attr(self.context, self.builder, fromty, val, 'real')
fromty = newty
return self.context.cast(self.builder, val, fromty, toty)
|
Numpy uses cast semantics that are different from standard Python
(for example, it does allow casting from complex to float).
This method acts as a patch to context.cast so that it allows
complex to real/int casts.
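The NumPy behaviour this kernel patch emulates can be seen directly in plain NumPy, independent of Numba:

import numpy as np

z = np.array([1.0 + 2.0j])
# NumPy allows the cast and keeps only the real part (emitting a
# ComplexWarning), whereas plain Python float(1 + 2j) raises a TypeError.
print(z.astype(np.float64))  # [1.]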
|
https://github.com/numba/numba/blob/8d4559a83b7b12da9121c030b8e3780874204a34/numba/np/npyimpl.py#L407-L425
|
import math
import sys
import itertools
from collections import namedtuple
from llvmlite.llvmpy import core as lc
import numpy as np
import operator
from numba.np import arrayobj, ufunc_db, numpy_support
from numba.core.imputils import Registry, impl_ret_new_ref, force_error_model
from numba.core import typing, types, utils, cgutils, callconv
from numba.np.numpy_support import (
ufunc_find_matching_loop, select_array_wrapper, from_dtype, _ufunc_loop_sig
)
from numba.core.typing import npydecl
from numba.core.extending import overload, intrinsic
from numba.core import errors
from numba.cpython import builtins
registry = Registry('npyimpl')
lower = registry.lower
class _ScalarIndexingHelper(object):
def update_indices(self, loop_indices, name):
pass
def as_values(self):
pass
class _ScalarHelper(object):
def __init__(self, ctxt, bld, val, ty):
self.context = ctxt
self.builder = bld
self.val = val
self.base_type = ty
intpty = ctxt.get_value_type(types.intp)
self.shape = [lc.Constant.int(intpty, 1)]
lty = ctxt.get_data_type(ty) if ty != types.boolean else lc.Type.int(1)
self._ptr = cgutils.alloca_once(bld, lty)
def create_iter_indices(self):
return _ScalarIndexingHelper()
def load_data(self, indices):
return self.val
def store_data(self, indices, val):
self.builder.store(val, self._ptr)
@property
def return_val(self):
return self.builder.load(self._ptr)
class _ArrayIndexingHelper(namedtuple('_ArrayIndexingHelper',
('array', 'indices'))):
def update_indices(self, loop_indices, name):
bld = self.array.builder
intpty = self.array.context.get_value_type(types.intp)
ONE = lc.Constant.int(lc.Type.int(intpty.width), 1)
indices = loop_indices[len(loop_indices) - len(self.indices):]
for src, dst, dim in zip(indices, self.indices, self.array.shape):
cond = bld.icmp(lc.ICMP_UGT, dim, ONE)
with bld.if_then(cond):
bld.store(src, dst)
def as_values(self):
bld = self.array.builder
return [bld.load(index) for index in self.indices]
class _ArrayHelper(namedtuple('_ArrayHelper', ('context', 'builder',
'shape', 'strides', 'data',
'layout', 'base_type', 'ndim',
'return_val'))):
def create_iter_indices(self):
intpty = self.context.get_value_type(types.intp)
ZERO = lc.Constant.int(lc.Type.int(intpty.width), 0)
indices = []
for i in range(self.ndim):
x = cgutils.alloca_once(self.builder, lc.Type.int(intpty.width))
self.builder.store(ZERO, x)
indices.append(x)
return _ArrayIndexingHelper(self, indices)
def _load_effective_address(self, indices):
return cgutils.get_item_pointer2(self.context,
self.builder,
data=self.data,
shape=self.shape,
strides=self.strides,
layout=self.layout,
inds=indices)
def load_data(self, indices):
model = self.context.data_model_manager[self.base_type]
ptr = self._load_effective_address(indices)
return model.load_from_data_pointer(self.builder, ptr)
def store_data(self, indices, value):
ctx = self.context
bld = self.builder
store_value = ctx.get_value_as_data(bld, self.base_type, value)
assert ctx.get_data_type(self.base_type) == store_value.type
bld.store(store_value, self._load_effective_address(indices))
def _prepare_argument(ctxt, bld, inp, tyinp, where='input operand'):
if isinstance(tyinp, types.Optional):
oty = tyinp
tyinp = tyinp.type
inp = ctxt.cast(bld, inp, oty, tyinp)
if isinstance(tyinp, types.ArrayCompatible):
ary = ctxt.make_array(tyinp)(ctxt, bld, inp)
shape = cgutils.unpack_tuple(bld, ary.shape, tyinp.ndim)
strides = cgutils.unpack_tuple(bld, ary.strides, tyinp.ndim)
return _ArrayHelper(ctxt, bld, shape, strides, ary.data,
tyinp.layout, tyinp.dtype, tyinp.ndim, inp)
elif (types.unliteral(tyinp) in types.number_domain | {types.boolean}
or isinstance(tyinp, types.scalars._NPDatetimeBase)):
return _ScalarHelper(ctxt, bld, inp, tyinp)
else:
raise NotImplementedError('unsupported type for {0}: {1}'.format(where,
str(tyinp)))
_broadcast_onto_sig = types.intp(types.intp, types.CPointer(types.intp),
types.intp, types.CPointer(types.intp))
def _broadcast_onto(src_ndim, src_shape, dest_ndim, dest_shape):
if src_ndim > dest_ndim:
return 0
else:
src_index = 0
dest_index = dest_ndim - src_ndim
while src_index < src_ndim:
src_dim_size = src_shape[src_index]
dest_dim_size = dest_shape[dest_index]
if dest_dim_size != 1:
if src_dim_size != dest_dim_size and src_dim_size != 1:
return -(dest_index + 1)
elif src_dim_size != 1:
dest_shape[dest_index] = src_dim_size
src_index += 1
dest_index += 1
return dest_index
def _build_array(context, builder, array_ty, input_types, inputs):
input_types = [x.type if isinstance(x, types.Optional) else x
for x in input_types]
intp_ty = context.get_value_type(types.intp)
def make_intp_const(val):
return context.get_constant(types.intp, val)
ZERO = make_intp_const(0)
ONE = make_intp_const(1)
src_shape = cgutils.alloca_once(builder, intp_ty, array_ty.ndim,
"src_shape")
dest_ndim = make_intp_const(array_ty.ndim)
dest_shape = cgutils.alloca_once(builder, intp_ty, array_ty.ndim,
"dest_shape")
dest_shape_addrs = tuple(cgutils.gep_inbounds(builder, dest_shape, index)
for index in range(array_ty.ndim))
for dest_shape_addr in dest_shape_addrs:
builder.store(ONE, dest_shape_addr)
for arg_number, arg in enumerate(inputs):
if not hasattr(arg, "ndim"):
continue
arg_ndim = make_intp_const(arg.ndim)
for index in range(arg.ndim):
builder.store(arg.shape[index],
cgutils.gep_inbounds(builder, src_shape, index))
arg_result = context.compile_internal(
builder, _broadcast_onto, _broadcast_onto_sig,
[arg_ndim, src_shape, dest_ndim, dest_shape])
with cgutils.if_unlikely(builder,
builder.icmp(lc.ICMP_SLT, arg_result, ONE)):
msg = "unable to broadcast argument %d to output array" % (
arg_number,)
loc = errors.loc_info.get('loc', None)
if loc is not None:
msg += '\nFile "%s", line %d, ' % (loc.filename, loc.line)
context.call_conv.return_user_exc(builder, ValueError, (msg,))
real_array_ty = array_ty.as_array
dest_shape_tup = tuple(builder.load(dest_shape_addr)
for dest_shape_addr in dest_shape_addrs)
array_val = arrayobj._empty_nd_impl(context, builder, real_array_ty,
dest_shape_tup)
array_wrapper_index = select_array_wrapper(input_types)
array_wrapper_ty = input_types[array_wrapper_index]
try:
array_wrap = context.get_function('__array_wrap__',
array_ty(array_wrapper_ty, real_array_ty))
except NotImplementedError:
if array_wrapper_ty.array_priority != types.Array.array_priority:
raise
out_val = array_val._getvalue()
else:
wrap_args = (inputs[array_wrapper_index].return_val, array_val._getvalue())
out_val = array_wrap(builder, wrap_args)
ndim = array_ty.ndim
shape = cgutils.unpack_tuple(builder, array_val.shape, ndim)
strides = cgutils.unpack_tuple(builder, array_val.strides, ndim)
return _ArrayHelper(context, builder, shape, strides, array_val.data,
array_ty.layout, array_ty.dtype, ndim,
out_val)
def _unpack_output_types(ufunc, sig):
if ufunc.nout == 1:
return [sig.return_type]
else:
return list(sig.return_type)
def _unpack_output_values(ufunc, builder, values):
if ufunc.nout == 1:
return [values]
else:
return cgutils.unpack_tuple(builder, values)
def _pack_output_values(ufunc, context, builder, typ, values):
if ufunc.nout == 1:
return values[0]
else:
return context.make_tuple(builder, typ, values)
def numpy_ufunc_kernel(context, builder, sig, args, ufunc, kernel_class):
arguments = [_prepare_argument(context, builder, arg, tyarg)
for arg, tyarg in zip(args, sig.args)]
if len(arguments) < ufunc.nin:
raise RuntimeError(
"Not enough inputs to {}, expected {} got {}"
.format(ufunc.__name__, ufunc.nin, len(arguments)))
for out_i, ret_ty in enumerate(_unpack_output_types(ufunc, sig)):
if ufunc.nin + out_i >= len(arguments):
if isinstance(ret_ty, types.ArrayCompatible):
output = _build_array(context, builder, ret_ty, sig.args, arguments)
else:
output = _prepare_argument(
context, builder,
lc.Constant.null(context.get_value_type(ret_ty)), ret_ty)
arguments.append(output)
elif context.enable_nrt:
context.nrt.incref(builder, ret_ty, args[ufunc.nin + out_i])
inputs = arguments[:ufunc.nin]
outputs = arguments[ufunc.nin:]
assert len(outputs) == ufunc.nout
outer_sig = _ufunc_loop_sig(
[a.base_type for a in outputs],
[a.base_type for a in inputs]
)
kernel = kernel_class(context, builder, outer_sig)
intpty = context.get_value_type(types.intp)
indices = [inp.create_iter_indices() for inp in inputs]
loopshape = outputs[0].shape
with cgutils.loop_nest(builder, loopshape, intp=intpty) as loop_indices:
vals_in = []
for i, (index, arg) in enumerate(zip(indices, inputs)):
index.update_indices(loop_indices, i)
vals_in.append(arg.load_data(index.as_values()))
vals_out = _unpack_output_values(ufunc, builder, kernel.generate(*vals_in))
for val_out, output in zip(vals_out, outputs):
output.store_data(loop_indices, val_out)
out = _pack_output_values(ufunc, context, builder, sig.return_type, [o.return_val for o in outputs])
return impl_ret_new_ref(context, builder, sig.return_type, out)
class _Kernel(object):
def __init__(self, context, builder, outer_sig):
self.context = context
self.builder = builder
self.outer_sig = outer_sig
|
BSD 2-Clause Simplified License
|
fos/fos
|
applications/bundle_picker.py
|
TrackLabeler.unset_state
|
python
|
def unset_state(self):
glDisable(GL_DEPTH_TEST)
glDisable(GL_BLEND)
glDisable(GL_LINE_SMOOTH)
|
Close communication with hardware.
Disable what was enabled during set_state().
|
https://github.com/fos/fos/blob/8d33bf0cd60292ad5164973b5285122acbc03b86/applications/bundle_picker.py#L447-L455
|
import numpy as np
import nibabel as nib
from fos import Actor
from fos import Window, Scene
from fos.actor import Axes, Text3D
from fos.modelmat import screen_to_model
import fos.interact.collision as cll
from pyglet.gl import *
from pyglet.lib import load_library
from dipy.segment.quickbundles import QuickBundles
from dipy.io.dpy import Dpy
from dipy.io.pickles import load_pickle
from dipy.viz.colormap import orient2rgb
from dipy.tracking.metrics import downsample
import copy
import cPickle as pickle
glib=load_library('GL')
from dipy.tracking.vox2track import track_counts
import Tkinter, tkFileDialog
from PySide.QtCore import Qt
question_message="""
>>>>Track Labeler
P : select/unselect the representative track.
E : expand/collapse the selected tracks
F : keep selected tracks rerun QuickBundles and hide everything else.
A : select all representative tracks which are currently visible.
I : invert selected tracks to unselected
H : hide/show all representative tracks.
>>>Mouse
Left Button: keep pressed with dragging - rotation
Scrolling : zoom
Shift + Scrolling : fast zoom
Right Button : panning - translation
Shift + Right Button : fast panning - translation
>>>General
F1 : Fullscreen.
F2 : Next time frame.
F3 : Previous time frame.
F4 : Automatic rotation.
F12 : Reset camera.
ESC: Exit.
? : Print this help information.
"""
def track2rgb(track):
return orient2rgb(track[0] - track[-1])
class TrackLabeler(Actor):
def __init__(self, name,qb, tracks, reps='exemplars',colors=None, vol_shape=None, virtuals_line_width=5.0, tracks_line_width=2.0, virtuals_alpha=1.0, tracks_alpha=0.6, affine=None, verbose=False):
super(TrackLabeler, self).__init__(name)
if affine is None: self.affine = np.eye(4, dtype = np.float32)
else: self.affine = affine
self.mouse_x=None
self.mouse_y=None
self.cache = {}
self.qb = qb
self.reps = reps
if self.reps=='virtuals':
self.virtuals=qb.virtuals()
if self.reps=='exemplars':
self.virtuals,self.ex_ids = qb.exemplars()
self.virtuals_alpha = virtuals_alpha
self.virtuals_buffer, self.virtuals_colors, self.virtuals_first, self.virtuals_count = self.compute_buffers(self.virtuals, self.virtuals_alpha)
self.tracks = tracks
self.tracks_alpha = tracks_alpha
self.tracks_ids = np.arange(len(self.tracks), dtype=np.int)
self.tracks_buffer, self.tracks_colors, self.tracks_first, self.tracks_count = self.compute_buffers(self.tracks, self.tracks_alpha)
self.min = np.min(self.tracks_buffer,axis=0)
self.max = np.max(self.tracks_buffer,axis=0)
self.vertices=self.tracks_buffer
print('MBytes %f' % (self.tracks_buffer.nbytes/2.**20,))
self.position = (0,0,0)
self.selected = []
self.virtuals_line_width = virtuals_line_width
self.tracks_line_width = tracks_line_width
self.old_color = {}
self.hide_virtuals = False
self.expand = False
self.verbose = verbose
self.tracks_visualized_first = np.array([], dtype='i4')
self.tracks_visualized_count = np.array([], dtype='i4')
self.history = [[self.qb, self.tracks, self.tracks_ids, self.virtuals_buffer, self.virtuals_colors, self.virtuals_first, self.virtuals_count, self.tracks_buffer, self.tracks_colors, self.tracks_first, self.tracks_count]]
self.vol_shape = vol_shape
if self.vol_shape !=None:
self.virtuals_shifted =[downsample(t+np.array(self.vol_shape)/2.,30) for t in self.virtuals]
else:
self.virtuals_shifted=None
def compute_buffers(self, tracks, alpha):
tracks_buffer = np.ascontiguousarray(np.concatenate(tracks).astype('f4'))
tracks_colors = np.ascontiguousarray(self.compute_colors(tracks, alpha))
tracks_count = np.ascontiguousarray(np.array([len(v) for v in tracks],dtype='i4'))
tracks_first = np.ascontiguousarray(np.r_[0,np.cumsum(tracks_count)[:-1]].astype('i4'))
if isinstance(tracks_count,tuple): print '== count'
if isinstance(tracks_first,tuple): print '== first'
return tracks_buffer, tracks_colors, tracks_first, tracks_count
def compute_colors(self, tracks, alpha):
assert(type(tracks)==type([]))
tot_vertices = np.sum([len(curve) for curve in tracks])
color = np.empty((tot_vertices,4), dtype='f4')
counter = 0
for curve in tracks:
color[counter:counter+len(curve),:3] = track2rgb(curve).astype('f4')
counter += len(curve)
color[:,3] = alpha
return color
def draw(self):
glEnable(GL_DEPTH_TEST)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glEnable(GL_LINE_SMOOTH)
glHint(GL_LINE_SMOOTH_HINT, GL_NICEST)
glEnableClientState(GL_VERTEX_ARRAY)
glEnableClientState(GL_COLOR_ARRAY)
if not self.hide_virtuals:
glVertexPointer(3,GL_FLOAT,0,self.virtuals_buffer.ctypes.data)
glColorPointer(4,GL_FLOAT,0,self.virtuals_colors.ctypes.data)
glLineWidth(self.virtuals_line_width)
glPushMatrix()
if isinstance(self.virtuals_first, tuple): print '>> first Tuple'
if isinstance(self.virtuals_count, tuple): print '>> count Tuple'
glib.glMultiDrawArrays(GL_LINE_STRIP,
self.virtuals_first.ctypes.data,
self.virtuals_count.ctypes.data,
len(self.virtuals))
glPopMatrix()
if self.expand and self.tracks_visualized_first.size > 0:
glVertexPointer(3,GL_FLOAT,0,self.tracks_buffer.ctypes.data)
glColorPointer(4,GL_FLOAT,0,self.tracks_colors.ctypes.data)
glLineWidth(self.tracks_line_width)
glPushMatrix()
glib.glMultiDrawArrays(GL_LINE_STRIP,
self.tracks_visualized_first.ctypes.data,
self.tracks_visualized_count.ctypes.data,
len(self.tracks_visualized_count))
glPopMatrix()
glDisableClientState(GL_COLOR_ARRAY)
glDisableClientState(GL_VERTEX_ARRAY)
glLineWidth(1.)
glDisable(GL_DEPTH_TEST)
glDisable(GL_BLEND)
glDisable(GL_LINE_SMOOTH)
def process_mouse_position(self,x,y):
self.mouse_x=x
self.mouse_y=y
def process_pickray(self,near,far):
pass
def update(self,dt):
pass
def select_track(self, ids):
color_selected = np.array([1.0, 1.0, 1.0, 1.0], dtype='f4')
if ids == 'all':
ids = range(len(self.virtuals))
elif np.isscalar(ids):
ids = [ids]
for id in ids:
if not id in self.old_color:
self.old_color[id] = self.virtuals_colors[self.virtuals_first[id]:self.virtuals_first[id]+self.virtuals_count[id],:].copy()
new_color = np.ones(self.old_color[id].shape, dtype='f4') * color_selected
if self.verbose: print("Storing old color: %s" % self.old_color[id][0])
self.virtuals_colors[self.virtuals_first[id]:self.virtuals_first[id]+self.virtuals_count[id],:] = new_color
self.selected.append(id)
def unselect_track(self, ids):
if ids == 'all':
ids = range(len(self.virtuals))
elif np.isscalar(ids):
ids = [ids]
for id in ids:
if id in self.old_color:
self.virtuals_colors[self.virtuals_first[id]:self.virtuals_first[id]+self.virtuals_count[id],:] = self.old_color[id]
if self.verbose: print("Setting old color: %s" % self.old_color[id][0])
self.old_color.pop(id)
if id in self.selected:
self.selected.remove(id)
else:
print('WARNING: unselecting id %s but not in %s' % (id, self.selected))
def invert_tracks(self):
tmp_selected=list(set(range(len(self.virtuals))).difference(set(self.selected)))
self.unselect_track('all')
self.selected=[]
self.select_track(tmp_selected)
def process_messages(self,messages):
msg=messages['key_pressed']
if msg!=None:
self.process_keys(msg,None)
msg=messages['mouse_position']
if msg!=None:
self.process_mouse_position(*msg)
def process_keys(self,symbol,modifiers):
prev_selected = copy.copy(self.selected)
if symbol == Qt.Key_P:
print 'P'
id = self.picking_virtuals(symbol, modifiers)
print('Track id %d' % id)
if prev_selected.count(id) == 0:
self.select_track(id)
else:
self.unselect_track(id)
if self.verbose:
print 'Selected:'
print self.selected
if symbol==Qt.Key_E:
print 'E'
if self.verbose: print("Expand/collapse selected clusters.")
if not self.expand and len(self.selected)>0:
tracks_selected = []
for tid in self.selected: tracks_selected += self.qb.label2tracksids(tid)
self.tracks_visualized_first = np.ascontiguousarray(self.tracks_first[tracks_selected, :])
self.tracks_visualized_count = np.ascontiguousarray(self.tracks_count[tracks_selected, :])
self.expand = True
else:
self.expand = False
elif symbol == Qt.Key_F and len(self.selected) > 0:
print 'F'
self.freeze()
elif symbol == Qt.Key_A:
print 'A'
print('Select/unselect all virtuals')
if len(self.selected) < len(self.virtuals):
self.select_track('all')
else:
self.unselect_track('all')
elif symbol == Qt.Key_I:
print 'I'
print('Invert selection')
print self.selected
self.invert_tracks()
elif symbol == Qt.Key_H:
print 'H'
print('Hide/show virtuals.')
self.hide_virtuals = not self.hide_virtuals
elif symbol == Qt.Key_S:
print 'S'
print('Save selected tracks_ids as pickle file.')
self.tracks_ids_to_be_saved = self.tracks_ids
if len(self.selected)>0:
self.tracks_ids_to_be_saved = self.tracks_ids[np.concatenate([self.qb.label2tracksids(tid) for tid in self.selected])]
print("Saving %s tracks." % len(self.tracks_ids_to_be_saved))
root = Tkinter.Tk()
root.withdraw()
pickle.dump(self.tracks_ids_to_be_saved,
tkFileDialog.asksaveasfile(),
protocol=pickle.HIGHEST_PROTOCOL)
elif symbol == Qt.Key_Question:
print question_message
elif symbol == Qt.Key_B:
print 'B'
print('Go back in the freezing history.')
if len(self.history) > 1:
self.history.pop()
self.qb, self.tracks, self.tracks_ids, self.virtuals_buffer, self.virtuals_colors, self.virtuals_first, self.virtuals_count, self.tracks_buffer, self.tracks_colors, self.tracks_first, self.tracks_count = self.history[-1]
if self.reps=='virtuals':
self.virtuals=qb.virtuals()
if self.reps=='exemplars':
self.virtuals, self.ex_ids = self.qb.exemplars()
print len(self.virtuals), 'virtuals'
self.selected = []
self.old_color = {}
self.expand = False
self.hide_virtuals = False
elif symbol == Qt.Key_G:
print 'G'
print('Get tracks from mask.')
ids = self.maskout_tracks()
self.select_track(ids)
def freeze(self):
print("Freezing current expanded real tracks, then doing QB on them, then restarting.")
print("Selected virtuals: %s" % self.selected)
tracks_frozen = []
tracks_frozen_ids = []
for tid in self.selected:
print tid
part_tracks = self.qb.label2tracks(self.tracks, tid)
part_tracks_ids = self.qb.label2tracksids(tid)
print("virtual %s represents %s tracks." % (tid, len(part_tracks)))
tracks_frozen += part_tracks
tracks_frozen_ids += part_tracks_ids
print "frozen tracks size:", len(tracks_frozen)
print "Computing quick bundles...",
self.unselect_track('all')
self.tracks = tracks_frozen
self.tracks_ids = self.tracks_ids[tracks_frozen_ids]
root = Tkinter.Tk()
root.wm_title('QuickBundles threshold')
ts = ThresholdSelector(root, default_value=self.qb.dist_thr/2.0)
root.wait_window()
self.qb = QuickBundles(self.tracks, dist_thr=ts.value, pts=self.qb.pts)
self.qb.dist_thr = ts.value
if self.reps=='virtuals':
self.virtuals=qb.virtuals()
if self.reps=='exemplars':
self.virtuals,self.ex_ids = self.qb.exemplars()
print len(self.virtuals), 'virtuals'
self.virtuals_buffer, self.virtuals_colors, self.virtuals_first, self.virtuals_count = self.compute_buffers(self.virtuals, self.virtuals_alpha)
self.tracks_buffer, self.tracks_colors, self.tracks_first, self.tracks_count = self.compute_buffers(self.tracks, self.tracks_alpha)
self.selected = []
self.old_color = {}
self.expand = False
self.history.append([self.qb,
self.tracks,
self.tracks_ids,
self.virtuals_buffer,
self.virtuals_colors,
self.virtuals_first,
self.virtuals_count,
self.tracks_buffer,
self.tracks_colors,
self.tracks_first,
self.tracks_count])
if self.vol_shape is not None:
print("Shifting!")
self.virtuals_shifted = [downsample(t + np.array(self.vol_shape) / 2., 30) for t in self.virtuals]
else:
self.virtuals_shifted = None
def picking_virtuals(self, symbol,modifiers, min_dist=1e-3):
x, y = self.mouse_x, self.mouse_y
near = screen_to_model(x, y, 0)
far = screen_to_model(x, y, 1)
tmp = np.array([cll.mindistance_segment2track_info(near, far, xyz) for xyz in self.virtuals])
line_distance, screen_distance = tmp[:,0], tmp[:,1]
if False:
closest_to_line_idx = np.argsort(line_distance)
closest_to_line_thresholded_bool = line_distance[closest_to_line_idx] < min_dist
if (closest_to_line_thresholded_bool).any():
return closest_to_line_idx[np.argmin(screen_distance[closest_to_line_thresholded_bool])]
else:
return closest_to_line_idx[0]
else:
return np.argmin(line_distance + screen_distance)
def set_state(self):
glEnable(GL_DEPTH_TEST)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glEnable(GL_LINE_SMOOTH)
glHint(GL_LINE_SMOOTH_HINT, GL_NICEST)
|
BSD 3-Clause New or Revised License
|
udacity/carnd-semantic-segmentation
|
project_tests.py
|
_assert_tensor_shape
|
python
|
def _assert_tensor_shape(tensor, shape, display_name):
assert tf.assert_rank(tensor, len(shape), message='{} has wrong rank'.format(display_name))
tensor_shape = tensor.get_shape().as_list() if len(shape) else []
wrong_dimension = [ten_dim for ten_dim, cor_dim in zip(tensor_shape, shape)
if cor_dim is not None and ten_dim != cor_dim]
assert not wrong_dimension, '{} has wrong shape. Found {}'.format(display_name, tensor_shape)
|
Check whether the tensor's shape matches the given shape
:param tensor: TF Tensor
:param shape: Some array
:param display_name: Name of tensor to print if assertions fail
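A small usage sketch, assuming TF1 graph mode as used elsewhere in these tests and calling the helper from within the same module; the placeholder and expected shape are illustrative.

import tensorflow as tf

logits = tf.placeholder(tf.float32, shape=(None, 10))
# Passes: the rank matches and every non-None dimension agrees with the tensor.
_assert_tensor_shape(logits, [None, 10], 'logits')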
|
https://github.com/udacity/carnd-semantic-segmentation/blob/78240ad8cfe45b9e4a4336ee1ad923614ba1f17f/project_tests.py#L41-L55
|
import sys
import os
from copy import deepcopy
from glob import glob
from unittest import mock
import numpy as np
import tensorflow as tf
def test_safe(func):
def func_wrapper(*args):
with tf.Graph().as_default():
result = func(*args)
print('Tests Passed')
return result
return func_wrapper
def _prevent_print(function, params):
sys.stdout = open(os.devnull, "w")
function(**params)
sys.stdout = sys.__stdout__
|
MIT License
|
swissdatasciencecenter/renku-python
|
renku/core/management/config.py
|
_get_global_config_dir
|
python
|
def _get_global_config_dir():
return click.get_app_dir(APP_NAME, force_posix=True)
|
Return user's config directory.
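With force_posix=True this typically resolves to a dot-directory under the user's home; the exact value depends on click and the platform.

from renku.core.management.config import _get_global_config_dir

print(_get_global_config_dir())  # e.g. '/home/alice/.renku' (path varies by user and platform)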
|
https://github.com/swissdatasciencecenter/renku-python/blob/5e43e2eff67cdf20fc2805799fe2822e23bc503d/renku/core/management/config.py#L37-L39
|
import configparser
import os
from io import StringIO
from pathlib import Path
import attr
import click
import portalocker
from renku.core.models.enums import ConfigFilter
APP_NAME = "Renku"
RENKU_HOME = ".renku"
|
Apache License 2.0
|
stephenmcd/mezzanine
|
mezzanine/utils/admin.py
|
SingletonAdmin.changelist_view
|
python
|
def changelist_view(self, *args, **kwargs):
try:
singleton = self.model.objects.get()
except self.model.MultipleObjectsReturned:
return super().changelist_view(*args, **kwargs)
except self.model.DoesNotExist:
return redirect(admin_url(self.model, "add"))
return redirect(admin_url(self.model, "change", singleton.id))
|
Redirect to the add view if no records exist or the change
view if the singleton instance exists.
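A hedged registration sketch; the SiteSettings model and its app are hypothetical.

from django.contrib import admin
from mezzanine.utils.admin import SingletonAdmin

from myapp.models import SiteSettings  # hypothetical one-row settings model

admin.site.register(SiteSettings, SingletonAdmin)
# The changelist URL now redirects to the add view while no instance exists,
# and to the change view of the single instance once it has been created.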
|
https://github.com/stephenmcd/mezzanine/blob/c0c3d7694bf261a394c0ec7cf69e3d16de097690/mezzanine/utils/admin.py#L39-L50
|
from django.contrib import admin
from django.http import HttpResponseRedirect
from django.shortcuts import redirect
from mezzanine.utils.urls import admin_url
class SingletonAdmin(admin.ModelAdmin):
def handle_save(self, request, response):
form_valid = isinstance(response, HttpResponseRedirect)
if request.POST.get("_save") and form_valid:
return redirect("admin:index")
return response
def add_view(self, *args, **kwargs):
try:
singleton = self.model.objects.get()
except (self.model.DoesNotExist, self.model.MultipleObjectsReturned):
kwargs.setdefault("extra_context", {})
kwargs["extra_context"]["singleton"] = True
response = super().add_view(*args, **kwargs)
return self.handle_save(args[0], response)
return redirect(admin_url(self.model, "change", singleton.id))
|
BSD 2-Clause Simplified License
|
microsoft/qlib
|
qlib/utils/__init__.py
|
get_pre_trading_date
|
python
|
def get_pre_trading_date(trading_date, future=False):
return get_date_by_shift(trading_date, -1, future=future)
|
Get the previous trading date.
Parameters
----------
trading_date : pandas.Timestamp
    current trading date
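A usage sketch; it requires an initialised qlib trading calendar, and the date is illustrative.

import pandas as pd
from qlib.utils import get_pre_trading_date

# Assumes qlib.init(...) has been called so the calendar is available and that
# 2020-01-06 is a trading day; the result is the preceding trading day,
# e.g. Timestamp('2020-01-03').
prev_day = get_pre_trading_date(pd.Timestamp("2020-01-06"))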
|
https://github.com/microsoft/qlib/blob/7c31012b507a3823117bddcc693fc64899460b2a/qlib/utils/__init__.py#L610-L616
|
from __future__ import division
from __future__ import print_function
import os
import pickle
import re
import sys
import copy
import json
import yaml
import redis
import bisect
import shutil
import difflib
import hashlib
import warnings
import datetime
import requests
import tempfile
import importlib
import contextlib
import collections
import numpy as np
import pandas as pd
from pathlib import Path
from typing import Dict, Union, Tuple, Any, Text, Optional
from types import ModuleType
from urllib.parse import urlparse
from ..config import C
from ..log import get_module_logger, set_log_with_config
log = get_module_logger("utils")
def get_redis_connection():
return redis.StrictRedis(host=C.redis_host, port=C.redis_port, db=C.redis_task_db)
def read_bin(file_path: Union[str, Path], start_index, end_index):
file_path = Path(file_path.expanduser().resolve())
with file_path.open("rb") as f:
ref_start_index = int(np.frombuffer(f.read(4), dtype="<f")[0])
si = max(ref_start_index, start_index)
if si > end_index:
return pd.Series(dtype=np.float32)
f.seek(4 * (si - ref_start_index) + 4)
count = end_index - si + 1
data = np.frombuffer(f.read(4 * count), dtype="<f")
series = pd.Series(data, index=pd.RangeIndex(si, si + len(data)))
return series
def np_ffill(arr: np.array):
mask = np.isnan(arr.astype(float))
idx = np.where(~mask, np.arange(mask.shape[0]), 0)
np.maximum.accumulate(idx, out=idx)
return arr[idx]
def lower_bound(data, val, level=0):
left = 0
right = len(data)
while left < right:
mid = (left + right) // 2
if val <= data[mid][level]:
right = mid
else:
left = mid + 1
return left
def upper_bound(data, val, level=0):
left = 0
right = len(data)
while left < right:
mid = (left + right) // 2
if val >= data[mid][level]:
left = mid + 1
else:
right = mid
return left
def requests_with_retry(url, retry=5, **kwargs):
while retry > 0:
retry -= 1
try:
res = requests.get(url, timeout=1, **kwargs)
assert res.status_code in {200, 206}
return res
except AssertionError:
continue
except Exception as e:
log.warning("exception encountered {}".format(e))
continue
raise Exception("ERROR: requests failed!")
def parse_config(config):
if not isinstance(config, str):
return config
if os.path.exists(config):
with open(config, "r") as f:
return yaml.safe_load(f)
try:
return yaml.safe_load(config)
except BaseException:
raise ValueError("cannot parse config!")
def drop_nan_by_y_index(x, y, weight=None):
mask = ~y.isna().any(axis=1)
x = x[mask]
y = y[mask]
if weight is not None:
weight = weight[mask]
return x, y, weight
def hash_args(*args):
string = json.dumps(args, sort_keys=True, default=str)
return hashlib.md5(string.encode()).hexdigest()
def parse_field(field):
if not isinstance(field, str):
field = str(field)
return re.sub(r"\$(\w+)", r'Feature("\1")', re.sub(r"(\w+\s*)\(", r"Operators.\1(", field))
def get_module_by_module_path(module_path: Union[str, ModuleType]):
if isinstance(module_path, ModuleType):
module = module_path
else:
if module_path.endswith(".py"):
module_name = re.sub("^[^a-zA-Z_]+", "", re.sub("[^0-9a-zA-Z_]", "", module_path[:-3].replace("/", "_")))
module_spec = importlib.util.spec_from_file_location(module_name, module_path)
module = importlib.util.module_from_spec(module_spec)
sys.modules[module_name] = module
module_spec.loader.exec_module(module)
else:
module = importlib.import_module(module_path)
return module
def get_callable_kwargs(config: Union[dict, str], default_module: Union[str, ModuleType] = None) -> (type, dict):
if isinstance(config, dict):
if isinstance(config["class"], str):
module = get_module_by_module_path(config.get("module_path", default_module))
_callable = getattr(module, config["class" if "class" in config else "func"])
else:
_callable = config["class"]
kwargs = config.get("kwargs", {})
elif isinstance(config, str):
module = get_module_by_module_path(default_module)
_callable = getattr(module, config)
kwargs = {}
else:
raise NotImplementedError(f"This type of input is not supported")
return _callable, kwargs
get_cls_kwargs = get_callable_kwargs
def init_instance_by_config(
config: Union[str, dict, object],
default_module=None,
accept_types: Union[type, Tuple[type]] = (),
try_kwargs: Dict = {},
**kwargs,
) -> Any:
if isinstance(config, accept_types):
return config
if isinstance(config, str):
pr = urlparse(config)
if pr.scheme == "file":
with open(os.path.join(pr.netloc, pr.path), "rb") as f:
return pickle.load(f)
klass, cls_kwargs = get_callable_kwargs(config, default_module=default_module)
try:
return klass(**cls_kwargs, **try_kwargs, **kwargs)
except (TypeError,):
return klass(**cls_kwargs, **kwargs)
@contextlib.contextmanager
def class_casting(obj: object, cls: type):
orig_cls = obj.__class__
obj.__class__ = cls
yield
obj.__class__ = orig_cls
def compare_dict_value(src_data: dict, dst_data: dict):
class DateEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, (datetime.datetime, datetime.date)):
return o.strftime("%Y-%m-%d %H:%M:%S")
return json.JSONEncoder.default(self, o)
src_data = json.dumps(src_data, indent=4, sort_keys=True, cls=DateEncoder)
dst_data = json.dumps(dst_data, indent=4, sort_keys=True, cls=DateEncoder)
diff = difflib.ndiff(src_data, dst_data)
changes = [line for line in diff if line.startswith("+ ") or line.startswith("- ")]
return changes
def get_or_create_path(path: Optional[Text] = None, return_dir: bool = False):
if path:
if return_dir and not os.path.exists(path):
os.makedirs(path)
elif not return_dir:
xpath = os.path.abspath(os.path.join(path, ".."))
if not os.path.exists(xpath):
os.makedirs(xpath)
else:
temp_dir = os.path.expanduser("~/tmp")
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
if return_dir:
path = tempfile.mkdtemp(dir=temp_dir)
else:
_, path = tempfile.mkstemp(dir=temp_dir)
return path
@contextlib.contextmanager
def save_multiple_parts_file(filename, format="gztar"):
if filename.startswith("~"):
filename = os.path.expanduser(filename)
file_path = os.path.abspath(filename)
if os.path.exists(file_path):
raise FileExistsError("ERROR: file exists: {}, cannot be create the directory.".format(file_path))
os.makedirs(file_path)
yield file_path
tar_file = shutil.make_archive(file_path, format=format, root_dir=file_path)
if os.path.exists(file_path):
shutil.rmtree(file_path)
os.rename(tar_file, file_path)
@contextlib.contextmanager
def unpack_archive_with_buffer(buffer, format="gztar"):
temp_dir = os.path.expanduser("~/tmp")
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
with tempfile.NamedTemporaryFile("wb", delete=False, dir=temp_dir) as fp:
fp.write(buffer)
file_path = fp.name
try:
tar_file = file_path + ".tar.gz"
os.rename(file_path, tar_file)
os.makedirs(file_path)
shutil.unpack_archive(tar_file, format=format, extract_dir=file_path)
yield file_path
except Exception as e:
log.error(str(e))
finally:
if os.path.exists(tar_file):
os.unlink(tar_file)
if os.path.exists(file_path):
shutil.rmtree(file_path)
@contextlib.contextmanager
def get_tmp_file_with_buffer(buffer):
temp_dir = os.path.expanduser("~/tmp")
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
with tempfile.NamedTemporaryFile("wb", delete=True, dir=temp_dir) as fp:
fp.write(buffer)
file_path = fp.name
yield file_path
def remove_repeat_field(fields):
fields = copy.deepcopy(fields)
_fields = set(fields)
return sorted(_fields, key=fields.index)
def remove_fields_space(fields: [list, str, tuple]):
if isinstance(fields, str):
return fields.replace(" ", "")
return [i.replace(" ", "") for i in fields if isinstance(i, str)]
def normalize_cache_fields(fields: [list, tuple]):
return sorted(remove_repeat_field(remove_fields_space(fields)))
def normalize_cache_instruments(instruments):
if isinstance(instruments, (list, tuple, pd.Index, np.ndarray)):
instruments = sorted(list(instruments))
else:
if "market" in instruments:
pass
else:
instruments = {k: sorted(v) for k, v in instruments.items()}
return instruments
def is_tradable_date(cur_date):
from ..data import D
return str(cur_date.date()) == str(D.calendar(start_time=cur_date, future=True)[0].date())
def get_date_range(trading_date, left_shift=0, right_shift=0, future=False):
from ..data import D
start = get_date_by_shift(trading_date, left_shift, future=future)
end = get_date_by_shift(trading_date, right_shift, future=future)
calendar = D.calendar(start, end, future=future)
return calendar
def get_date_by_shift(trading_date, shift, future=False, clip_shift=True, freq="day"):
from qlib.data import D
cal = D.calendar(future=future, freq=freq)
if pd.to_datetime(trading_date) not in list(cal):
raise ValueError("{} is not trading day!".format(str(trading_date)))
_index = bisect.bisect_left(cal, trading_date)
shift_index = _index + shift
if shift_index < 0 or shift_index >= len(cal):
if clip_shift:
shift_index = np.clip(shift_index, 0, len(cal) - 1)
else:
raise IndexError(f"The shift_index({shift_index}) of the trading day ({trading_date}) is out of range")
return cal[shift_index]
def get_next_trading_date(trading_date, future=False):
return get_date_by_shift(trading_date, 1, future=future)
|
MIT License
|
llnl/maestrowf
|
maestrowf/interfaces/script/lsfscriptadapter.py
|
LSFScriptAdapter.submit
|
python
|
def submit(self, step, path, cwd, job_map=None, env=None):
args = ["bsub"]
args += ["-cwd", cwd, "<", path]
cmd = " ".join(args)
LOGGER.debug("cwd = %s", cwd)
LOGGER.debug("Command to execute: %s", cmd)
p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, cwd=cwd, env=env)
output, err = p.communicate()
retcode = p.wait()
output = output.decode("utf-8")
if retcode == 0:
LOGGER.info("Submission returned status OK.")
return SubmissionRecord(
SubmissionCode.OK, retcode,
re.search('[0-9]+', output).group(0))
else:
LOGGER.warning("Submission returned an error.")
return SubmissionRecord(SubmissionCode.ERROR, retcode, -1)
|
Submit a script to the LSF scheduler.
:param step: The StudyStep instance this submission is based on.
:param path: Local path to the script to be executed.
:param cwd: Path to the current working directory.
:param job_map: A dictionary mapping step names to their job
identifiers.
:param env: A dict containing a modified environment for execution.
:returns: The return status of the submission command and job
identifier.
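A hedged usage sketch (the batch keys follow the __init__ shown further down; `step` stands in for a StudyStep instance and the paths are hypothetical):
adapter = LSFScriptAdapter(host='cluster', bank='project', queue='pbatch')
record = adapter.submit(step, '/scratch/study/step/run.sh', '/scratch/study/step')  # returns a SubmissionRecord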
|
https://github.com/llnl/maestrowf/blob/b172457b6d20c01cbfa29dfc691ff20ddaadfe09/maestrowf/interfaces/script/lsfscriptadapter.py#L187-L221
|
import getpass
import logging
from math import ceil
import os
import re
from subprocess import PIPE, Popen
from maestrowf.abstracts.interfaces import SchedulerScriptAdapter
from maestrowf.abstracts.enums import CancelCode, JobStatusCode, State, SubmissionCode
from maestrowf.interfaces.script import CancellationRecord, SubmissionRecord
LOGGER = logging.getLogger(__name__)
class LSFScriptAdapter(SchedulerScriptAdapter):
NOJOB_REGEX = re.compile(r"^No\s")
key = "lsf"
def __init__(self, **kwargs):
super(LSFScriptAdapter, self).__init__()
self.add_batch_parameter("host", kwargs.pop("host"))
self.add_batch_parameter("bank", kwargs.pop("bank"))
self.add_batch_parameter("queue", kwargs.pop("queue"))
self.add_batch_parameter("nodes", kwargs.pop("nodes", "1"))
reservation = kwargs.get("reservation", None)
if reservation:
self.add_batch_parameter("reservation", reservation)
self._header = {
"nodes": "#BSUB -nnodes {nodes}",
"queue": "#BSUB -q {queue}",
"bank": "#BSUB -G {bank}",
"walltime": "#BSUB -W {walltime}",
"job-name": "#BSUB -J {job-name}",
"output": "#BSUB -o {output}",
"reservation": "#BSUB -U {reservation}",
"error": "#BSUB -e {error}",
}
self._cmd_flags = {
"cmd": "jsrun --bind rs",
"ntasks": "--tasks_per_rs {procs} --cpu_per_rs {procs}",
"nodes": "--nrs",
"gpus": "-g",
"reservation": "-J",
}
self._extension = "lsf.sh"
def get_header(self, step):
batch_header = dict(self._batch)
batch_header["nodes"] = step.run.get("nodes", self._batch["nodes"])
batch_header["job-name"] = step.name.replace(" ", "_")
batch_header["output"] = "{}.%J.out".format(batch_header["job-name"])
batch_header["error"] = "{}.%J.err".format(batch_header["job-name"])
batch_header.update(
{
resource: value for (resource, value) in step.run.items()
if value
}
)
walltime = step.run.get("walltime")
wt_split = walltime.split(":")
if len(wt_split) == 3:
seconds_minutes = ceil(float(wt_split[2])/60)
total_minutes = int(wt_split[1]) + seconds_minutes
hours = int(wt_split[0]) + int(total_minutes/60)
total_minutes %= 60
walltime = "{:02d}:{:02d}".format(hours, int(total_minutes))
batch_header["walltime"] = walltime
modified_header = ["#!{}".format(self._exec)]
for key, value in self._header.items():
if key in batch_header:
modified_header.append(value.format(**batch_header))
return "\n".join(modified_header)
def get_parallelize_command(self, procs, nodes=None, **kwargs):
args = [self._cmd_flags["cmd"]]
if nodes:
_nodes = nodes
args += [
self._cmd_flags["nodes"],
str(nodes)
]
else:
_nodes = 1
_procs = int(procs)/int(_nodes)
args += [
self._cmd_flags["ntasks"].format(procs=_procs)
]
gpus = kwargs.get("gpus", 0)
if gpus:
args += [
self._cmd_flags["gpus"],
str(gpus)
]
return " ".join(args)
|
MIT License
|
openstack/senlin
|
senlin/tests/drivers/os_test/keystone_v3.py
|
KeystoneClient.get_senlin_endpoint
|
python
|
def get_senlin_endpoint(self):
region = cfg.CONF.default_region_name
base = self.conn.session.get_endpoint(service_type='clustering',
interface='public',
region=region)
return base
|
Get Senlin service endpoint.
|
https://github.com/openstack/senlin/blob/390779ca1e08f819683e79993696f945f1c0393e/senlin/tests/drivers/os_test/keystone_v3.py#L147-L156
|
from oslo_config import cfg
from oslo_log import log
from senlin.drivers import base
from senlin.drivers import sdk
LOG = log.getLogger(__name__)
CONF = cfg.CONF
class KeystoneClient(base.DriverBase):
def __init__(self, params):
super(KeystoneClient, self).__init__(params)
self.conn = sdk.create_connection(params)
self.session = self.conn.session
@sdk.translate_exception
def trust_get_by_trustor(self, trustor, trustee=None, project=None):
filters = {'trustor_user_id': trustor}
trusts = [t for t in self.conn.identity.trusts(**filters)]
for trust in trusts:
if (trustee and trust.trustee_user_id != trustee):
continue
if (project and trust.project_id != project):
continue
return trust
return None
@sdk.translate_exception
def trust_create(self, trustor, trustee, project, roles=None,
impersonation=True):
if roles:
role_list = [{'name': role} for role in roles]
else:
role_list = []
params = {
'trustor_user_id': trustor,
'trustee_user_id': trustee,
'project_id': project,
'impersonation': impersonation,
'allow_redelegation': True,
'roles': role_list
}
result = self.conn.identity.create_trust(**params)
return result
@classmethod
@sdk.translate_exception
def get_token(cls, **creds):
access_info = sdk.authenticate(**creds)
return access_info['token']
@classmethod
@sdk.translate_exception
def get_user_id(cls, **creds):
access_info = sdk.authenticate(**creds)
return access_info['user_id']
@classmethod
def get_service_credentials(cls, **kwargs):
creds = {
'auth_url': CONF.authentication.auth_url,
'username': CONF.authentication.service_username,
'password': CONF.authentication.service_password,
'project_name': CONF.authentication.service_project_name,
'user_domain_name': cfg.CONF.authentication.service_user_domain,
'project_domain_name':
cfg.CONF.authentication.service_project_domain,
'verify': cfg.CONF.authentication.verify_ssl,
'interface': cfg.CONF.authentication.interface,
}
creds.update(**kwargs)
return creds
@sdk.translate_exception
def validate_regions(self, regions):
region_list = self.conn.identity.regions()
known = [r['id'] for r in region_list]
validated = []
for r in regions:
if r in known:
validated.append(r)
else:
LOG.warning('Region %s is not found.', r)
return validated
@sdk.translate_exception
|
Apache License 2.0
|
openstack/ceilometer
|
ceilometer/event/trait_plugins.py
|
SplitterTraitPlugin.__init__
|
python
|
def __init__(self, separator=".", segment=0, max_split=None, **kw):
LOG.warning('split plugin is deprecated, '
'add ".`split(%(sep)s, %(segment)d, '
'%(max_split)d)`" to your jsonpath instead' %
dict(sep=separator,
segment=segment,
max_split=(-1 if max_split is None
else max_split)))
self.separator = separator
self.segment = segment
self.max_split = max_split
super(SplitterTraitPlugin, self).__init__(**kw)
|
Set up how to split the field.
:param separator: String to split on. default "."
:param segment: Which segment to return. (int) default 0
:param max_split: Limit number of splits. Default: None (no limit)
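A hedged illustration of the splitting these parameters describe (plain str.split stands in for the plugin, whose trait_values implementation is not shown in this excerpt):
value = 'compute.instance.create.end'   # illustrative field value
separator, segment = '.', 1
print(value.split(separator)[segment])  # -> 'instance'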
|
https://github.com/openstack/ceilometer/blob/4d2ad5bc7aa8f632b3aad3f612ddb95abb2cb77b/ceilometer/event/trait_plugins.py#L108-L126
|
import abc
from oslo_log import log
from oslo_utils import timeutils
LOG = log.getLogger(__name__)
class TraitPluginBase(object, metaclass=abc.ABCMeta):
support_return_all_values = False
def __init__(self, **kw):
super(TraitPluginBase, self).__init__()
@abc.abstractmethod
def trait_values(self, match_list):
class SplitterTraitPlugin(TraitPluginBase):
support_return_all_values = True
|
Apache License 2.0
|
johntruckenbrodt/pyrosar
|
pyroSAR/gamma/util.py
|
calibrate
|
python
|
def calibrate(id, directory, replace=False, logpath=None, outdir=None, shellscript=None):
cname = type(id).__name__
if cname == 'CEOS_PSR':
for image in id.getGammaImages(directory):
if image.endswith('_slc'):
isp.radcal_SLC(SLC=image,
SLC_par=image + '.par',
CSLC=image + '_cal',
CSLC_par=image + '_cal.par',
K_dB=id.meta['k_dB'],
logpath=logpath,
outdir=outdir,
shellscript=shellscript)
par2hdr(image + '_cal.par', image + '_cal.hdr')
elif cname == 'EORC_PSR':
for image in id.getGammaImages(directory):
pol = re.search('[HV]{2}', os.path.basename(image)).group(0)
if image.endswith('_mli'):
isp.radcal_MLI(MLI=image,
MLI_par=image + '.par',
OFF_par='-',
CMLI=image + '_cal',
antenna='-',
rloss_flag=0,
ant_flag=0,
refarea_flag=1,
sc_dB=0,
K_dB=id.meta['k_dB'],
pix_area=image + '_cal_pix_ell',
logpath=logpath,
outdir=outdir,
shellscript=shellscript)
par2hdr(image + '.par', image + '_cal.hdr')
par2hdr(image + '.par', image + '_cal_pix_ell' + '.hdr')
os.rename(image + '.par', image + '_cal.par')
elif cname == 'ESA':
k_db = {'ASAR': 55., 'ERS1': 58.24, 'ERS2': 59.75}[id.sensor]
inc_ref = 90. if id.sensor == 'ASAR' else 23.
candidates = [x for x in id.getGammaImages(directory) if re.search('_pri$', x)]
for image in candidates:
out = image.replace('pri', 'grd')
isp.radcal_PRI(PRI=image,
PRI_par=image + '.par',
GRD=out,
GRD_par=out + '.par',
K_dB=k_db,
inc_ref=inc_ref,
logpath=logpath,
outdir=outdir,
shellscript=shellscript)
par2hdr(out + '.par', out + '.hdr')
if replace:
for item in [image, image + '.par', image + '.hdr']:
if os.path.isfile(item):
os.remove(item)
elif cname == 'SAFE':
log.info('calibration already performed during import')
else:
raise NotImplementedError('calibration for class {} is not implemented yet'.format(cname))
|
radiometric calibration of SAR scenes
Parameters
----------
id: ~pyroSAR.drivers.ID
a SAR scene object of type pyroSAR.ID or any subclass
directory: str
the directory to search for GAMMA calibration candidates
replace: bool
replace the input images with the new files? If True, the input images will be deleted.
logpath: str or None
a directory to write command logfiles to
outdir: str or None
the directory to execute the command in
shellscript: str or None
a file to write the GAMMA commands to in shell format
Returns
-------
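None; the calibrated images are written alongside the inputs.
A hedged usage sketch (the scene archive and working directory are hypothetical; identify() is imported from pyroSAR.drivers in this module):
scene = identify('S1A_IW_GRDH_example.zip')           # hypothetical scene archive
calibrate(scene, directory='/path/to/gamma_workdir')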
|
https://github.com/johntruckenbrodt/pyrosar/blob/abfaa1fc3e12ee6457ae643ffc3cbf878fe80c7f/pyroSAR/gamma/util.py#L50-L135
|
import os
import re
import shutil
import zipfile as zf
from datetime import datetime
from urllib.error import URLError
from spatialist import haversine
from spatialist.ancillary import union, finder
from ..S1 import OSV
from ..drivers import ID, identify, identify_many
from . import ISPPar, Namespace, par2hdr
from ..ancillary import multilook_factors, hasarg, groupby
from pyroSAR.examine import ExamineSnap
from .auxil import do_execute
import logging
log = logging.getLogger(__name__)
try:
from .api import diff, disp, isp, lat
except ImportError:
pass
|
MIT License
|
jborean93/pyspnego
|
src/spnego/_credssp.py
|
CredSSPProxy._step_accept
|
python
|
def _step_accept(
self,
in_token: typing.Optional[bytes],
) -> typing.Generator[bytes, bytes, None]:
in_token = yield from self._step_tls(in_token)
auth_request = TSRequest.unpack(in_token)
version = min(auth_request.version, _CREDSSP_VERSION)
log.debug("Negotiated CredSSP version: %d" % version)
try:
log.debug("Starting CredSSP authentication phase")
if not self._auth_context:
self._auth_context = spnego.server(hostname=self._hostname, service=self._service,
protocol='negotiate', options=self._options)
round = 0
while True:
round += 1
nego_out_token = None
if auth_request.nego_tokens:
nego_out_token = self._auth_context.step(auth_request.nego_tokens[0].nego_token)
if auth_request.pub_key_auth:
break
auth_response = TSRequest(_CREDSSP_VERSION, nego_tokens=NegoData(nego_out_token))
auth_request = yield from self._yield_ts_request(auth_response, "Authentication %d" % round)
except SpnegoError as e:
log.warning("Received CredSSP error when accepting credentials: %s", e)
if version in [3, 4] or version >= 6:
error_token = TSRequest(_CREDSSP_VERSION, error_code=e.nt_status)
yield from self._yield_ts_request(error_token, "Authentication failure")
return
actual_key = self._auth_context.unwrap(auth_request.pub_key_auth).data
public_key = self._tls_context.public_key
expected_key = _get_pub_key_auth(public_key, 'initiate', nonce=auth_request.client_nonce)
if actual_key != expected_key:
raise BadBindingsError(context_msg="Public key verification failed, potential man in the middle attack")
nego_token = NegoData(nego_out_token) if nego_out_token else None
server_key = self._auth_context.wrap(_get_pub_key_auth(public_key, 'accept',
nonce=auth_request.client_nonce)).data
pub_key_response = TSRequest(_CREDSSP_VERSION, nego_tokens=nego_token, pub_key_auth=server_key)
auth_request = yield from self._yield_ts_request(pub_key_response, "Public key exchange")
if not auth_request.auth_info:
raise InvalidTokenError(context_msg="No credential received on CredSSP TSRequest from initiator")
credential = TSCredentials.unpack(self._auth_context.unwrap(auth_request.auth_info).data)
self._client_credential = credential.credentials
self._complete = True
|
The acceptor authentication steps of CredSSP.
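A hedged sketch of driving these steps through the public step() method (the transport helpers are hypothetical, and protocol='credssp' follows available_protocols() further down):
server = spnego.server(hostname='host.example.com', service='http', protocol='credssp')
in_token = recv_from_initiator()          # hypothetical transport helper
while not server.complete:
    out_token = server.step(in_token)     # internally advances _step_tls / _step_accept
    if out_token:
        send_to_initiator(out_token)      # hypothetical transport helper
    if not server.complete:
        in_token = recv_from_initiator()
delegated = server.client_credential      # TSPasswordCreds delegated by the initiator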
|
https://github.com/jborean93/pyspnego/blob/6a9eea4ab1a039bf57dc2761564ef995251a142e/src/spnego/_credssp.py#L371-L430
|
import base64
import hashlib
import logging
import os
import re
import shutil
import ssl
import struct
import tempfile
import typing
import spnego
from spnego._context import (
ContextProxy,
ContextReq,
UnwrapResult,
WinRMWrapResult,
WrapResult,
split_username,
)
from spnego._credssp_structures import (
NegoData,
TSCredentials,
TSPasswordCreds,
TSRequest,
)
from spnego._text import to_text
from spnego.channel_bindings import GssChannelBindings
from spnego.exceptions import (
BadBindingsError,
ErrorCode,
FeatureMissingError,
InvalidTokenError,
NativeError,
NegotiateOptions,
OperationNotAvailableError,
SpnegoError,
)
from spnego.tls import (
CredSSPTLSContext,
default_tls_context,
generate_tls_certificate,
get_certificate_public_key,
)
log = logging.getLogger(__name__)
_CREDSSP_VERSION = 6
_X509_CERTIFICATE: typing.Optional[typing.Tuple[bytes, bytes, bytes]] = None
def _create_tls_context(
usage: str,
) -> CredSSPTLSContext:
log.debug("Creating TLS context")
ctx = default_tls_context()
if usage == "accept":
global _X509_CERTIFICATE
if not _X509_CERTIFICATE:
_X509_CERTIFICATE = generate_tls_certificate()
cert_pem, key_pem, public_key = _X509_CERTIFICATE
temp_dir = tempfile.mkdtemp()
try:
cert_path = os.path.join(temp_dir, 'ca.pem')
with open(cert_path, mode='wb') as fd:
fd.write(cert_pem)
fd.write(key_pem)
ctx.context.load_cert_chain(cert_path)
ctx.public_key = public_key
finally:
shutil.rmtree(temp_dir)
return ctx
def _get_pub_key_auth(
pub_key: bytes,
usage: str,
nonce: typing.Optional[bytes] = None,
) -> bytes:
if nonce:
direction = b'Client-To-Server' if usage == 'initiate' else b'Server-To-Client'
hash_input = (b'CredSSP %s Binding Hash\x00' % direction) + nonce + pub_key
key_auth = hashlib.sha256(hash_input).digest()
elif usage == 'accept':
first_byte = struct.unpack("B", pub_key[0:1])[0]
key_auth = struct.pack("B", first_byte + 1) + pub_key[1:]
else:
key_auth = pub_key
return key_auth
def _tls_trailer_length(
data_length: int,
protocol: str,
cipher_suite: str,
) -> int:
if protocol == 'TLSv1.3':
trailer_length = 17
elif re.match(r'^.*[-_]GCM[-_][\w\d]*$', cipher_suite):
trailer_length = 16
else:
hash_algorithm = cipher_suite.split('-')[-1]
hash_length = {
'MD5': 16,
'SHA': 20,
'SHA256': 32,
'SHA384': 48
}.get(hash_algorithm, 0)
pre_pad_length = data_length + hash_length
if "RC4" in cipher_suite:
padding_length = 0
elif "DES" in cipher_suite or "3DES" in cipher_suite:
padding_length = 8 - (pre_pad_length % 8)
else:
padding_length = 16 - (pre_pad_length % 16)
trailer_length = (pre_pad_length + padding_length) - data_length
return trailer_length
def _wrap_ssl_error(context):
def decorator(func):
def wrapped(*args, **kwargs):
try:
return func(*args, **kwargs)
except ssl.SSLError as e:
raise SpnegoError(error_code=ErrorCode.failure, context_msg="%s: %s" % (context, e)) from e
return wrapped
return decorator
class CredSSPProxy(ContextProxy):
def __init__(
self,
username: str,
password: str,
hostname: typing.Optional[str] = None,
service: typing.Optional[str] = None,
channel_bindings: typing.Optional[GssChannelBindings] = None,
context_req: ContextReq = ContextReq.default,
usage: str = 'initiate',
protocol: str = 'credssp',
options: NegotiateOptions = NegotiateOptions.none,
**kwargs: typing.Any,
) -> None:
super(CredSSPProxy, self).__init__(username, password, hostname, service, channel_bindings, context_req, usage,
protocol, options, False)
if options & NegotiateOptions.session_key:
raise FeatureMissingError(NegotiateOptions.session_key)
self._hostname = hostname
self._service = service
self._options = options & ~NegotiateOptions.wrapping_winrm
self._auth_context: typing.Optional[ContextProxy] = kwargs.get("credssp_negotiate_context", None)
self._client_credential = None
self._complete = False
self._step_gen = None
self._tls_context: CredSSPTLSContext
if "credssp_tls_context" in kwargs:
self._tls_context = kwargs["credssp_tls_context"]
if usage == "accept" and not self._tls_context.public_key:
raise OperationNotAvailableError(context_msg="Provided tls context does not have a public key set")
else:
self._tls_context = _create_tls_context(usage)
self._in_buff = ssl.MemoryBIO()
self._out_buff = ssl.MemoryBIO()
self._tls_object = self._tls_context.context.wrap_bio(self._in_buff, self._out_buff,
server_side=(usage == "accept"))
@classmethod
def available_protocols(cls, options=None):
return ['credssp']
@classmethod
def iov_available(cls):
return False
@property
def client_principal(self):
return self._auth_context.client_principal if self._auth_context else None
@property
def client_credential(self):
return self._client_credential
@property
def complete(self):
return self._complete
@property
def negotiated_protocol(self):
return self._auth_context.negotiated_protocol if self._auth_context else None
@property
def session_key(self):
raise OperationNotAvailableError(context_msg='CredSSP does not have a session key to share')
def step(self, in_token=None):
log.debug("CredSSP step input: %s", to_text(base64.b64encode(in_token or b"")))
if not self._step_gen:
self._step_gen = getattr(self, '_step_%s' % self.usage)(in_token)
in_token = None
out_token = None
try:
out_token = self._step_gen.send(in_token)
except StopIteration:
pass
log.debug("CredSSP step output: %s", to_text(base64.b64encode(out_token or b"")))
return out_token
def _step_initiate(
self,
in_token: typing.Optional[bytes],
) -> typing.Generator[bytes, bytes, None]:
yield from self._step_tls(in_token)
server_certificate = self._tls_object.getpeercert(True)
public_key = get_certificate_public_key(server_certificate)
log.debug("Starting CredSSP authentication phase")
if not self._auth_context:
self._auth_context = spnego.client(self.username, self.password, hostname=self._hostname,
service=self._service, protocol='negotiate', options=self._options)
round = 0
out_token = self._auth_context.step()
while True:
round += 1
auth_request = TSRequest(_CREDSSP_VERSION, nego_tokens=NegoData(out_token))
auth_response = yield from self._yield_ts_request(auth_request, "Authentication %d" % round)
out_token = self._auth_context.step(auth_response.nego_tokens[0].nego_token)
if self._auth_context.complete or b"NTLMSSP\x00\x03\x00\x00\x00" in out_token:
break
version = min(auth_response.version, _CREDSSP_VERSION)
log.debug("Negotiated CredSSP version: %d" % version)
pub_key_nego_token = NegoData(out_token) if out_token else None
nonce = os.urandom(32) if version > 4 else None
pub_value = _get_pub_key_auth(public_key, 'initiate', nonce=nonce)
pub_key_request = TSRequest(version=_CREDSSP_VERSION, nego_tokens=pub_key_nego_token, client_nonce=nonce,
pub_key_auth=self._auth_context.wrap(pub_value).data)
pub_key_response = yield from self._yield_ts_request(pub_key_request, "Public key exchange")
if not pub_key_response.pub_key_auth:
raise InvalidTokenError(context_msg="Acceptor did not response with pubKeyAuth info.")
if pub_key_response.nego_tokens:
self._auth_context.step(pub_key_response.nego_tokens[0].nego_token)
response_key = self._auth_context.unwrap(pub_key_response.pub_key_auth).data
expected_key = _get_pub_key_auth(public_key, 'accept', nonce=nonce)
if expected_key != response_key:
raise BadBindingsError(context_msg="Public key verification failed, potential man in the middle attack")
domain, username = split_username(self.username)
ts_password = TSPasswordCreds(domain or u'', username, self.password)
enc_credentials = self._auth_context.wrap(TSCredentials(ts_password).pack()).data
credential_request = TSRequest(_CREDSSP_VERSION, auth_info=enc_credentials)
self._complete = True
yield from self._yield_ts_request(credential_request, "Credential exchange")
|
MIT License
|
spatialaudio/jackclient-python
|
src/jack.py
|
Client.transport_reposition_struct
|
python
|
def transport_reposition_struct(self, position):
_check(_lib.jack_transport_reposition(self._ptr, position),
'Error re-positioning transport')
|
Request a new transport position.
May be called at any time by any client. The new position takes
effect in two process cycles. If there are slow-sync clients
and the transport is already rolling, it will enter the
`STARTING` state and begin invoking their sync callbacks
(see `set_sync_callback()`) until ready.
This function is realtime-safe.
Parameters
----------
position : jack_position_t
Requested new transport position. This is the same
structure as returned by `transport_query_struct()`.
See Also
--------
transport_query_struct, transport_locate
|
https://github.com/spatialaudio/jackclient-python/blob/769afb577fcd8ac052288496156ba4820a82a7eb/src/jack.py#L618-L640
|
__version__ = '0.5.3'
from ctypes.util import find_library as _find_library
import errno as _errno
import platform as _platform
import warnings as _warnings
from _jack import ffi as _ffi
if _platform.system() == 'Windows':
if _platform.architecture()[0] == '64bit':
_libname = _find_library('libjack64')
else:
_libname = _find_library('libjack')
else:
_libname = _find_library('jack')
if _libname is None:
raise OSError('JACK library not found')
_lib = _ffi.dlopen(_libname)
_AUDIO = b'32 bit float mono audio'
_MIDI = b'8 bit raw midi'
STOPPED = _lib.JackTransportStopped
ROLLING = _lib.JackTransportRolling
STARTING = _lib.JackTransportStarting
NETSTARTING = _lib.JackTransportNetStarting
PROPERTY_CREATED = _lib.PropertyCreated
PROPERTY_CHANGED = _lib.PropertyChanged
PROPERTY_DELETED = _lib.PropertyDeleted
POSITION_BBT = _lib.JackPositionBBT
POSITION_TIMECODE = _lib.JackPositionTimecode
POSITION_BBT_FRAME_OFFSET = _lib.JackBBTFrameOffset
POSITION_AUDIO_VIDEO_RATIO = _lib.JackAudioVideoRatio
POSITION_VIDEO_FRAME_OFFSET = _lib.JackVideoFrameOffset
_SUCCESS = 0
_FAILURE = 1
def _decode(cdata):
return _ffi.string(cdata).decode()
for name in dir(_lib):
if name.startswith('JACK_METADATA_'):
try:
globals()[name[5:]] = _decode(getattr(_lib, name))
except _ffi.error:
pass
else:
del name
class JackError(Exception):
class JackErrorCode(JackError):
def __init__(self, message, code):
self.message = message
self.code = code
def __str__(self):
return '{} ({})'.format(self.message, self.code)
class JackOpenError(JackError):
def __init__(self, name, status):
self.name = name
self.status = status
def __str__(self):
return 'Error initializing "{}": {}'.format(self.name, self.status)
class Client(object):
def __init__(self, name, use_exact_name=False, no_start_server=False,
servername=None, session_id=None):
status = _ffi.new('jack_status_t*')
options = _lib.JackNullOption
optargs = []
if use_exact_name:
options |= _lib.JackUseExactName
if no_start_server:
options |= _lib.JackNoStartServer
if servername:
options |= _lib.JackServerName
optargs.append(_ffi.new('char[]', servername.encode()))
if session_id:
options |= _lib.JackSessionID
optargs.append(_ffi.new('char[]', session_id.encode()))
self._ptr = _lib.jack_client_open(name.encode(), options, status,
*optargs)
self._status = Status(status[0])
if not self._ptr:
raise JackOpenError(name, self._status)
self._inports = Ports(self, _AUDIO, _lib.JackPortIsInput)
self._outports = Ports(self, _AUDIO, _lib.JackPortIsOutput)
self._midi_inports = Ports(self, _MIDI, _lib.JackPortIsInput)
self._midi_outports = Ports(self, _MIDI, _lib.JackPortIsOutput)
self._keepalive = []
self._position = _ffi.new('jack_position_t*')
_ptr = _ffi.NULL
def __enter__(self):
self.activate()
return self
def __exit__(self, *args):
self.deactivate()
self.close()
def __del__(self):
self.close()
@property
def name(self):
return _decode(_lib.jack_get_client_name(self._ptr))
@property
def uuid(self):
uuid = _ffi.gc(_lib.jack_client_get_uuid(self._ptr), _lib.jack_free)
if not uuid:
raise JackError('Unable to get UUID')
return _decode(uuid)
@property
def samplerate(self):
return _lib.jack_get_sample_rate(self._ptr)
@property
def blocksize(self):
return _lib.jack_get_buffer_size(self._ptr)
@blocksize.setter
def blocksize(self, blocksize):
_check(_lib.jack_set_buffer_size(self._ptr, blocksize),
'Error setting JACK blocksize')
@property
def status(self):
return self._status
@property
def realtime(self):
return bool(_lib.jack_is_realtime(self._ptr))
@property
def frames_since_cycle_start(self):
return _lib.jack_frames_since_cycle_start(self._ptr)
@property
def frame_time(self):
return _lib.jack_frame_time(self._ptr)
@property
def last_frame_time(self):
return _lib.jack_last_frame_time(self._ptr)
@property
def inports(self):
return self._inports
@property
def outports(self):
return self._outports
@property
def midi_inports(self):
return self._midi_inports
@property
def midi_outports(self):
return self._midi_outports
def owns(self, port):
port = self._get_port_ptr(port)
return bool(_lib.jack_port_is_mine(self._ptr, port))
def activate(self):
_check(_lib.jack_activate(self._ptr), 'Error activating JACK client')
def deactivate(self, ignore_errors=True):
err = _lib.jack_deactivate(self._ptr)
if not ignore_errors:
_check(err, 'Error deactivating JACK client')
def cpu_load(self):
return _lib.jack_cpu_load(self._ptr)
def close(self, ignore_errors=True):
if self._ptr:
err = _lib.jack_client_close(self._ptr)
self._ptr = _ffi.NULL
if not ignore_errors:
_check(err, 'Error closing JACK client')
def connect(self, source, destination):
if isinstance(source, Port):
source = source.name
if isinstance(destination, Port):
destination = destination.name
err = _lib.jack_connect(self._ptr, source.encode(),
destination.encode())
if err == _errno.EEXIST:
raise JackErrorCode('Connection {0!r} -> {1!r} '
'already exists'.format(source, destination),
err)
_check(err,
'Error connecting {0!r} -> {1!r}'.format(source, destination))
def disconnect(self, source, destination):
if isinstance(source, Port):
source = source.name
if isinstance(destination, Port):
destination = destination.name
_check(_lib.jack_disconnect(
self._ptr, source.encode(), destination.encode()),
"Couldn't disconnect {0!r} -> {1!r}".format(source, destination))
def transport_start(self):
_lib.jack_transport_start(self._ptr)
def transport_stop(self):
_lib.jack_transport_stop(self._ptr)
@property
def transport_state(self):
return TransportState(_lib.jack_transport_query(self._ptr, _ffi.NULL))
@property
def transport_frame(self):
return _lib.jack_get_current_transport_frame(self._ptr)
@transport_frame.setter
def transport_frame(self, frame):
_check(_lib.jack_transport_locate(self._ptr, frame),
'Error locating JACK transport')
def transport_locate(self, frame):
_warnings.warn(
'transport_locate() is deprecated, use transport_frame',
DeprecationWarning)
self.transport_frame = frame
def transport_query(self):
state, pos = self.transport_query_struct()
return TransportState(state), position2dict(pos)
def transport_query_struct(self):
state = _lib.jack_transport_query(self._ptr, self._position)
return state, self._position
|
MIT License
|
enviraldesign/geopix
|
GP/Assets/code/modules/hashUtils.py
|
generate_hash_from_dict
|
python
|
def generate_hash_from_dict(d, blacklist=None, whitelist=None,
raw=False):
if not isinstance(d, dict):
raise TypeError('Please provide a dictionary.')
if blacklist is not None:
validate_blackwhitelist(d, blacklist)
if whitelist is not None:
validate_blackwhitelist(d, whitelist)
raw_string = _generate_string_from_dict(d, blacklist, whitelist, prefix='d')
if raw:
return raw_string
else:
return hashlib.md5(raw_string.encode('utf-8')).hexdigest()
|
Generate an md5 hash from a (nested) dictionary.
Takes care of extracting nested dictionaries, iterables and
avoids rounding errors of floats. Makes sure keys are read in a
unique order. A blacklist of keys can be passed, that can contain
keys which should be excluded from the hash. If a whitelist is
given, only keys appearing in the whitelist are used to generate
the hash. All strings are converted to unicode, i.e., the hash
does not distinguish between strings provided in ascii or unicode
format. Lists, np.ndarrays and tuples are treated equally, i.e., an
array-like item [1,2,3], np.array([1,2,3]) or (1,2,3) will lead
to the same hash if they are of the same type.
Parameters
----------
d : dict
Dictionary to compute the hash from.
blacklist : list, optional
List of keys which *are not* used for generating the hash.
Keys of nested dictionaries can be provided by specifying
the full path of keys in a tuple.
If None, no keys will be ignored.
whitelist : list, optional
List of keys which *are* used for generating the hash.
Keys of nested dictionaries can be provided by specifying
the full path of keys in a tuple.
If None, all keys will be used.
Blacklist overrules whitelist, i.e., keys appearing in the
blacklist will definitely not be used.
raw : bool, optional
if True, return the unhashed string.
Returns
-------
: string
The hash generated from the dictionary, or the unhashed string if
raw is True.
Example
-------
>>> from dicthash import generate_hash_from_dict
>>> d = {'a': 'asd', 'b': 0.12, 3: {'c': [3, 4, 5]}}
>>> generate_hash_from_dict(d)
'd748bbf148db514911ed0bf215729d01'
|
https://github.com/enviraldesign/geopix/blob/9d20fb111cf774051f5ab5785cc3eb23f2232bcd/GP/Assets/code/modules/hashUtils.py#L135-L195
|
import hashlib
import warnings
FLOAT_FACTOR = 1e15
FLOOR_SMALL_FLOATS = False
warnings.simplefilter('default', category=UserWarning)
try:
basestring
except NameError:
basestring = str
def _save_convert_float_to_int(x):
if abs(x) > 0. and abs(x) < 1. / FLOAT_FACTOR:
if not FLOOR_SMALL_FLOATS:
raise ValueError('Float too small for safe conversion to '
'integer.')
else:
x = 0.
warnings.warn('Float too small for safe conversion to'
'integer. Rounding down to zero.', UserWarning)
return int(x * FLOAT_FACTOR)
def _unpack_value(value, prefix='', whitelist=None, blacklist=None):
try:
return _generate_string_from_dict(value,
blacklist=blacklist,
whitelist=whitelist,
prefix=prefix + 'd')
except AttributeError:
try:
return prefix + _generate_string_from_iterable(value, prefix='i')
except TypeError:
if isinstance(value, float):
return prefix + str(_save_convert_float_to_int(value))
else:
return prefix + str(value)
def _generate_string_from_iterable(l, prefix=''):
if isinstance(l, basestring):
return ''.join((prefix, str(l)))
else:
return ''.join(_unpack_value(value, prefix='') for value in l)
def _generate_string_from_dict(d, blacklist, whitelist, prefix=''):
if whitelist is None:
whitelist = list(d.keys())
if blacklist is not None:
whitelist = set(whitelist).difference(blacklist)
if len(whitelist) > 0:
return ''.join(_unpack_value(d[key],
whitelist=filter_blackwhitelist(whitelist, key),
blacklist=filter_blackwhitelist(blacklist, key),
prefix=prefix + str(key)) for
key in sorted(filter_blackwhitelist(whitelist, None), key=str))
else:
return ''
|
MIT License
|
crespo-otero-group/fromage
|
fromage/utils/mol/_cell_operations.py
|
trans_from_rad
|
python
|
def trans_from_rad(self, clust_rad):
a_perp = np.cross(self.vectors[1], self.vectors[2])
b_perp = np.cross(self.vectors[2], self.vectors[0])
c_perp = np.cross(self.vectors[0], self.vectors[1])
perp = np.array([a_perp / np.linalg.norm(a_perp), b_perp /
np.linalg.norm(b_perp), c_perp / np.linalg.norm(c_perp)])
trans_count = np.array([0, 0, 0])
supercell_vectors = np.zeros((3,3))
distances = np.array([0.0, 0.0, 0.0])
for comp in range(3):
while distances[comp] <= clust_rad:
trans_count[comp] += 1
supercell_vectors[comp] = trans_count[comp] * self.vectors[comp]
distances[comp] = np.dot(supercell_vectors[comp], perp[comp])
return trans_count
|
Generate the translations necessary to encapsulate a sphere of a given radius
Parameters
----------
clust_rad : float
Radius defining a sphere
Returns
-------
trans_count : 3 x 1 numpy array
The translations required for the unit cell to contain the sphere
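A standalone numpy sketch of the same geometry (not part of fromage): for a hypothetical 10 Å cubic cell and a 25 Å sphere, three translations per axis are needed.
import numpy as np

vectors = np.eye(3) * 10.0   # hypothetical 10 Å cubic cell
clust_rad = 25.0
perp = np.array([np.cross(vectors[(i + 1) % 3], vectors[(i + 2) % 3]) for i in range(3)])
perp /= np.linalg.norm(perp, axis=1)[:, None]
trans_count = np.zeros(3, dtype=int)
for comp in range(3):
    dist = 0.0
    while dist <= clust_rad:
        trans_count[comp] += 1
        dist = np.dot(trans_count[comp] * vectors[comp], perp[comp])
print(trans_count)           # -> [3 3 3]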
|
https://github.com/crespo-otero-group/fromage/blob/9b4a80698ed1672268dde292d5512c72a23cb00a/fromage/utils/mol/_cell_operations.py#L153-L193
|
from copy import deepcopy
import numpy as np
def complete_mol(self, labels):
new_mol, scattered_mol = self.per_select(labels, old_pos=True)
new_cell_atoms = deepcopy(
[a for a in self.atoms if a not in scattered_mol])
new_cell = self.copy()
new_cell.atoms = new_cell_atoms
for atom in new_mol:
new_cell.append(atom.copy())
return new_mol, new_cell
def complete_cell(self):
full_mol_l = []
remaining = self.copy()
while len(remaining) != 0:
full_mol, cell = remaining.complete_mol(0)
full_mol_l.append(full_mol)
remaining = cell
for atom in full_mol:
if atom in remaining:
remaining.remove(atom)
out_cell = remaining
for mol in full_mol_l:
out_cell.extend(mol)
return out_cell, full_mol_l
def supercell(self, trans):
import fromage.utils.mol as mol_init
trans = np.array(trans)
new_cell = self.empty_mol()
for a_mult in range(trans[0]):
for b_mult in range(trans[1]):
for c_mult in range(trans[2]):
vector = a_mult * self.vectors[0] + b_mult * self.vectors[1] + c_mult * self.vectors[2]
new_atoms = mol_init.Mol([i.v_translated(vector)
for i in self.atoms])
new_cell += new_atoms
out_vec = (self.vectors.T * trans.transpose()).T
new_cell.vectors = out_vec
return new_cell
def centered_supercell(self, trans, from_origin=False):
import fromage.utils.mol as mol_init
trans_series = [0, 0, 0]
for i, tra in enumerate(trans):
if from_origin:
trans_series[i] = list(range(-tra, tra))
else:
trans_series[i] = list(range(-tra, tra + 1))
trans_series = np.array(trans_series)
new_cell = self.empty_mol()
for a_mult in trans_series[0]:
for b_mult in trans_series[1]:
for c_mult in trans_series[2]:
vector = a_mult * self.vectors[0] + b_mult * self.vectors[1] + c_mult * self.vectors[2]
new_atoms = mol_init.Mol([i.v_translated(vector)
for i in self.atoms])
new_cell += new_atoms
out_vec = (self.vectors.T * trans.transpose()).T
new_cell.vectors = out_vec
return new_cell
|
MIT License
|
square/connect-python-sdk
|
squareconnect/models/order_fulfillment_pickup_details.py
|
OrderFulfillmentPickupDetails.picked_up_at
|
python
|
def picked_up_at(self, picked_up_at):
self._picked_up_at = picked_up_at
|
Sets the picked_up_at of this OrderFulfillmentPickupDetails.
The [timestamp](#workingwithdates) in RFC3339 timestamp format, e.g., "2016-09-04T23:59:33.123Z", indicating when the fulfillment was picked up by the recipient.
:param picked_up_at: The picked_up_at of this OrderFulfillmentPickupDetails.
:type: str
|
https://github.com/square/connect-python-sdk/blob/e00e2889b2dd2c55048219cbe64db79962a68633/squareconnect/models/order_fulfillment_pickup_details.py#L410-L419
|
from pprint import pformat
from six import iteritems
import re
class OrderFulfillmentPickupDetails(object):
def __init__(self, recipient=None, expires_at=None, auto_complete_duration=None, schedule_type=None, pickup_at=None, pickup_window_duration=None, prep_time_duration=None, note=None, placed_at=None, accepted_at=None, rejected_at=None, ready_at=None, expired_at=None, picked_up_at=None, canceled_at=None, cancel_reason=None):
self.swagger_types = {
'recipient': 'OrderFulfillmentRecipient',
'expires_at': 'str',
'auto_complete_duration': 'str',
'schedule_type': 'str',
'pickup_at': 'str',
'pickup_window_duration': 'str',
'prep_time_duration': 'str',
'note': 'str',
'placed_at': 'str',
'accepted_at': 'str',
'rejected_at': 'str',
'ready_at': 'str',
'expired_at': 'str',
'picked_up_at': 'str',
'canceled_at': 'str',
'cancel_reason': 'str'
}
self.attribute_map = {
'recipient': 'recipient',
'expires_at': 'expires_at',
'auto_complete_duration': 'auto_complete_duration',
'schedule_type': 'schedule_type',
'pickup_at': 'pickup_at',
'pickup_window_duration': 'pickup_window_duration',
'prep_time_duration': 'prep_time_duration',
'note': 'note',
'placed_at': 'placed_at',
'accepted_at': 'accepted_at',
'rejected_at': 'rejected_at',
'ready_at': 'ready_at',
'expired_at': 'expired_at',
'picked_up_at': 'picked_up_at',
'canceled_at': 'canceled_at',
'cancel_reason': 'cancel_reason'
}
self._recipient = recipient
self._expires_at = expires_at
self._auto_complete_duration = auto_complete_duration
self._schedule_type = schedule_type
self._pickup_at = pickup_at
self._pickup_window_duration = pickup_window_duration
self._prep_time_duration = prep_time_duration
self._note = note
self._placed_at = placed_at
self._accepted_at = accepted_at
self._rejected_at = rejected_at
self._ready_at = ready_at
self._expired_at = expired_at
self._picked_up_at = picked_up_at
self._canceled_at = canceled_at
self._cancel_reason = cancel_reason
@property
def recipient(self):
return self._recipient
@recipient.setter
def recipient(self, recipient):
self._recipient = recipient
@property
def expires_at(self):
return self._expires_at
@expires_at.setter
def expires_at(self, expires_at):
self._expires_at = expires_at
@property
def auto_complete_duration(self):
return self._auto_complete_duration
@auto_complete_duration.setter
def auto_complete_duration(self, auto_complete_duration):
self._auto_complete_duration = auto_complete_duration
@property
def schedule_type(self):
return self._schedule_type
@schedule_type.setter
def schedule_type(self, schedule_type):
self._schedule_type = schedule_type
@property
def pickup_at(self):
return self._pickup_at
@pickup_at.setter
def pickup_at(self, pickup_at):
self._pickup_at = pickup_at
@property
def pickup_window_duration(self):
return self._pickup_window_duration
@pickup_window_duration.setter
def pickup_window_duration(self, pickup_window_duration):
self._pickup_window_duration = pickup_window_duration
@property
def prep_time_duration(self):
return self._prep_time_duration
@prep_time_duration.setter
def prep_time_duration(self, prep_time_duration):
self._prep_time_duration = prep_time_duration
@property
def note(self):
return self._note
@note.setter
def note(self, note):
if note is None:
raise ValueError("Invalid value for `note`, must not be `None`")
if len(note) > 500:
raise ValueError("Invalid value for `note`, length must be less than `500`")
self._note = note
@property
def placed_at(self):
return self._placed_at
@placed_at.setter
def placed_at(self, placed_at):
self._placed_at = placed_at
@property
def accepted_at(self):
return self._accepted_at
@accepted_at.setter
def accepted_at(self, accepted_at):
self._accepted_at = accepted_at
@property
def rejected_at(self):
return self._rejected_at
@rejected_at.setter
def rejected_at(self, rejected_at):
self._rejected_at = rejected_at
@property
def ready_at(self):
return self._ready_at
@ready_at.setter
def ready_at(self, ready_at):
self._ready_at = ready_at
@property
def expired_at(self):
return self._expired_at
@expired_at.setter
def expired_at(self, expired_at):
self._expired_at = expired_at
@property
def picked_up_at(self):
return self._picked_up_at
@picked_up_at.setter
|
Apache License 2.0
|
flexget/flexget
|
flexget/plugins/input/gazelle.py
|
InputGazelle.request
|
python
|
def request(self, no_login=False, **params):
if 'action' not in params:
raise ValueError("An 'action' is required when making a request")
ajaxpage = "{}/ajax.php".format(self.base_url)
r = self._session.get(ajaxpage, params=params, allow_redirects=False, raise_status=True)
if not no_login and r.is_redirect and r.next.url == "{}/login.php".format(self.base_url):
logger.warning("Redirected to login page, reauthenticating and trying again")
self.authenticate(force=True)
return self.request(no_login=True, **params)
if r.status_code != 200:
raise PluginError("{} returned a non-200 status code".format(self.base_url))
try:
json_response = r.json()
if json_response['status'] != "success":
error = json_response.get('error', json_response.get('status'))
if not error or error == "failure":
error = json_response.get('response', str(json_response))
raise PluginError(
"{} gave a failure response of '{}'".format(self.base_url, error)
)
return json_response['response']
except (ValueError, TypeError, KeyError):
raise PluginError("{} returned an invalid response".format(self.base_url))
|
Make an AJAX request to the API
If `no_login` is True, logging in will not be attempted if the request
is redirected to the login page.
Adapted from https://github.com/isaaczafuta/whatapi
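A hedged sketch, as it would appear inside another InputGazelle method (the 'index' action and the returned keys mirror authenticate() further down):
account_info = self.request(action='index')  # payload of json_response['response']
authkey = account_info['authkey']
passkey = account_info['passkey']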
|
https://github.com/flexget/flexget/blob/e625eb09324a9d6be4cfb42601c6af4628b2226a/flexget/plugins/input/gazelle.py#L217-L252
|
from datetime import datetime
from loguru import logger
from sqlalchemy import Column, DateTime, String, Unicode
from flexget import db_schema, plugin
from flexget.components.sites.utils import normalize_unicode
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
from flexget.manager import Session
from flexget.plugin import PluginError
from flexget.utils.database import json_synonym
from flexget.utils.requests import TokenBucketLimiter
from flexget.utils.tools import parse_filesize
DETECT_2FA = "Authenticator Code", "TOTP code"
logger = logger.bind(name='gazelle')
Base = db_schema.versioned_base('gazelle_session', 0)
class GazelleSession(Base):
__tablename__ = 'gazelle_session'
username = Column(Unicode, primary_key=True)
base_url = Column(String, primary_key=True)
authkey = Column(String)
passkey = Column(String)
_cookies = Column('cookie', Unicode)
cookies = json_synonym('_cookies')
expires = Column(DateTime)
class InputGazelle:
def __init__(self):
self.base_url = None
self.aliases = {"search": "searchstr"}
self.params = {"searchstr": None}
@property
def schema(self):
schema = {
'type': 'object',
'properties': {
'base_url': {'type': 'string'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'max_pages': {'type': 'integer'},
'search': {'type': 'string'},
},
'required': ['username', 'password'],
'additionalProperties': False,
}
if not self.base_url:
schema['required'].append('base_url')
return schema
def _key(self, key):
if key in self.aliases:
return self.aliases[key]
return key
def _opts(self, key):
return self.params[self._key(key)]
def _getval(self, key, val):
opts = self._opts(key)
if isinstance(opts, dict):
return opts[str(val)]
elif isinstance(val, list):
return ",".join(val)
return val
def params_from_config(self, config):
ret = {}
for k, v in config.items():
key = self._key(k)
if key in self.params:
ret[key] = self._getval(k, v)
return ret
def setup(self, task, config):
self._session = task.requests
base_url = config.get('base_url', "").rstrip("/")
if base_url:
if self.base_url and self.base_url != base_url:
logger.warning(
'Using plugin designed for {} on {} - things may break',
self.base_url,
base_url,
)
self.base_url = base_url
if not self.base_url:
raise PluginError("No 'base_url' configured")
self.max_pages = config.get('max_pages', 5)
self._session.add_domain_limiter(TokenBucketLimiter(self.base_url, 2, '2 seconds'))
self.username = config['username']
self.password = config['password']
self.authenticate()
task.no_entries_ok = True
def resume_session(self):
logger.debug("Attempting to find an existing session in the DB")
with Session() as session:
db_session = (
session.query(GazelleSession)
.filter(
GazelleSession.base_url == self.base_url,
GazelleSession.username == self.username,
)
.one_or_none()
)
if db_session and db_session.expires and db_session.expires >= datetime.utcnow():
self._session.cookies.update(db_session.cookies)
self.authkey = db_session.authkey
self.passkey = db_session.passkey
return True
return False
def save_current_session(self):
logger.debug("Storing session info in the DB")
with Session() as session:
expires = None
for c in self._session.cookies:
if c.name == "session":
expires = datetime.utcfromtimestamp(c.expires)
db_session = GazelleSession(
username=self.username,
base_url=self.base_url,
cookies=dict(self._session.cookies),
expires=expires,
authkey=self.authkey,
passkey=self.passkey,
)
session.merge(db_session)
def authenticate(self, force=False):
self._session.cookies.clear()
if not force and self.resume_session():
logger.info('Logged into {} using cached session', self.base_url)
return
url = "{}/login.php".format(self.base_url)
data = {'username': self.username, 'password': self.password, 'keeplogged': 1}
r = self._session.post(url, data=data, allow_redirects=False, raise_status=True)
if not r.is_redirect or r.next.url != "{}/index.php".format(self.base_url):
msg = "Failed to log into {}".format(self.base_url)
for otp_text in DETECT_2FA:
if otp_text in r.text:
msg += " - Accounts using 2FA are currently not supported"
break
raise PluginError(msg)
account_info = self.request(no_login=True, action='index')
self.authkey = account_info['authkey']
self.passkey = account_info['passkey']
logger.info('Logged in to {}', self.base_url)
self.save_current_session()
|
MIT License
|
ebranca/owasp-pysec
|
pysec/clock.py
|
timeout
|
python
|
def timeout(timeout, default=RAISE_TIMEOUT):
timeout = int(timeout)
def raise_timeout(signalnum, frame):
raise Timeout()
def _timeout(func):
def __timeout(*args, **kwds):
old_handler = signal.signal(signal.SIGALRM, raise_timeout)
signal.alarm(timeout)
try:
result = func(*args, **kwds)
except Timeout:
if default is RAISE_TIMEOUT:
raise
return default
finally:
signal.signal(signal.SIGALRM, old_handler)
signal.alarm(0)
return result
return __timeout
return _timeout
|
Decorator for functions: if the decorated function doesn't terminate
within *timeout* seconds and *default* isn't defined, a Timeout exception
is raised; otherwise the *default* value is returned.
If it terminates before *timeout* seconds, its result is returned.
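A minimal sketch (Unix-only, since SIGALRM is used; the sleepy function and its delay are illustrative):
import time

@timeout(1, default=None)
def sleepy():
    time.sleep(2)
    return 'finished'

print(sleepy())  # -> None, because the call exceeded 1 second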
|
https://github.com/ebranca/owasp-pysec/blob/163e10a146db04f40648979e8d7c0c10e7737781/pysec/clock.py#L33-L56
|
import signal
from pysec.core import Object, Error
class Timeout(Error):
pass
RAISE_TIMEOUT = Object()
|
Apache License 2.0
|
aws/sagemaker-inference-toolkit
|
src/sagemaker_inference/errors.py
|
GenericInferenceToolkitError.__init__
|
python
|
def __init__(self, status_code, message=None, phrase=None):
message = message or "Invalid Request"
phrase = phrase or message
super(GenericInferenceToolkitError, self).__init__(status_code, message, phrase)
|
Initializes an instance of GenericInferenceToolkitError.
Args:
status_code (int): HTTP Error Status Code to send to client
message (str): Response message to send to client
phrase (str): Response body to send to client
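A minimal sketch of raising and inspecting the error (the message is illustrative):
try:
    raise GenericInferenceToolkitError(400, 'Unsupported content type')
except BaseInferenceToolkitError as e:
    print(e.status_code, e.message, e.phrase)  # -> 400 Unsupported content type Unsupported content type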
|
https://github.com/aws/sagemaker-inference-toolkit/blob/7fcb80561c0d87cf035e194a86f9fa5c6c9d9b33/src/sagemaker_inference/errors.py#L61-L71
|
from __future__ import absolute_import
import textwrap
class UnsupportedFormatError(Exception):
def __init__(self, content_type, **kwargs):
self._message = textwrap.dedent(
"""Content type %s is not supported by this framework.
Please implement input_fn to deserialize the request data or an output_fn to
serialize the response. For more information, see the SageMaker Python SDK README."""
% content_type
)
super(UnsupportedFormatError, self).__init__(self._message, **kwargs)
class BaseInferenceToolkitError(Exception):
def __init__(self, status_code, message, phrase):
self.status_code = status_code
self.message = message
self.phrase = phrase
super(BaseInferenceToolkitError, self).__init__(status_code, message, phrase)
class GenericInferenceToolkitError(BaseInferenceToolkitError):
|
Apache License 2.0
|
jlezama/disentangling-jacobian
|
conditional_image_manipulation/src/evaluation.py
|
Evaluator.step
|
python
|
def step(self, n_iter):
if 1:
mean_loss = [
('Attr', 'attr_pred_costs', float(self.params.lambda_ttributes)),
('Rec', 'rec_costs', float(self.params.lambda_ae)),
('Xcov', 'xcov_costs', float(self.params.lambda_xcov)),
('flip', 'flipped_labels_prediction_costs', float(self.params.lambda_flipped)),
('lat', 'latent_code_match_costs', float(self.params.lambda_latent_match)),
('jac', 'jacobian_costs', float(self.params.lambda_jacobian)),
('y', 'y_costs', float(self.params.lambda_y))
]
logger.info(('EVAL>> %06i - ' % n_iter) +
'/ '.join(['%s : %2.3e (x%1.1e)' % (a, np.mean(self.stats[b]), c)
for a, b,c in mean_loss if len(self.stats[b]) > 0]))
self.params.n_total_iter += 1
|
End training iteration / print training statistics.
|
https://github.com/jlezama/disentangling-jacobian/blob/c570945055c735a15b9adba093b7c688c7310aad/conditional_image_manipulation/src/evaluation.py#L333-L354
|
import os
import numpy as np
import torch
from torch.autograd import Variable
from torch.nn import functional as F
from logging import getLogger
from .utils import get_optimizer, clip_grad_norm, get_lambda, reload_model, reload_model2
from .model import get_attr_loss, flip_attributes
from print_images import print_grid
logger = getLogger()
def compute_xcov(z,y,bs):
z = z.view(bs,-1)
y = y.view(bs,-1)
z = z - torch.mean(z, dim=0)
y = y - torch.mean(y, dim=0)
cov_matrix = torch.matmul(torch.t(z),y)
cov_loss = torch.norm(cov_matrix.view(1,-1))/bs
return cov_loss
class Evaluator(object):
def __init__(self, ae, ae_teacher, data, params):
self.data = data
self.params = params
self.ae = ae
self.ae_teacher = ae_teacher
self.ae_optimizer_enc = get_optimizer(ae.enc_layers, params.ae_optimizer)
self.ae_optimizer_dec = get_optimizer(ae.dec_layers, params.ae_optimizer)
logger.info(ae)
logger.info('%i parameters in the autoencoder. '
% sum([p.nelement() for p in ae.parameters()]))
if params.ae_reload:
print '<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<', params.max_fm
if int(params.max_fm) >=1064:
print '>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>'
print 'using reload_model2'
reload_model2(ae, params.ae_reload,
['img_sz', 'img_fm', 'init_fm', 'n_layers', 'n_skip', 'attr', 'n_attr'])
else:
reload_model(ae, params.ae_reload,
['img_sz', 'img_fm', 'init_fm', 'n_layers', 'n_skip', 'attr', 'n_attr'])
reload_model(ae_teacher, params.ae_teacher_reload,
['img_sz', 'img_fm', 'init_fm', 'n_layers', 'n_skip', 'attr', 'n_attr'])
self.stats = {}
self.stats['rec_costs'] = []
self.stats['xcov_costs'] = []
self.stats['attr_pred_costs'] = []
self.stats['flipped_labels_prediction_costs'] = []
self.stats['latent_code_match_costs'] = []
self.stats['jacobian_costs'] = []
self.stats['y_costs'] = []
self.best_loss = 1e12
self.params.n_total_iter = 0
def autoencoder_step(self, iterno=-1, epoch=-1):
data = self.data
params = self.params
self.ae.eval()
bs = params.batch_size
batch_x, batch_y = data.train_batch(bs)
enc_outputs, dec_outputs, y_pred = self.ae(batch_x, batch_y)
z_latent_1 = enc_outputs[-1]
penalties = torch.clamp(1-y_pred*batch_y, min=0)
attr_cost = torch.sum(penalties)/bs
attr_loss = attr_cost * params.lambda_ttributes
if params.lambda_xcov >0:
xcov_cost = compute_xcov(z_latent_1, y_pred, bs)
else:
xcov_cost = Variable(torch.FloatTensor([0]))
xcov_loss = xcov_cost * params.lambda_xcov
ae_cost = ((batch_x - dec_outputs[-1]) ** 2).mean()
loss = params.lambda_ae * ae_cost
self.stats['rec_costs'].append(ae_cost.data[0])
self.stats['attr_pred_costs'].append(attr_cost.data[0])
self.stats['xcov_costs'].append(xcov_cost.data[0])
loss += attr_loss
if params.lambda_xcov > 0:
loss += xcov_loss
if params.lambda_flipped > 0 or params.lambda_latent_match >0 or params.lambda_jacobian > 0:
flipped = y_pred.clone()
flip_idx = torch.randperm(bs).cuda()
flipped = flipped[flip_idx,:]
dec_outputs_2 = self.ae.decode(enc_outputs, flipped)
else:
flipped_labels_prediction_cost = Variable(torch.FloatTensor([0]))
latent_code_match_cost = Variable(torch.FloatTensor([0]))
jacobian_cost = Variable(torch.FloatTensor([0]))
if params.lambda_jacobian >0:
self.ae_teacher.eval()
for param in self.ae_teacher.parameters():
param.requires_grad = False
enc_teacher_outputs, dec_teacher_outputs, y_teacher_pred = self.ae_teacher(batch_x, batch_y)
teacher_flipped = y_teacher_pred.clone()
teacher_flipped = teacher_flipped[flip_idx, :]
y_cost = ((y_pred - y_teacher_pred.detach()) **2).mean()
dec_teacher_outputs_2 = self.ae_teacher.decode(enc_teacher_outputs, teacher_flipped)
diff_ae = dec_outputs[-1] - dec_outputs_2[-1]
diff_ae_teacher = dec_teacher_outputs[-1].detach() - dec_teacher_outputs_2[-1].detach()
jacobian_cost = ((diff_ae - diff_ae_teacher)**2).mean()
if 0 and iterno==0 and ((epoch %10) ==0):
DATAROOT = '/data/tmp'
OUTDIR = '%s/%s' % (DATAROOT, params.outdir)
os.system('mkdir -p %s' % OUTDIR)
print_grid(dec_teacher_outputs[-1], os.path.join(OUTDIR, '%04i_dec_teacher_outputs.png' % epoch))
print_grid(dec_teacher_outputs_2[-1], os.path.join(OUTDIR, '%04i_dec_teacher_outputs_2.png' % epoch))
print_grid(dec_outputs[-1], os.path.join(OUTDIR, '%04i_dec_outputs.png' % epoch))
print_grid(dec_outputs_2[-1], os.path.join(OUTDIR, '%04i_dec_outputs_2.png' % epoch))
print_grid(diff_ae, os.path.join(OUTDIR, '%04i_diff_ae.png' % epoch))
print_grid(diff_ae_teacher, os.path.join(OUTDIR, '%04i_diff_ae_teacher.png' % epoch))
print 'saved images to ', OUTDIR
else:
jacobian_cost = Variable(torch.FloatTensor([0]))
y_cost = Variable(torch.FloatTensor([0]))
if params.lambda_flipped > 0 or params.lambda_latent_match >0:
enc_outputs_3 = self.ae.encode(dec_outputs_2[-1])
z_all_3 = enc_outputs_3[-1]
z_latent_3 = z_all_3[:,:-params.n_attr,:,:].contiguous()
y_pred_3 = z_all_3[:,-params.n_attr:,:,:]
y_pred_3 = torch.mean(y_pred_3.contiguous().view(bs, params.n_attr, -1), dim=2)
flipped_labels = torch.sign(flipped)
if 1:
penalties_3 = torch.clamp(1-y_pred_3*flipped_labels, min=0)
flipped_labels_prediction_cost = torch.sum(penalties_3)/bs
else:
flipped_labels_prediction_cost = torch.sqrt(((y_pred_3 - flipped_labels) ** 2).sum())/bs
latent_code_match_cost = torch.sqrt(((z_latent_1 - z_latent_3) ** 2).sum())/bs
else:
flipped_labels_prediction_cost = Variable(torch.FloatTensor([0]))
latent_code_match_cost = Variable(torch.FloatTensor([0]))
flipped_labels_prediction_loss = flipped_labels_prediction_cost * params.lambda_flipped
latent_code_match_loss = latent_code_match_cost * params.lambda_latent_match
jacobian_loss = jacobian_cost * params.lambda_jacobian
y_loss = y_cost * params.lambda_y
self.stats['flipped_labels_prediction_costs'].append(flipped_labels_prediction_cost.data[0])
self.stats['latent_code_match_costs'].append(latent_code_match_cost.data[0])
self.stats['jacobian_costs'].append(jacobian_cost.data[0])
self.stats['y_costs'].append(y_cost.data[0])
if (loss != loss).data.any():
logger.error("NaN detected")
exit()
if params.lambda_flipped > 0 or params.lambda_latent_match > 0:
loss = 0
loss += flipped_labels_prediction_loss
loss += latent_code_match_loss
loss += jacobian_loss
loss += y_loss
|
MIT License
|
demisto/demisto-sdk
|
demisto_sdk/commands/update_release_notes/update_rn_manager.py
|
UpdateReleaseNotesManager.get_existing_rn
|
python
|
def get_existing_rn(self, pack) -> Optional[str]:
if pack not in self.packs_existing_rn:
return ''
if self.update_type is None:
return self.packs_existing_rn[pack]
else:
return None
|
Gets the existing rn of the pack if it exists.
:param
pack: The pack to check
:return
The existing rn if it exists, otherwise an empty string;
None (treated as an error) when the pack already has an rn and an update type was given
|
https://github.com/demisto/demisto-sdk/blob/8d8767c2dfec77b67c35f4e1022e30ed2893e864/demisto_sdk/commands/update_release_notes/update_rn_manager.py#L233-L248
|
import os
from typing import Optional, Tuple
import git
from demisto_sdk.commands.common.constants import (
API_MODULES_PACK, SKIP_RELEASE_NOTES_FOR_TYPES)
from demisto_sdk.commands.common.tools import (LOG_COLORS,
filter_files_by_type,
filter_files_on_pack,
get_pack_name,
get_pack_names_from_files,
pack_name_to_path, print_color,
print_warning, suppress_stdout)
from demisto_sdk.commands.update_release_notes.update_rn import (
UpdateRN, update_api_modules_dependents_rn)
from demisto_sdk.commands.validate.validate_manager import ValidateManager
class UpdateReleaseNotesManager:
def __init__(self, user_input: Optional[str] = None, update_type: Optional[str] = None,
pre_release: bool = False, is_all: Optional[bool] = False, text: Optional[str] = None,
specific_version: Optional[str] = None, id_set_path: Optional[str] = None,
prev_ver: Optional[str] = None, is_force: bool = False, is_bc: bool = False):
self.given_pack = user_input
self.changed_packs_from_git: set = set()
self.update_type = update_type
self.pre_release: bool = False if pre_release is None else pre_release
self.is_all = True if not self.given_pack else is_all
self.text: str = '' if text is None else text
self.is_force = is_force
self.specific_version = specific_version
self.id_set_path = id_set_path
self.prev_ver = prev_ver
self.packs_existing_rn: dict = {}
self.total_updated_packs: set = set()
if self.given_pack and self.is_all:
raise ValueError('Please remove the -g flag when specifying only one pack.')
self.rn_path: list = list()
self.is_bc = is_bc
def manage_rn_update(self):
print_color('Starting to update release notes.', LOG_COLORS.NATIVE)
if self.given_pack and '/' in self.given_pack:
self.given_pack = get_pack_name(self.given_pack)
modified_files, added_files, old_format_files = self.get_git_changed_files()
self.changed_packs_from_git = get_pack_names_from_files(modified_files).union(
get_pack_names_from_files(added_files)).union(get_pack_names_from_files(old_format_files))
self.check_existing_rn(added_files)
self.handle_api_module_change(modified_files, added_files)
self.create_release_notes(modified_files, added_files, old_format_files)
if len(self.total_updated_packs) > 1:
print_color('\nSuccessfully updated the following packs:\n' + '\n'.join(self.total_updated_packs),
LOG_COLORS.GREEN)
def filter_to_relevant_files(self, file_set: set, validate_manager: ValidateManager) -> Tuple[set, set]:
filtered_set = set()
if self.given_pack:
for file in file_set:
if isinstance(file, tuple):
file_path = str(file[1])
else:
file_path = str(file)
file_pack_name = get_pack_name(file_path)
if not file_pack_name or file_pack_name not in self.given_pack:
continue
filtered_set.add(file)
return validate_manager.filter_to_relevant_files(filtered_set)
def filter_files_from_git(self, modified_files: set, added_files: set, renamed_files: set,
validate_manager: ValidateManager):
filtered_modified, old_format_files = self.filter_to_relevant_files(modified_files, validate_manager)
filtered_renamed, _ = self.filter_to_relevant_files(renamed_files, validate_manager)
filtered_modified = filtered_modified.union(filtered_renamed)
filtered_added, new_files_in_old_format = self.filter_to_relevant_files(added_files, validate_manager)
old_format_files = old_format_files.union(new_files_in_old_format)
return filtered_modified, filtered_added, old_format_files
def setup_validate_manager(self):
return ValidateManager(skip_pack_rn_validation=True, prev_ver=self.prev_ver,
silence_init_prints=True, skip_conf_json=True, check_is_unskipped=False,
file_path=self.given_pack)
def get_git_changed_files(self) -> Tuple[set, set, set]:
try:
validate_manager = self.setup_validate_manager()
if not validate_manager.git_util:
raise git.InvalidGitRepositoryError('unable to connect to git.')
validate_manager.setup_git_params()
if self.given_pack:
with suppress_stdout():
modified_files, added_files, renamed_files = validate_manager.get_unfiltered_changed_files_from_git()
return self.filter_files_from_git(modified_files, added_files, renamed_files, validate_manager)
modified_files, added_files, renamed_files = validate_manager.get_unfiltered_changed_files_from_git()
return self.filter_files_from_git(modified_files, added_files, renamed_files, validate_manager)
except (git.InvalidGitRepositoryError, git.NoSuchPathError, FileNotFoundError) as e:
raise FileNotFoundError(
"You are not running `demisto-sdk update-release-notes` command in the content repository.\n"
"Please run `cd content` from your terminal and run the command again") from e
def check_existing_rn(self, added_files: set):
for file_path in added_files:
if 'ReleaseNotes' in file_path:
self.packs_existing_rn[get_pack_name(file_path)] = file_path
def handle_api_module_change(self, modified_files: set, added_files: set):
api_module_was_given = self.given_pack and API_MODULES_PACK in self.given_pack
api_module_changed_in_git = self.changed_packs_from_git and API_MODULES_PACK in self.changed_packs_from_git
if api_module_was_given or (api_module_changed_in_git and self.is_all):
updated_packs = update_api_modules_dependents_rn(self.pre_release, self.update_type, added_files,
modified_files, self.id_set_path, self.text)
self.total_updated_packs = self.total_updated_packs.union(updated_packs)
def create_release_notes(self, modified_files: set, added_files: set, old_format_files: set):
filtered_modified_files = filter_files_by_type(modified_files, skip_file_types=SKIP_RELEASE_NOTES_FOR_TYPES)
filtered_added_files = filter_files_by_type(added_files, skip_file_types=SKIP_RELEASE_NOTES_FOR_TYPES)
if self.given_pack:
self.create_pack_release_notes(self.given_pack, filtered_modified_files, filtered_added_files,
old_format_files)
elif self.changed_packs_from_git:
for pack in self.changed_packs_from_git:
if 'APIModules' in pack:
continue
self.create_pack_release_notes(pack, filtered_modified_files, filtered_added_files, old_format_files)
else:
print_warning('No changes that require release notes were detected. If such changes were made, '
'please commit the changes and rerun the command.')
def create_pack_release_notes(self, pack: str, filtered_modified_files: set, filtered_added_files: set,
old_format_files: set):
existing_rn_version = self.get_existing_rn(pack)
if existing_rn_version is None:
raise RuntimeError(f"New release notes file already found for {pack}. "
f"Please update manually or run `demisto-sdk update-release-notes "
f"-i {pack}` without specifying the update_type.")
pack_modified = filter_files_on_pack(pack, filtered_modified_files)
pack_added = filter_files_on_pack(pack, filtered_added_files)
pack_old = filter_files_on_pack(pack, old_format_files)
if pack_modified or pack_added or pack_old or self.is_force:
pack_path = pack_name_to_path(pack)
update_pack_rn = UpdateRN(pack_path=pack_path, update_type=self.update_type,
modified_files_in_pack=pack_modified.union(pack_old),
pre_release=self.pre_release,
added_files=pack_added, specific_version=self.specific_version,
text=self.text, is_force=self.is_force,
existing_rn_version_path=existing_rn_version, is_bc=self.is_bc)
updated = update_pack_rn.execute_update()
self.rn_path.append(update_pack_rn.rn_path)
if updated:
self.total_updated_packs.add(pack)
if update_pack_rn.should_delete_existing_rn:
os.unlink(self.packs_existing_rn[pack])
else:
print_warning(f'Either no changes were found in {pack} pack '
f'or the changes found should not be documented in the release notes file.\n'
f'If relevant changes were made, please commit the changes and rerun the command.')
|
MIT License
|
magenta/magenta
|
magenta/models/score2perf/score2perf_hparams.py
|
t_rel_len2048_h384_att512_fs1024_n8_dropout10
|
python
|
def t_rel_len2048_h384_att512_fs1024_n8_dropout10():
hparams = transformer.transformer_base()
update_transformer_hparams_for_music(hparams)
update_truncate_length(hparams, 2048)
update_small(hparams)
hparams.self_attention_type = "dot_product_relative_v2"
hparams.num_hidden_layers = 8
return hparams
|
Hparams for LM with relative attention.
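Added illustration (not from the original file): the helpers above layer onto the base hparams; a rough sketch of the key values this configuration ends up with, using a plain dict as a stand-in for the tensor2tensor HParams object.
hp = {}                                    # stand-in for transformer.transformer_base()
hp.update(layer_prepostprocess_dropout=0.1, attention_dropout=0.1,
          relu_dropout=0.1, batch_size=2048)            # update_transformer_hparams_for_music
length = 2048                                           # update_truncate_length
hp.update(max_target_seq_length=length, min_length=length,
          max_relative_position=int(length / 2.0))      # -> 1024
hp.update(hidden_size=384, attention_key_channels=512, filter_size=1024)  # update_small
hp.update(self_attention_type='dot_product_relative_v2', num_hidden_layers=8)
print(hp['max_relative_position'], hp['hidden_size'], hp['num_hidden_layers'])  # 1024 384 8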
|
https://github.com/magenta/magenta/blob/be6558f1a06984faff6d6949234f5fe9ad0ffdb5/magenta/models/score2perf/score2perf_hparams.py#L161-L169
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.models import transformer
from tensor2tensor.utils import registry
def update_transformer_hparams_for_music(hparams):
hparams.shared_embedding_and_softmax_weights = False
hparams.symbol_modality_num_shards = 1
hparams.label_smoothing = 0.0
hparams.layer_prepostprocess_dropout = 0.1
hparams.attention_dropout = 0.1
hparams.relu_dropout = 0.1
hparams.max_length = 0
hparams.batch_size = 2048
hparams.sampling_method = "random"
hparams.summarize_vars = True
hparams.add_hparam("block_length", 512)
hparams.add_hparam("decode_output_dir", "/tmp/")
hparams.add_hparam("aggregation", "sum")
def update_truncate_length(hparams, length):
hparams.max_target_seq_length = length
hparams.min_length = length
hparams.max_relative_position = int(length / 2.0)
def update_dropout(hparams, dropout):
hparams.layer_prepostprocess_dropout = dropout
hparams.attention_dropout = dropout
hparams.relu_dropout = dropout
def update_tiny(hparams):
hparams.hidden_size = 256
hparams.attention_key_channels = 512
hparams.filter_size = 2048
hparams.batch_size = 9000
hparams.learning_rate = 0.1
return hparams
def update_small(hparams):
hparams.hidden_size = 384
hparams.attention_key_channels = 512
hparams.filter_size = 1024
def update_small_lr(hparams):
hparams.learning_rate = 0.1
hparams.hidden_size = 384
hparams.attention_key_channels = 512
hparams.filter_size = 1024
def update_medium(hparams):
hparams.learning_rate = 0.1
hparams.hidden_size = 512
hparams.attention_key_channels = 512
hparams.filter_size = 1024
@registry.register_hparams
def t_rel_len2048_h384_att512_fs1024_n8_dropout20():
hparams = transformer.transformer_base()
update_transformer_hparams_for_music(hparams)
update_truncate_length(hparams, 2048)
update_small(hparams)
update_dropout(hparams, 0.20)
hparams.self_attention_type = "dot_product_relative_v2"
hparams.num_hidden_layers = 8
return hparams
@registry.register_hparams
def t_len2048_h384_att512_fs1024_n8_dropout20():
hparams = transformer.transformer_base()
update_transformer_hparams_for_music(hparams)
update_truncate_length(hparams, 2048)
update_small(hparams)
update_dropout(hparams, 0.20)
hparams.num_hidden_layers = 8
return hparams
@registry.register_hparams
def t_rel_len2048_h512_att512_fs1024_n6_dropout10():
hparams = transformer.transformer_base()
update_transformer_hparams_for_music(hparams)
update_truncate_length(hparams, 2048)
update_medium(hparams)
hparams.self_attention_type = "dot_product_relative_v2"
hparams.num_hidden_layers = 6
return hparams
@registry.register_hparams
def t_rel_len2048_h384_att512_fs1024_n6_dropout10():
hparams = transformer.transformer_base()
update_transformer_hparams_for_music(hparams)
update_truncate_length(hparams, 2048)
update_small_lr(hparams)
hparams.self_attention_type = "dot_product_relative_v2"
hparams.num_hidden_layers = 6
return hparams
@registry.register_hparams
def t_rel_len2048_h512_att512_fs1024_n8_dropout10():
hparams = transformer.transformer_base()
update_transformer_hparams_for_music(hparams)
update_truncate_length(hparams, 2048)
update_medium(hparams)
hparams.self_attention_type = "dot_product_relative_v2"
hparams.num_hidden_layers = 8
return hparams
@registry.register_hparams
|
Apache License 2.0
|
slavaganzin/ramda.py
|
ramda/reduced.py
|
reduced
|
python
|
def reduced(value):
return Reduced(value)
|
Returns a value wrapped to indicate that it is the final value of the reduce
and transduce functions. The returned value should be considered a black
box: the internal structure is not guaranteed to be stable.
Note: this optimization is unavailable to functions not explicitly listed
above (i.e., reduce and transduce). For instance, it is not currently supported by reduceRight.
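Added illustration (not from the original module, which defines only reduced and the Reduced wrapper): a hypothetical reduce loop showing how a step function can use reduced to stop early; the library's real reduce may differ in detail.
def _reduce_sketch(fn, acc, xs):
    # stop as soon as the step function wraps its result in Reduced
    for x in xs:
        acc = fn(acc, x)
        if isinstance(acc, Reduced):
            return acc.unwrap()
    return acc

# sum until the running total reaches at least 10, then bail out early
print(_reduce_sketch(lambda acc, x: reduced(acc) if acc >= 10 else acc + x, 0, range(100)))  # 10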
|
https://github.com/slavaganzin/ramda.py/blob/7d4cc8a3253d7f14a6d53b75550863ee11e306e0/ramda/reduced.py#L12-L19
|
import collections
class Reduced:
def __init__(self, value):
self.value = value
def unwrap(self):
return self.value
|
MIT License
|
soad241/django-generic-images
|
generic_images/models.py
|
AbstractAttachedImage.get_order_in_album
|
python
|
def get_order_in_album(self, reversed_ordering=True):
lookup = 'order__gt' if reversed_ordering else 'order__lt'
return self.__class__.objects.for_model(self.content_object, self.content_type).filter(**{lookup: self.order}).count() + 1
|
Returns the image's order number within its album. It is calculated as (number + 1) of images
attached to the same content_object whose order is greater
(if 'reversed_ordering' is True) or lesser (if 'reversed_ordering' is
False) than this image's order.
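Added illustration (not from the original app): the counting rule with hypothetical order values, independent of Django.
album_orders = [30, 20, 10]   # hypothetical 'order' values of images in one album

def order_in_album(own_order, reversed_ordering=True):
    # count of sibling images with a greater (or lesser) order, plus one
    if reversed_ordering:
        return sum(1 for o in album_orders if o > own_order) + 1
    return sum(1 for o in album_orders if o < own_order) + 1

print(order_in_album(30))   # 1 -- the highest order comes first
print(order_in_album(10))   # 3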
|
https://github.com/soad241/django-generic-images/blob/3e44a4f5e11d57931d455f10f1d5523061ea08c6/generic_images/models.py#L141-L150
|
import os, time
import random
from django.db import models
from django.contrib.auth.models import User
from django.core.files.storage import default_storage
from django.db.models import Max
from django.utils.translation import ugettext_lazy as _
from generic_images.signals import image_saved, image_deleted
from generic_images.managers import AttachedImageManager
from generic_utils.models import GenericModelBase
from django.conf import settings
from athumb.fields import ImageWithThumbsField
from athumb.backends.s3boto import S3BotoStorage_AllPublic
PUBLIC_MEDIA_BUCKET = S3BotoStorage_AllPublic(settings.AWS_STORAGE_BUCKET_NAME)
class BaseImageModel(models.Model):
def get_upload_path(self, filename):
raise NotImplementedError
def _upload_path_wrapper(self, filename):
return self.get_upload_path(filename)
image = ImageWithThumbsField(
_('Image'),
thumbnail_format='jpeg',
upload_to=_upload_path_wrapper,
storage=PUBLIC_MEDIA_BUCKET,
thumbs=(
('100x100', {'size': (100, 100), 'crop':False, 'upscale': False }),
('300x300', {'size': (300, 300), 'crop':False, 'upscale': False }),
('480x1500', {'size': (480, 1500), 'crop':False, 'upscale': False }),
('300x100', {'size': (300, 100), 'crop':False, 'upscale': False}),
('130x100', {'size': (130, 100), 'crop': False, 'upscale': False}),
('130x100_crop', {'size': (130, 100), 'crop': 'center',
'upscale': False}),
))
class Meta:
abstract = True
class ReplaceOldImageModel(BaseImageModel):
def _replace_old_image(self):
try:
old_obj = self.__class__.objects.get(pk=self.pk)
if old_obj.image.name != self.image.name:
path = old_obj.image.name
default_storage.delete(path)
except self.__class__.DoesNotExist:
pass
def save(self, *args, **kwargs):
if self.pk:
self._replace_old_image()
super(ReplaceOldImageModel, self).save(*args, **kwargs)
class Meta:
abstract = True
class AbstractAttachedImage(ReplaceOldImageModel, GenericModelBase):
user = models.ForeignKey(User, blank=True, null=True,
verbose_name=_('User'))
caption = models.TextField(_('Caption'), null=True, blank=True)
is_main = models.BooleanField(_('Main image'), default=False)
order = models.IntegerField(_('Order'), default=0)
objects = AttachedImageManager()
def next(self):
try:
return self.__class__.objects.for_model(self.content_object,
self.content_type).filter(order__lt=self.order).order_by('-order')[0]
except IndexError:
return None
def previous(self):
try:
return self.__class__.objects.for_model(self.content_object,
self.content_type).filter(order__gt=self.order).order_by('order')[0]
except IndexError:
return None
|
MIT License
|
watson-developer-cloud/python-sdk
|
ibm_watson/assistant_v2.py
|
BulkClassifyResponse.__str__
|
python
|
def __str__(self) -> str:
return json.dumps(self.to_dict(), indent=2)
|
Return a `str` version of this BulkClassifyResponse object.
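Added note (not in the SDK file): with the package installed, even an empty response serializes to a small indented JSON document, e.g.
resp = BulkClassifyResponse.from_dict({'output': []})
print(str(resp))
# {
#   "output": []
# }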
|
https://github.com/watson-developer-cloud/python-sdk/blob/f9a32d46d5ae31d1e43c9530e829248a3b9e0219/ibm_watson/assistant_v2.py#L716-L718
|
from enum import Enum
from typing import Dict, List
import json
import sys
from ibm_cloud_sdk_core import BaseService, DetailedResponse
from ibm_cloud_sdk_core.authenticators.authenticator import Authenticator
from ibm_cloud_sdk_core.get_authenticator import get_authenticator_from_environment
from ibm_cloud_sdk_core.utils import convert_model
from .common import get_sdk_headers
class AssistantV2(BaseService):
DEFAULT_SERVICE_URL = 'https://api.us-south.assistant.watson.cloud.ibm.com'
DEFAULT_SERVICE_NAME = 'assistant'
def __init__(
self,
version: str,
authenticator: Authenticator = None,
service_name: str = DEFAULT_SERVICE_NAME,
) -> None:
if version is None:
raise ValueError('version must be provided')
if not authenticator:
authenticator = get_authenticator_from_environment(service_name)
BaseService.__init__(self,
service_url=self.DEFAULT_SERVICE_URL,
authenticator=authenticator)
self.version = version
self.configure_service(service_name)
def create_session(self, assistant_id: str, **kwargs) -> DetailedResponse:
if assistant_id is None:
raise ValueError('assistant_id must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V2',
operation_id='create_session')
headers.update(sdk_headers)
params = {'version': self.version}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
path_param_keys = ['assistant_id']
path_param_values = self.encode_path_vars(assistant_id)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/v2/assistants/{assistant_id}/sessions'.format(**path_param_dict)
request = self.prepare_request(method='POST',
url=url,
headers=headers,
params=params)
response = self.send(request, **kwargs)
return response
def delete_session(self, assistant_id: str, session_id: str,
**kwargs) -> DetailedResponse:
if assistant_id is None:
raise ValueError('assistant_id must be provided')
if session_id is None:
raise ValueError('session_id must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V2',
operation_id='delete_session')
headers.update(sdk_headers)
params = {'version': self.version}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
path_param_keys = ['assistant_id', 'session_id']
path_param_values = self.encode_path_vars(assistant_id, session_id)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/v2/assistants/{assistant_id}/sessions/{session_id}'.format(
**path_param_dict)
request = self.prepare_request(method='DELETE',
url=url,
headers=headers,
params=params)
response = self.send(request, **kwargs)
return response
def message(self,
assistant_id: str,
session_id: str,
*,
input: 'MessageInput' = None,
context: 'MessageContext' = None,
user_id: str = None,
**kwargs) -> DetailedResponse:
if assistant_id is None:
raise ValueError('assistant_id must be provided')
if session_id is None:
raise ValueError('session_id must be provided')
if input is not None:
input = convert_model(input)
if context is not None:
context = convert_model(context)
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V2',
operation_id='message')
headers.update(sdk_headers)
params = {'version': self.version}
data = {'input': input, 'context': context, 'user_id': user_id}
data = {k: v for (k, v) in data.items() if v is not None}
data = json.dumps(data)
headers['content-type'] = 'application/json'
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
path_param_keys = ['assistant_id', 'session_id']
path_param_values = self.encode_path_vars(assistant_id, session_id)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/v2/assistants/{assistant_id}/sessions/{session_id}/message'.format(
**path_param_dict)
request = self.prepare_request(method='POST',
url=url,
headers=headers,
params=params,
data=data)
response = self.send(request, **kwargs)
return response
def message_stateless(self,
assistant_id: str,
*,
input: 'MessageInputStateless' = None,
context: 'MessageContextStateless' = None,
user_id: str = None,
**kwargs) -> DetailedResponse:
if assistant_id is None:
raise ValueError('assistant_id must be provided')
if input is not None:
input = convert_model(input)
if context is not None:
context = convert_model(context)
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V2',
operation_id='message_stateless')
headers.update(sdk_headers)
params = {'version': self.version}
data = {'input': input, 'context': context, 'user_id': user_id}
data = {k: v for (k, v) in data.items() if v is not None}
data = json.dumps(data)
headers['content-type'] = 'application/json'
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
path_param_keys = ['assistant_id']
path_param_values = self.encode_path_vars(assistant_id)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/v2/assistants/{assistant_id}/message'.format(**path_param_dict)
request = self.prepare_request(method='POST',
url=url,
headers=headers,
params=params,
data=data)
response = self.send(request, **kwargs)
return response
def bulk_classify(self,
skill_id: str,
*,
input: List['BulkClassifyUtterance'] = None,
**kwargs) -> DetailedResponse:
if skill_id is None:
raise ValueError('skill_id must be provided')
if input is not None:
input = [convert_model(x) for x in input]
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V2',
operation_id='bulk_classify')
headers.update(sdk_headers)
params = {'version': self.version}
data = {'input': input}
data = {k: v for (k, v) in data.items() if v is not None}
data = json.dumps(data)
headers['content-type'] = 'application/json'
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
path_param_keys = ['skill_id']
path_param_values = self.encode_path_vars(skill_id)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/v2/skills/{skill_id}/workspace/bulk_classify'.format(
**path_param_dict)
request = self.prepare_request(method='POST',
url=url,
headers=headers,
params=params,
data=data)
response = self.send(request, **kwargs)
return response
def list_logs(self,
assistant_id: str,
*,
sort: str = None,
filter: str = None,
page_limit: int = None,
cursor: str = None,
**kwargs) -> DetailedResponse:
if assistant_id is None:
raise ValueError('assistant_id must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V2',
operation_id='list_logs')
headers.update(sdk_headers)
params = {
'version': self.version,
'sort': sort,
'filter': filter,
'page_limit': page_limit,
'cursor': cursor
}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
path_param_keys = ['assistant_id']
path_param_values = self.encode_path_vars(assistant_id)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/v2/assistants/{assistant_id}/logs'.format(**path_param_dict)
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request, **kwargs)
return response
def delete_user_data(self, customer_id: str, **kwargs) -> DetailedResponse:
if customer_id is None:
raise ValueError('customer_id must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V2',
operation_id='delete_user_data')
headers.update(sdk_headers)
params = {'version': self.version, 'customer_id': customer_id}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
url = '/v2/user_data'
request = self.prepare_request(method='DELETE',
url=url,
headers=headers,
params=params)
response = self.send(request, **kwargs)
return response
class AgentAvailabilityMessage():
def __init__(self, *, message: str = None) -> None:
self.message = message
@classmethod
def from_dict(cls, _dict: Dict) -> 'AgentAvailabilityMessage':
args = {}
if 'message' in _dict:
args['message'] = _dict.get('message')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
_dict = {}
if hasattr(self, 'message') and self.message is not None:
_dict['message'] = self.message
return _dict
def _to_dict(self):
return self.to_dict()
def __str__(self) -> str:
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'AgentAvailabilityMessage') -> bool:
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'AgentAvailabilityMessage') -> bool:
return not self == other
class BulkClassifyOutput():
def __init__(self,
*,
input: 'BulkClassifyUtterance' = None,
entities: List['RuntimeEntity'] = None,
intents: List['RuntimeIntent'] = None) -> None:
self.input = input
self.entities = entities
self.intents = intents
@classmethod
def from_dict(cls, _dict: Dict) -> 'BulkClassifyOutput':
args = {}
if 'input' in _dict:
args['input'] = BulkClassifyUtterance.from_dict(_dict.get('input'))
if 'entities' in _dict:
args['entities'] = [
RuntimeEntity.from_dict(x) for x in _dict.get('entities')
]
if 'intents' in _dict:
args['intents'] = [
RuntimeIntent.from_dict(x) for x in _dict.get('intents')
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
_dict = {}
if hasattr(self, 'input') and self.input is not None:
_dict['input'] = self.input.to_dict()
if hasattr(self, 'entities') and self.entities is not None:
_dict['entities'] = [x.to_dict() for x in self.entities]
if hasattr(self, 'intents') and self.intents is not None:
_dict['intents'] = [x.to_dict() for x in self.intents]
return _dict
def _to_dict(self):
return self.to_dict()
def __str__(self) -> str:
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'BulkClassifyOutput') -> bool:
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'BulkClassifyOutput') -> bool:
return not self == other
class BulkClassifyResponse():
def __init__(self, *, output: List['BulkClassifyOutput'] = None) -> None:
self.output = output
@classmethod
def from_dict(cls, _dict: Dict) -> 'BulkClassifyResponse':
args = {}
if 'output' in _dict:
args['output'] = [
BulkClassifyOutput.from_dict(x) for x in _dict.get('output')
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
_dict = {}
if hasattr(self, 'output') and self.output is not None:
_dict['output'] = [x.to_dict() for x in self.output]
return _dict
def _to_dict(self):
return self.to_dict()
|
Apache License 2.0
|
hyperledger/education-sawtooth-simple-supply
|
subscriber/simple_supply_subscriber/subscriber.py
|
Subscriber.clear_handlers
|
python
|
def clear_handlers(self):
self._event_handlers = []
|
Clears any delta handlers.
|
https://github.com/hyperledger/education-sawtooth-simple-supply/blob/335d4c8a5b6b35cac1189caabd022475943e830f/subscriber/simple_supply_subscriber/subscriber.py#L55-L58
|
import logging
from sawtooth_sdk.protobuf.client_event_pb2 import ClientEventsSubscribeRequest
from sawtooth_sdk.protobuf.client_event_pb2 import ClientEventsSubscribeResponse
from sawtooth_sdk.protobuf.client_event_pb2 import ClientEventsUnsubscribeRequest
from sawtooth_sdk.protobuf.client_event_pb2 import ClientEventsUnsubscribeResponse
from sawtooth_sdk.protobuf.events_pb2 import EventList
from sawtooth_sdk.protobuf.events_pb2 import EventSubscription
from sawtooth_sdk.protobuf.events_pb2 import EventFilter
from sawtooth_sdk.protobuf.validator_pb2 import Message
from sawtooth_sdk.messaging.stream import Stream
from simple_supply_addressing.addresser import NAMESPACE
LOGGER = logging.getLogger(__name__)
NULL_BLOCK_ID = '0000000000000000'
class Subscriber(object):
def __init__(self, validator_url):
LOGGER.info('Connecting to validator: %s', validator_url)
self._stream = Stream(validator_url)
self._event_handlers = []
self._is_active = False
def add_handler(self, handler):
self._event_handlers.append(handler)
|
Apache License 2.0
|
napalm-automation/napalm-yang
|
napalm_yang/models/openconfig/interfaces/interface/subinterfaces/subinterface/ipv4/neighbors/neighbor/state/__init__.py
|
state._set_ip
|
python
|
def _set_ip(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
restriction_dict={"pattern": "[0-9\\.]*"},
),
is_leaf=True,
yang_name="ip",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="inet:ipv4-address-no-zone",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """ip must be of a type compatible with inet:ipv4-address-no-zone""",
"defined-type": "inet:ipv4-address-no-zone",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9\\.]*'}), is_leaf=True, yang_name="ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='inet:ipv4-address-no-zone', is_config=False)""",
}
)
self.__ip = t
if hasattr(self, "_set"):
self._set()
|
Setter method for ip, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/neighbors/neighbor/state/ip (inet:ipv4-address-no-zone)
If this variable is read-only (config: false) in the
source YANG file, then _set_ip is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ip() directly.
YANG Description: The IPv4 address of the neighbor node.
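Added illustration (not from the generated module): the dotted-quad part of the YANG pattern above, checked by hand with Python's re; the optional %zone suffix is dropped because re has no \p{...} classes.
import re
octet = r"([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])"
ipv4_no_zone = re.compile(r"(" + octet + r"\.){3}" + octet + r"$")
print(bool(ipv4_no_zone.match("192.0.2.1")))    # True
print(bool(ipv4_no_zone.match("192.0.2.256")))  # False -- _set_ip would raise ValueError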
|
https://github.com/napalm-automation/napalm-yang/blob/9148e015b086ebe311c07deb92e168ea36fd7771/napalm_yang/models/openconfig/interfaces/interface/subinterfaces/subinterface/ipv4/neighbors/neighbor/state/__init__.py#L147-L193
|
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
__slots__ = (
"_path_helper", "_extmethods", "__ip", "__link_layer_address", "__origin"
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__ip = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
restriction_dict={"pattern": "[0-9\\.]*"},
),
is_leaf=True,
yang_name="ip",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="inet:ipv4-address-no-zone",
is_config=False,
)
self.__link_layer_address = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_dict={"pattern": "([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?"},
),
is_leaf=True,
yang_name="link-layer-address",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="yang:phys-address",
is_config=False,
)
self.__origin = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"OTHER": {}, "STATIC": {}, "DYNAMIC": {}},
),
is_leaf=True,
yang_name="origin",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="neighbor-origin",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"interfaces",
"interface",
"subinterfaces",
"subinterface",
"ipv4",
"neighbors",
"neighbor",
"state",
]
def _get_ip(self):
return self.__ip
|
Apache License 2.0
|
stanford-mast/nn_dataflow
|
nn_dataflow/core/loop_blocking_scheme.py
|
LoopBlockingScheme._bl_tp
|
python
|
def _bl_tp(self, bl_lvls):
assert isinstance(bl_lvls, slice)
return [util.prod(ts[bl_lvls]) for ts in zip(*self.bl_ts)]
|
Get the products of the loop blocking factors for the given levels
`bl_lvls`.
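Added illustration (not from the original file): the per-loop products for hypothetical blocking factors, three levels by three loops.
import operator
from functools import reduce
bl_ts = [(2, 1, 4), (1, 3, 1), (2, 1, 1)]    # hypothetical factors: levels x loops
prod = lambda xs: reduce(operator.mul, xs, 1)

def bl_tp(bl_lvls):
    # product of each loop's factors over the selected levels
    return [prod(ts[bl_lvls]) for ts in zip(*bl_ts)]

print(bl_tp(slice(None)))      # [4, 3, 4] -- total blocking per loop
print(bl_tp(slice(1, None)))   # [2, 3, 1] -- levels below GBUF only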
|
https://github.com/stanford-mast/nn_dataflow/blob/198a5274b9529125c6aa2b8b72b365d60cf83778/nn_dataflow/core/loop_blocking_scheme.py#L530-L536
|
import itertools
import math
from . import data_category_enum as de
from . import loop_enum as le
from . import mem_hier_enum as me
from .node_region import NodeRegion
from .. import util
class LoopBlockingScheme():
class BL():
GBUF = 0
REGF = 1
NUM = 2
def __init__(self, nested_loop_desc, bl_ts, bl_ords, resource, bufshr,
options):
BL = self.BL
self.nld = nested_loop_desc
self.total_access_gbuf = [self.nld.total_access_at_of(me.GBUF, dce)
for dce in range(de.NUM)]
assert len(bl_ts) == BL.NUM + 1, 'LoopBlockingScheme: bl_ts has invalid length.'
assert all(len(bl_t) == le.NUM for bl_t in bl_ts), 'LoopBlockingScheme: bl_ts elements have invalid length.'
assert len(bl_ords) == BL.NUM, 'LoopBlockingScheme: bl_ords has invalid length.'
assert all(tuple(sorted(bl_ord)) == tuple(range(le.NUM)) for bl_ord in bl_ords), 'LoopBlockingScheme: bl_ords elements are invalid.'
self.bl_ts = [tuple(bl_t) for bl_t in bl_ts]
self.bl_ords = [tuple(bl_ord) for bl_ord in bl_ords]
bl_tp = self._bl_tp(slice(None))
for lpe in range(le.NUM):
assert bl_tp[lpe] >= self.nld.loopcnt[lpe], 'LoopBlockingScheme: invalid blocking LP {}: {} for {}.'.format(lpe, self.bl_ts, self.nld.loopcnt)
self.lcnt = util.prod(bl_tp)
self.time = float('inf')
self._init_bufshr(bufshr, options)
self.unit_size = [tuple() for _ in range(BL.NUM)]
self.unit_size[BL.GBUF] = self.nld.usize_gbuf
self.unit_size[BL.REGF] = self.nld.usize_regf
self._set_unit_cnt()
self.stored_in_gbuf = [not options.sw_gbuf_bypass[dce]
for dce in range(de.NUM)]
if self.data_size(BL.REGF) > resource.size_regf or self.data_size(BL.GBUF) > resource.size_gbuf:
self.valid = False
return
self.valid = True
self._set_fetch()
self.src_is_dram = (resource.src_data_region.type == NodeRegion.DRAM)
self.dst_is_dram = (resource.dst_data_region.type == NodeRegion.DRAM)
self.filter_pinned = False
if not self.src_is_dram:
if self.fetch[BL.GBUF][de.IFM] > 1:
self.valid = False
return
if resource.src_data_region == resource.proc_region:
self.stored_in_gbuf[de.IFM] = True
if not self.dst_is_dram:
if self.fetch[BL.GBUF][de.OFM] > 1:
self.valid = False
return
if resource.dst_data_region == resource.proc_region:
self.stored_in_gbuf[de.OFM] = True
for dce in range(de.NUM):
if self.stored_in_gbuf[dce]:
continue
assert options.sw_gbuf_bypass[dce]
if self.fetch[BL.GBUF][dce] < self.fetch[BL.REGF][dce]:
self.stored_in_gbuf[dce] = True
if self.data_size(BL.REGF) > resource.size_regf or self.data_size(BL.GBUF) > resource.size_gbuf:
self.valid = False
return
self.array_bus_width = resource.array_bus_width
self.dram_bandwidth = resource.dram_bandwidth
self.num_nodes = resource.proc_region.dim.size()
self.finalized_stats = False
self.ops = float('nan')
self.time = float('nan')
self.proc_time = float('nan')
self.bus_time = float('nan')
self.dram_time = float('nan')
self.access = [[float('nan')] * de.NUM for _ in range(me.NUM)]
self.noc_access = [0.] * de.NUM
self.bufshr_rotation_access = [0.] * de.NUM
self.bufshr_wide_fetch_access = [0.] * de.NUM
self._set_bufshr(resource, bufshr, options)
self._set_accfwd(bufshr, options)
self.remote_gbuf_access = [0.] * de.NUM
if resource.no_time_mux:
if all(self.bl_ts[0][lpe] == 1 for lpe
in self.nld.data_loops[de.FIL].loops()):
self.filter_pinned = True
self.fetch[0][de.FIL] = 0
def is_valid(self):
return self.valid
def data_size(self, blvl, dce=None):
if dce is None:
return sum(self.data_size(blvl, dce) for dce in range(de.NUM))
size = self.unit_cnt[blvl][dce] * self.unit_size[blvl][dce]
if blvl == self.BL.GBUF:
size *= 1 if self.stored_in_gbuf[dce] else 0
size = util.idivc(size, self.bufshr_subgrp_size[dce])
return size
def get_access(self):
if not self.is_valid():
return [[float('inf')] * de.NUM for _ in range(me.NUM)]
if not self.finalized_stats:
self._calc_stats()
return self.access
def get_top_level_fetch(self):
if not self.is_valid():
return None
if not self.finalized_stats:
self._calc_stats()
return self.fetch[self.BL.GBUF]
def get_noc_access(self):
if not self.is_valid():
return None
if not self.finalized_stats:
self._calc_stats()
return self.noc_access
def get_access_cost(self, cost):
if not self.is_valid():
return float('inf')
if not self.finalized_stats:
self._calc_stats()
acc_cost = sum(c * sum(a) for c, a in zip(cost.mem_hier, self.access))
acc_cost += cost.mem_hier_at(me.GBUF) * sum(self.remote_gbuf_access)
return acc_cost
def gen_index(self):
bl_idxgen_list = []
bl_cnt_list = []
assert self.BL.NUM == 2
bl_gbuf = self.BL.GBUF
bl_regf = self.BL.REGF
t_x = self.bl_ts[bl_gbuf]
order_x = self.bl_ords[bl_gbuf]
cnt_x = self._bl_tp(slice(bl_gbuf + 1, None))
bl_idxgen_list.append(self._gen_index_single_level(t_x, order_x))
bl_cnt_list.append(cnt_x)
t_x = self.bufshr_bs_t
order_x = self.bufshr_bs_ord
cnt_x = [x // b for x, b
in zip(self._bl_tp(slice(bl_gbuf + 1, None)),
self.bufshr_bs_t)]
bl_idxgen_list.append(self._gen_index_single_level(t_x, order_x))
bl_cnt_list.append(cnt_x)
t_x = [x // b for x, b
in zip(self.bl_ts[bl_regf], self.bufshr_bs_t)]
order_x = self.bl_ords[bl_regf]
cnt_x = self._bl_tp(slice(bl_regf + 1, None))
bl_idxgen_list.append(self._gen_index_single_level(t_x, order_x))
bl_cnt_list.append(cnt_x)
t_x = self.bl_ts[2]
order_x = (0, 1, 2)
cnt_x = (1,) * le.NUM
bl_idxgen_list.append(self._gen_index_single_level(t_x, order_x))
bl_cnt_list.append(cnt_x)
num = 0
for bl_idx_list in itertools.product(*bl_idxgen_list):
idx = (0,) * le.NUM
for bl_idx, bl_cnt in zip(bl_idx_list, bl_cnt_list):
idx = tuple(i + bi * bc for i, bi, bc
in zip(idx, bl_idx, bl_cnt))
num += 1
yield idx
assert num == self.lcnt
@classmethod
def ordered_loops(cls, bl_t, bl_ord, lpe_only=False, reverse=False):
ord_lpes = list(sorted([lpe for lpe in range(le.NUM) if bl_t[lpe] > 1],
key=(lambda lpe: bl_ord[lpe]),
reverse=not reverse))
if not lpe_only:
return [(lpe, bl_t[lpe]) for lpe in ord_lpes]
return ord_lpes
def _set_unit_cnt(self):
self.unit_cnt = []
for bl in range(self.BL.NUM):
uc = self._t_data_cnt(self._bl_tp(slice(bl + 1, None)))
self.unit_cnt.append(uc)
def _set_fetch(self):
self.fetch = []
assert self.BL.GBUF < self.BL.REGF
for bl in range(self.BL.NUM):
fe = [0] * de.NUM
bl_t = self.bl_ts[bl]
bl_ord = self.bl_ords[bl]
for dce in range(de.NUM):
inntdim_lp = self._innt_dim_loop(dce, bl_t, bl_ord)
if inntdim_lp is None:
fe[dce] = self.fetch[bl-1][dce] if bl > 0 else 1
continue
f = 1
for lpe in self.nld.data_loops[dce].drop(range(le.NUM)):
bl_start = bl + (bl_ord[lpe] > bl_ord[inntdim_lp])
f *= self._bl_tp(slice(bl_start))[lpe]
fe[dce] = 2 * f - 1 if dce == de.OFM else f
self.fetch.append(fe)
def _calc_stats(self):
self.ops = self.nld.unit_ops * self.lcnt * self.num_nodes
self.proc_time = self.nld.unit_time * self.lcnt
self.access[me.REGF] = [v * self.lcnt * t * self.num_nodes
for v, t in zip(self.nld.unit_access[me.REGF],
[1, 1, 2])]
self.access[me.ITCN] = [self.nld.total_access_at_of(me.ITCN, dce)
* self.fetch[self.BL.REGF][dce]
* self.num_nodes
for dce in range(de.NUM)]
self.access[me.GBUF] = [self.nld.total_access_at_of(me.GBUF, dce)
* self.fetch[self.BL.REGF][dce]
* self.stored_in_gbuf[dce]
* self.num_nodes
for dce in range(de.NUM)]
self.access[me.DRAM] = [(self.nld.total_access_at_of(me.DRAM, dce)
if self.stored_in_gbuf[dce]
else self.nld.total_access_at_of(me.GBUF, dce))
* self.fetch[self.BL.GBUF][dce]
* self.num_nodes
/ self.accfwd_reduction[dce]
for dce in range(de.NUM)]
self.bufshr_rotation_access = self._calc_bufshr_rotation_access(
self.bufshr_rot_fetch)
self.bufshr_wide_fetch_access = self._calc_bufshr_widefetch_access(
self.bufshr_wide_fetch)
self.noc_access = [a1 + a2 for a1, a2
in zip(self.bufshr_rotation_access,
self.bufshr_wide_fetch_access)]
if not self.src_is_dram:
self.remote_gbuf_access[de.IFM] += self.access[me.DRAM][de.IFM]
self.access[me.DRAM][de.IFM] = 0
if not self.dst_is_dram:
self.remote_gbuf_access[de.OFM] += self.access[me.DRAM][de.OFM]
self.access[me.DRAM][de.OFM] = 0
if self.filter_pinned:
assert self.access[me.DRAM][de.FIL] == 0
self.dram_time = int(math.ceil(sum(self.access[me.DRAM])
/ self.dram_bandwidth))
self.bus_time = util.idivc(int(math.ceil(1. * max(self.access[me.GBUF])
/ self.num_nodes)),
self.array_bus_width)
self.time = max(self.proc_time, self.bus_time, self.dram_time)
self.finalized_stats = True
|
BSD 3-Clause New or Revised License
|
google-research/language
|
language/question_answering/b2t2/compute_vcr_features.py
|
create_tf_examples
|
python
|
def create_tf_examples(tokenizer,
example,
image_string,
metadata,
is_test=False):
tokenize_fn = functools.partial(tokenize, tokenizer, example)
q = tokenize_fn(example["question"])
if FLAGS.include_rationales:
if is_test:
answers = [tokenize_fn(a) for a in example["answer_choices"]]
else:
a = example["answer_choices"][example["answer_label"]]
answers = [tokenize_fn(a)]
rationales = [tokenize_fn(r) for r in example["rationale_choices"]]
else:
answers = [tokenize_fn(a) for a in example["answer_choices"]]
rationales = [[]]
y = collections.OrderedDict()
y["image"] = make_bytes_feature([image_string])
annot_id = int(re.match(".*-([0-9]*)", example["annot_id"]).group(1))
img_id = int(re.match(".*-([0-9]*)", example["img_id"]).group(1))
y["annot_id"] = make_int64_feature([annot_id])
y["img_id"] = make_int64_feature([img_id])
for i, a in enumerate(answers):
for j, r in enumerate(rationales):
extra_tokens = []
if FLAGS.append_all_bboxes:
num_appended_bboxes = min(len(metadata["boxes"]), FLAGS.max_num_bboxes)
for idx in range(num_appended_bboxes):
extra_tokens.extend(tokenizer.tokenize(example["objects"][idx]))
extra_tokens.append("[OBJ-%d]" % idx)
max_len = FLAGS.max_seq_length - 4 - len(extra_tokens)
q, a, r = cap_length(q, a, r, max_len)
tokens = ["[CLS]", "[IMAGE]"] + q + ["[SEP]"] + a + r + ["[SEP]"]
tokens.extend(extra_tokens)
tf.logging.info("Final tokens: %s", " ".join(tokens))
bbox_positions_matrix, bbox_indices_vector = get_bboxes(tokens, metadata)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
segment_ids = [0] * (len(q) + 3) + [1] * (
len(a) + len(r) + 1 + len(extra_tokens))
input_mask = [1] * len(input_ids)
padding_len = FLAGS.max_seq_length - len(input_ids)
input_ids.extend([0] * padding_len)
segment_ids.extend([0] * padding_len)
input_mask.extend([0] * padding_len)
bbox_indices_vector.extend([-1] * padding_len)
if (len(input_ids) != FLAGS.max_seq_length or
len(segment_ids) != FLAGS.max_seq_length or
len(input_mask) != FLAGS.max_seq_length or
len(bbox_indices_vector) != FLAGS.max_seq_length):
tf.logging.fatal("Bad feature lengths: %d, %d, %d, %d", len(input_ids),
len(segment_ids), len(input_mask),
len(bbox_indices_vector))
if "answer_label" in example and "rationale_label" in example:
if FLAGS.include_rationales:
positive = j == example["rationale_label"]
else:
positive = i == example["answer_label"]
else:
positive = False
y["choice_id"] = make_int64_feature([i * 4 + j])
y["input_ids"] = make_int64_feature(input_ids)
y["segment_ids"] = make_int64_feature(segment_ids)
y["input_mask"] = make_int64_feature(input_mask)
y["label"] = make_int64_feature([int(positive)])
y["bbox_pos"] = make_int64_feature(bbox_positions_matrix)
y["bbox_idx"] = make_int64_feature(bbox_indices_vector)
yield tf.train.Example(features=tf.train.Features(feature=y))
|
Creates TF examples for the given VCR example and image feature vector.
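Added orientation note (not in the original file): at training time with include_rationales enabled, only the gold answer is kept, so the loop enumerates the four rationale choices; a sketch of the resulting choice_id/label layout for a hypothetical rationale_label of 1.
rationale_label = 1                  # hypothetical gold rationale index
for i in range(1):                   # single gold answer kept at training time
    for j in range(4):               # four rationale choices
        choice_id = i * 4 + j
        label = int(j == rationale_label)
        print(choice_id, label)      # prints: 0 0 / 1 1 / 2 0 / 3 0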
|
https://github.com/google-research/language/blob/240cd2a1fd0307c6822b6f1f6c2abf1349a5a4da/language/question_answering/b2t2/compute_vcr_features.py#L190-L274
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import collections
import functools
import json
import os
import re
import zipfile
import absl
from bert import tokenization
import tensorflow.compat.v1 as tf
flags = absl.flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
"data_dir", os.path.expanduser("~/data/vcr"),
"Directory containing downloaded VCR data.")
flags.DEFINE_string("vocab_path", None,
"BERT vocab.")
flags.DEFINE_string(
"output_tfrecord", None,
"Tf record file to write extracted features to.")
flags.DEFINE_integer(
"shard", 0,
"Shard number for parallel processing.")
flags.DEFINE_integer(
"num_shards", 20,
"Total number of shards for parallel processing.")
flags.DEFINE_integer("max_seq_length", 64, "Maximum sequence length for BERT.")
flags.DEFINE_integer("max_num_bboxes", 4,
"Maximum number of bounding boxes to consider.")
flags.DEFINE_bool(
"append_all_bboxes", False,
"Append all bboxes to end of token sequence, up to max_num_bboxes. We add "
"the bboxes in order, including ones already mentioned in the text.")
flags.DEFINE_bool(
"include_rationales", False,
"Whether to include rationales as choices. VCR requires to predict the "
"answer first without the rationale, so we train a separate model for "
"this case.")
BLANK_JPEG = base64.b64decode(
"/9j/4AAQSkZJRgABAQEAYABgAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh"
"0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIy"
"MjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAABAA"
"EDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIE"
"AwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJi"
"coKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWW"
"l5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09f"
"b3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQA"
"AQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKj"
"U2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJma"
"oqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9"
"oADAMBAAIRAxEAPwD3+iiigD//2Q==")
def make_int64_feature(v):
return tf.train.Feature(int64_list=tf.train.Int64List(value=v))
def make_float_feature(v):
return tf.train.Feature(float_list=tf.train.FloatList(value=v))
def make_bytes_feature(v):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=v))
def parse_image(height, width, image_string):
image_decoded = tf.image.decode_image(image_string, channels=3)
image_decoded.set_shape([None, None, 3])
image_float = tf.image.convert_image_dtype(image_decoded, tf.float32)
image_resized = tf.image.resize_image_with_pad(image_float, height, width)
image = tf.reshape(image_resized, [1, height, width, 3])
return image
def tokenize(tokenizer, example, text):
tokens = []
for word_token in text:
if isinstance(word_token, list):
for i, object_id in enumerate(word_token):
if i: tokens.append("and")
tokens.extend(tokenizer.tokenize(example["objects"][object_id]))
tokens.append("[OBJ-%d]" % object_id)
else:
tokens.extend(tokenizer.tokenize(word_token))
tf.logging.info("Tokenization: %s", tokens)
return tokens
def cap_length(q, a, r, length):
assert length > 0, "length to cap too short: %d" % length
while len(q) + len(a) + len(r) >= length:
max_len = max(len(q), len(a), len(r))
if len(r) == max_len:
r = r[:-1]
elif len(a) == max_len:
a = a[:-1]
else:
q = q[:-1]
return q, a, r
def get_bboxes(tokens, metadata):
bbox_positions = {}
bbox_indices = collections.defaultdict(list)
for idx, t in enumerate(tokens):
if len(bbox_positions) >= FLAGS.max_num_bboxes:
break
m = re.match(r"\[OBJ-(\d+)\]", t)
if m:
object_id = int(m.group(1))
bbox_positions[object_id] = metadata["boxes"][object_id]
bbox_indices[object_id].append(idx)
for idx in range(FLAGS.max_num_bboxes):
if len(bbox_positions) == FLAGS.max_num_bboxes:
break
bbox_positions[-idx - 1] = [0, 0, 1, 1]
bbox_positions_matrix = []
bbox_indices_vector = [-1] * len(tokens)
for idx, (object_id, bbox) in enumerate(bbox_positions.iteritems()):
offset_height = int(bbox[1])
offset_width = int(bbox[0])
target_height = int(bbox[3] - bbox[1])
target_width = int(bbox[2] - bbox[0])
bbox_positions_matrix.extend(
[offset_height, offset_width, target_height, target_width])
for token_idx in bbox_indices[object_id]:
bbox_indices_vector[token_idx] = idx
tf.logging.info("Box positions: %s", bbox_positions_matrix)
tf.logging.info("Box token indices: %s", bbox_indices_vector)
return bbox_positions_matrix, bbox_indices_vector
|
Apache License 2.0
|
openstack/os-brick
|
os_brick/initiator/connectors/remotefs.py
|
RemoteFsConnector.connect_volume
|
python
|
def connect_volume(self, connection_properties):
path = self._get_volume_path(connection_properties)
return {'path': path}
|
Ensure that the filesystem containing the volume is mounted.
:param connection_properties: The dictionary that describes all
of the target volume attributes.
connection_properties must include:
export - remote filesystem device (e.g. '172.18.194.100:/var/nfs')
name - file name within the filesystem
:type connection_properties: dict
:returns: dict
connection_properties may optionally include:
options - options to pass to mount
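Added usage sketch (not part of the original driver): the shape of the input dict and of the return value; the volume name is made up, and a real call needs root privileges and a mount helper, so it is left commented out.
connection_properties = {
    'export': '172.18.194.100:/var/nfs',   # remote filesystem device
    'name': 'volume-0001',                 # file name within the filesystem (hypothetical)
    'options': '-o nfsvers=4',             # optional flags passed to mount
}
# connector = RemoteFsConnector('nfs', root_helper='sudo')
# connector.connect_volume(connection_properties)
# -> {'path': '<mount point of the export>/volume-0001'}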
|
https://github.com/openstack/os-brick/blob/d0e762299143e0a2dbc4688f763e801df349472e/os_brick/initiator/connectors/remotefs.py#L89-L105
|
from oslo_log import log as logging
from os_brick import initiator
from os_brick.initiator.connectors import base
from os_brick.remotefs import remotefs
from os_brick import utils
LOG = logging.getLogger(__name__)
class RemoteFsConnector(base.BaseLinuxConnector):
def __init__(self, mount_type, root_helper, driver=None,
execute=None,
device_scan_attempts=initiator.DEVICE_SCAN_ATTEMPTS_DEFAULT,
*args, **kwargs):
kwargs = kwargs or {}
conn = kwargs.get('conn')
mount_type_lower = mount_type.lower()
if conn:
mount_point_base = conn.get('mount_point_base')
if mount_type_lower in ('nfs', 'glusterfs', 'scality',
'quobyte', 'vzstorage'):
kwargs[mount_type_lower + '_mount_point_base'] = (
kwargs.get(mount_type_lower + '_mount_point_base') or
mount_point_base)
else:
LOG.warning("Connection details not present."
" RemoteFsClient may not initialize properly.")
if mount_type_lower == 'scality':
cls = remotefs.ScalityRemoteFsClient
elif mount_type_lower == 'vzstorage':
cls = remotefs.VZStorageRemoteFSClient
else:
cls = remotefs.RemoteFsClient
self._remotefsclient = cls(mount_type, root_helper, execute=execute,
*args, **kwargs)
super(RemoteFsConnector, self).__init__(
root_helper, driver=driver,
execute=execute,
device_scan_attempts=device_scan_attempts,
*args, **kwargs)
@staticmethod
def get_connector_properties(root_helper, *args, **kwargs):
return {}
def set_execute(self, execute):
super(RemoteFsConnector, self).set_execute(execute)
self._remotefsclient.set_execute(execute)
def get_search_path(self):
return self._remotefsclient.get_mount_base()
def _get_volume_path(self, connection_properties):
mnt_flags = []
if connection_properties.get('options'):
mnt_flags = connection_properties['options'].split()
nfs_share = connection_properties['export']
self._remotefsclient.mount(nfs_share, mnt_flags)
mount_point = self._remotefsclient.get_mount_point(nfs_share)
path = mount_point + '/' + connection_properties['name']
return path
def get_volume_paths(self, connection_properties):
path = self._get_volume_path(connection_properties)
return [path]
@utils.trace
|
Apache License 2.0
|
spoclab-ca/covfefe
|
utils/lexicosyntactic/functions.py
|
get_anew_norms
|
python
|
def get_anew_norms(path_to_norms=None):
if path_to_norms is not None:
source_norms = path_to_norms
else:
source_norms = os.path.abspath('TODOme')
with open(source_norms, "r") as fin:
f = fin.readlines()
f = f[1:]
anew = {}
for line in f:
l = line.strip().split()
anew[l[0]] = l[2:8]
return anew
return None
|
Parameters:
path_to_norms : optional, string. Full path, including filename, of the ANEW norms.
Return dictionary of ANEW norms, order: [ValMn ValSD AroMn AroSD DomMn DomSD]
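Added illustration (not from the original file): the whitespace-split parsing above applied to one made-up ANEW-style row (word, word number, the six norm columns, frequency).
line = "beach 57 8.03 1.59 5.53 2.72 6.50 2.31 61"   # made-up values, ANEW column order
l = line.strip().split()
print({l[0]: l[2:8]})
# {'beach': ['8.03', '1.59', '5.53', '2.72', '6.50', '2.31']}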
|
https://github.com/spoclab-ca/covfefe/blob/905e8e5eb0905791de869db18c6f755838657203/utils/lexicosyntactic/functions.py#L81-L102
|
import nltk
import os
import re
def get_filename(full_path):
if full_path.find(os.sep) != -1 and full_path.rfind(os.sep) < len(full_path)-1:
return full_path[full_path.rfind(os.sep)+1:]
else:
return full_path
def get_fileid(filename):
return filename[:filename.rfind('.')]
def get_subject_sample(file_id):
regex_fileformat = re.compile(r'^(?P<subjectID>[0-9a-zA-Z]+)[_-](?P<sessionID>[0-9a-zA-Z]+)[.](?:[0-9a-zA-Z]+)$')
regex_extendedformat = re.compile(r'^(?P<subjectID>[0-9a-zA-Z]+)[_-](?P<sessionID>[0-9a-zA-Z]+)[-_](?P<speakerID>[0-9a-zA-Z]+)[.](?:[0-9a-zA-Z]+)$')
if regex_fileformat.findall(file_id):
return regex_fileformat.findall(file_id)[0]
elif regex_extendedformat.findall(file_id):
return regex_extendedformat.findall(file_id)[0]
return None
def get_frequency_norms(path_to_norms=None):
if path_to_norms is not None:
source_norms = path_to_norms
else:
source_norms = os.path.abspath('../feature_extraction/text/frequencies.txt')
with open(source_norms, "r") as fin:
f = fin.readlines()
f = f[1:]
freq = {}
for line in f:
l = line.strip().split()
if len(l) == 0:
continue
freq[l[0].lower()] = l[1:]
return freq
return None
def get_warringer_norms(path_to_norms=None):
if path_to_norms is not None:
source_norms = path_to_norms
else:
source_norms = os.path.abspath('TODOme')
with open(source_norms, "r") as fin:
f = fin.readlines()
f = f[1:]
warr = {}
for line in f:
l = line.strip().split(',')
warr[l[1]] = l[2:11]
return warr
return None
|
Apache License 2.0
|
dgilland/zulu
|
src/zulu/zulu.py
|
Zulu.copy
|
python
|
def copy(self):
return self.__class__(*self.datetimetuple())
|
Return a new :class:`Zulu` instance with the same datetime value.
Returns:
:class:`.Zulu`
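Added example (not in the original file), assuming the package exposes Zulu at the top level.
from zulu import Zulu
dt = Zulu(2021, 7, 1, 12, 30)
dup = dt.copy()
assert dup == dt and dup is not dt   # equal value, distinct instance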
|
https://github.com/dgilland/zulu/blob/f911dd34d3ad2487edf4bb8b5c751eaef25e4f9f/src/zulu/zulu.py#L455-L462
|
import calendar
from collections import namedtuple
from datetime import datetime, timedelta
import time
from babel.dates import LC_TIME
from dateutil.relativedelta import relativedelta
from . import parser
from .delta import Delta
from .helpers import FOLD_AVAILABLE, NUMBER_TYPES
from .parser import UTC
LOCAL = "local"
DATETIME_ATTRS = (
"year",
"month",
"day",
"hour",
"minute",
"second",
"microsecond",
"tzinfo",
"fold",
)
TIME_FRAMES = (
"century",
"decade",
"year",
"month",
"week",
"day",
"hour",
"minute",
"second",
)
DateTime = namedtuple(
"DateTime",
["year", "month", "day", "hour", "second", "minute", "microsecond", "tzinfo"],
)
Date = namedtuple("Date", ["year", "month", "day"])
def validate_frame(frame):
if frame not in TIME_FRAMES:
raise ValueError(f"Time frame must be one of {'|'.join(TIME_FRAMES)}, not '{frame}'")
class Zulu(datetime):
def __new__(
cls,
year=1970,
month=1,
day=1,
hour=0,
minute=0,
second=0,
microsecond=0,
tzinfo=None,
*,
fold=0,
):
if isinstance(year, bytes) and len(year) == 10 and 1 <= year[2] & 0x7F <= 12:
return cls.fromdatetime(datetime(year, month))
elif isinstance(year, dict):
obj = {key: value for key, value in year.items() if key in DATETIME_ATTRS}
return cls(**obj)
extra = {"fold": fold} if FOLD_AVAILABLE else {}
if tzinfo:
tzinfo = parser.get_timezone(tzinfo)
if hasattr(tzinfo, "localize"):
dt = tzinfo.localize(
datetime(year, month, day, hour, minute, second, microsecond, **extra),
is_dst=None,
)
else:
dt = datetime(year, month, day, hour, minute, second, microsecond, tzinfo, **extra)
if dt.utcoffset() != timedelta(0):
dt = dt.astimezone(UTC)
year = dt.year
month = dt.month
day = dt.day
hour = dt.hour
minute = dt.minute
second = dt.second
microsecond = dt.microsecond
tzinfo = dt.tzinfo
if FOLD_AVAILABLE:
extra["fold"] = dt.fold
else:
tzinfo = UTC
return datetime.__new__(
cls, year, month, day, hour, minute, second, microsecond, tzinfo, **extra
)
@classmethod
def now(cls):
return cls.fromtimestamp(time.time())
@classmethod
def utcnow(cls):
return cls.now()
@classmethod
def parse(cls, obj, formats=None, default_tz=None):
if isinstance(obj, dict):
dt = cls(obj)
else:
dt = parser.parse_datetime(obj, formats, default_tz=default_tz)
dt = cls.fromdatetime(dt)
return dt
@classmethod
def fromdatetime(cls, dt):
return cls(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
dt.tzinfo,
fold=getattr(dt, "fold", 0),
)
@classmethod
def fromtimestamp(cls, timestamp, tz=UTC):
return cls.utcfromtimestamp(timestamp)
@classmethod
def utcfromtimestamp(cls, timestamp):
return cls.fromdatetime(datetime.utcfromtimestamp(timestamp))
@classmethod
def fromordinal(cls, ordinal):
return cls.fromdatetime(datetime.fromordinal(ordinal))
@classmethod
def fromgmtime(cls, struct):
return cls.fromtimestamp(calendar.timegm(struct))
@classmethod
def fromlocaltime(cls, struct):
return cls.fromtimestamp(time.mktime(struct))
@classmethod
def combine(cls, date, time):
if callable(getattr(date, "date", None)):
date = date.date()
if callable(getattr(time, "time", None)):
time = time.time()
return cls.fromdatetime(datetime.combine(date, time))
@classmethod
def span_range(cls, frame, start, end):
if not isinstance(start, Zulu):
start = cls.parse(start)
if not isinstance(end, Zulu):
end = cls.parse(end)
if start > end:
return
next_start = start
while True:
span = next_start.span(frame)
if span[1] <= end:
yield span
next_start = span[1].shift(microseconds=1)
else:
break
@classmethod
def range(cls, frame, start, end):
if not isinstance(start, Zulu):
start = cls.parse(start)
if not isinstance(end, Zulu):
end = cls.parse(end)
validate_frame(frame)
if start > end:
return
if frame == "century":
step_value = 100
frame = "year"
elif frame == "decade":
step_value = 10
frame = "year"
else:
step_value = 1
step = {f"{frame}s": step_value}
next_start = start
while True:
next_end = next_start.shift(**step)
if next_end <= end:
yield next_start
next_start = next_end
else:
break
@property
def naive(self):
return self.datetime.replace(tzinfo=None)
@property
def datetime(self):
return datetime(*self.datetimetuple())
def timestamp(self):
return parser.get_timestamp(self)
def datetimetuple(self):
return DateTime(
self.year,
self.month,
self.day,
self.hour,
self.minute,
self.second,
self.microsecond,
self.tzinfo,
)
def datetuple(self):
return Date(self.year, self.month, self.day)
|
MIT License
|
ensime/ensime-vim
|
ensime_shared/client.py
|
EnsimeClient.type_check_cmd
|
python
|
def type_check_cmd(self, args, range=None):
self.log.debug('type_check_cmd: in')
self.start_typechecking()
self.type_check("")
self.editor.message('typechecking')
|
Sets the flag to begin buffering typecheck notes & clears any
stale notes before requesting a typecheck from the server
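A minimal sketch of the buffer-then-request pattern the docstring describes; the class and attribute names here (`_buffering`, `_notes`) are hypothetical and are not taken from ensime-vim's actual TypecheckHandler:

class BufferingTypecheckHandler:
    """Collects typecheck notes between an explicit start and the server reply."""

    def __init__(self):
        self._buffering = False
        self._notes = []

    def start_typechecking(self):
        # Clear stale notes and start buffering new ones (hypothetical sketch).
        self._notes = []
        self._buffering = True

    def add_note(self, note):
        if self._buffering:
            self._notes.append(note)

    def stop_typechecking(self):
        self._buffering = False
        return list(self._notes)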
|
https://github.com/ensime/ensime-vim/blob/caa734e84f002b25446c615706283a74edd4ecfe/ensime_shared/client.py#L336-L342
|
import inspect
import json
import logging
import os
import shutil
import sys
import tempfile
import time
from subprocess import PIPE, Popen
from threading import Thread
import websocket
from .config import feedback, gconfig, LOG_FORMAT
from .debugger import DebuggerClient
from .errors import InvalidJavaPathError
from .protocol import ProtocolHandler, ProtocolHandlerV1, ProtocolHandlerV2
from .typecheck import TypecheckHandler
from .util import catch, Pretty, Util
if sys.version_info > (3, 0):
from queue import Queue
else:
from Queue import Queue
class EnsimeClient(TypecheckHandler, DebuggerClient, ProtocolHandler):
def __init__(self, editor, launcher):
def setup_logger():
path = os.path
config = self.launcher.config
projectdir = path.abspath(config['root-dir'])
project = config.get('name', path.basename(projectdir))
logger = logging.getLogger(__name__).getChild(project)
if os.environ.get('ENSIME_VIM_DEBUG'):
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
logdir = config['cache-dir']
if not path.isdir(logdir):
try:
os.mkdir(logdir)
except OSError:
logger.addHandler(logging.NullHandler())
return logger
logfile = path.join(logdir, 'ensime-vim.log')
handler = logging.FileHandler(logfile, mode='w')
handler.setFormatter(logging.Formatter(LOG_FORMAT))
logger.addHandler(handler)
logger.info('Initializing project - %s', projectdir)
return logger
super(EnsimeClient, self).__init__()
self.editor = editor
self.launcher = launcher
self.log = setup_logger()
self.log.debug('__init__: in')
self.editor.initialize()
self.ws = None
self.ensime = None
self.ensime_server = None
self.call_id = 0
self.call_options = {}
self.refactor_id = 1
self.refactorings = {}
self.queue = Queue()
self.suggestions = None
self.completion_timeout = 10
self.completion_started = False
self.full_types_enabled = False
self.toggle_teardown = True
self.connection_attempts = 0
self.tmp_diff_folder = tempfile.mkdtemp(prefix='ensime-vim-diffs')
self.number_try_connection = 1
self.debug_thread_id = None
self.running = True
thread = Thread(name='queue-poller', target=self.queue_poll)
thread.daemon = True
thread.start()
def queue_poll(self, sleep_t=0.5):
connection_alive = True
while self.running:
if self.ws:
def logger_and_close(msg):
self.log.error('Websocket exception', exc_info=True)
if not self.running:
connection_alive = False
else:
if not self.number_try_connection:
self.teardown()
self._display_ws_warning()
with catch(websocket.WebSocketException, logger_and_close):
result = self.ws.recv()
self.queue.put(result)
if connection_alive:
time.sleep(sleep_t)
def setup(self, quiet=False, bootstrap_server=False):
def lazy_initialize_ensime():
if not self.ensime:
called_by = inspect.stack()[4][3]
self.log.debug(str(inspect.stack()))
self.log.debug('setup(quiet=%s, bootstrap_server=%s) called by %s()',
quiet, bootstrap_server, called_by)
installed = self.launcher.strategy.isinstalled()
if not installed and not bootstrap_server:
if not quiet:
scala = self.launcher.config.get('scala-version')
msg = feedback["prompt_server_install"].format(scala_version=scala)
self.editor.raw_message(msg)
return False
try:
self.ensime = self.launcher.launch()
except InvalidJavaPathError:
self.editor.message('invalid_java')
return bool(self.ensime)
def ready_to_connect():
if not self.ws and self.ensime.is_ready():
self.connect_ensime_server()
return True
return self.running and lazy_initialize_ensime() and ready_to_connect()
def _display_ws_warning(self):
warning = "A WS exception happened, 'ensime-vim' has been disabled. " + "For more information, have a look at the logs in `.ensime_cache`"
self.editor.raw_message(warning)
def send(self, msg):
def reconnect(e):
self.log.error('send error, reconnecting...', exc_info=True)
self.connect_ensime_server()
if self.ws:
self.ws.send(msg + "\n")
self.log.debug('send: in')
if self.running and self.ws:
with catch(websocket.WebSocketException, reconnect):
self.log.debug('send: sending JSON on WebSocket')
self.ws.send(msg + "\n")
def connect_ensime_server(self):
self.log.debug('connect_ensime_server: in')
server_v2 = isinstance(self, EnsimeClientV2)
def disable_completely(e):
if e:
self.log.error('connection error: %s', e, exc_info=True)
self.shutdown_server()
self._display_ws_warning()
if self.running and self.number_try_connection:
self.number_try_connection -= 1
if not self.ensime_server:
port = self.ensime.http_port()
uri = "websocket" if server_v2 else "jerky"
self.ensime_server = gconfig["ensime_server"].format(port, uri)
with catch(websocket.WebSocketException, disable_completely):
options = {"subprotocols": ["jerky"]} if server_v2 else {}
options['enable_multithread'] = True
self.log.debug("About to connect to %s with options %s",
self.ensime_server, options)
self.ws = websocket.create_connection(self.ensime_server, **options)
if self.ws:
self.send_request({"typehint": "ConnectionInfoReq"})
else:
disable_completely(None)
def shutdown_server(self):
self.log.debug('shutdown_server: in')
if self.ensime and self.toggle_teardown:
self.ensime.stop()
def teardown(self):
self.log.debug('teardown: in')
self.running = False
self.shutdown_server()
shutil.rmtree(self.tmp_diff_folder, ignore_errors=True)
def send_at_position(self, what, useSelection, where="range"):
self.log.debug('send_at_position: in')
b, e = self.editor.selection_pos() if useSelection else self.editor.word_under_cursor_pos()
self.log.debug('useSelection: {}, beg: {}, end: {}'.format(useSelection, b, e))
beg = self.get_position(b[0], b[1])
end = self.get_position(e[0], e[1])
self.send_request(
{"typehint": what + "AtPointReq",
"file": self.editor.path(),
where: {"from": beg, "to": end}})
def set_position(self, decl_pos):
if decl_pos["typehint"] == "LineSourcePosition":
self.editor.set_cursor(decl_pos['line'], 0)
else:
point = decl_pos["offset"]
row, col = self.editor.point2pos(point + 1)
self.editor.set_cursor(row, col)
def get_position(self, row, col):
result = col
self.log.debug('%s %s', row, col)
lines = self.editor.getlines()[:row - 1]
result += sum([len(l) + 1 for l in lines])
self.log.debug(result)
return result
def open_decl_for_inspector_symbol(self):
self.log.debug('open_decl_for_inspector_symbol: in')
lineno = self.editor.cursor()[0]
symbol = self.editor.symbol_for_inspector_line(lineno)
self.symbol_by_name([symbol])
self.unqueue(should_wait=True)
def symbol_by_name(self, args, range=None):
self.log.debug('symbol_by_name: in')
if not args:
self.editor.raw_message('Must provide a fully-qualified symbol name')
return
self.call_options[self.call_id] = {"split": True,
"vert": True,
"open_definition": True}
fqn = args[0]
req = {
"typehint": "SymbolByNameReq",
"typeFullName": fqn
}
if len(args) == 2:
req["memberName"] = args[1]
self.send_request(req)
def complete(self, row, col):
self.log.debug('complete: in')
pos = self.get_position(row, col)
self.send_request({"point": pos, "maxResults": 100,
"typehint": "CompletionsReq",
"caseSens": True,
"fileInfo": self._file_info(),
"reload": False})
def send_at_point(self, what, row, col):
pos = self.get_position(row, col)
self.send_request(
{"typehint": what + "AtPointReq",
"file": self._file_info(),
"point": pos})
def do_toggle_teardown(self, args, range=None):
self.log.debug('do_toggle_teardown: in')
self.toggle_teardown = not self.toggle_teardown
|
MIT License
|
cics-nd/gptorch
|
test/test_models/test_base.py
|
TestGPModel._predict_fy_cuda
|
python
|
def _predict_fy_cuda(self, attr):
gp = self._get_model()
f = getattr(gp, attr)
x_test = np.random.randn(5, gp.input_dimension)
x_test_torch = TensorType(x_test)
gp.cuda()
cuda_np = f(x_test)
for result in cuda_np:
assert isinstance(result, np.ndarray)
cuda_torch = f(x_test_torch)
for result in cuda_torch:
assert result.device == x_test_torch.device
cuda_gpu = f(x_test_torch.to("cuda"))
for result in cuda_gpu:
assert result.is_cuda
|
attr='predict_f' or 'predict_y'
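For context, a hedged sketch of the attr-based dispatch used by this helper; `gp` stands for any model exposing `predict_f`/`predict_y`, as in the surrounding test class:

# Hypothetical illustration of getattr dispatch, not part of the test suite.
def call_prediction(gp, attr, x_test):
    assert attr in ("predict_f", "predict_y")
    predict = getattr(gp, attr)   # resolves to gp.predict_f or gp.predict_y
    return predict(x_test)        # returns (mean, variance)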
|
https://github.com/cics-nd/gptorch/blob/a80296c44d5738d2b9ffb8dfbd5b98d0febde069/test/test_models/test_base.py#L109-L132
|
import os
import sys
import pytest
import torch
import numpy as np
from gptorch.models.base import GPModel
from gptorch.mean_functions import Zero
from gptorch.util import TensorType
from gptorch.kernels import Rbf
from gptorch.models import GPR
base_path = os.path.join(os.path.dirname(__file__), "..", "..")
if not base_path in sys.path:
sys.path.append(base_path)
from test.util import needs_cuda
class TestGPModel(object):
@needs_cuda
def test_cuda(self):
gp = self._get_model()
gp.cuda()
assert gp.X.is_cuda
assert gp.Y.is_cuda
@needs_cuda
def test_cpu(self):
gp = self._get_model()
gp.cuda()
gp.cpu()
assert not gp.X.is_cuda
assert not gp.Y.is_cuda
def test_optimize(self):
gp = self._get_model()
gp.optimize(max_iter=2)
gp.optimize(method="L-BFGS-B", max_iter=2)
def test_predict_f(self):
self._predict_fy("predict_f")
@needs_cuda
def test_predict_f_cuda(self):
self._predict_fy_cuda("predict_f")
def test_predict_y(self):
self._predict_fy("predict_y")
@needs_cuda
def test_predict_y_cuda(self):
self._predict_fy_cuda("predict_y")
def test_predict_f_samples(self):
self._predict_fy_samples("predict_f_samples")
@needs_cuda
def test_predict_f_samples_cuda(self):
self._predict_fy_samples_cuda("predict_f_samples")
def test_predict_y_samples(self):
self._predict_fy_samples("predict_y_samples")
@needs_cuda
def test_predict_y_samples_cuda(self):
self._predict_fy_samples_cuda("predict_y_samples")
def _predict_fy(self, attr):
n, dx, dy = 5, 3, 2
x, y = np.random.randn(n, dx), np.random.randn(n, dy)
kern = Rbf(dx, ARD=True)
gp = GPR(x, y, kern)
n_test = 5
x_test = np.random.randn(n_test, dx)
f = getattr(gp, attr)
mu, v = f(x_test)
for result in [mu, v]:
assert isinstance(result, np.ndarray)
assert result.ndim == 2
assert result.shape == (n_test, dy)
x_test_torch = TensorType(x_test)
mu_torch, v_torch = f(x_test_torch)
for result in [mu_torch, v_torch]:
assert isinstance(result, TensorType)
assert result.ndimension() == 2
assert result.shape == (n_test, dy)
|
MIT License
|
societe-generale/aikit
|
aikit/tools/db_informations.py
|
get_var_type_columns_dico
|
python
|
def get_var_type_columns_dico(columns_informations):
var_type_columns_dico = OrderedDict()
for col, info in columns_informations.items():
if info["ToKeep"]:
vt = info["TypeOfVariable"]
if vt not in var_type_columns_dico:
var_type_columns_dico[vt] = []
var_type_columns_dico[vt].append(col)
return var_type_columns_dico
|
Get a dictionary with the list of columns for each type.
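A small illustration of the expected input and output shapes, using hypothetical column names; the string values stand in for the `aikit.enums.TypeOfVariables` constants, and the `TypeOfVariable`/`ToKeep` keys mirror `get_columns_informations` below:

columns_informations = {
    "age":     {"TypeOfVariable": "NUM",  "HasMissing": False, "ToKeep": True},
    "city":    {"TypeOfVariable": "CAT",  "HasMissing": True,  "ToKeep": True},
    "comment": {"TypeOfVariable": "TEXT", "HasMissing": False, "ToKeep": False},
}

result = get_var_type_columns_dico(columns_informations)
# result == OrderedDict([("NUM", ["age"]), ("CAT", ["city"])])
# "comment" is skipped because its ToKeep flag is False.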
|
https://github.com/societe-generale/aikit/blob/17f6cbc97e7f91e57a1e8d7db39ad29990dfbb47/aikit/tools/db_informations.py#L138-L150
|
from collections import OrderedDict
import pandas as pd
import numpy as np
from sklearn.utils.multiclass import type_of_target
import aikit.enums as en
from aikit.enums import TypeOfVariables
def guess_type_of_problem(dfX, y=None):
if y is None:
return en.TypeOfProblem.CLUSTERING
if len(np.unique(y)) == 1:
raise ValueError("y seems to be unique")
if type_of_target(y) in ["binary", "multiclass"]:
if "float" in str(y.dtype) or "int" in str(y.dtype):
nb_u = len(np.unique(y))
nb = len(y)
if nb_u >= 0.25 * nb:
return en.TypeOfProblem.REGRESSION
else:
return en.TypeOfProblem.CLASSIFICATION
return en.TypeOfProblem.CLASSIFICATION
else:
return en.TypeOfProblem.REGRESSION
def guess_type_of_variable(s):
if not isinstance(s, pd.Series):
raise TypeError("s should be a Serie, not a '%s'" % type(s))
st = str(s.dtype)
if "int" in st:
return TypeOfVariables.NUM
elif "float" in st:
return TypeOfVariables.NUM
elif "object" in st:
nb_u = s.nunique()
nb = len(s)
if hasattr(s, "str"):
avg_l = s.str.len().mean()
else:
avg_l = 0
if avg_l >= 50 or nb_u >= 0.5 * nb:
return TypeOfVariables.TEXT
return TypeOfVariables.CAT
elif "bool" in st:
return TypeOfVariables.CAT
elif "category" in st:
return TypeOfVariables.CAT
elif "category" in st:
return TypeOfVariables.CAT
else:
raise NotImplementedError("I don't know that type of Series : %s, please check" % st)
def has_missing_values(s):
if not isinstance(s, pd.Series):
raise TypeError("s should be a Serie, not a '%s'" % type(s))
return bool(np.asarray(s.isnull()).sum().sum() > 0)
def get_columns_informations(dfX):
df_columns_info = OrderedDict()
for c in dfX.columns:
df_columns_info[c] = {
"TypeOfVariable": guess_type_of_variable(dfX[c]),
"HasMissing": has_missing_values(dfX[c]),
"ToKeep": True,
}
return df_columns_info
|
BSD 2-Clause Simplified License
|
unofficial-memsource/memsource-cli-client
|
memsource_cli/models/quality_assurance_batch_run_dto_v3.py
|
QualityAssuranceBatchRunDtoV3.settings
|
python
|
def settings(self):
return self._settings
|
Gets the settings of this QualityAssuranceBatchRunDtoV3. # noqa: E501
:return: The settings of this QualityAssuranceBatchRunDtoV3. # noqa: E501
:rtype: QualityAssuranceRunDtoV3
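A hedged construction sketch; the `uid` keyword passed to `UidReference` and the no-argument `QualityAssuranceRunDtoV3()` call are assumptions about the generated swagger models and are not confirmed by this file:

from memsource_cli.models.quality_assurance_run_dto_v3 import QualityAssuranceRunDtoV3
from memsource_cli.models.uid_reference import UidReference

# hypothetical: constructor keywords below are assumptions about the generated models
batch_run = QualityAssuranceBatchRunDtoV3(
    jobs=[UidReference(uid="someJobUid")],
    settings=QualityAssuranceRunDtoV3(),
    max_qa_warnings_count=100,
)
assert isinstance(batch_run.settings, QualityAssuranceRunDtoV3)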
|
https://github.com/unofficial-memsource/memsource-cli-client/blob/a6639506b74e95476da87f4375953448b76ea90c/memsource_cli/models/quality_assurance_batch_run_dto_v3.py#L86-L93
|
import pprint
import re
import six
from memsource_cli.models.quality_assurance_run_dto_v3 import QualityAssuranceRunDtoV3
from memsource_cli.models.uid_reference import UidReference
class QualityAssuranceBatchRunDtoV3(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'jobs': 'list[UidReference]',
'settings': 'QualityAssuranceRunDtoV3',
'max_qa_warnings_count': 'int'
}
attribute_map = {
'jobs': 'jobs',
'settings': 'settings',
'max_qa_warnings_count': 'maxQaWarningsCount'
}
def __init__(self, jobs=None, settings=None, max_qa_warnings_count=None):
self._jobs = None
self._settings = None
self._max_qa_warnings_count = None
self.discriminator = None
self.jobs = jobs
if settings is not None:
self.settings = settings
if max_qa_warnings_count is not None:
self.max_qa_warnings_count = max_qa_warnings_count
@property
def jobs(self):
return self._jobs
@jobs.setter
def jobs(self, jobs):
if jobs is None:
raise ValueError("Invalid value for `jobs`, must not be `None`")
self._jobs = jobs
@property
|
Apache License 2.0
|
davidinouye/destructive-deep-learning
|
ddl/independent.py
|
IndependentDestructor.inverse_transform
|
python
|
def inverse_transform(self, X, y=None):
self._check_is_fitted()
X = check_array(X, ensure_min_samples=0)
if X.shape[0] == 0:
return X
self._check_dim(X)
X = check_X_in_interval(X, np.array([0, 1]))
Z = np.array([
u_dens.inverse_cdf(np.reshape(x_col, (-1, 1))).ravel()
for u_dens, x_col in zip(self.density_.univariate_densities_, X.transpose())
]).transpose()
return Z
|
Apply inverse destructive transformation to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : None, default=None
Not used in the transformation but kept for compatibility.
Returns
-------
X_new : array-like, shape (n_samples, n_features)
Transformed data.
|
https://github.com/davidinouye/destructive-deep-learning/blob/632add7a9731347e050d271ceebb24251e1d8e01/ddl/independent.py#L129-L160
|
from __future__ import division, print_function
import itertools
import logging
import warnings
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin, clone
from sklearn.exceptions import NotFittedError
from sklearn.utils.validation import check_array, check_is_fitted, check_random_state
from .base import BaseDensityDestructor, ScoreMixin
from .univariate import STANDARD_NORMAL_DENSITY, ScipyUnivariateDensity
from .utils import (_UNIT_SPACE, check_X_in_interval, get_domain_or_default, get_support_or_default,
make_interior_probability)
logger = logging.getLogger(__name__)
class IndependentDestructor(BaseDensityDestructor):
def __init__(self, independent_density=None):
self.independent_density = independent_density
def _get_density_estimator(self):
if self.independent_density is None:
return IndependentDensity()
else:
return clone(self.independent_density)
@classmethod
def create_fitted(cls, fitted_density, **kwargs):
destructor = cls(**kwargs)
destructor.density_ = fitted_density
return destructor
def transform(self, X, y=None):
self._check_is_fitted()
X = check_array(X)
if X.shape[0] == 0:
return X
self._check_dim(X)
X = check_X_in_interval(X, get_domain_or_default(self))
Z = np.array([
u_dens.cdf(np.reshape(x_col, (-1, 1))).ravel()
for u_dens, x_col in zip(self.density_.univariate_densities_, X.transpose())
]).transpose()
return Z
|
BSD 3-Clause New or Revised License
|
maestrograph/sparse-hyper
|
experiments/gconvolution.py
|
MatrixHyperlayer.generate_integer_tuples
|
python
|
def generate_integer_tuples(self, means,rng=None, use_cuda=False):
dv = 'cuda' if use_cuda else 'cpu'
c, k, rank = means.size()
assert rank == 1
"""
Sample the 2 nearest points
"""
floor_mask = torch.tensor([1, 0], device=dv, dtype=torch.uint8)
fm = floor_mask.unsqueeze(0).unsqueeze(2).expand(c, k, 2, 1)
neighbor_ints = means.data.unsqueeze(2).expand(c, k, 2, 1).contiguous()
neighbor_ints[fm] = neighbor_ints[fm].floor()
neighbor_ints[~fm] = neighbor_ints[~fm].ceil()
neighbor_ints = neighbor_ints.long()
"""
Sample uniformly from a small range around the given index tuple
"""
rr_ints = torch.cuda.FloatTensor(c, k, self.radditional, 1) if use_cuda else torch.FloatTensor(c, k, self.radditional, 1)
rr_ints.uniform_()
rr_ints *= (1.0 - gaussian.EPSILON)
rng = torch.cuda.FloatTensor(rng) if use_cuda else torch.FloatTensor(rng)
rngxp = rng.unsqueeze(0).unsqueeze(0).unsqueeze(0).expand_as(rr_ints)
rrng = torch.cuda.FloatTensor(self.region) if use_cuda else torch.FloatTensor(self.region)
rrng = rrng.unsqueeze(0).unsqueeze(0).unsqueeze(0).expand_as(rr_ints)
mns_expand = means.round().unsqueeze(2).expand_as(rr_ints)
lower = mns_expand - rrng * 0.5
upper = mns_expand + rrng * 0.5
idxs = lower < 0.0
lower[idxs] = 0.0
idxs = upper > rngxp
lower[idxs] = rngxp[idxs] - rrng[idxs]
rr_ints = (rr_ints * rrng + lower).long()
"""
Sample uniformly from all index tuples
"""
g_ints = torch.cuda.FloatTensor(c, k, self.gadditional, 1) if use_cuda else torch.FloatTensor(c, k, self.gadditional, 1)
rngxp = rng.unsqueeze(0).unsqueeze(0).unsqueeze(0).expand_as(g_ints)
g_ints.uniform_()
g_ints *= (1.0 - gaussian.EPSILON) * rngxp
g_ints = g_ints.long()
ints = torch.cat([neighbor_ints, rr_ints, g_ints], dim=2)
return ints.view(c, -1, rank)
|
Sample the 2 nearest points
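A small sketch of the "two nearest integer points" idea from the docstring: each continuous index is expanded into its floor and ceil neighbors. Shapes here are hypothetical, and the stacking below is a simplified stand-in for the mask-based construction used above:

import torch

means = torch.tensor([[[0.3], [2.7]]])  # (c=1, k=2, rank=1) continuous indices
floor_ints = means.floor()
ceil_ints = means.ceil()
neighbor_ints = torch.stack([floor_ints, ceil_ints], dim=2).long()  # (c, k, 2, rank)
# tensor([[[[0], [1]], [[2], [3]]]])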
|
https://github.com/maestrograph/sparse-hyper/blob/a1b2186e2d0c9047085ef4273909d207facbeeb3/experiments/gconvolution.py#L153-L219
|
import sys
import matplotlib as mpl
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
from torch import nn
from torch.autograd import Variable
from tqdm import trange
import gaussian
import util
from util import sparsemm
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from argparse import ArgumentParser
import networkx as nx
import math
import torch
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
def clean(axes=None):
if axes is None:
axes = plt.gca()
[s.set_visible(False) for s in axes.spines.values()]
axes.tick_params(top=False, bottom=False, left=False, right=False, labelbottom=False, labelleft=False)
def densities(points, means, sigmas):
batchsize, n, rank = points.size()
batchsize, k, rank = means.size()
points = points.unsqueeze(2).expand(batchsize, n, k, rank)
means = means.unsqueeze(1).expand_as(points)
sigmas = sigmas.unsqueeze(1).expand_as(points)
sigmas_squared = torch.sqrt(1.0/(gaussian.EPSILON + sigmas))
points = points - means
points = points * sigmas_squared
points = points.view(-1, 1, rank)
products = torch.bmm(points, points.transpose(1,2))
products = products.view(batchsize, n, k)
num = torch.exp(- 0.5 * products)
return num
class MatrixHyperlayer(nn.Module):
def duplicates(self, tuples):
b, k, r = tuples.size()
primes = self.primes[:r]
primes = primes.unsqueeze(0).unsqueeze(0).expand(b, k, r)
unique = ((tuples+1) ** primes).prod(dim=2)
sorted, sort_idx = torch.sort(unique, dim=1)
_, unsort_idx = torch.sort(sort_idx, dim=1)
mask = sorted[:, 1:] == sorted[:, :-1]
zs = torch.zeros(b, 1, dtype=torch.uint8, device='cuda' if self.use_cuda else 'cpu')
mask = torch.cat([zs, mask], dim=1)
return torch.gather(mask, 1, unsort_idx)
def cuda(self, device_id=None):
self.use_cuda = True
super().cuda(device_id)
def __init__(self, in_num, out_num, k, radditional=0, gadditional=0, region=(128,),
sigma_scale=0.2, min_sigma=0.0, fix_value=False):
super().__init__()
self.min_sigma = min_sigma
self.use_cuda = False
self.in_num = in_num
self.out_num = out_num
self.k = k
self.radditional = radditional
self.region = region
self.gadditional = gadditional
self.sigma_scale = sigma_scale
self.fix_value = fix_value
self.weights_rank = 2
self.params = Parameter(torch.randn(k * out_num, 3))
outs = torch.arange(out_num).unsqueeze(1).expand(out_num, k * (2 + radditional + gadditional)).contiguous().view(-1, 1)
self.register_buffer('outs', outs.long())
outs_inf = torch.arange(out_num).unsqueeze(1).expand(out_num, k).contiguous().view(-1, 1)
self.register_buffer('outs_inf', outs_inf.long())
self.register_buffer('primes', torch.tensor(util.PRIMES))
def size(self):
return (self.out_num, self.in_num)
|
MIT License
|
tlc-pack/tenset
|
python/tvm/contrib/debugger/debug_runtime.py
|
GraphModuleDebug._create_debug_env
|
python
|
def _create_debug_env(self, graph_json, ctx):
if not self._dump_root:
self._dump_root = tempfile.mkdtemp(prefix=_DUMP_ROOT_PREFIX)
ctx = self._format_context(ctx)
self._dump_path = self._get_dump_path(ctx)
self.debug_datum = debug_result.DebugResult(graph_json, self._dump_path)
|
Create UI wrapper framework to handle multiple UI frontends for tvmdbg
Parameters
----------
graph_json : json format
JSON formatted NNVM graph containing each node's name, shape and type.
nodes_list : list
List of all the nodes present in the graph.
ctx : TVMContext
The context this module is under.
|
https://github.com/tlc-pack/tenset/blob/3f7ed0291df47331d43f43a064fffacdc2914b47/python/tvm/contrib/debugger/debug_runtime.py#L145-L170
|
import os
import tempfile
import shutil
import tvm._ffi
from tvm._ffi.base import string_types
from tvm.contrib import graph_runtime
from tvm.runtime.ndarray import array
from . import debug_result
_DUMP_ROOT_PREFIX = "tvmdbg_"
_DUMP_PATH_PREFIX = "_tvmdbg_"
def create(graph_json_str, libmod, ctx, dump_root=None):
assert isinstance(graph_json_str, string_types)
try:
ctx, num_rpc_ctx, device_type_id = graph_runtime.get_device_ctx(libmod, ctx)
if num_rpc_ctx == len(ctx):
fcreate = ctx[0]._rpc_sess.get_function("tvm.graph_runtime_debug.create")
else:
fcreate = tvm._ffi.get_global_func("tvm.graph_runtime_debug.create")
except ValueError:
raise ValueError(
"Please set '(USE_GRAPH_RUNTIME_DEBUG ON)' in "
"config.cmake and rebuild TVM to enable debug mode"
)
func_obj = fcreate(graph_json_str, libmod, *device_type_id)
return GraphModuleDebug(func_obj, ctx, graph_json_str, dump_root)
class GraphModuleDebug(graph_runtime.GraphModule):
def __init__(self, module, ctx, graph_json_str, dump_root):
self._dump_root = dump_root
self._dump_path = None
self._get_output_by_layer = module["get_output_by_layer"]
self._run_individual = module["run_individual"]
graph_runtime.GraphModule.__init__(self, module)
self._create_debug_env(graph_json_str, ctx)
def _format_context(self, ctx):
return str(ctx[0]).upper().replace("(", ":").replace(")", "")
def _ensure_dir(self, directory):
if not os.path.exists(directory):
os.makedirs(directory, 0o700)
def _get_dump_path(self, ctx):
folder_name = _DUMP_PATH_PREFIX + "ctx_"
folder_name = folder_name + ctx.replace(":", "_")
path = os.path.join(self._dump_root, folder_name)
self._ensure_dir(path)
return path
def _remove_dump_root(self):
if os.path.isdir(self._dump_root):
shutil.rmtree(self._dump_root)
|
Apache License 2.0
|
xhochy/fletcher
|
fletcher/base.py
|
FletcherBaseArray.ndim
|
python
|
def ndim(self) -> int:
return len(self.shape)
|
Return the number of dimensions of the underlying data.
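A tiny illustration, assuming `FletcherChunkedArray` is exported at the package top level and can be constructed directly from a pyarrow array (as `__from_arrow__` below does), with `ndim` exposed as a property:

import pyarrow as pa
from fletcher import FletcherChunkedArray

arr = FletcherChunkedArray(pa.array(["a", "b", None]))
assert arr.shape == (3,)
assert arr.ndim == 1  # Fletcher arrays are always one-dimensional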
|
https://github.com/xhochy/fletcher/blob/39ce11d71e4ac1e95725d46be21bb38f3ea04349/fletcher/base.py#L433-L435
|
import datetime
import operator
from collections import OrderedDict
from collections.abc import Iterable
from copy import copy as copycopy
from distutils.version import LooseVersion
from functools import partialmethod
from typing import Any, Callable, List, Optional, Sequence, Tuple, Type, Union
import numpy as np
import pandas as pd
import pyarrow as pa
from pandas.api.types import (
is_array_like,
is_bool_dtype,
is_int64_dtype,
is_integer,
is_integer_dtype,
)
from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes.dtypes import ExtensionDtype, register_extension_dtype
from pandas.util._decorators import doc
from fletcher._algorithms import (
extract_isnull_bytemap,
kurt_op,
max_op,
median_op,
min_op,
np_ufunc_op,
prod_op,
skew_op,
std_op,
sum_op,
take_on_pyarrow_list,
var_op,
)
from fletcher.algorithms.bool import all_op, all_true, any_op, or_na, or_vectorised
from fletcher.algorithms.utils.chunking import _calculate_chunk_offsets
from fletcher.string_mixin import StringSupportingExtensionArray
PANDAS_GE_0_26_0 = LooseVersion(pd.__version__) >= "0.26.0"
if PANDAS_GE_0_26_0:
from pandas.core.indexers import check_array_indexer
ARROW_GE_0_18_0 = LooseVersion(pa.__version__) >= "0.18.0"
_python_type_map = {
pa.null().id: str,
pa.bool_().id: bool,
pa.int8().id: int,
pa.uint8().id: int,
pa.int16().id: int,
pa.uint16().id: int,
pa.int32().id: int,
pa.uint32().id: int,
pa.int64().id: int,
pa.uint64().id: int,
pa.float16().id: float,
pa.float32().id: float,
pa.float64().id: float,
pa.date32().id: datetime.date,
pa.date64().id: datetime.date,
pa.timestamp("ms").id: datetime.datetime,
pa.binary().id: bytes,
pa.string().id: str,
pa.list_(pa.string()).id: list,
pa.large_list(pa.string()).id: list,
pa.dictionary(pa.int32(), pa.int32()).id: dict,
pa.duration("ns").id: datetime.timedelta,
}
_string_type_map = {"date64[ms]": pa.date64(), "string": pa.string()}
_examples = {
pa.null(): pa.array([None, None], type=pa.null()),
pa.bool_(): pa.array([None, True], type=pa.bool_()),
pa.int8(): pa.array([None, -1], type=pa.int8()),
pa.uint8(): pa.array([None, 1], type=pa.uint8()),
pa.int16(): pa.array([None, -1], type=pa.int16()),
pa.uint16(): pa.array([None, 1], type=pa.uint16()),
pa.int32(): pa.array([None, -1], type=pa.int32()),
pa.uint32(): pa.array([None, 1], type=pa.uint32()),
pa.int64(): pa.array([None, -1], type=pa.int64()),
pa.uint64(): pa.array([None, 1], type=pa.uint64()),
pa.float16(): pa.array([None, np.float16(-0.1)], type=pa.float16()),
pa.float32(): pa.array([None, -0.1], type=pa.float32()),
pa.float64(): pa.array([None, -0.1], type=pa.float64()),
pa.date32(): pa.array([None, datetime.date(2010, 9, 8)], type=pa.date32()),
pa.date64(): pa.array([None, datetime.date(2010, 9, 8)], type=pa.date64()),
pa.timestamp("s"): pa.array(
[None, datetime.datetime(2013, 12, 11, 10, 9, 8)], type=pa.timestamp("s")
),
pa.timestamp("ms"): pa.array(
[None, datetime.datetime(2013, 12, 11, 10, 9, 8, 1000)], type=pa.timestamp("ms")
),
pa.timestamp("us"): pa.array(
[None, datetime.datetime(2013, 12, 11, 10, 9, 8, 7)], type=pa.timestamp("us")
),
pa.timestamp("ns"): pa.array(
[None, datetime.datetime(2013, 12, 11, 10, 9, 8, 7)], type=pa.timestamp("ns")
),
pa.binary(): pa.array([None, b"122"], type=pa.binary()),
pa.string(): pa.array([None, "🤔"], type=pa.string()),
pa.duration("s"): pa.array(
[None, datetime.timedelta(seconds=9)], type=pa.duration("s")
),
pa.duration("ms"): pa.array(
[None, datetime.timedelta(milliseconds=8)], type=pa.duration("ms")
),
pa.duration("us"): pa.array(
[None, datetime.timedelta(microseconds=7)], type=pa.duration("us")
),
pa.duration("ns"): pa.array(
[None, datetime.timedelta(microseconds=7)], type=pa.duration("ns")
),
}
def _get_example(arrow_dtype: pa.DataType) -> pa.Array:
if isinstance(arrow_dtype, pa.ListType):
return pa.array(
[None, _get_example(arrow_dtype.value_type).to_pylist()], type=arrow_dtype
)
return _examples[arrow_dtype]
def _is_numeric(arrow_dtype: pa.DataType) -> bool:
return (
pa.types.is_integer(arrow_dtype)
or pa.types.is_floating(arrow_dtype)
or pa.types.is_decimal(arrow_dtype)
)
class FletcherBaseDtype(ExtensionDtype):
na_value = pd.NA
def __init__(self, arrow_dtype: pa.DataType):
self.arrow_dtype = arrow_dtype
def __hash__(self) -> int:
return hash(self.arrow_dtype)
def __eq__(self, other) -> bool:
if isinstance(other, str):
return other == self.name
elif isinstance(other, type(self)):
return self.arrow_dtype == other.arrow_dtype
else:
return False
@property
def type(self):
return _python_type_map[self.arrow_dtype.id]
@property
def kind(self) -> str:
if pa.types.is_date(self.arrow_dtype):
return "O"
elif self._is_list:
return "O"
elif pa.types.is_string(self.arrow_dtype):
return "U"
else:
return np.dtype(self.arrow_dtype.to_pandas_dtype()).kind
@property
def name(self) -> str:
return str(self)
@property
def itemsize(self) -> int:
return self.arrow_dtype.bit_width
@property
def _is_boolean(self):
return pa.types.is_boolean(self.arrow_dtype)
@property
def _is_numeric(self):
return _is_numeric(self.arrow_dtype)
@property
def _is_list(self):
return pa.types.is_list(self.arrow_dtype) or pa.types.is_large_list(
self.arrow_dtype
)
def __from_arrow__(self, data):
return self.construct_array_type()(data)
def example(self):
return self.construct_array_type()(_get_example(self.arrow_dtype))
@register_extension_dtype
class FletcherContinuousDtype(FletcherBaseDtype):
def __str__(self) -> str:
return f"fletcher_continuous[{self.arrow_dtype}]"
def __repr__(self) -> str:
return "FletcherContinuousDtype({})".format(str(self.arrow_dtype))
@classmethod
def construct_from_string(cls, string: str):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got <class 'int'>"
)
if string.startswith("fletcher_continuous["):
string = string[len("fletcher_continuous[") : -1]
else:
raise TypeError(
f"Cannot construct a 'FletcherContinuousDtype' from '{string}'"
)
if string == "list<item: string>":
return cls(pa.list_(pa.string()))
try:
type_for_alias = pa.type_for_alias(string)
except (ValueError, KeyError):
raise TypeError(string)
return cls(type_for_alias)
@classmethod
def construct_array_type(cls, *args):
if len(args) > 0:
raise NotImplementedError("construct_array_type does not support arguments")
return FletcherContinuousArray
@register_extension_dtype
class FletcherChunkedDtype(FletcherBaseDtype):
def __str__(self) -> str:
return f"fletcher_chunked[{self.arrow_dtype}]"
def __repr__(self) -> str:
return "FletcherChunkedDtype({})".format(str(self.arrow_dtype))
@classmethod
def construct_from_string(cls, string: str) -> "FletcherChunkedDtype":
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got <class 'int'>"
)
if string.startswith("fletcher_chunked["):
string = string[len("fletcher_chunked[") : -1]
else:
raise TypeError(
f"Cannot construct a 'FletcherChunkedDtype' from '{string}'"
)
if string == "list<item: string>":
return cls(pa.list_(pa.string()))
try:
type_for_alias = pa.type_for_alias(string)
except (ValueError, KeyError):
raise TypeError(string)
return cls(type_for_alias)
@classmethod
def construct_array_type(cls, *args) -> "Type[FletcherChunkedArray]":
if len(args) > 0:
raise NotImplementedError("construct_array_type does not support arguments")
return FletcherChunkedArray
class FletcherBaseArray(StringSupportingExtensionArray):
_can_hold_na = True
@property
def dtype(self) -> ExtensionDtype:
return self._dtype
def __array__(self, *args, **kwargs) -> np.ndarray:
return self.data.__array__(*args, **kwargs)
def __arrow_array__(self, type=None):
return self.data
@property
def size(self) -> int:
return len(self.data)
@property
def shape(self) -> Tuple[int]:
return (self.size,)
@property
|
MIT License
|
victorca25/basicsr
|
codes/models/modules/architectures/block.py
|
depth_to_space_tf
|
python
|
def depth_to_space_tf(x, bs:int=2):
assert bs >= 1 and isinstance(bs, int)
if bs == 1:
return x
b, c, h, w = x.size()
if c % (bs ** 2) != 0:
raise ValueError("The tensor channels must be divisible by "
"(bs ** 2).")
new_d = -1
new_h = h * bs
new_w = w * bs
x = x.view(b, bs, bs, new_d, h, w)
x = x.permute(0, 3, 4, 1, 5, 2).contiguous()
return x.view(b, new_d, new_h, new_w)
|
Pixel shuffle (TensorFlow).
Equivalent to:
https://www.tensorflow.org/api_docs/python/tf/nn/depth_to_space
Args:
x (Tensor): Input tensor (b, c, h, w).
bs: block_size, scale factor.
Returns:
Tensor: tensor after pixel shuffle.
|
https://github.com/victorca25/basicsr/blob/62cf668ebe35b1b0c9d573b500e129f94430ab5a/codes/models/modules/architectures/block.py#L463-L490
|
from collections import OrderedDict
import torch
import torch.nn as nn
from models.modules.architectures.convolutions.partialconv2d import PartialConv2d
from models.modules.architectures.convolutions.deformconv2d import DeformConv2d
from models.networks import weights_init_normal, weights_init_xavier, weights_init_kaiming, weights_init_orthogonal
def swish_func(x, beta=1.0, inplace=False):
if inplace:
result = x.clone()
torch.sigmoid_(beta*x)
x *= result
return x
return x * torch.sigmoid(beta * x)
class Swish(nn.Module):
__constants__ = ['beta', 'slope', 'inplace']
def __init__(self, beta=1.0, slope=1.67653251702, inplace=False):
super(Swish, self).__init__()
self.inplace = inplace
self.beta = torch.nn.Parameter(torch.tensor(beta))
self.beta.requires_grad = True
self.slope = slope / 2
def forward(self, x):
return 2 * self.slope * swish_func(x, self.beta, self.inplace)
def act(act_type, inplace=True, neg_slope=0.2, n_prelu=1, beta=1.0):
act_type = act_type.lower()
if act_type == 'relu':
layer = nn.ReLU(inplace)
elif act_type in ('leakyrelu', 'lrelu'):
layer = nn.LeakyReLU(neg_slope, inplace)
elif act_type == 'prelu':
layer = nn.PReLU(num_parameters=n_prelu, init=neg_slope)
elif act_type == 'tanh':
layer = nn.Tanh()
elif act_type == 'sigmoid':
layer = nn.Sigmoid()
elif act_type == 'swish':
layer = Swish(beta=beta, inplace=inplace)
else:
raise NotImplementedError('activation layer [{:s}] is not found'.format(act_type))
return layer
class Identity(nn.Module):
def __init__(self, *kwargs):
super(Identity, self).__init__()
def forward(self, x, *kwargs):
return x
def norm(norm_type, nc):
norm_type = norm_type.lower()
if norm_type == 'batch':
layer = nn.BatchNorm2d(nc, affine=True)
elif norm_type == 'instance':
layer = nn.InstanceNorm2d(nc, affine=False)
elif norm_type == 'none':
layer = Identity()
else:
raise NotImplementedError('normalization layer [{:s}] is not found'.format(norm_type))
return layer
def add_spectral_norm(module, use_spectral_norm=False):
if use_spectral_norm:
return nn.utils.spectral_norm(module)
return module
def pad(pad_type, padding):
pad_type = pad_type.lower()
if padding == 0:
return None
if pad_type == 'reflect':
layer = nn.ReflectionPad2d(padding)
elif pad_type == 'replicate':
layer = nn.ReplicationPad2d(padding)
elif pad_type == 'zero':
layer = nn.ZeroPad2d(padding)
else:
raise NotImplementedError('padding layer [{:s}] is not implemented'.format(pad_type))
return layer
def get_valid_padding(kernel_size, dilation):
kernel_size = kernel_size + (kernel_size - 1) * (dilation - 1)
padding = (kernel_size - 1) // 2
return padding
class ConcatBlock(nn.Module):
def __init__(self, submodule):
super(ConcatBlock, self).__init__()
self.sub = submodule
def forward(self, x):
output = torch.cat((x, self.sub(x)), dim=1)
return output
def __repr__(self):
return 'Identity .. \n|' + self.sub.__repr__().replace('\n', '\n|')
class ShortcutBlock(nn.Module):
def __init__(self, submodule):
super(ShortcutBlock, self).__init__()
self.sub = submodule
def forward(self, x):
output = x + self.sub(x)
return output
def __repr__(self):
return 'Identity + \n|' + self.sub.__repr__().replace('\n', '\n|')
def sequential(*args):
if len(args) == 1:
if isinstance(args[0], OrderedDict):
raise NotImplementedError('sequential does not support OrderedDict input.')
return args[0]
modules = []
for module in args:
if isinstance(module, nn.Sequential):
for submodule in module.children():
modules.append(submodule)
elif isinstance(module, nn.Module):
modules.append(module)
return nn.Sequential(*modules)
def conv_block(in_nc, out_nc, kernel_size, stride=1, dilation=1, groups=1,
bias=True, pad_type='zero', norm_type=None, act_type='relu', mode='CNA',
convtype='Conv2D', spectral_norm=False):
assert mode in ['CNA', 'NAC', 'CNAC'], 'Wrong conv mode [{:s}]'.format(mode)
padding = get_valid_padding(kernel_size, dilation)
p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None
padding = padding if pad_type == 'zero' else 0
if convtype=='PartialConv2D':
c = PartialConv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=bias, groups=groups)
elif convtype=='DeformConv2D':
c = DeformConv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=bias, groups=groups)
elif convtype=='Conv3D':
c = nn.Conv3d(in_nc, out_nc, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=bias, groups=groups)
else:
c = nn.Conv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=bias, groups=groups)
if spectral_norm:
c = nn.utils.spectral_norm(c)
a = act(act_type) if act_type else None
if 'CNA' in mode:
n = norm(norm_type, out_nc) if norm_type else None
return sequential(p, c, n, a)
elif mode == 'NAC':
if norm_type is None and act_type is not None:
a = act(act_type, inplace=False)
n = norm(norm_type, in_nc) if norm_type else None
return sequential(n, a, p, c)
def make_layer(basic_block, num_basic_block, **kwarg):
layers = []
for _ in range(num_basic_block):
layers.append(basic_block(**kwarg))
return nn.Sequential(*layers)
class Mean(nn.Module):
def __init__(self, dim: list, keepdim=False):
super().__init__()
self.dim = dim
self.keepdim = keepdim
def forward(self, x):
return torch.mean(x, self.dim, self.keepdim)
@torch.no_grad()
def default_init_weights(module_list, init_type='kaiming', scale=1, bias_fill=0, **kwargs):
if not isinstance(module_list, list):
module_list = [module_list]
for module in module_list:
for m in module.modules():
if init_type == 'normal':
weights_init_normal(m, bias_fill=bias_fill, **kwargs)
if init_type == 'xavier':
weights_init_xavier(m, scale=scale, bias_fill=bias_fill, **kwargs)
elif init_type == 'kaiming':
weights_init_kaiming(m, scale=scale, bias_fill=bias_fill, **kwargs)
elif init_type == 'orthogonal':
weights_init_orthogonal(m, bias_fill=bias_fill)
else:
raise NotImplementedError('initialization method [{:s}] not implemented'.format(init_type))
class Upsample(nn.Module):
def __init__(self, size=None, scale_factor=None, mode="nearest", align_corners=None):
super(Upsample, self).__init__()
if isinstance(scale_factor, tuple):
self.scale_factor = tuple(float(factor) for factor in scale_factor)
else:
self.scale_factor = float(scale_factor) if scale_factor else None
self.mode = mode
self.size = size
self.align_corners = align_corners
def forward(self, x):
return nn.functional.interpolate(
x, size=self.size, scale_factor=self.scale_factor,
mode=self.mode, align_corners=self.align_corners)
def extra_repr(self):
if self.scale_factor is not None:
info = 'scale_factor=' + str(self.scale_factor)
else:
info = 'size=' + str(self.size)
info += ', mode=' + self.mode
return info
def pixelshuffle_block(in_nc, out_nc, upscale_factor=2,
kernel_size=3, stride=1, bias=True, pad_type='zero',
norm_type=None, act_type='relu', convtype='Conv2D'):
conv = conv_block(in_nc, out_nc * (upscale_factor ** 2), kernel_size, stride, bias=bias,
pad_type=pad_type, norm_type=None, act_type=None, convtype=convtype)
pixel_shuffle = nn.PixelShuffle(upscale_factor)
n = norm(norm_type, out_nc) if norm_type else None
a = act(act_type) if act_type else None
return sequential(conv, pixel_shuffle, n, a)
def upconv_block(in_nc, out_nc, upscale_factor=2, kernel_size=3,
stride=1, bias=True, pad_type='zero', norm_type=None,
act_type='relu', mode='nearest', convtype='Conv2D'):
upscale_factor = (1, upscale_factor, upscale_factor) if convtype == 'Conv3D' else upscale_factor
upsample = Upsample(scale_factor=upscale_factor, mode=mode)
conv = conv_block(in_nc, out_nc, kernel_size, stride, bias=bias,
pad_type=pad_type, norm_type=norm_type, act_type=act_type,
convtype=convtype)
return sequential(upsample, conv)
class DepthToSpace(nn.Module):
def __init__(self, block_size:int=2, form:str='pt'):
super().__init__()
self.bs = block_size
self.form = form
def forward(self, x):
if self.form == 'tf':
return depth_to_space_tf(x, self.bs)
return depth_to_space(x, self.bs)
def extra_repr(self):
return f"block_size={self.bs}"
def depth_to_space(x, bs:int=2):
assert bs >= 1 and isinstance(bs, int)
if bs == 1:
return x
b, c, h, w = x.size()
if c % (bs ** 2) != 0:
raise ValueError("The tensor channels must be divisible by "
"(bs ** 2).")
new_d = -1
new_h = h * bs
new_w = w * bs
x = x.view(b, new_d, bs, bs, h, w)
x = x.permute(0, 1, 4, 2, 5, 3).contiguous()
return x.view(b, new_d, new_h, new_w)
|
Apache License 2.0
|
pycontribs/pyrax
|
pyrax/cloudmonitoring.py
|
CloudMonitorAlarm.update
|
python
|
def update(self, criteria=None, disabled=False, label=None, name=None,
metadata=None):
return self.entity.update_alarm(self, criteria=criteria,
disabled=disabled, label=label, name=name, metadata=metadata)
|
Updates this alarm.
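A hedged usage sketch; `alarm` stands for a CloudMonitorAlarm obtained earlier (for example via `entity.get_alarm(...)` shown below), and only keyword arguments from the signature above are used:

# alarm is a CloudMonitorAlarm fetched earlier, e.g. via entity.get_alarm(alarm_id)
alarm.update(label="high-cpu", disabled=False, metadata={"team": "ops"})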
|
https://github.com/pycontribs/pyrax/blob/a0c022981f76a4cba96a22ecc19bb52843ac4fbe/pyrax/cloudmonitoring.py#L969-L975
|
from __future__ import absolute_import, unicode_literals
from functools import wraps
import re
from pyrax.client import BaseClient
import pyrax.exceptions as exc
from pyrax.manager import BaseManager
from pyrax.resource import BaseResource
import pyrax.utils as utils
_invalid_key_pat = re.compile(r"Validation error for key '([^']+)'")
def _params_to_dict(params, dct, local_dict):
for param in params:
val = local_dict.get(param)
if val is None:
continue
dct[param] = val
return dct
def assure_check(fnc):
@wraps(fnc)
def _wrapped(self, check, *args, **kwargs):
if not isinstance(check, CloudMonitorCheck):
check = self._check_manager.get(check)
return fnc(self, check, *args, **kwargs)
return _wrapped
def assure_entity(fnc):
@wraps(fnc)
def _wrapped(self, entity, *args, **kwargs):
if not isinstance(entity, CloudMonitorEntity):
entity = self._entity_manager.get(entity)
return fnc(self, entity, *args, **kwargs)
return _wrapped
class CloudMonitorEntity(BaseResource):
def __init__(self, *args, **kwargs):
super(CloudMonitorEntity, self).__init__(*args, **kwargs)
self._check_manager = CloudMonitorCheckManager(self.manager.api,
uri_base="entities/%s/checks" % self.id,
resource_class=CloudMonitorCheck, response_key=None,
plural_response_key=None)
self._alarm_manager = CloudMonitorAlarmManager(self.manager,
self.manager.api, uri_base="entities/%s/alarms" % self.id,
resource_class=CloudMonitorAlarm, response_key=None,
plural_response_key=None)
def update(self, agent=None, metadata=None):
self.manager.update_entity(self, agent=agent, metadata=metadata)
def get_check(self, check):
chk = self._check_manager.get(check)
chk.set_entity(self)
return chk
def list_checks(self, limit=None, marker=None, return_next=False):
checks = self._check_manager.list(limit=limit, marker=marker,
return_next=return_next)
for check in checks:
check.set_entity(self)
return checks
def find_all_checks(self, **kwargs):
checks = self._check_manager.find_all_checks(**kwargs)
for check in checks:
check.set_entity(self)
return checks
def create_check(self, label=None, name=None, check_type=None,
disabled=False, metadata=None, details=None,
monitoring_zones_poll=None, timeout=None, period=None,
target_alias=None, target_hostname=None, target_receiver=None,
test_only=False, include_debug=False):
return self._check_manager.create_check(label=label, name=name,
check_type=check_type, disabled=disabled, metadata=metadata,
details=details, monitoring_zones_poll=monitoring_zones_poll,
timeout=timeout, period=period, target_alias=target_alias,
target_hostname=target_hostname,
target_receiver=target_receiver, test_only=test_only,
include_debug=include_debug)
def update_check(self, check, label=None, name=None, disabled=None,
metadata=None, monitoring_zones_poll=None, timeout=None,
period=None, target_alias=None, target_hostname=None,
target_receiver=None):
return self._check_manager.update(check, label=label, name=name,
disabled=disabled, metadata=metadata,
monitoring_zones_poll=monitoring_zones_poll, timeout=timeout,
period=period, target_alias=target_alias,
target_hostname=target_hostname,
target_receiver=target_receiver)
def delete_check(self, check):
return self._check_manager.delete(check)
@assure_check
def list_metrics(self, check, limit=None, marker=None, return_next=False):
return check.list_metrics(limit=limit, marker=marker,
return_next=return_next)
@assure_check
def get_metric_data_points(self, check, metric, start, end, points=None,
resolution=None, stats=None):
return check.get_metric_data_points(metric, start, end, points=points,
resolution=resolution, stats=stats)
def create_alarm(self, check, notification_plan, criteria=None,
disabled=False, label=None, name=None, metadata=None):
return self._alarm_manager.create(check, notification_plan,
criteria=criteria, disabled=disabled, label=label, name=name,
metadata=metadata)
def update_alarm(self, alarm, criteria=None, disabled=False,
label=None, name=None, metadata=None):
return self._alarm_manager.update(alarm, criteria=criteria,
disabled=disabled, label=label, name=name, metadata=metadata)
def list_alarms(self, limit=None, marker=None, return_next=False):
return self._alarm_manager.list(limit=limit, marker=marker,
return_next=return_next)
def get_alarm(self, alarm):
return self._alarm_manager.get(alarm)
def delete_alarm(self, alarm):
return self._alarm_manager.delete(alarm)
@property
def name(self):
return self.label
class _PaginationManager(BaseManager):
def list(self, limit=None, marker=None, return_next=False):
kwargs = {}
if return_next:
kwargs["other_keys"] = "metadata"
ret = super(_PaginationManager, self).list(limit=limit,
marker=marker, **kwargs)
if return_next:
ents, meta = ret
return (ents, meta[0].get("next_marker"))
else:
return ret
class CloudMonitorNotificationManager(_PaginationManager):
def create(self, notification_type, label=None, name=None, details=None):
uri = "/%s" % self.uri_base
body = {"label": label or name,
"type": utils.get_id(notification_type),
"details": details,
}
resp, resp_body = self.api.method_post(uri, body=body)
return self.get(resp.headers["x-object-id"])
def test_notification(self, notification=None, notification_type=None,
details=None):
if notification:
uri = "/%s/%s/test" % (self.uri_base, utils.get_id(notification))
body = None
else:
uri = "/test-notification"
body = {"type": utils.get_id(notification_type),
"details": details}
resp, resp_body = self.api.method_post(uri, body=body)
def update_notification(self, notification, details):
if isinstance(notification, CloudMonitorNotification):
nid = notification.id
ntyp = notification.type
else:
nfcn = self.get(notification)
nid = notification
ntyp = nfcn.type
uri = "/%s/%s" % (self.uri_base, nid)
body = {"type": ntyp,
"details": details}
resp, resp_body = self.api.method_put(uri, body=body)
def list_types(self):
uri = "/notification_types"
resp, resp_body = self.api.method_get(uri)
return [CloudMonitorNotificationType(self, info)
for info in resp_body["values"]]
def get_type(self, notification_type_id):
uri = "/notification_types/%s" % utils.get_id(notification_type_id)
resp, resp_body = self.api.method_get(uri)
return CloudMonitorNotificationType(self, resp_body)
class CloudMonitorNotificationPlanManager(_PaginationManager):
def create(self, label=None, name=None, critical_state=None, ok_state=None,
warning_state=None):
uri = "/%s" % self.uri_base
body = {"label": label or name}
def make_list_of_ids(parameter):
params = utils.coerce_to_list(parameter)
return [utils.get_id(param) for param in params]
if critical_state:
critical_state = utils.coerce_to_list(critical_state)
body["critical_state"] = make_list_of_ids(critical_state)
if warning_state:
warning_state = utils.coerce_to_list(warning_state)
body["warning_state"] = make_list_of_ids(warning_state)
if ok_state:
ok_state = utils.coerce_to_list(ok_state)
body["ok_state"] = make_list_of_ids(ok_state)
resp, resp_body = self.api.method_post(uri, body=body)
return self.get(resp.headers["x-object-id"])
class CloudMonitorMetricsManager(_PaginationManager):
def get_metric_data_points(self, metric, start, end, points=None,
resolution=None, stats=None):
allowed_resolutions = ("FULL", "MIN5", "MIN20", "MIN60", "MIN240",
"MIN1440")
if not (points or resolution):
raise exc.MissingMonitoringCheckGranularity("You must specify "
"either the 'points' or 'resolution' parameter when "
"fetching metrics.")
if resolution:
if resolution.upper() not in allowed_resolutions:
raise exc.InvalidMonitoringMetricsResolution("The specified "
"resolution '%s' is not valid. The valid values are: "
"%s." % (resolution, str(allowed_resolutions)))
start_tm = utils.to_timestamp(start)
end_tm = utils.to_timestamp(end)
start_tm *= 1000
end_tm *= 1000
qparms = []
qparms.append("from=%s" % int(start_tm))
qparms.append("to=%s" % int(end_tm))
if points:
qparms.append("points=%s" % points)
if resolution:
qparms.append("resolution=%s" % resolution.upper())
if stats:
stats = utils.coerce_to_list(stats)
for stat in stats:
qparms.append("select=%s" % stat)
qparm = "&".join(qparms)
uri = "/%s/%s/plot?%s" % (self.uri_base, metric, qparm)
try:
resp, resp_body = self.api.method_get(uri)
except exc.BadRequest as e:
msg = e.message
dtls = e.details
if msg.startswith("Validation error"):
raise exc.InvalidMonitoringMetricsRequest("Your request was "
"invalid: '%s'" % dtls)
else:
raise
return resp_body["values"]
class CloudMonitorAlarmManager(_PaginationManager):
def __init__(self, entity_manager, api, resource_class=None,
response_key=None, plural_response_key=None, uri_base=None):
self.entity_manager = entity_manager
_PaginationManager.__init__(self, api, resource_class=resource_class,
response_key=response_key, plural_response_key=plural_response_key,
uri_base=uri_base)
def create(self, check, notification_plan, criteria=None,
disabled=False, label=None, name=None, metadata=None):
uri = "/%s" % self.uri_base
body = {"check_id": utils.get_id(check),
"notification_plan_id": utils.get_id(notification_plan),
}
if criteria:
body["criteria"] = criteria
if disabled is not None:
body["disabled"] = disabled
label_name = label or name
if label_name:
body["label"] = label_name
if metadata:
body["metadata"] = metadata
resp, resp_body = self.api.method_post(uri, body=body)
if resp.status_code == 201:
alarm_id = resp.headers["x-object-id"]
return self.get(alarm_id)
def update(self, alarm, criteria=None, disabled=False, label=None,
name=None, metadata=None):
uri = "/%s/%s" % (self.uri_base, utils.get_id(alarm))
body = {}
if criteria:
body["criteria"] = criteria
if disabled is not None:
body["disabled"] = disabled
label_name = label or name
if label_name:
body["label"] = label_name
if metadata:
body["metadata"] = metadata
resp, resp_body = self.api.method_put(uri, body=body)
class CloudMonitorCheckManager(_PaginationManager):
def create_check(self, label=None, name=None, check_type=None,
details=None, disabled=False, metadata=None,
monitoring_zones_poll=None, timeout=None, period=None,
target_alias=None, target_hostname=None, target_receiver=None,
test_only=False, include_debug=False):
if details is None:
raise exc.MissingMonitoringCheckDetails("The required 'details' "
"parameter was not passed to the create_check() method.")
ctype = utils.get_id(check_type)
is_remote = ctype.startswith("remote")
monitoring_zones_poll = utils.coerce_to_list(monitoring_zones_poll)
monitoring_zones_poll = [utils.get_id(mzp)
for mzp in monitoring_zones_poll]
if is_remote:
if not monitoring_zones_poll:
raise exc.MonitoringZonesPollMissing("You must specify the "
"'monitoring_zones_poll' parameter for remote checks.")
if not (target_alias or target_hostname):
raise exc.MonitoringCheckTargetNotSpecified("You must "
"specify either the 'target_alias' or 'target_hostname' "
"when creating a remote check.")
body = {"label": label or name,
"details": details,
"disabled": disabled,
"type": utils.get_id(check_type),
}
params = ("monitoring_zones_poll", "timeout", "period",
"target_alias", "target_hostname", "target_receiver")
body = _params_to_dict(params, body, locals())
if test_only:
uri = "/%s/test-check" % self.uri_base
if include_debug:
uri = "%s?debug=true" % uri
else:
uri = "/%s" % self.uri_base
try:
resp = self.api.method_post(uri, body=body)[0]
except exc.BadRequest as e:
msg = e.message
dtls = e.details
match = _invalid_key_pat.match(msg)
if match:
missing = match.groups()[0].replace("details.", "")
if missing in details:
errmsg = "".join(["The value passed for '%s' in the ",
"details parameter is not valid."]) % missing
else:
errmsg = "".join(["The required value for the '%s' ",
"setting is missing from the 'details' ",
"parameter."]) % missing
utils.update_exc(e, errmsg)
raise e
else:
if msg == "Validation error":
raise exc.InvalidMonitoringCheckDetails("Validation "
"failed. Error: '%s'." % dtls)
raise e
else:
if resp.status_code == 201:
check_id = resp.headers["x-object-id"]
return self.get(check_id)
raise exc.ClientException("Unknown response code creating check;"
" expected 201, got %s" % resp.status_code)
def update(self, check, label=None, name=None, disabled=None,
metadata=None, monitoring_zones_poll=None, timeout=None,
period=None, target_alias=None, target_hostname=None,
target_receiver=None):
if monitoring_zones_poll:
monitoring_zones_poll = utils.coerce_to_list(monitoring_zones_poll)
monitoring_zones_poll = [utils.get_id(mzp)
for mzp in monitoring_zones_poll]
body = {}
local_dict = locals()
label = label or name
params = ("label", "disabled", "metadata", "monitoring_zones_poll",
"timeout", "period", "target_alias", "target_hostname",
"target_receiver")
body = _params_to_dict(params, body, locals())
entity = check.entity
uri = "/%s/%s" % (self.uri_base, utils.get_id(check))
try:
resp, resp_body = self.api.method_put(uri, body=body)
except exc.BadRequest as e:
msg = e.message
dtls = e.details
if msg.startswith("Validation error"):
raise exc.InvalidMonitoringCheckUpdate("The update failed "
"validation: %s: %s" % (msg, dtls))
else:
raise
return resp_body
def find_all_checks(self, **kwargs):
found = []
searches = kwargs.items()
for obj in self.list():
try:
if all(getattr(obj, attr) == value
for (attr, value) in searches):
found.append(obj)
except AttributeError:
continue
return found
class _EntityFilteringManger(BaseManager):
def list(self, entity=None):
uri = "/%s" % self.uri_base
if entity:
uri = "%s?entityId=%s" % (uri, utils.get_id(entity))
resp, resp_body = self._list(uri, return_raw=True)
return resp_body
class CloudMonitorEntityManager(_PaginationManager):
def _create_body(self, name, label=None, agent=None, ip_addresses=None,
metadata=None):
label = label or name
if ip_addresses is not None:
body = {"label": label}
if ip_addresses:
body["ip_addresses"] = ip_addresses
if agent:
body["agent_id"] = utils.get_id(agent)
if metadata:
body["metadata"] = metadata
return body
def update_entity(self, entity, agent=None, metadata=None):
body = {}
if agent:
body["agent_id"] = utils.get_id(agent)
if metadata:
body["metadata"] = metadata
if body:
uri = "/%s/%s" % (self.uri_base, utils.get_id(entity))
resp, body = self.api.method_put(uri, body=body)
class CloudMonitorTokenManager(BaseManager):
def __init__(self, api):
super(CloudMonitorTokenManager, self).__init__(api,
resource_class=CloudMonitorAgentToken, uri_base="agent_tokens")
def create(self, name):
resp = super(CloudMonitorTokenManager, self).create(name,
return_response=True)
loc = resp.headers.get("location")
if loc:
return self.get(loc.rsplit("/")[-1])
def update(self, token, label):
uri = "/%s/%s" % (self.uri_base, utils.get_id(token))
self._update(uri, self._create_body(label))
return self.get(token)
def _create_body(self, name, *args, **kwargs):
return {
"label": name
}
class CloudMonitorCheck(BaseResource):
def __init__(self, manager, info, entity=None, key=None, loaded=False):
super(CloudMonitorCheck, self).__init__(manager, info, key=key,
loaded=loaded)
self.set_entity(entity)
def set_entity(self, entity):
if entity is None:
return
if not isinstance(entity, CloudMonitorEntity):
entity = self.manager.get(entity)
self.entity = entity
self._metrics_manager = CloudMonitorMetricsManager(self.manager.api,
uri_base="entities/%s/checks/%s/metrics" % (self.entity.id,
self.id), resource_class=CloudMonitorMetric, response_key=None,
plural_response_key=None)
@property
def name(self):
return self.label
def get(self):
new = self.manager.get(self)
if new:
self._add_details(new._info)
reload = get
def update(self, label=None, name=None, disabled=None, metadata=None,
monitoring_zones_poll=None, timeout=None, period=None,
target_alias=None, target_hostname=None, target_receiver=None):
self.manager.update(self, label=label, name=name,
disabled=disabled, metadata=metadata,
monitoring_zones_poll=monitoring_zones_poll, timeout=timeout,
period=period, target_alias=target_alias,
target_hostname=target_hostname,
target_receiver=target_receiver)
def delete(self):
self.manager.delete(self)
def list_metrics(self, limit=None, marker=None, return_next=False):
return self._metrics_manager.list(limit=limit, marker=marker,
return_next=return_next)
def get_metric_data_points(self, metric, start, end, points=None,
resolution=None, stats=None):
return self._metrics_manager.get_metric_data_points(metric, start, end,
points=points, resolution=resolution, stats=stats)
def create_alarm(self, notification_plan, criteria=None, disabled=False,
label=None, name=None, metadata=None):
return self.manager.create_alarm(self.entity, self, notification_plan,
criteria=criteria, disabled=disabled, label=label, name=name,
metadata=metadata)
class CloudMonitorCheckType(BaseResource):
@property
def field_names(self):
return [field["name"] for field in self.fields]
@property
def required_field_names(self):
return [field["name"] for field in self.fields
if not field["optional"]]
@property
def optional_field_names(self):
return [field["name"] for field in self.fields
if field["optional"]]
class CloudMonitorZone(BaseResource):
@property
def name(self):
return self.label
class CloudMonitorNotification(BaseResource):
@property
def name(self):
return self.label
def update(self, details):
return self.manager.update_notification(self, details)
class CloudMonitorNotificationType(BaseResource):
@property
def name(self):
return self.label
class CloudMonitorNotificationPlan(BaseResource):
@property
def name(self):
return self.label
class CloudMonitorMetric(BaseResource):
pass
class CloudMonitorAlarm(BaseResource):
def __init__(self, manager, info, entity=None, key=None, loaded=False):
super(CloudMonitorAlarm, self).__init__(manager, info, key=key,
loaded=loaded)
if entity is None:
entity = info['entity_id']
if not isinstance(entity, CloudMonitorEntity):
entity = manager.entity_manager.get(entity)
self.entity = entity
|
Apache License 2.0
|
aiworx-labs/chocolate
|
chocolate/mo/pyhv.py
|
hypervolume
|
python
|
def hypervolume(pointset, ref):
hv = _HyperVolume(ref)
return hv.compute(pointset)
|
Python version of hypervolume computation.
.. note::
Use the wrapper from :mod:`chocolate.mo` module instead.
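A minimal usage sketch; the Pareto front and reference point below are made up, and minimization is assumed so that every point dominates the reference:
# Hypothetical example: three non-dominated points of a 2-objective
# minimization problem and a reference point that all of them dominate.
front = [[1.0, 4.0], [2.0, 2.0], [4.0, 1.0]]
ref = [5.0, 5.0]
print(hypervolume(front, ref))  # area dominated by the front up to `ref`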
|
https://github.com/aiworx-labs/chocolate/blob/0ba4f6f0130eab851d32d5534241c8cac3f6666e/chocolate/mo/pyhv.py#L18-L26
|
from math import log, floor
import random
import warnings
import numpy
__all__ = ["hypervolume"]
warnings.simplefilter("once", ImportWarning)
warnings.warn("Using Python version of hypervolume module. Expect this to be slower.", ImportWarning)
|
BSD 3-Clause New or Revised License
|
nic30/hwt
|
hwt/interfaces/signalOps.py
|
SignalOps.__call__
|
python
|
def __call__(self, source, exclude=None, fit=False):
assert self._isAccessible, self
        return self._sig(source, exclude=exclude, fit=fit)
|
Connect this signal to a driver.
:attention: this is not a function call; it is the assignment operator
:return: list of assignments
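A brief sketch of the operator in a component, assuming a standard hwt Unit with one input and one output (the component and signal names are illustrative):
from hwt.interfaces.std import Signal
from hwt.synthesizer.unit import Unit

class Passthrough(Unit):
    def _declr(self):
        self.a = Signal()        # input port
        self.b = Signal()._m()   # output port (master side)

    def _impl(self):
        # the assignment operator described above: drive `b` from `a`
        self.b(self.a)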
|
https://github.com/nic30/hwt/blob/db57819a4234d818d9bc00b927e5bc208195a530/hwt/interfaces/signalOps.py#L161-L169
|
from hwt.doc_markers import internal
class SignalOps(object):
def _auto_cast(self, toT):
return self._sig._auto_cast(toT)
@internal
def _convSign(self, signed):
return self._sig._convSign(signed)
def _signed(self):
return self._convSign(True)
def _unsigned(self):
return self._convSign(False)
def _vec(self):
return self._convSign(None)
def _reinterpret_cast(self, toT):
return self._sig._reinterpret_cast(toT)
def _onRisingEdge(self):
return self._sig._onRisingEdge()
def _onFallingEdge(self):
return self._sig._onFallingEdge()
def _isOn(self):
return self._sig._isOn()
def _eq(self, other):
return self._sig._eq(other)
def __ne__(self, other):
return self._sig.__ne__(other)
def __gt__(self, other):
return self._sig.__gt__(other)
def __lt__(self, other):
return self._sig.__lt__(other)
def __ge__(self, other):
return self._sig.__ge__(other)
def __le__(self, other):
return self._sig.__le__(other)
def __invert__(self):
return self._sig.__invert__()
def __and__(self, other):
return self._sig.__and__(other)
def __xor__(self, other):
return self._sig.__xor__(other)
def __or__(self, other):
return self._sig.__or__(other)
def __neg__(self):
return self._sig.__neg__()
def __add__(self, other):
return self._sig.__add__(other)
def __sub__(self, other):
return self._sig.__sub__(other)
def __mul__(self, other):
return self._sig.__mul__(other)
def __pow__(self, other):
return self._sig.__pow__(other)
def __mod__(self, other):
return self._sig.__mod__(other)
def __truediv__(self, other):
return self._sig.__truediv__(other)
def __floordiv__(self, other):
return self._sig.__floordiv__(other)
def __lshift__(self, other):
return self._sig.__lshift__(other)
def __rshift__(self, other):
        return self._sig.__rshift__(other)
def _reversed(self):
return self._sig._reversed()
def _concat(self, *others):
return self._sig._concat(*others)
def __getitem__(self, key):
return self._sig.__getitem__(key)
def _ternary(self, ifTrue, ifFalse):
return self._sig._ternary(ifTrue, ifFalse)
|
MIT License
|
python-discord/sir-lancebot
|
bot/exts/fun/trivia_quiz.py
|
DynamicQuestionGen.taxonomic_rank
|
python
|
def taxonomic_rank(cls, q_format: str, a_format: str) -> QuizEntry:
level = random.randint(0, len(cls.TAXONOMIC_HIERARCHY) - 2)
question = q_format.format(cls.TAXONOMIC_HIERARCHY[level])
answer = a_format.format(cls.TAXONOMIC_HIERARCHY[level + 1])
return QuizEntry(question, [answer], DYNAMICALLY_GEN_VARIATION_TOLERANCE)
|
Generate a question on taxonomic classification.
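A possible call, assuming each format string carries exactly one placeholder as the generator expects:
entry = DynamicQuestionGen.taxonomic_rank(
    "Which taxonomic rank is directly above '{}'?",
    "{}",
)
print(entry.question, entry.answers)  # e.g. asks about 'order' and expects ['class']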
|
https://github.com/python-discord/sir-lancebot/blob/559e76ffbef7af85132d86f2e3ab8acf7e7f5eef/bot/exts/fun/trivia_quiz.py#L178-L185
|
import asyncio
import json
import logging
import operator
import random
import re
import string
from collections import defaultdict
from dataclasses import dataclass
from datetime import datetime, timedelta
from pathlib import Path
from typing import Callable, Optional
import discord
from discord.ext import commands, tasks
from rapidfuzz import fuzz
from bot.bot import Bot
from bot.constants import Client, Colours, NEGATIVE_REPLIES, Roles
logger = logging.getLogger(__name__)
DEFAULT_QUESTION_LIMIT = 7
STANDARD_VARIATION_TOLERANCE = 88
DYNAMICALLY_GEN_VARIATION_TOLERANCE = 97
MAX_ERROR_FETCH_TRIES = 3
WRONG_ANS_RESPONSE = [
"No one answered correctly!",
"Better luck next time...",
]
RULES = (
"No cheating and have fun!",
"Points for each question reduces by 25 after 10s or after a hint. Total time is 30s per question"
)
WIKI_FEED_API_URL = "https://en.wikipedia.org/api/rest_v1/feed/featured/{date}"
TRIVIA_QUIZ_ICON = (
"https://raw.githubusercontent.com/python-discord/branding/main/icons/trivia_quiz/trivia-quiz-dist.png"
)
@dataclass(frozen=True)
class QuizEntry:
question: str
answers: list[str]
var_tol: int
class DynamicQuestionGen:
N_PREFIX_STARTS_AT = 5
N_PREFIXES = [
"penta", "hexa", "hepta", "octa", "nona",
"deca", "hendeca", "dodeca", "trideca", "tetradeca",
]
PLANETS = [
("1st", "Mercury"),
("2nd", "Venus"),
("3rd", "Earth"),
("4th", "Mars"),
("5th", "Jupiter"),
("6th", "Saturn"),
("7th", "Uranus"),
("8th", "Neptune"),
]
TAXONOMIC_HIERARCHY = [
"species", "genus", "family", "order",
"class", "phylum", "kingdom", "domain",
]
UNITS_TO_BASE_UNITS = {
"hertz": ("(unit of frequency)", "s^-1"),
"newton": ("(unit of force)", "m*kg*s^-2"),
"pascal": ("(unit of pressure & stress)", "m^-1*kg*s^-2"),
"joule": ("(unit of energy & quantity of heat)", "m^2*kg*s^-2"),
"watt": ("(unit of power)", "m^2*kg*s^-3"),
"coulomb": ("(unit of electric charge & quantity of electricity)", "s*A"),
"volt": ("(unit of voltage & electromotive force)", "m^2*kg*s^-3*A^-1"),
"farad": ("(unit of capacitance)", "m^-2*kg^-1*s^4*A^2"),
"ohm": ("(unit of electric resistance)", "m^2*kg*s^-3*A^-2"),
"weber": ("(unit of magnetic flux)", "m^2*kg*s^-2*A^-1"),
"tesla": ("(unit of magnetic flux density)", "kg*s^-2*A^-1"),
}
@classmethod
def linear_system(cls, q_format: str, a_format: str) -> QuizEntry:
x, y = random.randint(2, 5), random.randint(2, 5)
answer = a_format.format(x, y)
coeffs = random.sample(range(1, 6), 4)
question = q_format.format(
coeffs[0],
coeffs[1],
coeffs[0] * x + coeffs[1] * y,
coeffs[2],
coeffs[3],
coeffs[2] * x + coeffs[3] * y,
)
return QuizEntry(question, [answer], DYNAMICALLY_GEN_VARIATION_TOLERANCE)
@classmethod
def mod_arith(cls, q_format: str, a_format: str) -> QuizEntry:
quotient, m, b = random.randint(30, 40), random.randint(10, 20), random.randint(200, 350)
ans = random.randint(0, 9)
a = quotient * m + ans - b
question = q_format.format(a, b, m)
answer = a_format.format(ans)
return QuizEntry(question, [answer], DYNAMICALLY_GEN_VARIATION_TOLERANCE)
@classmethod
def ngonal_prism(cls, q_format: str, a_format: str) -> QuizEntry:
n = random.randint(0, len(cls.N_PREFIXES) - 1)
question = q_format.format(cls.N_PREFIXES[n])
answer = a_format.format((n + cls.N_PREFIX_STARTS_AT) * 2)
return QuizEntry(question, [answer], DYNAMICALLY_GEN_VARIATION_TOLERANCE)
@classmethod
def imag_sqrt(cls, q_format: str, a_format: str) -> QuizEntry:
ans_coeff = random.randint(3, 10)
question = q_format.format(ans_coeff ** 2)
answer = a_format.format(ans_coeff)
return QuizEntry(question, [answer], DYNAMICALLY_GEN_VARIATION_TOLERANCE)
@classmethod
def binary_calc(cls, q_format: str, a_format: str) -> QuizEntry:
a = random.randint(15, 20)
b = random.randint(10, a)
oper = random.choice(
(
("+", operator.add),
("-", operator.sub),
("*", operator.mul),
)
)
if oper[0] == "*":
a -= 5
b -= 5
question = q_format.format(a, oper[0], b)
answer = a_format.format(oper[1](a, b))
return QuizEntry(question, [answer], DYNAMICALLY_GEN_VARIATION_TOLERANCE)
@classmethod
def solar_system(cls, q_format: str, a_format: str) -> QuizEntry:
planet = random.choice(cls.PLANETS)
question = q_format.format(planet[0])
answer = a_format.format(planet[1])
return QuizEntry(question, [answer], DYNAMICALLY_GEN_VARIATION_TOLERANCE)
@classmethod
|
MIT License
|
google/deluca
|
deluca/lung/envs/_balloon_lung.py
|
BalloonLung.dynamics
|
python
|
def dynamics(self, state, action):
volume, pressure = state["volume"], state["pressure"]
u_in, u_out = action
flow = jnp.clip(PropValve(u_in) * self.R, 0.0, 2.0)
flow -= jax.lax.cond(
pressure > self.peep_valve,
lambda x: jnp.clip(Solenoid(u_out), 0.0, 2.0) * 0.05 * pressure,
lambda x: 0.0,
flow,
)
volume += flow * self.dt
volume += jax.lax.cond(
self.leak,
lambda x: (self.dt / (5.0 + self.dt) * (self.min_volume - volume)),
lambda x: 0.0,
0.0,
)
r = (3.0 * volume / (4.0 * jnp.pi))**(1.0 / 3.0)
pressure = self.P0 + self.PC * (1.0 - (self.r0 / r)**6.0) / (
self.r0**2.0 * r)
return {"volume": volume, "pressure": pressure}
|
state: (volume, pressure)
action: (u_in, u_out)
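A rough sketch of a single dynamics step, using the constructor defaults shown in the accompanying context (the state and action values are made up):
env = BalloonLung(leak=False, peep_valve=5.0, dt=0.03)
state = {"volume": env.min_volume, "pressure": 0.0}
next_state = env.dynamics(state, (1.0, 0.0))  # action = (u_in, u_out)
print(next_state["volume"], next_state["pressure"])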
|
https://github.com/google/deluca/blob/9fdcb9b382cae2ff9d8c7600469d2c6f1a128d1c/deluca/lung/envs/_balloon_lung.py#L95-L124
|
import jax
import jax.numpy as jnp
from deluca.lung.core import BreathWaveform
from deluca.lung.core import LungEnv
def PropValve(x):
y = 3.0 * x
flow_new = 1.0 * (jnp.tanh(0.03 * (y - 130)) + 1.0)
flow_new = jnp.clip(flow_new, 0.0, 1.72)
return flow_new
def Solenoid(x):
return x > 0
class BalloonLung(LungEnv):
def __init__(
self,
leak=False,
peep_valve=5.0,
PC=40.0,
P0=0.0,
C=10.0,
R=15.0,
dt=0.03,
waveform=None,
reward_fn=None,
):
self.viewer = None
self.min_volume = 1.5
self.C = C
self.R = R
self.PC = PC
self.P0 = 0.0
self.leak = leak
self.peep_valve = peep_valve
self.time = 0.0
self.dt = dt
self.waveform = waveform or BreathWaveform()
self.r0 = (3.0 * self.min_volume / (4.0 * jnp.pi))**(1.0 / 3.0)
self.reset()
def reset(self):
self.time = 0.0
self.target = self.waveform.at(self.time)
self.state = self.dynamics({
"volume": self.min_volume,
"pressure": -1.0
}, (0.0, 0.0))
return self.observation
@property
def observation(self):
return {
"measured": self.state["pressure"],
"target": self.target,
"dt": self.dt,
"phase": self.waveform.phase(self.time),
}
|
Apache License 2.0
|
thu-ml/zhusuan
|
zhusuan/utils.py
|
convert_to_int
|
python
|
def convert_to_int(x):
if isinstance(x, int):
return x
return None
|
Try to convert the input to a Python int.
:param x: The input instance.
:return: An int if the conversion succeeds, else None.
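The behaviour, illustrated:
assert convert_to_int(3) == 3       # already an int, returned unchanged
assert convert_to_int("3") is None  # anything else yields None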
|
https://github.com/thu-ml/zhusuan/blob/4386b2a12ae4f4ed8e694e504e51d7dcdfd6f22a/zhusuan/utils.py#L199-L208
|
from __future__ import absolute_import
from __future__ import division
from functools import wraps
import tensorflow as tf
__all__ = [
'TensorArithmeticMixin',
'log_mean_exp',
'merge_dicts',
]
class TensorArithmeticMixin(object):
def __abs__(self):
return tf.abs(self)
def __neg__(self):
return tf.negative(self)
def __add__(self, other):
return tf.add(self, other)
def __radd__(self, other):
return tf.add(other, self)
def __sub__(self, other):
return tf.subtract(self, other)
def __rsub__(self, other):
return tf.subtract(other, self)
def __mul__(self, other):
return tf.multiply(self, other)
def __rmul__(self, other):
return tf.multiply(other, self)
def __div__(self, other):
return tf.divide(self, other)
def __rdiv__(self, other):
return tf.divide(other, self)
def __truediv__(self, other):
return tf.truediv(self, other)
def __rtruediv__(self, other):
return tf.truediv(other, self)
def __floordiv__(self, other):
return tf.floordiv(self, other)
def __rfloordiv__(self, other):
return tf.floordiv(other, self)
def __mod__(self, other):
return tf.mod(self, other)
def __rmod__(self, other):
return tf.mod(other, self)
def __pow__(self, other):
return tf.pow(self, other)
def __rpow__(self, other):
return tf.pow(other, self)
def __invert__(self):
return tf.logical_not(self)
def __and__(self, other):
return tf.logical_and(self, other)
def __rand__(self, other):
return tf.logical_and(other, self)
def __or__(self, other):
return tf.logical_or(self, other)
def __ror__(self, other):
return tf.logical_or(other, self)
def __xor__(self, other):
return tf.logical_xor(self, other)
def __rxor__(self, other):
return tf.logical_xor(other, self)
def __lt__(self, other):
return tf.less(self, other)
def __le__(self, other):
return tf.less_equal(self, other)
def __gt__(self, other):
return tf.greater(self, other)
def __ge__(self, other):
return tf.greater_equal(self, other)
def __getitem__(self, item):
return (tf.convert_to_tensor(self))[item]
def __hash__(self):
return id(self)
def __eq__(self, other):
return id(self) == id(other)
def __iter__(self):
raise TypeError(
"{} object is not iterable.".format(self.__class__.__name__))
def __bool__(self):
raise TypeError(
"Using a `{}` object as a Python `bool` is not allowed. "
"Use `if t is not None:` instead of `if t:` to test if a "
"tensor is defined, and use TensorFlow ops such as "
"tf.cond to execute subgraphs conditioned on the value of "
"a tensor.".format(self.__class__.__name__)
)
def __nonzero__(self):
raise TypeError(
"Using a `{}` object as a Python `bool` is not allowed. "
"Use `if t is not None:` instead of `if t:` to test if a "
"tensor is defined, and use TensorFlow ops such as "
"tf.cond to execute subgraphs conditioned on the value of "
"a tensor.".format(self.__class__.__name__)
)
def log_sum_exp(x, axis=None, keepdims=False):
x = tf.convert_to_tensor(x)
x_max = tf.reduce_max(x, axis=axis, keepdims=True)
ret = tf.log(tf.reduce_sum(tf.exp(x - x_max), axis=axis,
keepdims=True)) + x_max
if not keepdims:
ret = tf.reduce_sum(ret, axis=axis)
return ret
def log_mean_exp(x, axis=None, keepdims=False):
x = tf.convert_to_tensor(x)
x_max = tf.reduce_max(x, axis=axis, keepdims=True)
ret = tf.log(tf.reduce_mean(tf.exp(x - x_max), axis=axis,
keepdims=True)) + x_max
if not keepdims:
ret = tf.reduce_mean(ret, axis=axis)
return ret
|
MIT License
|
google-research/language
|
language/labs/consistent_zero_shot_nmt/models/agreement.py
|
base_agreement
|
python
|
def base_agreement(hparams):
hparams.add_hparam("enc_agreement_coeff", 0.0001)
hparams.add_hparam("enc_agreement_loss", "cosine")
hparams.add_hparam("enc_agreement_pool", True)
hparams.add_hparam("enc_agreement_enable_step", 100000)
hparams.add_hparam("aux_decode_length", 40)
hparams.add_hparam("dec_agreement_coeff", 0.001)
hparams.add_hparam("dec_agreement_loss_sparse", False)
hparams.add_hparam("dec_agreement_enable_step", 100000)
return hparams
|
Adds base hparams for AgreementMultilingualNmt.
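A sketch of how these hparams would typically be layered onto a base set; tensor2tensor's `common_hparams.basic_params1` is assumed as the starting point:
from tensor2tensor.layers import common_hparams

hparams = base_agreement(common_hparams.basic_params1())
print(hparams.enc_agreement_coeff)  # 0.0001
print(hparams.dec_agreement_coeff)  # 0.001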
|
https://github.com/google-research/language/blob/240cd2a1fd0307c6822b6f1f6c2abf1349a5a4da/language/labs/consistent_zero_shot_nmt/models/agreement.py#L534-L548
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from language.labs.consistent_zero_shot_nmt.data_generators import translate_multilingual
from language.labs.consistent_zero_shot_nmt.models import basic
from language.labs.consistent_zero_shot_nmt.models import losses
from language.labs.consistent_zero_shot_nmt.modules import decoders
from language.labs.consistent_zero_shot_nmt.modules import encoders
from language.labs.consistent_zero_shot_nmt.modules import language_models
from language.labs.consistent_zero_shot_nmt.utils import model_utils
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import registry
import tensorflow.compat.v1 as tf
from tensorflow.contrib import seq2seq as contrib_seq2seq
from tensorflow.contrib import training as contrib_training
__all__ = [
"AgreementMultilingualNmt",
"AgreementMultilingualNmtLm",
]
@registry.register_model
class AgreementMultilingualNmt(basic.BasicMultilingualNmt):
def _build_inputs_and_targets(
self, from_seqs=None, from_tags=None, to_seqs=None, to_tags=None):
del from_tags
if from_seqs is not None:
inputs = from_seqs
inputs_length = common_layers.length_from_embedding(inputs)
if to_tags is not None:
inputs = tf.concat([to_tags, inputs], axis=1)
inputs_length = inputs_length + 1
inputs = common_layers.flatten4d3d(inputs)
else:
inputs = None
inputs_length = None
if to_seqs is not None:
targets = common_layers.shift_right(to_seqs)
targets_length = common_layers.length_from_embedding(targets) + 1
targets = common_layers.flatten4d3d(targets)
else:
targets = None
targets_length = None
return (inputs, inputs_length), (targets, targets_length)
def _preprocess(self, features):
seqs, tags = {}, {}
if self._hparams.mode == tf.estimator.ModeKeys.TRAIN:
seqs["src"] = features["inputs"]
seqs["tgt"] = features["targets"]
seqs["aux"] = None
tags["src"] = features["input_tags"]
tags["tgt"] = features["target_tags"]
tags["aux"] = None
batch_size = common_layers.shape_list(features["all_tags"])[0]
num_all_tags = common_layers.shape_list(features["all_tags"])[1]
all_tags = features["all_tags"][0]
aux_tag_index = tf.multinomial(
tf.ones([1, num_all_tags]), batch_size,
output_dtype=tf.int32)[0]
tags["aux"] = tf.expand_dims(tf.gather(all_tags, aux_tag_index), 1)
from_domains = ["src", "src", "tgt"]
to_domains = ["tgt", "aux", "aux"]
else:
seqs["src"] = features["inputs"]
seqs["tgt"] = features["targets"]
tags["src"] = None
tags["tgt"] = features["target_tags"]
if self._hparams.mode == tf.estimator.ModeKeys.PREDICT:
tags["tgt"] = tf.tile(tags["tgt"], [self._hparams.beam_width, 1, 1, 1])
from_domains = ["src"]
to_domains = ["tgt"]
inputs, targets = {}, {}
for fd, td in zip(from_domains, to_domains):
key = "%s>%s" % (fd, td)
inputs[key], targets[key] = self._build_inputs_and_targets(
seqs[fd], tags[fd], seqs[td], tags[td])
return inputs, targets
def _build_encoder_agreement_loss(self):
aux_keys = ["src>aux", "tgt>aux"]
for key in aux_keys:
if key not in self.enc_outputs:
encode_func = self.get_encode_func(*self.inputs[key])
self.enc_outputs[key] = encode_func()
with tf.name_scope("enc_agreement_loss"):
if self._hparams.enc_agreement_loss in {"cosine", "l2"}:
if self._hparams.enc_agreement_pool:
preproc_op_type = "max_pool"
else:
preproc_op_type = "truncate"
enc_src, enc_tgt = model_utils.make_sequences_compatible(
self.enc_outputs["src>aux"].outputs,
self.enc_outputs["tgt>aux"].outputs,
op_type=preproc_op_type)
if self._hparams.enc_agreement_loss == "cosine":
dist_fn = functools.partial(losses.cosine_distance, normalize=True)
else:
dist_fn = functools.partial(losses.l2_distance, normalize=True)
aux_loss_fn = losses.DistanceLoss(dist_fn)
aux_loss = aux_loss_fn(enc_src, enc_tgt)
elif self._hparams.enc_agreement_loss in {"xatt_cosine", "xatt_l2"}:
if self._hparams.enc_agreement_loss.endswith("cosine"):
dist_fn = functools.partial(losses.cosine_distance, normalize=True)
else:
dist_fn = functools.partial(losses.l2_distance, normalize=True)
aux_loss_fn = losses.CrossAttentionDistanceLoss(dist_fn=dist_fn)
aux_loss = aux_loss_fn(self.enc_outputs["src>aux"].outputs,
self.enc_outputs["tgt>aux"].outputs)
else:
raise ValueError("Unknown auxiliary loss: %s." %
self._hparams.enc_agreement_loss)
aux_loss = self._hparams.enc_agreement_coeff * aux_loss
return aux_loss
def _build_aux_sequences(self, target_embeddings, target_vocab_size,
central_lang_tag="<en>"):
aux_keys = ["src>aux", "tgt>aux"]
central_lang_id = translate_multilingual.get_tag_id(central_lang_tag)
self._is_central = {
"src>aux": tf.squeeze(
self._body_features["input_tags_raw"] == central_lang_id),
"tgt>aux": tf.squeeze(
self._body_features["target_tags_raw"] == central_lang_id)}
for key in aux_keys:
if key not in self.enc_outputs:
encode_func = self.get_encode_func(*self.inputs[key])
self.enc_outputs[key] = encode_func()
if key not in self.dec_outputs:
target_seqs, target_lens = self.targets[key]
hiddens = self.enc_outputs[key].outputs
hiddens_length = self.inputs[key][1]
enc_state = self.enc_outputs[key].final_state
decoder_hparams = contrib_training.HParams(auxiliary=True)
decode_func = self.get_decode_func(
target_embeddings,
target_seqs, target_lens,
hiddens, hiddens_length,
enc_state,
mode=self._hparams.mode,
decoder_hparams=decoder_hparams,
decoder_iterations=self._hparams.aux_decode_length)
self.dec_outputs[key] = decode_func()
self.dec_outputs[key]["logits"] = model_utils.build_logits(
sequences=tf.expand_dims(
self.dec_outputs[key]["rnn_output"], axis=2),
embeddings=target_embeddings,
vocab_size=target_vocab_size)
for element in self.dec_outputs[key]:
self.dec_outputs[key][element] = tf.where(
self._is_central[key],
tf.stop_gradient(self.dec_outputs[key][element]),
self.dec_outputs[key][element])
return aux_keys
def _build_decoder_agreement_loss(self, central_lang_tag="<en>"):
target_modality = self._problem_hparams.modality["targets"]
target_modality_scope = self._variable_scopes[target_modality.name]
target_embeddings = model_utils.get_embeddings(
modality=target_modality,
outer_scope=target_modality_scope,
inner_scope="shared")
target_vocab_size = target_modality._vocab_size
aux_keys = self._build_aux_sequences(
target_embeddings, target_vocab_size,
central_lang_tag=central_lang_tag)
aux_loss = 0.
with tf.name_scope("dec_agreement_loss"):
for key1, key2 in zip(aux_keys, aux_keys[::-1]):
targets = self.dec_outputs[key2]["rnn_output"]
targets_length = self.dec_outputs[key2]["length"]
shifted_targets = common_layers.shift_right_3d(targets)
hiddens = self.enc_outputs[key1].outputs
hiddens_length = self.inputs[key1][1]
enc_state = self.enc_outputs[key1].final_state
decode_func = self.get_decode_func(
target_embeddings,
shifted_targets, targets_length,
hiddens, hiddens_length,
enc_state,
mode=tf.estimator.ModeKeys.PREDICT,
decoder_iterations=self._hparams.aux_decode_length)
aux_dec_outputs = decode_func()
aux_logits_1 = model_utils.build_logits(
sequences=tf.expand_dims(
aux_dec_outputs["rnn_output"], axis=2),
embeddings=target_embeddings,
vocab_size=target_vocab_size)
aux_logits_1 = tf.where(
self._is_central[key1],
tf.stop_gradient(aux_logits_1),
aux_logits_1)
logits = tf.squeeze(aux_logits_1, axis=2)
if self._hparams.dec_agreement_loss_sparse:
target_ids = self.dec_outputs[key2]["sample_id"]
aux_loss = aux_loss + losses.CrossEntropyLoss(sparse=True)(
logits, target_ids, targets_length)
else:
aux_logits_2 = tf.squeeze(self.dec_outputs[key2]["logits"], axis=2)
target_probs = tf.nn.softmax(aux_logits_2, axis=-1)
aux_loss = aux_loss + losses.CrossEntropyLoss(sparse=False)(
logits, target_probs, targets_length)
aux_loss = self._hparams.dec_agreement_coeff * aux_loss
return aux_loss
def get_encode_func(self, inputs, inputs_length):
def encode_func():
return self.encoder(
inputs=inputs,
inputs_length=inputs_length,
mode=self._hparams.mode,
hparams=self._hparams,
reuse=tf.AUTO_REUSE)
return encode_func
def get_decode_func(self, embeddings,
inputs, inputs_length,
hiddens, hiddens_length,
enc_state,
mode=None,
decoder_hparams=None,
impute_finished=False,
decoder_iterations=None):
def decode_func():
dec_outputs, _, dec_lengths = contrib_seq2seq.dynamic_decode(
decoder=self.decoder(
embeddings=embeddings,
inputs=inputs,
inputs_length=inputs_length,
hiddens=hiddens,
hiddens_length=hiddens_length,
enc_state=enc_state,
mode=mode,
hparams=self._hparams,
decoder_hparams=decoder_hparams,
reuse=tf.AUTO_REUSE),
impute_finished=impute_finished,
maximum_iterations=decoder_iterations)
return {
"rnn_output": dec_outputs.rnn_output,
"sample_id": dec_outputs.sample_id,
"length": dec_lengths}
return decode_func
def body(self, features):
self._body_features = features
self.inputs, self.targets = self._preprocess(features)
batch_size = common_layers.shape_list(features["inputs"])[0]
global_step = model_utils.get_global_step(self._hparams)
key = "src>tgt"
self.enc_outputs = {}
self.encoder = encoders.get(self._hparams.encoder_type)
encode_func = self.get_encode_func(*self.inputs[key])
self.enc_outputs[key] = encode_func()
target_modality = self._problem_hparams.modality["targets"]
target_modality_scope = self._variable_scopes[target_modality.name]
target_embeddings = model_utils.get_embeddings(
modality=target_modality,
outer_scope=target_modality_scope,
inner_scope="shared")
key = "src>tgt"
self.decoders = {}
self.dec_outputs = {}
self.decoder = decoders.get(self._hparams.decoder_type)
target_seqs, target_lens = self.targets[key]
hiddens = self.enc_outputs[key].outputs
hiddens_length = self.inputs[key][1]
enc_state = self.enc_outputs[key].final_state
decode_func = self.get_decode_func(
target_embeddings,
target_seqs, target_lens,
hiddens, hiddens_length,
enc_state,
mode=self._hparams.mode)
self.dec_outputs[key] = decode_func()
outputs = tf.expand_dims(self.dec_outputs[key]["rnn_output"], axis=2)
aux_losses = {}
if self._hparams.mode == tf.estimator.ModeKeys.TRAIN:
if self._hparams.enc_agreement_coeff > 0:
aux_losses["agreement_enc"] = tf.cond(
global_step > self._hparams.enc_agreement_enable_step,
self._build_encoder_agreement_loss,
lambda: tf.zeros([batch_size]))
if self._hparams.dec_agreement_coeff > 0:
aux_losses["agreement_dec"] = tf.cond(
global_step > self._hparams.dec_agreement_enable_step,
self._build_decoder_agreement_loss,
lambda: tf.zeros([batch_size]))
return outputs, aux_losses
@registry.register_model
class AgreementMultilingualNmtLm(AgreementMultilingualNmt):
def _build_lm_inputs(self, features):
targets = features["targets"]
target_tags = features["target_tags"]
if self._hparams.mode == tf.estimator.ModeKeys.PREDICT:
target_tags = tf.tile(target_tags, [self._hparams.beam_width, 1, 1, 1])
inputs = common_layers.shift_right(targets, pad_value=target_tags)
inputs_length = common_layers.length_from_embedding(targets) + 1
inputs = common_layers.flatten4d3d(inputs)
return inputs, inputs_length
def _build_decoder_lm_loss(self, central_lang_tag="<en>"):
target_modality = self._problem_hparams.modality["targets"]
target_modality_scope = self._variable_scopes[target_modality.name]
target_embeddings = model_utils.get_embeddings(
modality=target_modality,
outer_scope=target_modality_scope,
inner_scope="shared")
target_vocab_size = target_modality._vocab_size
aux_keys = self._build_aux_sequences(
target_embeddings, target_vocab_size,
central_lang_tag=central_lang_tag)
target_embeddings = tf.stop_gradient(target_embeddings)
aux_loss = 0.
with tf.name_scope("aux_lm_loss"):
for key in aux_keys:
dec_outputs = tf.expand_dims(
self.dec_outputs[key]["rnn_output"], axis=2)
dec_output_tags = tf.expand_dims(
self.inputs[key][0][:, :1], axis=2)
dec_lengths = self.dec_outputs[key]["length"]
lm_features = {
"targets": dec_outputs,
"target_tags": dec_output_tags}
inputs, inputs_length = self._build_lm_inputs(lm_features)
lm_outputs = self.language_model(
inputs=inputs,
inputs_length=inputs_length,
mode=tf.estimator.ModeKeys.PREDICT,
hparams=self._hparams,
trainable=False,
reuse=tf.AUTO_REUSE)
lm_logits = model_utils.build_logits(
sequences=tf.expand_dims(lm_outputs, axis=2),
embeddings=target_embeddings,
vocab_size=target_vocab_size)
dec_logits = model_utils.build_logits(
sequences=dec_outputs,
embeddings=target_embeddings,
vocab_size=target_vocab_size)
dec_probs = tf.nn.softmax(dec_logits, axis=-1)
aux_loss = aux_loss + losses.CrossEntropyLoss(sparse=False)(
lm_logits, dec_probs, dec_lengths)
aux_loss = self._hparams.lm_loss_coeff * aux_loss
return aux_loss
def body(self, features):
if self._hparams.lm_do_train:
inputs, inputs_length = self._build_lm_inputs(features)
self.language_model = language_models.get(self._hparams.lm_type)
lm_outputs = self.language_model(
inputs=inputs,
inputs_length=inputs_length,
mode=self._hparams.mode,
hparams=self._hparams)
outputs = tf.expand_dims(lm_outputs, axis=2)
aux_losses = {}
else:
nmt_body = super(AgreementMultilingualNmtLm, self).body
outputs, aux_losses = nmt_body(features)
if self._hparams.mode == tf.estimator.ModeKeys.TRAIN:
if self._hparams.lm_loss_coeff > 0:
self.language_model = language_models.get(self._hparams.lm_type)
batch_size = common_layers.shape_list(features["inputs"])[0]
global_step = model_utils.get_global_step(self._hparams)
aux_losses["language_model"] = tf.cond(
global_step > self._hparams.lm_loss_enable_step,
self._build_decoder_lm_loss,
lambda: tf.zeros([batch_size]))
return outputs, aux_losses
|
Apache License 2.0
|
neuralmagic/sparsezoo
|
src/sparsezoo/utils/numpy.py
|
tensor_export
|
python
|
def tensor_export(
tensor: Union[numpy.ndarray, Dict[str, numpy.ndarray], Iterable[numpy.ndarray]],
export_dir: str,
name: str,
npz: bool = True,
) -> str:
create_dirs(export_dir)
export_path = os.path.join(export_dir, f"{name}.{'npz' if npz else 'npy'}")
if isinstance(tensor, numpy.ndarray) and npz:
numpy.savez_compressed(export_path, tensor)
elif isinstance(tensor, numpy.ndarray):
numpy.save(export_path, tensor)
elif isinstance(tensor, Dict) and npz:
numpy.savez_compressed(export_path, **tensor)
elif isinstance(tensor, Dict):
raise ValueError("tensor dictionaries can only be saved as npz")
elif isinstance(tensor, Iterable) and npz:
numpy.savez_compressed(export_path, *tensor)
elif isinstance(tensor, Iterable):
raise ValueError("tensor iterables can only be saved as npz")
else:
raise ValueError(f"unknown type give for tensor {tensor}")
return export_path
|
:param tensor: tensor to export to a saved numpy array file
:param export_dir: the directory to export the file in
:param name: the name of the file; '.npz' or '.npy' will be appended to it depending on `npz`
:param npz: True to export as an npz file, False otherwise
:return: the path of the numpy file the tensor was exported to
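A small usage example; the directory and file names are illustrative:
import numpy

arr = numpy.arange(10)
path = tensor_export(arr, "out", "sample", npz=True)
print(path)  # out/sample.npz (with npz=False it would be out/sample.npy)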
|
https://github.com/neuralmagic/sparsezoo/blob/ff43990c3c492e6bbc876158a6a33907d2b4b667/src/sparsezoo/utils/numpy.py#L268-L299
|
import glob
import logging
import os
import tarfile
from collections import OrderedDict
from io import BytesIO
from typing import Dict, Iterable, List, Union
import numpy
from sparsezoo.utils.helpers import clean_path, create_dirs
__all__ = [
"NDARRAY_KEY",
"load_numpy",
"save_numpy",
"load_numpy_list",
"NumpyArrayBatcher",
"tensor_export",
"tensors_export",
]
NDARRAY_KEY = "ndarray"
_LOGGER = logging.getLogger(__name__)
def _fix_loaded_numpy(array) -> Union[numpy.ndarray, Dict[str, numpy.ndarray]]:
if not isinstance(array, numpy.ndarray):
        tmp_array = array
        array = OrderedDict()
        for key, val in tmp_array.items():
array[key] = val
return array
def load_numpy(file_path: str) -> Union[numpy.ndarray, Dict[str, numpy.ndarray]]:
file_path = clean_path(file_path)
array = numpy.load(file_path)
return _fix_loaded_numpy(array)
def save_numpy(
array: Union[numpy.ndarray, Dict[str, numpy.ndarray], Iterable[numpy.ndarray]],
export_dir: str,
name: str,
npz: bool = True,
):
create_dirs(export_dir)
export_path = os.path.join(export_dir, f"{name}.{'npz' if npz else 'npy'}")
if isinstance(array, numpy.ndarray) and npz:
numpy.savez_compressed(export_path, array)
elif isinstance(array, numpy.ndarray):
numpy.save(export_path, array)
elif isinstance(array, Dict) and npz:
numpy.savez_compressed(export_path, **array)
elif isinstance(array, Dict):
raise ValueError("Dict can only be exported to an npz file")
elif isinstance(array, Iterable) and npz:
numpy.savez_compressed(export_path, *[val for val in array])
elif isinstance(array, Iterable):
raise ValueError("Iterable can only be exported to an npz file")
else:
raise ValueError(f"Unrecognized type given for array {array}")
return export_path
def load_numpy_from_tar(
path: str,
) -> List[Union[numpy.ndarray, Dict[str, numpy.ndarray]]]:
tar = tarfile.open(path, "r")
files = tar.getmembers()
files = sorted([file.name for file in files])
data = []
for file in files:
extracted = BytesIO()
extracted.write(tar.extractfile(file).read())
extracted.seek(0)
array = numpy.load(extracted)
data.append(_fix_loaded_numpy(array))
return data
def load_numpy_list(
data: Union[str, Iterable[Union[str, numpy.ndarray, Dict[str, numpy.ndarray]]]],
) -> List[Union[numpy.ndarray, Dict[str, numpy.ndarray]]]:
loaded = []
if isinstance(data, str):
if os.path.isfile(data) and tarfile.is_tarfile(data):
data = load_numpy_from_tar(data)
elif os.path.isfile(data) and ".np" in data:
data = [load_numpy(data)]
else:
glob_path = os.path.join(data, "*") if os.path.isdir(data) else data
data = sorted(glob.glob(glob_path))
for dat in data:
if isinstance(dat, str):
dat = load_numpy(dat)
loaded.append(dat)
return loaded
class NumpyArrayBatcher(object):
def __init__(self):
self._items = OrderedDict()
self._batch_index = None
def __len__(self):
if len(self._items) == 0:
return 0
return len(self._items[list(self._items.keys())[0]])
def append(self, item: Union[numpy.ndarray, Dict[str, numpy.ndarray]]):
if len(self) < 1 and isinstance(item, numpy.ndarray):
self._items[NDARRAY_KEY] = [item]
elif len(self) < 1:
for key, val in item.items():
self._items[key] = [val]
elif isinstance(item, numpy.ndarray):
if self._batch_index is None:
self._batch_index = {NDARRAY_KEY: 0}
if NDARRAY_KEY not in self._items:
raise ValueError(
"numpy ndarray passed for item, but prev_batch does not contain one"
)
if item.shape != self._items[NDARRAY_KEY][0].shape:
self._batch_index[NDARRAY_KEY] = 1
if item.shape != self._items[NDARRAY_KEY][0].shape and (
item.shape[0] != self._items[NDARRAY_KEY][0].shape[0]
or item.shape[2:] != self._items[NDARRAY_KEY][0].shape[2:]
):
raise ValueError(
(
f"item of numpy ndarray of shape {item.shape} does not "
f"match the current batch shape of "
f"{self._items[NDARRAY_KEY][0].shape}"
)
)
self._items[NDARRAY_KEY].append(item)
else:
diff_keys = list(set(item.keys()) - set(self._items.keys()))
if len(diff_keys) > 0:
raise ValueError(
(
f"numpy dict passed for item, not all keys match "
f"with the prev_batch. difference: {diff_keys}"
)
)
if self._batch_index is None:
self._batch_index = {key: 0 for key in item}
for key, val in item.items():
if val.shape != self._items[key][0].shape:
self._batch_index[key] = 1
if val.shape != self._items[key][0].shape and (
val.shape[0] != self._items[key][0].shape[0]
or val.shape[2:] != self._items[key][0].shape[2:]
):
raise ValueError(
(
f"item with key {key} of shape {val.shape} does not "
f"match the current batch shape of "
f"{self._items[key][0].shape}"
)
)
self._items[key].append(val)
def stack(
self, as_list: bool = False
) -> Union[List[numpy.ndarray], Dict[str, numpy.ndarray]]:
batch_dict = OrderedDict()
for key, val in self._items.items():
if self._batch_index is None or self._batch_index[key] == 0:
batch_dict[key] = numpy.stack(val)
else:
batch_dict[key] = numpy.concatenate(val, axis=self._batch_index[key])
return batch_dict if not as_list else list(batch_dict.values())
|
Apache License 2.0
|
bfontaine/clj
|
clj/seqs.py
|
rest
|
python
|
def rest(coll):
return drop(1, coll)
|
Returns a possibly empty generator of the items after the first.
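The behaviour described above, illustrated:
assert list(rest([1, 2, 3])) == [2, 3]
assert list(rest([])) == []  # empty input yields an empty generator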
|
https://github.com/bfontaine/clj/blob/6f691f2367d69c7fd5908821e3dc630ef2034997/clj/seqs.py#L185-L189
|
import random
import collections
import itertools
_nil = object()
try:
_range = xrange
_filterfalse = itertools.ifilterfalse
except NameError:
_range = range
_filterfalse = itertools.filterfalse
try:
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc
def _is_collection_abc(x):
return isinstance(x, collections_abc.Sized) and isinstance(x, collections_abc.Iterable)
def _make_gen(g):
for e in g:
yield e
def distinct(coll):
seen = set()
for e in coll:
if e not in seen:
seen.add(e)
yield e
if isinstance(filter(lambda e: e, []), list):
def filter(f, coll):
for e in coll:
if f(e):
yield e
else:
filter = filter
def remove(pred, coll):
return _filterfalse(pred, coll)
def keep(f, coll):
return keep_indexed(lambda _, e: f(e), coll)
def keep_indexed(f, coll):
for i, e in enumerate(coll):
res = f(i, e)
if res is not None:
yield res
def cons(x, seq):
yield x
for e in seq:
yield e
def concat(*xs):
return itertools.chain(*xs)
if isinstance(map(lambda e: e, []), list):
def map(f, *colls):
for xs in zip(*colls):
yield f(*xs)
else:
map = map
def mapcat(f, *colls):
for coll in map(f, *colls):
for e in coll:
yield e
def cycle(coll):
els = []
for e in coll:
yield e
els.append(e)
while True:
for e in els:
yield e
def interleave(*colls):
iterators = [iter(coll) for coll in colls]
try:
while True:
values = [next(it) for it in iterators]
for v in values:
yield v
except StopIteration:
pass
def interpose(sep, coll):
first_ = True
for e in coll:
if first_:
first_ = False
else:
yield sep
yield e
|
MIT License
|
ducksboard/libsaas
|
libsaas/services/twilio/usage.py
|
Records.daily
|
python
|
def daily(self):
return RecordsDaily(self)
|
Return multiple usage records for each usage category,
each representing usage over a daily time-interval.
|
https://github.com/ducksboard/libsaas/blob/615981a3336f65be9d51ae95a48aed9ad3bd1c3c/libsaas/services/twilio/usage.py#L105-L110
|
from libsaas import http, parsers
from libsaas.services import base
from libsaas.services.twilio import resource
class RecordsBase(resource.TwilioResource):
path = 'Records'
@base.apimethod
def get(self, Category=None, StartDate=None, EndDate=None,
Page=None, PageSize=None, AfterSid=None):
params = resource.get_params(None, locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
class RecordsDaily(RecordsBase):
path = 'Daily'
class RecordsMonthly(RecordsBase):
path = 'Monthly'
class RecordsYearly(RecordsBase):
path = 'Yearly'
class RecordsAllTime(RecordsBase):
path = 'AllTime'
class RecordsToday(RecordsBase):
path = 'Today'
class RecordsYesterday(RecordsBase):
path = 'Yesterday'
class RecordsThisMonth(RecordsBase):
path = 'ThisMonth'
class RecordsLastMonth(RecordsBase):
path = 'LastMonth'
class Records(RecordsBase):
@base.resource(RecordsDaily)
|
MIT License
|
globocom/gcloud-utils
|
gcloud_utils/storage.py
|
Storage.upload_path
|
python
|
def upload_path(self, storage_path_base, local_path_base):
for root, _, files in os.walk(local_path_base):
for file_to_upload in files:
full_path_upload = os.path.join(root, file_to_upload)
storage_path = os.path.join(storage_path_base, file_to_upload)
self.upload_file(storage_path, full_path_upload)
|
Upload all files from the local path to Storage.
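A hypothetical call; the bucket name and paths are placeholders. Note that only file names are joined onto the storage prefix, so nested local directories are flattened:
storage = Storage("my-bucket")
storage.upload_path("models/", "./models")  # ./models/**/* -> gs://my-bucket/models/<file>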
|
https://github.com/globocom/gcloud-utils/blob/9b01b2ae6ce42243e590ebe03d0ec36cb86716ce/gcloud_utils/storage.py#L79-L85
|
import os
import logging
from google.cloud import storage
from gcloud_utils.base_client import BaseClient
def _filter_suffix_files(blobs, suffix):
return [x for x in blobs if x.name.endswith(suffix)]
def _prepare_path(path):
if not os.path.exists(path):
os.makedirs(path)
return path
class Storage(BaseClient):
_MODEL_CLIENT = storage
def __init__(self, bucket, client=None, log_level=logging.ERROR):
super(Storage, self).__init__(client, log_level)
self._bucket = self._client.get_bucket(bucket)
def download_file(self, storage_path, local_path):
obj = self._bucket.get_blob(storage_path)
local_file_full_path = os.path.join(local_path, obj.name)
_prepare_path(os.path.dirname(local_file_full_path))
with open(local_file_full_path, 'wb') as local_file:
obj.download_to_file(local_file)
return local_file_full_path
def download_files(self, path, local_path, filter_suffix=None):
list_paths = self.list_files(path, filter_suffix=filter_suffix)
for path_to_download in list_paths:
self.download_file(path_to_download.name, local_path)
def get_abs_path(self, storage_path):
bucket_path = "gs://{}/".format(self._bucket.name)
return os.path.join(bucket_path, storage_path)
def get_file(self, file_path, local_path):
self.logger.debug("Download file...")
full_path = self.download_file(file_path, local_path)
return open(full_path)
def get_files_in_path(self, path, local_path):
files = self.list_files(path)
result = []
for file_blob in files:
result.append(self.get_file(file_blob.name,
"{}/{}".format(local_path, file_blob.name)))
return result
def list_files(self, path, filter_suffix=None):
blobs = self._bucket.list_blobs(prefix=path)
blobs_files = [x for x in blobs if not x.name.endswith("/")]
if filter_suffix is not None:
return _filter_suffix_files(blobs_files, filter_suffix)
return blobs_files
def path_exists_storage(self, path):
return self._bucket.blob(path).exists()
def upload_file(self, storage_path, local_path):
with open(local_path) as loc:
self.logger.debug("Upload file %s to %s", local_path, storage_path)
self._bucket.blob(storage_path).upload_from_file(loc)
|
Apache License 2.0
|
rhming/unicomdailytask
|
jsonpickle/unpickler.py
|
loadrepr
|
python
|
def loadrepr(reprstr):
module, evalstr = reprstr.split('/')
mylocals = locals()
localname = module
if '.' in localname:
localname = module.split('.', 1)[0]
mylocals[localname] = __import__(module)
return eval(evalstr)
|
Returns an instance of the object from the object's repr() string.
It involves the dynamic specification of code.
>>> obj = loadrepr('datetime/datetime.datetime.now()')
>>> obj.__class__.__name__
'datetime'
|
https://github.com/rhming/unicomdailytask/blob/542cfe86fe010748fc15b0eff45f16673c0f2b62/jsonpickle/unpickler.py#L750-L765
|
from __future__ import absolute_import, division, unicode_literals
import quopri
import sys
from . import compat
from . import util
from . import tags
from . import handlers
from .compat import numeric_types
from .backend import json
def decode(
string, backend=None, context=None, keys=False, reset=True, safe=False, classes=None
):
backend = backend or json
context = context or Unpickler(keys=keys, backend=backend, safe=safe)
data = backend.decode(string)
return context.restore(data, reset=reset, classes=classes)
def _safe_hasattr(obj, attr):
try:
object.__getattribute__(obj, attr)
return True
except AttributeError:
return False
def _is_json_key(key):
return isinstance(key, compat.string_types) and key.startswith(tags.JSON_KEY)
class _Proxy(object):
def __init__(self):
self.instance = None
def get(self):
return self.instance
def reset(self, instance):
self.instance = instance
class _IDProxy(_Proxy):
def __init__(self, objs, index):
self._index = index
self._objs = objs
def get(self):
return self._objs[self._index]
def _obj_setattr(obj, attr, proxy):
setattr(obj, attr, proxy.get())
def _obj_setvalue(obj, idx, proxy):
obj[idx] = proxy.get()
class Unpickler(object):
def __init__(self, backend=None, keys=False, safe=False):
self.backend = backend or json
self.keys = keys
self.safe = safe
self.reset()
def reset(self):
self._namedict = {}
self._namestack = []
self._obj_to_idx = {}
self._objs = []
self._proxies = []
self._classes = {}
def restore(self, obj, reset=True, classes=None):
if reset:
self.reset()
if classes:
self.register_classes(classes)
value = self._restore(obj)
if reset:
self._swap_proxies()
return value
def register_classes(self, classes):
if isinstance(classes, (list, tuple, set)):
for cls in classes:
self.register_classes(cls)
else:
self._classes[util.importable_name(classes)] = classes
def _swap_proxies(self):
for (obj, attr, proxy, method) in self._proxies:
method(obj, attr, proxy)
self._proxies = []
def _restore(self, obj):
if not isinstance(obj, (str, list, dict, set, tuple)):
def restore(x):
return x
else:
restore = self._restore_tags(obj)
return restore(obj)
def _restore_tags(self, obj):
try:
if not tags.RESERVED <= set(obj) and not type(obj) in (list, dict):
def restore(x):
return x
return restore
except TypeError:
pass
if has_tag(obj, tags.B64):
restore = self._restore_base64
elif has_tag(obj, tags.B85):
restore = self._restore_base85
elif has_tag(obj, tags.ID):
restore = self._restore_id
elif has_tag(obj, tags.ITERATOR):
restore = self._restore_iterator
elif has_tag(obj, tags.TYPE):
restore = self._restore_type
elif has_tag(obj, tags.REDUCE):
restore = self._restore_reduce
elif has_tag(obj, tags.OBJECT):
restore = self._restore_object
elif has_tag(obj, tags.FUNCTION):
restore = self._restore_function
elif has_tag(obj, tags.BYTES):
restore = self._restore_quopri
elif has_tag(obj, tags.REF):
restore = self._restore_ref
elif has_tag(obj, tags.REPR):
restore = self._restore_repr
elif util.is_list(obj):
restore = self._restore_list
elif has_tag(obj, tags.TUPLE):
restore = self._restore_tuple
elif has_tag(obj, tags.SET):
restore = self._restore_set
elif util.is_dictionary(obj):
restore = self._restore_dict
else:
def restore(x):
return x
return restore
def _restore_base64(self, obj):
return util.b64decode(obj[tags.B64].encode('utf-8'))
def _restore_base85(self, obj):
return util.b85decode(obj[tags.B85].encode('utf-8'))
def _restore_quopri(self, obj):
return quopri.decodestring(obj[tags.BYTES].encode('utf-8'))
def _restore_iterator(self, obj):
return iter(self._restore_list(obj[tags.ITERATOR]))
def _restore_reduce(self, obj):
proxy = _Proxy()
self._mkref(proxy)
reduce_val = list(map(self._restore, obj[tags.REDUCE]))
if len(reduce_val) < 5:
reduce_val.extend([None] * (5 - len(reduce_val)))
f, args, state, listitems, dictitems = reduce_val
if f == tags.NEWOBJ or getattr(f, '__name__', '') == '__newobj__':
cls = args[0]
if not isinstance(cls, type):
cls = self._restore(cls)
stage1 = cls.__new__(cls, *args[1:])
else:
stage1 = f(*args)
if state:
try:
stage1.__setstate__(state)
except AttributeError:
try:
for k, v in stage1.__dict__.items():
state.setdefault(k, v)
stage1.__dict__ = state
except AttributeError:
try:
for k, v in state.items():
setattr(stage1, k, v)
except Exception:
dict_state, slots_state = state
if dict_state:
stage1.__dict__.update(dict_state)
if slots_state:
for k, v in slots_state.items():
setattr(stage1, k, v)
if listitems:
try:
stage1.extend(listitems)
except AttributeError:
for x in listitems:
stage1.append(x)
if dictitems:
for k, v in dictitems:
stage1.__setitem__(k, v)
proxy.reset(stage1)
self._swapref(proxy, stage1)
return stage1
def _restore_id(self, obj):
try:
idx = obj[tags.ID]
return self._objs[idx]
except IndexError:
return _IDProxy(self._objs, idx)
def _restore_ref(self, obj):
return self._namedict.get(obj[tags.REF])
def _restore_type(self, obj):
typeref = loadclass(obj[tags.TYPE], classes=self._classes)
if typeref is None:
return obj
return typeref
def _restore_repr(self, obj):
if self.safe:
return None
obj = loadrepr(obj[tags.REPR])
return self._mkref(obj)
def _restore_object(self, obj):
class_name = obj[tags.OBJECT]
cls = loadclass(class_name, classes=self._classes)
handler = handlers.get(cls, handlers.get(class_name))
if handler is not None:
proxy = _Proxy()
self._mkref(proxy)
instance = handler(self).restore(obj)
proxy.reset(instance)
self._swapref(proxy, instance)
return instance
if cls is None:
return self._mkref(obj)
return self._restore_object_instance(obj, cls)
def _restore_function(self, obj):
return loadclass(obj[tags.FUNCTION], classes=self._classes)
def _loadfactory(self, obj):
try:
default_factory = obj['default_factory']
except KeyError:
return None
del obj['default_factory']
return self._restore(default_factory)
def _restore_object_instance(self, obj, cls):
proxy = _Proxy()
self._mkref(proxy)
factory = self._loadfactory(obj)
if has_tag(obj, tags.NEWARGSEX):
args, kwargs = obj[tags.NEWARGSEX]
else:
args = getargs(obj, classes=self._classes)
kwargs = {}
if args:
args = self._restore(args)
if kwargs:
kwargs = self._restore(kwargs)
is_oldstyle = not (isinstance(cls, type) or getattr(cls, '__meta__', None))
try:
if (not is_oldstyle) and hasattr(cls, '__new__'):
if factory:
instance = cls.__new__(cls, factory, *args, **kwargs)
instance.default_factory = factory
else:
instance = cls.__new__(cls, *args, **kwargs)
else:
instance = object.__new__(cls)
except TypeError:
is_oldstyle = True
if is_oldstyle:
try:
instance = cls(*args)
except TypeError:
try:
instance = make_blank_classic(cls)
except Exception:
return self._mkref(obj)
proxy.reset(instance)
self._swapref(proxy, instance)
if isinstance(instance, tuple):
return instance
instance = self._restore_object_instance_variables(obj, instance)
if _safe_hasattr(instance, 'default_factory') and isinstance(
instance.default_factory, _Proxy
):
instance.default_factory = instance.default_factory.get()
return instance
def _restore_from_dict(self, obj, instance, ignorereserved=True):
restore_key = self._restore_key_fn()
method = _obj_setattr
deferred = {}
for k, v in util.items(obj):
if ignorereserved and k in tags.RESERVED:
continue
if isinstance(k, numeric_types):
str_k = k.__str__()
else:
str_k = k
self._namestack.append(str_k)
k = restore_key(k)
value = self._restore(v)
if util.is_noncomplex(instance) or util.is_dictionary_subclass(instance):
try:
if k == '__dict__':
setattr(instance, k, value)
else:
instance[k] = value
except TypeError:
if k != '__dict__':
deferred[k] = value
self._namestack.pop()
continue
else:
setattr(instance, k, value)
if isinstance(value, _Proxy):
self._proxies.append((instance, k, value, method))
self._namestack.pop()
if deferred:
instance = instance.__class__(deferred)
return instance
def _restore_object_instance_variables(self, obj, instance):
instance = self._restore_from_dict(obj, instance)
if has_tag(obj, tags.SEQ):
if hasattr(instance, 'append'):
for v in obj[tags.SEQ]:
instance.append(self._restore(v))
elif hasattr(instance, 'add'):
for v in obj[tags.SEQ]:
instance.add(self._restore(v))
if has_tag(obj, tags.STATE):
instance = self._restore_state(obj, instance)
return instance
def _restore_state(self, obj, instance):
state = self._restore(obj[tags.STATE])
has_slots = (
isinstance(state, tuple) and len(state) == 2 and isinstance(state[1], dict)
)
has_slots_and_dict = has_slots and isinstance(state[0], dict)
if hasattr(instance, '__setstate__'):
instance.__setstate__(state)
elif isinstance(state, dict):
instance = self._restore_from_dict(state, instance, ignorereserved=False)
elif has_slots:
instance = self._restore_from_dict(state[1], instance, ignorereserved=False)
if has_slots_and_dict:
instance = self._restore_from_dict(
state[0], instance, ignorereserved=False
)
elif not hasattr(instance, '__getnewargs__') and not hasattr(
instance, '__getnewargs_ex__'
):
instance = state
return instance
def _restore_list(self, obj):
parent = []
self._mkref(parent)
children = [self._restore(v) for v in obj]
parent.extend(children)
method = _obj_setvalue
proxies = [
(parent, idx, value, method)
for idx, value in enumerate(parent)
if isinstance(value, _Proxy)
]
self._proxies.extend(proxies)
return parent
def _restore_tuple(self, obj):
return tuple([self._restore(v) for v in obj[tags.TUPLE]])
def _restore_set(self, obj):
return {self._restore(v) for v in obj[tags.SET]}
def _restore_dict(self, obj):
data = {}
self._mkref(data)
if self.keys:
for k, v in util.items(obj):
if _is_json_key(k):
continue
if isinstance(k, numeric_types):
str_k = k.__str__()
else:
str_k = k
self._namestack.append(str_k)
data[k] = self._restore(v)
self._namestack.pop()
for k, v in util.items(obj):
if not _is_json_key(k):
continue
self._namestack.append(k)
k = self._restore_pickled_key(k)
data[k] = result = self._restore(v)
if isinstance(result, _Proxy):
self._proxies.append((data, k, result, _obj_setvalue))
self._namestack.pop()
else:
for k, v in util.items(obj):
if isinstance(k, numeric_types):
str_k = k.__str__()
else:
str_k = k
self._namestack.append(str_k)
data[k] = self._restore(v)
self._namestack.pop()
return data
def _restore_key_fn(self):
if self.keys:
restore_key = self._restore_pickled_key
else:
def restore_key(key):
return key
return restore_key
def _restore_pickled_key(self, key):
if _is_json_key(key):
key = decode(
key[len(tags.JSON_KEY) :],
backend=self.backend,
context=self,
keys=True,
reset=False,
)
return key
def _refname(self):
return '/' + '/'.join(self._namestack)
def _mkref(self, obj):
obj_id = id(obj)
try:
self._obj_to_idx[obj_id]
except KeyError:
self._obj_to_idx[obj_id] = len(self._objs)
self._objs.append(obj)
self._namedict[self._refname()] = obj
return obj
def _swapref(self, proxy, instance):
proxy_id = id(proxy)
instance_id = id(instance)
instance_index = self._obj_to_idx[proxy_id]
self._obj_to_idx[instance_id] = instance_index
del self._obj_to_idx[proxy_id]
self._objs[instance_index] = instance
self._namedict[self._refname()] = instance
def loadclass(module_and_name, classes=None):
if classes:
try:
return classes[module_and_name]
except KeyError:
pass
names = module_and_name.split('.')
for up_to in range(len(names) - 1, 0, -1):
module = util.untranslate_module_name('.'.join(names[:up_to]))
try:
__import__(module)
obj = sys.modules[module]
for class_name in names[up_to:]:
obj = getattr(obj, class_name)
return obj
except (AttributeError, ImportError, ValueError):
continue
return None
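# Extract constructor arguments recorded by the pickler: explicit newargs or
# initargs tags are returned directly; otherwise a stored sequence is used
# only when it lines up field-for-field with a namedtuple's _fields.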
def getargs(obj, classes=None):
if has_tag(obj, tags.NEWARGSEX):
raise ValueError("__newargs_ex__ returns both args and kwargs")
if has_tag(obj, tags.NEWARGS):
return obj[tags.NEWARGS]
if has_tag(obj, tags.INITARGS):
return obj[tags.INITARGS]
try:
seq_list = obj[tags.SEQ]
obj_dict = obj[tags.OBJECT]
except KeyError:
return []
typeref = loadclass(obj_dict, classes=classes)
if not typeref:
return []
if hasattr(typeref, '_fields'):
if len(typeref._fields) == len(seq_list):
return seq_list
return []
class _trivialclassic:
    # Blank helper class: make_blank_classic instantiates it, then swaps in the
    # target class so an instance is created without calling __init__.
    pass
def make_blank_classic(cls):
instance = _trivialclassic()
instance.__class__ = cls
return instance
|
MIT License
|
geyingli/unif
|
uf/application/dilated.py
|
DilatedLM.export
|
python
|
def export(self, export_dir, loop=1):
if loop != self._loop:
self._loop = loop
self._session_mode = None
return super(LMModule, self).export(export_dir)
|
Export model into SavedModel files.
Args:
export_dir: str. Directory to which the model is saved.
loop: int. Number of inference loops to rewrite the input.
Returns:
None
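
A minimal usage sketch, assuming a trained checkpoint is available; the config, vocab, checkpoint, and export paths below are placeholders:

from uf.application.dilated import DilatedLM
model = DilatedLM('bert_config.json', 'vocab.txt',
                  init_checkpoint='model.ckpt', output_dir='outputs')
model.export('saved_model', loop=2)  # rewrite the input through two inference loops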
|
https://github.com/geyingli/unif/blob/a6c9c94f60a7b906d9bd410bb446c4e3f2540ffc/uf/application/dilated.py#L99-L112
|
import random
import numpy as np
from ..tools import tf
from .base import LMModule
from .bert import get_bert_config, get_word_piece_tokenizer, get_key_to_depths
from ..modeling.dilated import DLM
from .. import utils
class DilatedLM(LMModule):
_INFER_ATTRIBUTES = {
'max_seq_length': (
'An integer that defines max sequence length of input tokens, '
            'which typically equals `len(tokenize(segments)) + 1`'),
'init_checkpoint': (
'A string that directs to the checkpoint file used for '
'initialization')}
def __init__(self,
config_file,
vocab_file,
max_seq_length=128,
init_checkpoint=None,
output_dir=None,
gpu_ids=None,
replace_prob=0.05,
add_prob=0.05,
subtract_prob=0.05,
do_lower_case=True,
truncate_method='LIFO'):
super(LMModule, self).__init__(
init_checkpoint, output_dir, gpu_ids)
self.batch_size = 0
self.max_seq_length = max_seq_length
self.truncate_method = truncate_method
self._replace_prob = replace_prob
self._add_prob = add_prob
self._subtract_prob = subtract_prob
self._loop = 1
self.__init_args__ = locals()
self.bert_config = get_bert_config(config_file)
self.tokenizer = get_word_piece_tokenizer(vocab_file, do_lower_case)
self._key_to_depths = get_key_to_depths(
self.bert_config.num_hidden_layers)
if '[CLS]' not in self.tokenizer.vocab:
self.tokenizer.add('[CLS]')
self.bert_config.vocab_size += 1
tf.logging.info('Add necessary token `[CLS]` into vocabulary.')
if '[SEP]' not in self.tokenizer.vocab:
self.tokenizer.add('[SEP]')
self.bert_config.vocab_size += 1
tf.logging.info('Add necessary token `[SEP]` into vocabulary.')
if '[SPAD]' not in self.tokenizer.vocab:
self.tokenizer.add('[SPAD]')
self.bert_config.vocab_size += 1
tf.logging.info('Add necessary token `[SPAD]` into vocabulary.')
def predict(self, X=None, X_tokenized=None,
batch_size=8, loop=1):
if loop != self._loop:
self._loop = loop
self._session_mode = None
return super(LMModule, self).predict(
X, X_tokenized, batch_size)
|
Apache License 2.0
|
willcl-ark/lnd_grpc
|
lnd_grpc/protos/rpc_pb2_grpc.py
|
LightningServicer.SendCoins
|
python
|
def SendCoins(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
|
* lncli: `sendcoins`
SendCoins executes a request to send coins to a particular address. Unlike
SendMany, this RPC call only allows creating a single output at a time. If
neither target_conf nor sat_per_byte is set, the internal wallet will
consult its fee model to determine a fee for the default confirmation
target.
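
A hedged client-side sketch of invoking SendCoins through the generated stub; the endpoint and destination address are placeholders, the request field names are assumed from lnd's SendCoinsRequest message, and the TLS/macaroon credentials a real lnd node requires are omitted for brevity:

import grpc
from lnd_grpc.protos import rpc_pb2, rpc_pb2_grpc
channel = grpc.insecure_channel('localhost:10009')  # placeholder endpoint
stub = rpc_pb2_grpc.LightningStub(channel)
request = rpc_pb2.SendCoinsRequest(addr='<destination address>', amount=100000)
response = stub.SendCoins(request)  # unary-unary call returning a SendCoinsResponse
print(response)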
|
https://github.com/willcl-ark/lnd_grpc/blob/cf938c51c201f078e8bbe9e19ffc2d038f3abf7f/lnd_grpc/protos/rpc_pb2_grpc.py#L471-L481
|
import grpc
from lnd_grpc.protos import rpc_pb2 as lnd__grpc_dot_protos_dot_rpc__pb2
class WalletUnlockerStub(object):
def __init__(self, channel):
self.GenSeed = channel.unary_unary(
'/lnrpc.WalletUnlocker/GenSeed',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.GenSeedRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.GenSeedResponse.FromString,
)
self.InitWallet = channel.unary_unary(
'/lnrpc.WalletUnlocker/InitWallet',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.InitWalletRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.InitWalletResponse.FromString,
)
self.UnlockWallet = channel.unary_unary(
'/lnrpc.WalletUnlocker/UnlockWallet',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.UnlockWalletRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.UnlockWalletResponse.FromString,
)
self.ChangePassword = channel.unary_unary(
'/lnrpc.WalletUnlocker/ChangePassword',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.ChangePasswordRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.ChangePasswordResponse.FromString,
)
class WalletUnlockerServicer(object):
def GenSeed(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def InitWallet(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UnlockWallet(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChangePassword(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_WalletUnlockerServicer_to_server(servicer, server):
rpc_method_handlers = {
'GenSeed': grpc.unary_unary_rpc_method_handler(
servicer.GenSeed,
request_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.GenSeedRequest.FromString,
response_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.GenSeedResponse.SerializeToString,
),
'InitWallet': grpc.unary_unary_rpc_method_handler(
servicer.InitWallet,
request_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.InitWalletRequest.FromString,
response_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.InitWalletResponse.SerializeToString,
),
'UnlockWallet': grpc.unary_unary_rpc_method_handler(
servicer.UnlockWallet,
request_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.UnlockWalletRequest.FromString,
response_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.UnlockWalletResponse.SerializeToString,
),
'ChangePassword': grpc.unary_unary_rpc_method_handler(
servicer.ChangePassword,
request_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.ChangePasswordRequest.FromString,
response_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.ChangePasswordResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'lnrpc.WalletUnlocker', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class LightningStub(object):
pass
def __init__(self, channel):
self.WalletBalance = channel.unary_unary(
'/lnrpc.Lightning/WalletBalance',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.WalletBalanceRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.WalletBalanceResponse.FromString,
)
self.ChannelBalance = channel.unary_unary(
'/lnrpc.Lightning/ChannelBalance',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.ChannelBalanceRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.ChannelBalanceResponse.FromString,
)
self.GetTransactions = channel.unary_unary(
'/lnrpc.Lightning/GetTransactions',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.GetTransactionsRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.TransactionDetails.FromString,
)
self.EstimateFee = channel.unary_unary(
'/lnrpc.Lightning/EstimateFee',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.EstimateFeeRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.EstimateFeeResponse.FromString,
)
self.SendCoins = channel.unary_unary(
'/lnrpc.Lightning/SendCoins',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.SendCoinsRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.SendCoinsResponse.FromString,
)
self.ListUnspent = channel.unary_unary(
'/lnrpc.Lightning/ListUnspent',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.ListUnspentRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.ListUnspentResponse.FromString,
)
self.SubscribeTransactions = channel.unary_stream(
'/lnrpc.Lightning/SubscribeTransactions',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.GetTransactionsRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.Transaction.FromString,
)
self.SendMany = channel.unary_unary(
'/lnrpc.Lightning/SendMany',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.SendManyRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.SendManyResponse.FromString,
)
self.NewAddress = channel.unary_unary(
'/lnrpc.Lightning/NewAddress',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.NewAddressRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.NewAddressResponse.FromString,
)
self.SignMessage = channel.unary_unary(
'/lnrpc.Lightning/SignMessage',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.SignMessageRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.SignMessageResponse.FromString,
)
self.VerifyMessage = channel.unary_unary(
'/lnrpc.Lightning/VerifyMessage',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.VerifyMessageRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.VerifyMessageResponse.FromString,
)
self.ConnectPeer = channel.unary_unary(
'/lnrpc.Lightning/ConnectPeer',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.ConnectPeerRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.ConnectPeerResponse.FromString,
)
self.DisconnectPeer = channel.unary_unary(
'/lnrpc.Lightning/DisconnectPeer',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.DisconnectPeerRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.DisconnectPeerResponse.FromString,
)
self.ListPeers = channel.unary_unary(
'/lnrpc.Lightning/ListPeers',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.ListPeersRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.ListPeersResponse.FromString,
)
self.GetInfo = channel.unary_unary(
'/lnrpc.Lightning/GetInfo',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.GetInfoRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.GetInfoResponse.FromString,
)
self.PendingChannels = channel.unary_unary(
'/lnrpc.Lightning/PendingChannels',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.PendingChannelsRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.PendingChannelsResponse.FromString,
)
self.ListChannels = channel.unary_unary(
'/lnrpc.Lightning/ListChannels',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.ListChannelsRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.ListChannelsResponse.FromString,
)
self.SubscribeChannelEvents = channel.unary_stream(
'/lnrpc.Lightning/SubscribeChannelEvents',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.ChannelEventSubscription.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.ChannelEventUpdate.FromString,
)
self.ClosedChannels = channel.unary_unary(
'/lnrpc.Lightning/ClosedChannels',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.ClosedChannelsRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.ClosedChannelsResponse.FromString,
)
self.OpenChannelSync = channel.unary_unary(
'/lnrpc.Lightning/OpenChannelSync',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.OpenChannelRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.ChannelPoint.FromString,
)
self.OpenChannel = channel.unary_stream(
'/lnrpc.Lightning/OpenChannel',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.OpenChannelRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.OpenStatusUpdate.FromString,
)
self.CloseChannel = channel.unary_stream(
'/lnrpc.Lightning/CloseChannel',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.CloseChannelRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.CloseStatusUpdate.FromString,
)
self.AbandonChannel = channel.unary_unary(
'/lnrpc.Lightning/AbandonChannel',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.AbandonChannelRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.AbandonChannelResponse.FromString,
)
self.SendPayment = channel.stream_stream(
'/lnrpc.Lightning/SendPayment',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.SendRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.SendResponse.FromString,
)
self.SendPaymentSync = channel.unary_unary(
'/lnrpc.Lightning/SendPaymentSync',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.SendRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.SendResponse.FromString,
)
self.SendToRoute = channel.stream_stream(
'/lnrpc.Lightning/SendToRoute',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.SendToRouteRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.SendResponse.FromString,
)
self.SendToRouteSync = channel.unary_unary(
'/lnrpc.Lightning/SendToRouteSync',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.SendToRouteRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.SendResponse.FromString,
)
self.AddInvoice = channel.unary_unary(
'/lnrpc.Lightning/AddInvoice',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.Invoice.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.AddInvoiceResponse.FromString,
)
self.ListInvoices = channel.unary_unary(
'/lnrpc.Lightning/ListInvoices',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.ListInvoiceRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.ListInvoiceResponse.FromString,
)
self.LookupInvoice = channel.unary_unary(
'/lnrpc.Lightning/LookupInvoice',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.PaymentHash.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.Invoice.FromString,
)
self.SubscribeInvoices = channel.unary_stream(
'/lnrpc.Lightning/SubscribeInvoices',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.InvoiceSubscription.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.Invoice.FromString,
)
self.DecodePayReq = channel.unary_unary(
'/lnrpc.Lightning/DecodePayReq',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.PayReqString.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.PayReq.FromString,
)
self.ListPayments = channel.unary_unary(
'/lnrpc.Lightning/ListPayments',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.ListPaymentsRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.ListPaymentsResponse.FromString,
)
self.DeleteAllPayments = channel.unary_unary(
'/lnrpc.Lightning/DeleteAllPayments',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.DeleteAllPaymentsRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.DeleteAllPaymentsResponse.FromString,
)
self.DescribeGraph = channel.unary_unary(
'/lnrpc.Lightning/DescribeGraph',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.ChannelGraphRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.ChannelGraph.FromString,
)
self.GetChanInfo = channel.unary_unary(
'/lnrpc.Lightning/GetChanInfo',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.ChanInfoRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.ChannelEdge.FromString,
)
self.GetNodeInfo = channel.unary_unary(
'/lnrpc.Lightning/GetNodeInfo',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.NodeInfoRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.NodeInfo.FromString,
)
self.QueryRoutes = channel.unary_unary(
'/lnrpc.Lightning/QueryRoutes',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.QueryRoutesRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.QueryRoutesResponse.FromString,
)
self.GetNetworkInfo = channel.unary_unary(
'/lnrpc.Lightning/GetNetworkInfo',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.NetworkInfoRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.NetworkInfo.FromString,
)
self.StopDaemon = channel.unary_unary(
'/lnrpc.Lightning/StopDaemon',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.StopRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.StopResponse.FromString,
)
self.SubscribeChannelGraph = channel.unary_stream(
'/lnrpc.Lightning/SubscribeChannelGraph',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.GraphTopologySubscription.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.GraphTopologyUpdate.FromString,
)
self.DebugLevel = channel.unary_unary(
'/lnrpc.Lightning/DebugLevel',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.DebugLevelRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.DebugLevelResponse.FromString,
)
self.FeeReport = channel.unary_unary(
'/lnrpc.Lightning/FeeReport',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.FeeReportRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.FeeReportResponse.FromString,
)
self.UpdateChannelPolicy = channel.unary_unary(
'/lnrpc.Lightning/UpdateChannelPolicy',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.PolicyUpdateRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.PolicyUpdateResponse.FromString,
)
self.ForwardingHistory = channel.unary_unary(
'/lnrpc.Lightning/ForwardingHistory',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.ForwardingHistoryRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.ForwardingHistoryResponse.FromString,
)
self.ExportChannelBackup = channel.unary_unary(
'/lnrpc.Lightning/ExportChannelBackup',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.ExportChannelBackupRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.ChannelBackup.FromString,
)
self.ExportAllChannelBackups = channel.unary_unary(
'/lnrpc.Lightning/ExportAllChannelBackups',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.ChanBackupExportRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.ChanBackupSnapshot.FromString,
)
self.VerifyChanBackup = channel.unary_unary(
'/lnrpc.Lightning/VerifyChanBackup',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.ChanBackupSnapshot.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.VerifyChanBackupResponse.FromString,
)
self.RestoreChannelBackups = channel.unary_unary(
'/lnrpc.Lightning/RestoreChannelBackups',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.RestoreChanBackupRequest.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.RestoreBackupResponse.FromString,
)
self.SubscribeChannelBackups = channel.unary_stream(
'/lnrpc.Lightning/SubscribeChannelBackups',
request_serializer=lnd__grpc_dot_protos_dot_rpc__pb2.ChannelBackupSubscription.SerializeToString,
response_deserializer=lnd__grpc_dot_protos_dot_rpc__pb2.ChanBackupSnapshot.FromString,
)
class LightningServicer(object):
pass
def WalletBalance(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChannelBalance(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetTransactions(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def EstimateFee(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
|
MIT License
|