| Column | Dtype | Range (min – max; string ranges are character lengths) |
|---|---|---|
| id | int64 | 0 – 843k |
| repository_name | string | 7 – 55 |
| file_path | string | 9 – 332 |
| class_name | string | 3 – 290 |
| human_written_code | string | 12 – 4.36M |
| class_skeleton | string | 19 – 2.2M |
| total_program_units | int64 | 1 – 9.57k |
| total_doc_str | int64 | 0 – 4.2k |
| AvgCountLine | float64 | 0 – 7.89k |
| AvgCountLineBlank | float64 | 0 – 300 |
| AvgCountLineCode | float64 | 0 – 7.89k |
| AvgCountLineComment | float64 | 0 – 7.89k |
| AvgCyclomatic | float64 | 0 – 130 |
| CommentToCodeRatio | float64 | 0 – 176 |
| CountClassBase | float64 | 0 – 48 |
| CountClassCoupled | float64 | 0 – 589 |
| CountClassCoupledModified | float64 | 0 – 581 |
| CountClassDerived | float64 | 0 – 5.37k |
| CountDeclInstanceMethod | float64 | 0 – 4.2k |
| CountDeclInstanceVariable | float64 | 0 – 299 |
| CountDeclMethod | float64 | 0 – 4.2k |
| CountDeclMethodAll | float64 | 0 – 4.2k |
| CountLine | float64 | 1 – 115k |
| CountLineBlank | float64 | 0 – 9.01k |
| CountLineCode | float64 | 0 – 94.4k |
| CountLineCodeDecl | float64 | 0 – 46.1k |
| CountLineCodeExe | float64 | 0 – 91.3k |
| CountLineComment | float64 | 0 – 27k |
| CountStmt | float64 | 1 – 93.2k |
| CountStmtDecl | float64 | 0 – 46.1k |
| CountStmtExe | float64 | 0 – 90.2k |
| MaxCyclomatic | float64 | 0 – 759 |
| MaxInheritanceTree | float64 | 0 – 16 |
| MaxNesting | float64 | 0 – 34 |
| SumCyclomatic | float64 | 0 – 6k |
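Each data row below is shown with its cells flattened: `id`, `repository_name`, `file_path` and `class_name` come first, then the full `human_written_code` cell, then the `class_skeleton` cell, and finally the 29 numeric columns in the order listed above (`total_program_units` through `SumCyclomatic`). The metric names follow the naming style of common static-analysis tools (e.g. SciTools Understand), and the rows shown here are consistent with `CommentToCodeRatio` being `CountLineComment / CountLineCode` (for `wdb._compat.Socket`, 13 / 41 ≈ 0.32). The snippet below is a minimal sketch of loading such a split and checking that relationship; the dataset id `user/class-level-code` is a placeholder, not the real name.

```python
# Minimal sketch, assuming a Hugging Face-style dataset with the columns above.
# "user/class-level-code" is a placeholder id, not the actual dataset name.
from datasets import load_dataset  # pip install datasets

df = load_dataset("user/class-level-code", split="train").to_pandas()

# The preview rows all come from the Kozea/wdb repository.
wdb = df[df["repository_name"] == "Kozea/wdb"]

# Check the apparent relationship CommentToCodeRatio ~ CountLineComment / CountLineCode.
wdb = wdb[wdb["CountLineCode"] > 0]
ratio = wdb["CountLineComment"] / wdb["CountLineCode"]
print((ratio.round(2) - wdb["CommentToCodeRatio"]).abs().max())
```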
143,848 | Kozea/wdb | Kozea_wdb/server/wdb_server/__init__.py | wdb_server.BaseWebSocketHandler |
class BaseWebSocketHandler(tornado.websocket.WebSocketHandler):
"""
Base class, used for doing the basic host checks before proceeding.
"""
def open(self, *args, **kwargs):
protocol = self.request.headers.get(
'X-Forwarded-Proto', self.request.protocol
)
host = '{protocol}://{host}'.format(
protocol=protocol, host=self.request.headers['Host']
)
if self.request.headers['Origin'] != host:
self.warn('Origin and host are not the same, closing websocket...')
self.close()
return
self.on_open(*args, **kwargs)
def on_open(self, *args, **kwargs):
"""
Method that should be overriden, containing the logic that should
happen when a new websocket connection opens. At this point, the
connection is already verified.
Does nothing by default.
"""
pass
|
class BaseWebSocketHandler(tornado.websocket.WebSocketHandler):
'''
Base class, used for doing the basic host checks before proceeding.
'''
def open(self, *args, **kwargs):
pass
def on_open(self, *args, **kwargs):
'''
Method that should be overriden, containing the logic that should
happen when a new websocket connection opens. At this point, the
connection is already verified.
Does nothing by default.
'''
pass
| 3 | 2 | 11 | 1 | 7 | 3 | 2 | 0.6 | 1 | 0 | 0 | 2 | 2 | 0 | 2 | 25 | 28 | 4 | 15 | 5 | 12 | 9 | 11 | 5 | 8 | 2 | 2 | 1 | 3 |
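The `class_skeleton` cell mirrors `human_written_code` with every method body replaced by `pass`, while signatures, decorators and docstrings are kept (docstrings re-quoted with single quotes). The dataset's actual curation pipeline is not shown here; the snippet below is a minimal sketch, using only Python's standard `ast` module, of how a comparable skeleton could be derived from the source cell. Applied to the `BaseWebSocketHandler` source above it yields essentially the skeleton shown in the same row.

```python
# Minimal sketch (not the dataset's actual curation code): keep class and
# method signatures plus docstrings, replace every implementation with `pass`.
import ast


def build_skeleton(source: str) -> str:
    def strip(node):
        kept = []
        doc = ast.get_docstring(node)
        if doc is not None:
            kept.append(ast.Expr(ast.Constant(doc)))
        if isinstance(node, ast.ClassDef):
            # Recurse into methods and nested classes, dropping everything else.
            for child in node.body:
                if isinstance(child, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
                    kept.append(strip(child))
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)) or not kept:
            kept.append(ast.Pass())
        node.body = kept
        return node

    tree = ast.parse(source)
    tops = [strip(n) for n in tree.body
            if isinstance(n, (ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef))]
    return "\n".join(ast.unparse(n) for n in tops)  # ast.unparse: Python 3.9+
```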
143,849 | Kozea/wdb | Kozea_wdb/server/wdb_server/__init__.py | wdb_server.DebugHandler |
class DebugHandler(tornado.web.RequestHandler):
def debug(self, fn):
def run():
from wdb import Wdb
Wdb.get().run_file(fn)
Process(target=run).start()
self.redirect('/')
def get(self, fn):
self.debug(fn)
def post(self, fn):
fn = self.request.arguments.get('debug_file')
if fn and fn[0]:
self.debug(fn[0].decode('utf-8'))
|
class DebugHandler(tornado.web.RequestHandler):
def debug(self, fn):
pass
def run():
pass
def get(self, fn):
pass
def post(self, fn):
pass
| 5 | 0 | 5 | 1 | 4 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 3 | 0 | 3 | 84 | 17 | 4 | 13 | 6 | 7 | 0 | 13 | 6 | 7 | 2 | 2 | 1 | 5 |
143,850 | Kozea/wdb | Kozea_wdb/server/wdb_server/__init__.py | wdb_server.HomeHandler |
class HomeHandler(tornado.web.RequestHandler):
def get(self):
self.render('home.html')
def post(self):
theme = self.request.arguments.get('theme')
if theme and theme[0]:
StyleHandler.theme = theme[0].decode('utf-8')
self.redirect('/')
|
class HomeHandler(tornado.web.RequestHandler):
def get(self):
pass
def post(self):
pass
| 3 | 0 | 4 | 0 | 4 | 0 | 2 | 0 | 1 | 1 | 1 | 0 | 2 | 0 | 2 | 83 | 9 | 1 | 8 | 4 | 5 | 0 | 8 | 4 | 5 | 2 | 2 | 1 | 3 |
143,851 | Kozea/wdb | Kozea_wdb/server/wdb_server/__init__.py | wdb_server.MainHandler |
class MainHandler(tornado.web.RequestHandler):
def get(self, type_, uuid):
self.render(
'wdb.html', uuid=uuid, new_version=server.new_version, type_=type_
)
|
class MainHandler(tornado.web.RequestHandler):
def get(self, type_, uuid):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 82 | 5 | 0 | 5 | 2 | 3 | 0 | 3 | 2 | 1 | 1 | 2 | 0 | 1 |
143,852 | Kozea/wdb | Kozea_wdb/server/wdb_server/__init__.py | wdb_server.StyleHandler |
class StyleHandler(tornado.web.RequestHandler):
themes = [
theme.replace('wdb-', '').replace('.css', '')
for theme in os.listdir(os.path.join(static_path, 'stylesheets'))
if theme.startswith('wdb-')
]
def get(self):
self.redirect(self.static_url('stylesheets/wdb-%s.css' % self.theme))
|
class StyleHandler(tornado.web.RequestHandler):
def get(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 82 | 9 | 1 | 8 | 3 | 6 | 0 | 4 | 3 | 2 | 1 | 2 | 0 | 1 |
143,853 | Kozea/wdb | Kozea_wdb/server/wdb_server/__init__.py | wdb_server.SyncWebSocketHandler |
class SyncWebSocketHandler(BaseWebSocketHandler):
def write(self, message):
log.debug('server -> syncsocket: %s' % message)
self.write_message(message)
def on_open(self):
self.uuid = str(uuid4())
syncwebsockets.add(self.uuid, self)
if not LibPythonWatcher:
syncwebsockets.send(self.uuid, 'StartLoop')
def on_message(self, message):
if '|' in message:
cmd, data = message.split('|', 1)
else:
cmd, data = message, ''
if cmd == 'ListSockets':
for uuid in sockets.uuids:
syncwebsockets.send(
self.uuid,
'AddSocket',
{
'uuid': uuid,
'filename': sockets.get_filename(uuid)
if tornado.options.options.show_filename
else '',
},
)
elif cmd == 'ListWebsockets':
for uuid in websockets.uuids:
syncwebsockets.send(self.uuid, 'AddWebSocket', uuid)
elif cmd == 'ListBreaks':
for brk in breakpoints.get():
syncwebsockets.send(self.uuid, 'AddBreak', brk)
elif cmd == 'RemoveBreak':
brk = json.loads(data)
breakpoints.remove(brk)
# If it was here, it wasn't temporary
brk['temporary'] = False
sockets.broadcast('Unbreak', brk)
elif cmd == 'RemoveUUID':
sockets.close(data)
sockets.remove(data)
websockets.close(data)
websockets.remove(data)
elif cmd == 'ListProcesses':
refresh_process(self.uuid)
elif cmd == 'Pause':
if int(data) == os.getpid():
log.debug('Pausing self')
def self_shell(variables):
# Debugging self
import wdb
wdb.set_trace()
Process(target=self_shell, args=(globals(),)).start()
else:
log.debug('Pausing %s' % data)
tornado.process.Subprocess(
['gdb', '-p', data, '-batch']
+ [
"-eval-command=call %s" % hook
for hook in [
'PyGILState_Ensure()',
'PyRun_SimpleString('
'"import wdb; wdb.set_trace(skip=1)"'
')',
'PyGILState_Release($1)',
]
]
)
elif cmd == 'RunFile':
file_name = data
def run():
from wdb import Wdb
Wdb.get().run_file(file_name)
Process(target=run).start()
elif cmd == 'RunShell':
def run():
from wdb import Wdb
Wdb.get().shell()
Process(target=run).start()
def on_close(self):
if hasattr(self, 'uuid'):
syncwebsockets.remove(self.uuid)
|
class SyncWebSocketHandler(BaseWebSocketHandler):
def write(self, message):
pass
def on_open(self):
pass
def on_message(self, message):
pass
def self_shell(variables):
pass
def run():
pass
def run():
pass
def on_close(self):
pass
| 8 | 0 | 15 | 2 | 12 | 0 | 3 | 0.03 | 1 | 5 | 2 | 0 | 4 | 1 | 4 | 29 | 96 | 15 | 79 | 16 | 68 | 2 | 49 | 16 | 38 | 16 | 3 | 2 | 24 |
143,854 | Kozea/wdb | Kozea_wdb/server/wdb_server/__init__.py | wdb_server.WebSocketHandler |
class WebSocketHandler(BaseWebSocketHandler):
def write(self, message):
log.debug('socket -> websocket: %s' % message)
message = message.decode('utf-8')
if message.startswith('BreakSet|') or message.startswith(
'BreakUnset|'
):
log.debug('Intercepted break')
cmd, brk = message.split('|', 1)
brk = json.loads(brk)
if not brk['temporary']:
del brk['temporary']
if cmd == 'BreakSet':
breakpoints.add(brk)
elif cmd == 'BreakUnset':
breakpoints.remove(brk)
self.write_message(message)
def on_open(self, uuid):
self.uuid = uuid
if isinstance(self.uuid, bytes):
self.uuid = self.uuid.decode('utf-8')
if self.uuid in websockets.uuids:
log.warn(
'Websocket already opened for %s. Closing previous one'
% self.uuid
)
websockets.send(self.uuid, 'Die')
websockets.close(uuid)
if self.uuid not in sockets.uuids:
log.warn(
'Websocket opened for %s with no correponding socket'
% self.uuid
)
sockets.send(self.uuid, 'Die')
self.close()
return
log.info('Websocket opened for %s' % self.uuid)
websockets.add(self.uuid, self)
def on_message(self, message):
log.debug('websocket -> socket: %s' % message)
if message.startswith('Broadcast|'):
message = message.split('|', 1)[1]
sockets.broadcast(message)
else:
sockets.send(self.uuid, message)
def on_close(self):
if hasattr(self, 'uuid'):
log.info('Websocket closed for %s' % self.uuid)
if not tornado.options.options.detached_session:
sockets.send(self.uuid, 'Close')
sockets.close(self.uuid)
|
class WebSocketHandler(BaseWebSocketHandler):
def write(self, message):
pass
def on_open(self, uuid):
pass
def on_message(self, message):
pass
def on_close(self):
pass
| 5 | 0 | 14 | 1 | 13 | 0 | 4 | 0 | 1 | 1 | 0 | 0 | 4 | 1 | 4 | 29 | 59 | 8 | 51 | 7 | 46 | 0 | 41 | 7 | 36 | 5 | 3 | 3 | 14 |
143,855 | Kozea/wdb | Kozea_wdb/server/wdb_server/state.py | wdb_server.state.BaseSockets |
class BaseSockets(object):
def __init__(self):
self._sockets = {}
def send(self, uuid, data, message=None):
if message:
data = data + '|' + json.dumps(message)
if isinstance(data, unicode_type):
data = data.encode('utf-8')
sck = self.get(uuid)
if sck:
self._send(sck, data)
else:
log.warn('No socket found for %s' % uuid)
def get(self, uuid):
return self._sockets.get(uuid)
def broadcast(self, cmd, message=None):
for uuid in list(self._sockets.keys()):
try:
log.debug('Broadcast to socket %s' % uuid)
self.send(uuid, cmd, message)
except Exception:
log.warn('Failed broadcast to socket %s' % uuid)
self.close(uuid)
self.remove(uuid)
def add(self, uuid, sck):
if uuid in self._sockets:
self.remove(uuid)
self.close(uuid)
self._sockets[uuid] = sck
def remove(self, uuid):
sck = self._sockets.pop(uuid, None)
if sck:
syncwebsockets.broadcast(
'Remove' + self.__class__.__name__.rstrip('s'), uuid
)
def close(self, uuid):
sck = self.get(uuid)
try:
sck.close()
except Exception:
log.warn('Failed close to socket %s' % uuid)
@property
def uuids(self):
return set(self._sockets.keys())
|
class BaseSockets(object):
def __init__(self):
pass
def send(self, uuid, data, message=None):
pass
def get(self, uuid):
pass
def broadcast(self, cmd, message=None):
pass
def add(self, uuid, sck):
pass
def remove(self, uuid):
pass
def close(self, uuid):
pass
@property
def uuids(self):
pass
| 10 | 0 | 5 | 0 | 5 | 0 | 2 | 0 | 1 | 3 | 0 | 2 | 8 | 1 | 8 | 8 | 52 | 8 | 44 | 15 | 34 | 0 | 40 | 14 | 31 | 4 | 1 | 2 | 16 |
143,856 | Kozea/wdb | Kozea_wdb/server/wdb_server/state.py | wdb_server.state.Breakpoints |
class Breakpoints(object):
def __init__(self):
self._breakpoints = []
def add(self, brk):
if brk not in self._breakpoints:
self._breakpoints.append(brk)
syncwebsockets.broadcast('AddBreak|' + json.dumps(brk))
def remove(self, brk):
if brk in self._breakpoints:
self._breakpoints.remove(brk)
syncwebsockets.broadcast('RemoveBreak|' + json.dumps(brk))
def get(self):
return self._breakpoints
|
class Breakpoints(object):
def __init__(self):
pass
def add(self, brk):
pass
def remove(self, brk):
pass
def get(self):
pass
| 5 | 0 | 3 | 0 | 3 | 0 | 2 | 0 | 1 | 0 | 0 | 0 | 4 | 1 | 4 | 4 | 16 | 3 | 13 | 6 | 8 | 0 | 13 | 6 | 8 | 2 | 1 | 1 | 6 |
143,857 | Kozea/wdb | Kozea_wdb/server/wdb_server/state.py | wdb_server.state.Sockets |
class Sockets(BaseSockets):
def __init__(self):
super(Sockets, self).__init__()
self._filenames = {}
def add(self, uuid, sck):
super(Sockets, self).add(uuid, sck)
syncwebsockets.broadcast('AddSocket', {'uuid': uuid})
def remove(self, uuid):
super(Sockets, self).remove(uuid)
self._filenames.pop(uuid, None)
def get_filename(self, uuid):
return self._filenames.get(uuid, '')
def set_filename(self, uuid, filename):
self._filenames[uuid] = filename
syncwebsockets.broadcast(
'AddSocket',
{
'uuid': uuid,
'filename': (
filename if tornado.options.options.show_filename else ''
),
},
)
def _send(self, sck, data):
sck.write(pack("!i", len(data)))
sck.write(data)
|
class Sockets(BaseSockets):
def __init__(self):
pass
def add(self, uuid, sck):
pass
def remove(self, uuid):
pass
def get_filename(self, uuid):
pass
def set_filename(self, uuid, filename):
pass
def _send(self, sck, data):
pass
| 7 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 6 | 1 | 6 | 14 | 31 | 5 | 26 | 8 | 19 | 0 | 18 | 8 | 11 | 2 | 2 | 0 | 7 |
143,858 | Kozea/wdb | Kozea_wdb/server/wdb_server/state.py | wdb_server.state.SyncWebSockets |
class SyncWebSockets(WebSockets):
# Not really need an uuid here but it avoids duplication
def add(self, uuid, sck):
super(WebSockets, self).add(uuid, sck)
|
class SyncWebSockets(WebSockets):
def add(self, uuid, sck):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0.33 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 11 | 4 | 0 | 3 | 2 | 1 | 1 | 3 | 2 | 1 | 1 | 3 | 0 | 1 |
143,859 | Kozea/wdb | Kozea_wdb/server/wdb_server/state.py | wdb_server.state.WebSockets |
class WebSockets(BaseSockets):
def _send(self, sck, data):
if sck.ws_connection:
sck.write_message(data)
else:
log.warn('Websocket is closed')
def add(self, uuid, sck):
super(WebSockets, self).add(uuid, sck)
syncwebsockets.broadcast('AddWebSocket', uuid)
|
class WebSockets(BaseSockets):
def _send(self, sck, data):
pass
def add(self, uuid, sck):
pass
| 3 | 0 | 4 | 0 | 4 | 0 | 2 | 0 | 1 | 1 | 0 | 1 | 2 | 0 | 2 | 10 | 10 | 1 | 9 | 3 | 6 | 0 | 8 | 3 | 5 | 2 | 2 | 1 | 3 |
143,860 | Kozea/wdb | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Kozea_wdb/test/test_utils.py | test.test_utils.test_method.cls |
class cls(object):
def f(self, a, b=2, *args, **kwargs):
assert get_args(sys._getframe()) == OrderedDict(
(
('self', self),
('a', a),
('b', b),
('*args', args),
('**kwargs', kwargs),
)
)
|
class cls(object):
def f(self, a, b=2, *args, **kwargs):
pass
| 2 | 0 | 10 | 0 | 10 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 1 | 11 | 0 | 11 | 2 | 9 | 0 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
143,861 | Kozea/wdb | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Kozea_wdb/client/wdb/_compat.py | wdb._compat.Socket |
class Socket(object):
"""A Socket compatible with multiprocessing.connection.Client, that
uses socket objects."""
# https://github.com/akheron/cpython/blob/3.3/Lib/multiprocessing/connection.py#L349
def __init__(self, address):
self._handle = socket.socket()
self._handle.connect(address)
self._handle.setblocking(1)
def send_bytes(self, buf):
self._check_closed()
n = len(buf)
# For wire compatibility with 3.2 and lower
header = struct.pack("!i", n)
if n > 16384:
# The payload is large so Nagle's algorithm won't be triggered
# and we'd better avoid the cost of concatenation.
chunks = [header, buf]
elif n > 0:
# Issue #20540: concatenate before sending, to avoid delays
# due to Nagle's algorithm on a TCP socket.
chunks = [header + buf]
else:
# This code path is necessary to avoid "broken pipe" errors
# when sending a 0-length buffer if the other end closed the
# pipe.
chunks = [header]
for chunk in chunks:
self._handle.sendall(chunk)
def _safe_recv(self, *args, **kwargs):
while True:
try:
return self._handle.recv(*args, **kwargs)
except socket.error as e:
# Interrupted system call
if e.errno != errno.EINTR:
raise
def recv_bytes(self):
self._check_closed()
size, = struct.unpack("!i", self._safe_recv(4))
return self._safe_recv(size)
def _check_closed(self):
if self._handle is None:
raise IOError("handle is closed")
def close(self):
self._check_closed()
self._handle.close()
self._handle = None
def poll(self, timeout=0.0):
"""Whether there is any input available to be read"""
self._check_closed()
return self._poll(timeout)
def _poll(self, timeout):
r = wait([self._handle], timeout)
return bool(r)
|
class Socket(object):
'''A Socket compatible with multiprocessing.connection.Client, that
uses socket objects.'''
def __init__(self, address):
pass
def send_bytes(self, buf):
pass
def _safe_recv(self, *args, **kwargs):
pass
def recv_bytes(self):
pass
def _check_closed(self):
pass
def close(self):
pass
def poll(self, timeout=0.0):
'''Whether there is any input available to be read'''
pass
def _poll(self, timeout):
pass
| 9 | 2 | 6 | 0 | 5 | 1 | 2 | 0.32 | 1 | 2 | 0 | 0 | 8 | 1 | 8 | 8 | 62 | 8 | 41 | 17 | 32 | 13 | 39 | 16 | 30 | 4 | 1 | 3 | 15 |
143,862 | Kozea/wdb | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Kozea_wdb/client/wdb/ext.py | wdb.ext.add_w_builtin.w |
class w(object):
"""Global shortcuts"""
@property
def tf(self):
set_trace(sys._getframe().f_back)
@property
def start(self):
start_trace(sys._getframe().f_back)
@property
def stop(self):
stop_trace(sys._getframe().f_back)
@property
def trace(self):
trace(sys._getframe().f_back)
|
class w(object):
'''Global shortcuts'''
@property
def tf(self):
pass
@property
def start(self):
pass
@property
def stop(self):
pass
@property
def trace(self):
pass
| 9 | 1 | 2 | 0 | 2 | 0 | 1 | 0.08 | 1 | 0 | 0 | 0 | 4 | 0 | 4 | 4 | 18 | 4 | 13 | 9 | 4 | 1 | 9 | 5 | 4 | 1 | 1 | 0 | 4 |
143,863 | Kozea/wdb | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Kozea_wdb/client/wdb/ext.py | wdb.ext.wdb_tornado.WdbOff |
class WdbOff(RequestHandler):
def get(self):
Wdb.enabled = False
self.write('Wdb is now off')
|
class WdbOff(RequestHandler):
def get(self):
pass
| 2 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 4 | 0 | 4 | 2 | 2 | 0 | 4 | 2 | 2 | 1 | 1 | 0 | 1 |
143,864 | Kozea/wdb | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Kozea_wdb/client/wdb/ext.py | wdb.ext.wdb_tornado.WdbOn |
class WdbOn(RequestHandler):
def get(self):
Wdb.enabled = True
self.write('Wdb is now on')
|
class WdbOn(RequestHandler):
def get(self):
pass
| 2 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 4 | 0 | 4 | 2 | 2 | 0 | 4 | 2 | 2 | 1 | 1 | 0 | 1 |
143,865 | Kozea/wdb | /Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Kozea_wdb/server/wdb_server/utils.py | wdb_server.utils.LibPythonWatcher |
class LibPythonWatcher(object):
def __init__(self, extra_search_path=None):
inotify = pyinotify.WatchManager()
self.files = glob('/usr/lib/libpython*')
if not self.files:
self.files = glob('/lib/libpython*')
if extra_search_path is not None:
# Handle custom installation paths
for root, dirnames, filenames in os.walk(extra_search_path):
for filename in fnmatch.filter(filenames, 'libpython*'):
self.files.append(os.path.join(root, filename))
log.debug('Watching for %s' % self.files)
self.notifier = pyinotify.TornadoAsyncNotifier(
inotify, ioloop, self.notified, pyinotify.ProcessEvent()
)
inotify.add_watch(
self.files,
pyinotify.EventsCodes.ALL_FLAGS['IN_OPEN']
| pyinotify.EventsCodes.ALL_FLAGS['IN_CLOSE_NOWRITE'],
)
def notified(self, notifier):
log.debug('Got notified for %s' % self.files)
refresh_process()
log.debug('Process refreshed')
def close(self):
log.debug('Closing for %s' % self.files)
self.notifier.stop()
|
class LibPythonWatcher(object):
def __init__(self, extra_search_path=None):
pass
def notified(self, notifier):
pass
def close(self):
pass
| 4 | 0 | 9 | 1 | 8 | 0 | 2 | 0.04 | 1 | 0 | 0 | 0 | 3 | 2 | 3 | 3 | 31 | 4 | 26 | 9 | 22 | 1 | 20 | 9 | 16 | 5 | 1 | 3 | 7 |
143,866 | Kozea/wdb | Kozea_wdb/test/scripts/forks.py | test.scripts.forks.Process1 |
class Process1(Process):
def run(self):
print('Process 1 start')
wtf()
print('Process 1 end')
|
class Process1(Process):
def run(self):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 5 | 0 | 5 | 2 | 3 | 0 | 5 | 2 | 3 | 1 | 1 | 0 | 1 |
143,867 | Kozea/wdb | Kozea_wdb/server/wdb_server/__init__.py | wdb_server.ActionHandler |
class ActionHandler(tornado.web.RequestHandler):
def get(self, uuid, action):
if action == 'close':
sockets.close(uuid)
sockets.remove(uuid)
websockets.close(uuid)
websockets.remove(uuid)
self.redirect('/')
|
class ActionHandler(tornado.web.RequestHandler):
def get(self, uuid, action):
pass
| 2 | 0 | 7 | 0 | 7 | 0 | 2 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 82 | 8 | 0 | 8 | 2 | 6 | 0 | 8 | 2 | 6 | 2 | 2 | 1 | 2 |
143,868 | Kozea/wdb | Kozea_wdb/client/wdb/utils.py | wdb.utils.IterableEllipsis |
class IterableEllipsis(object):
def __init__(self, size):
self.size = size
|
class IterableEllipsis(object):
def __init__(self, size):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 3 | 0 | 3 | 3 | 1 | 0 | 3 | 3 | 1 | 1 | 1 | 0 | 1 |
143,869 | Kozea/wdb | Kozea_wdb/client/wdb/utils.py | wdb.utils.timeout_of |
class timeout_of(object):
def __init__(self, time, strict=False):
self.time = time
try:
# Ignoring when not active + disabling if no alarm signal (Windows)
signal.signal(signal.SIGALRM, signal.SIG_IGN)
except Exception:
if strict:
raise Exception('Not running because timeout is not available')
self.active = False
else:
self.active = True
def timeout(self, signum, frame):
raise Exception('Timeout')
def __enter__(self):
if not self.active:
return
signal.signal(signal.SIGALRM, self.timeout)
signal.setitimer(signal.ITIMER_REAL, self.time)
def __exit__(self, *args):
if not self.active:
return
signal.setitimer(signal.ITIMER_REAL, 0)
signal.signal(signal.SIGALRM, signal.SIG_IGN)
|
class timeout_of(object):
def __init__(self, time, strict=False):
pass
def timeout(self, signum, frame):
pass
def __enter__(self):
pass
def __exit__(self, *args):
pass
| 5 | 0 | 6 | 1 | 6 | 0 | 2 | 0.04 | 1 | 1 | 0 | 0 | 4 | 2 | 4 | 4 | 29 | 5 | 23 | 7 | 18 | 1 | 23 | 7 | 18 | 3 | 1 | 2 | 8 |
143,870 | Kozea/wdb | Kozea_wdb/client/wdb/ui.py | wdb.ui.ReprEncoder |
class ReprEncoder(JSONEncoder):
"""JSON encoder using repr for objects"""
def default(self, obj):
return repr(obj)
|
class ReprEncoder(JSONEncoder):
'''JSON encoder using repr for objects'''
def default(self, obj):
pass
| 2 | 1 | 2 | 0 | 2 | 0 | 1 | 0.33 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 5 | 5 | 1 | 3 | 2 | 1 | 1 | 3 | 2 | 1 | 1 | 2 | 0 | 1 |
143,871 | Kozea/wdb | Kozea_wdb/test/scripts/objects.py | test.scripts.objects.A |
class A(object):
def __init__(self, n):
self.n = n
def __repr__(self):
return '<A object with n=%d>' % self.n
|
class A(object):
def __init__(self, n):
pass
def __repr__(self):
pass
| 3 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 2 | 1 | 2 | 2 | 6 | 1 | 5 | 4 | 2 | 0 | 5 | 4 | 2 | 1 | 1 | 0 | 2 |
143,872 | Kozea/wdb | Kozea_wdb/test/scripts/threads.py | test.scripts.threads.Thread1 |
class Thread1(Thread):
def run(self):
print('Thread 1 start')
wtf()
print('Thread 1 end')
|
class Thread1(Thread):
def run(self):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 26 | 5 | 0 | 5 | 2 | 3 | 0 | 5 | 2 | 3 | 1 | 1 | 0 | 1 |
143,873 | Kozea/wdb | Kozea_wdb/test/scripts/threads.py | test.scripts.threads.Thread2 |
class Thread2(Thread):
def run(self):
print('Thread 2 start')
wtf()
print('Thread 2 end')
|
class Thread2(Thread):
def run(self):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 26 | 5 | 0 | 5 | 2 | 3 | 0 | 5 | 2 | 3 | 1 | 1 | 0 | 1 |
143,874 | Kozea/wdb | Kozea_wdb/test/scripts/tornado_server.py | test.scripts.tornado_server.MainHandler |
class MainHandler(tornado.web.RequestHandler):
def get(self):
a = 2
b = -2
c = 1 / (a + b) < 0 # <strong> Err œ
print(c <b> a)
relay_error()
self.write("Hello, world")
|
class MainHandler(tornado.web.RequestHandler):
def get(self):
pass
| 2 | 0 | 7 | 0 | 7 | 1 | 1 | 0.13 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 82 | 8 | 0 | 8 | 5 | 6 | 1 | 8 | 5 | 6 | 1 | 2 | 0 | 1 |
143,875 | Kozea/wdb | Kozea_wdb/test/scripts/tornado_server.py | test.scripts.tornado_server.OkHandler |
class OkHandler(tornado.web.RequestHandler):
def get(self):
self.write("Ok")
|
class OkHandler(tornado.web.RequestHandler):
def get(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 82 | 3 | 0 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 2 | 0 | 1 |
143,876 | Kozea/wdb | Kozea_wdb/pytest_wdb/test_wdb.py | test_wdb.FakeWdbServer |
class FakeWdbServer(Process):
def __init__(self, stops=False):
wdb.SOCKET_SERVER = 'localhost'
wdb.SOCKET_PORT = 18273
wdb.WDB_NO_BROWSER_AUTO_OPEN = True
self.stops = stops
self.lock = Lock()
super(FakeWdbServer, self).__init__()
def __enter__(self):
self.start()
self.lock.acquire()
def __exit__(self, *args):
self.lock.release()
self.join()
wdb.Wdb.pop()
def run(self):
listener = Listener(('localhost', 18273))
try:
listener._listener._socket.settimeout(10)
except Exception:
pass
connection = listener.accept()
# uuid
connection.recv_bytes().decode('utf-8')
# ServerBreaks
connection.recv_bytes().decode('utf-8')
# Empty breaks
connection.send_bytes(b'{}')
# Continuing
if self.stops:
connection.recv_bytes().decode('utf-8')
connection.send_bytes(b'Continue')
self.lock.acquire()
connection.close()
listener.close()
self.lock.release()
|
class FakeWdbServer(Process):
def __init__(self, stops=False):
pass
def __enter__(self):
pass
def __exit__(self, *args):
pass
def run(self):
pass
| 5 | 0 | 9 | 0 | 8 | 1 | 2 | 0.13 | 1 | 4 | 1 | 0 | 4 | 2 | 4 | 4 | 40 | 4 | 32 | 9 | 27 | 4 | 32 | 9 | 27 | 3 | 1 | 1 | 6 |
143,877 | Kozea/wdb | Kozea_wdb/client/wdb/__init__.py | wdb.Wdb |
class Wdb(object):
"""Wdb debugger main class"""
_instances = {}
_sockets = []
enabled = True
breakpoints = set()
watchers = defaultdict(set)
@staticmethod
def get(no_create=False, server=None, port=None, force_uuid=None):
"""Get the thread local singleton"""
pid = os.getpid()
thread = threading.current_thread()
wdb = Wdb._instances.get((pid, thread))
if not wdb and not no_create:
wdb = object.__new__(Wdb)
Wdb.__init__(wdb, server, port, force_uuid)
wdb.pid = pid
wdb.thread = thread
Wdb._instances[(pid, thread)] = wdb
elif wdb:
if (
server is not None
and wdb.server != server
or port is not None
and wdb.port != port
):
log.warn('Different server/port set, ignoring')
else:
wdb.reconnect_if_needed()
return wdb
@staticmethod
def pop():
"""Remove instance from instance list"""
pid = os.getpid()
thread = threading.current_thread()
Wdb._instances.pop((pid, thread))
def __new__(cls, server=None, port=None):
return cls.get(server=server, port=port)
def __init__(self, server=None, port=None, force_uuid=None):
log.debug('New wdb instance %r' % self)
self.obj_cache = {}
self.compile_cache = {}
self.tracing = False
self.begun = False
self.connected = False
self.closed = None # Handle request long ignores for ext
self.stepping = False
self.extra_vars = {}
self.last_obj = None
self.reset()
self.uuid = force_uuid or str(uuid4())
self.state = Running(None)
self.full = False
self.below = 0
self.under = None
self.server = server or SOCKET_SERVER
self.port = port or SOCKET_PORT
self.interaction_stack = []
self._importmagic_index = None
self._importmagic_index_lock = threading.RLock()
self.index_imports()
self._socket = None
self.connect()
self.get_breakpoints()
def run_file(self, filename):
"""Run the file `filename` with trace"""
import __main__
__main__.__dict__.clear()
__main__.__dict__.update(
{
"__name__": "__main__",
"__file__": filename,
"__builtins__": __builtins__,
}
)
with open(filename, "rb") as fp:
statement = compile(fp.read(), filename, 'exec')
self.run(statement, filename)
def run(self, cmd, fn=None, globals=None, locals=None):
"""Run the cmd `cmd` with trace"""
if globals is None:
import __main__
globals = __main__.__dict__
if locals is None:
locals = globals
self.reset()
if isinstance(cmd, str):
str_cmd = cmd
cmd = compile(str_cmd, fn or "<wdb>", "exec")
self.compile_cache[id(cmd)] = str_cmd
if fn:
from linecache import getline
lno = 1
while True:
line = getline(fn, lno, globals)
if line is None:
lno = None
break
if executable_line(line):
break
lno += 1
self.start_trace()
if lno is not None:
self.breakpoints.add(LineBreakpoint(fn, lno, temporary=True))
try:
execute(cmd, globals, locals)
finally:
self.stop_trace()
def reset(self):
"""Refresh linecache"""
import linecache
linecache.checkcache()
def reconnect_if_needed(self):
try:
# Sending PING twice
self.send('PING')
self.send('PING')
log.debug('Dual ping sent')
except socket.error:
log.warning('socket error on ping, connection lost retrying')
self._socket = None
self.connected = False
self.begun = False
self.connect()
def connect(self):
"""Connect to wdb server"""
log.info('Connecting socket on %s:%d' % (self.server, self.port))
tries = 0
while not self._socket and tries < 10:
try:
time.sleep(0.2 * tries)
self._socket = Socket((self.server, self.port))
except socket.error:
tries += 1
log.warning(
'You must start/install wdb.server '
'(Retrying on %s:%d) [Try #%d/10]'
% (self.server, self.port, tries)
)
self._socket = None
if not self._socket:
log.warning('Could not connect to server')
return
Wdb._sockets.append(self._socket)
self._socket.send_bytes(self.uuid.encode('utf-8'))
def get_breakpoints(self):
log.info('Getting server breakpoints')
self.send('ServerBreaks')
breaks = self.receive()
try:
breaks = loads(breaks)
except JSONDecodeError:
breaks = []
self._init_breakpoints = breaks
for brk in breaks:
self.set_break(
brk['fn'], brk['lno'], False, brk['cond'], brk['fun']
)
log.info('Server breakpoints added')
def index_imports(self):
if not importmagic or self._importmagic_index:
return
self._importmagic_index_lock.acquire()
def index(self):
log.info('Indexing imports')
index = importmagic.SymbolIndex()
index.build_index(sys.path)
self._importmagic_index = index
log.info('Indexing imports done')
index_thread = Thread(
target=index, args=(self,), name='wdb_importmagic_build_index'
)
# Don't wait for completion, let it die alone:
index_thread.daemon = True
index_thread.start()
self._importmagic_index_lock.release()
def breakpoints_to_json(self):
return [brk.to_dict() for brk in self.breakpoints]
def _get_under_code_ref(self):
code = getattr(self.under, '__code__', None)
if not code and hasattr(self.under, '__call__'):
# Allow for callable objects
code = getattr(self.under.__call__, '__code__', None)
return code
def _walk_frame_ancestry(self, frame):
iframe = frame
while iframe is not None:
yield iframe
iframe = iframe.f_back
def check_below(self, frame):
stop_frame = self.state.frame
if not any((self.below, self.under)):
return frame == stop_frame, False
under_code = self._get_under_code_ref()
if under_code:
stop_frame = None
for iframe in self._walk_frame_ancestry(frame):
if iframe.f_code == under_code:
stop_frame = iframe
if not stop_frame:
return False, False
below = 0
for iframe in self._walk_frame_ancestry(frame):
if stop_frame == iframe:
break
below += 1
return below == self.below, below == self.below
def trace_dispatch(self, frame, event, arg):
"""This function is called every line,
function call, function return and exception during trace"""
fun = getattr(self, 'handle_' + event, None)
if not fun:
return self.trace_dispatch
below, continue_below = self.check_below(frame)
if (
self.state.stops(frame, event)
or (event == 'line' and self.breaks(frame))
or (event == 'exception' and (self.full or below))
):
fun(frame, arg)
if event == 'return' and frame == self.state.frame:
# Upping state
if self.state.up():
# No more frames
self.stop_trace()
return
# Threading / Multiprocessing support
co = self.state.frame.f_code
if (
co.co_filename.endswith('threading.py')
and co.co_name.endswith('_bootstrap_inner')
) or (
self.state.frame.f_code.co_filename.endswith(
os.path.join('multiprocessing', 'process.py')
)
and self.state.frame.f_code.co_name == '_bootstrap'
):
# Thread / Process is dead
self.stop_trace()
self.die()
return
if (
event == 'call'
and not self.stepping
and not self.full
and not continue_below
and not self.get_file_breaks(frame.f_code.co_filename)
):
# Don't trace anymore here
return
return self.trace_dispatch
def trace_debug_dispatch(self, frame, event, arg):
"""Utility function to add debug to tracing"""
trace_log.info(
'Frame:%s. Event: %s. Arg: %r' % (pretty_frame(frame), event, arg)
)
trace_log.debug(
'state %r breaks ? %s stops ? %s'
% (
self.state,
self.breaks(frame, no_remove=True),
self.state.stops(frame, event),
)
)
if event == 'return':
trace_log.debug(
'Return: frame: %s, state: %s, state.f_back: %s'
% (
pretty_frame(frame),
pretty_frame(self.state.frame),
pretty_frame(self.state.frame.f_back),
)
)
if self.trace_dispatch(frame, event, arg):
return self.trace_debug_dispatch
trace_log.debug("No trace %s" % pretty_frame(frame))
def start_trace(self, full=False, frame=None, below=0, under=None):
"""Start tracing from here"""
if self.tracing:
return
self.reset()
log.info('Starting trace')
frame = frame or sys._getframe().f_back
# Setting trace without pausing
self.set_trace(frame, break_=False)
self.tracing = True
self.below = below
self.under = under
self.full = full
def set_trace(self, frame=None, break_=True):
"""Break at current state"""
# We are already tracing, do nothing
trace_log.info(
'Setting trace %s (stepping %s) (current_trace: %s)'
% (
pretty_frame(frame or sys._getframe().f_back),
self.stepping,
sys.gettrace(),
)
)
if self.stepping or self.closed:
return
self.reset()
trace = (
self.trace_dispatch
if trace_log.level >= 30
else self.trace_debug_dispatch
)
trace_frame = frame = frame or sys._getframe().f_back
while frame:
frame.f_trace = trace
frame = frame.f_back
self.state = Step(trace_frame) if break_ else Running(trace_frame)
sys.settrace(trace)
def stop_trace(self, frame=None):
"""Stop tracing from here"""
self.tracing = False
self.full = False
frame = frame or sys._getframe().f_back
while frame:
del frame.f_trace
frame = frame.f_back
sys.settrace(None)
log.info('Stopping trace')
def set_until(self, frame, lineno=None):
"""Stop on the next line number."""
self.state = Until(frame, frame.f_lineno)
def set_step(self, frame):
"""Stop on the next line."""
self.state = Step(frame)
def set_next(self, frame):
"""Stop on the next line in current frame."""
self.state = Next(frame)
def set_return(self, frame):
"""Stop when returning from the given frame."""
self.state = Return(frame)
def set_continue(self, frame):
"""Don't stop anymore"""
self.state = Running(frame)
if not self.tracing and not self.breakpoints:
# If we were in a set_trace and there's no breakpoint to trace for
# Run without trace
self.stop_trace()
def get_break(self, filename, lineno, temporary, cond, funcname):
if lineno and not cond:
return LineBreakpoint(filename, lineno, temporary)
elif cond:
return ConditionalBreakpoint(filename, lineno, cond, temporary)
elif funcname:
return FunctionBreakpoint(filename, funcname, temporary)
else:
return Breakpoint(filename, temporary)
def set_break(
self, filename, lineno=None, temporary=False, cond=None, funcname=None
):
"""Put a breakpoint for filename"""
log.info(
'Setting break fn:%s lno:%s tmp:%s cond:%s fun:%s'
% (filename, lineno, temporary, cond, funcname)
)
breakpoint = self.get_break(
filename, lineno, temporary, cond, funcname
)
self.breakpoints.add(breakpoint)
log.info('Breakpoint %r added' % breakpoint)
return breakpoint
def clear_break(
self, filename, lineno=None, temporary=False, cond=None, funcname=None
):
"""Remove a breakpoint"""
log.info(
'Removing break fn:%s lno:%s tmp:%s cond:%s fun:%s'
% (filename, lineno, temporary, cond, funcname)
)
breakpoint = self.get_break(
filename, lineno, temporary or False, cond, funcname
)
if temporary is None and breakpoint not in self.breakpoints:
breakpoint = self.get_break(filename, lineno, True, cond, funcname)
try:
self.breakpoints.remove(breakpoint)
log.info('Breakpoint %r removed' % breakpoint)
except Exception:
log.info('Breakpoint %r not removed: not found' % breakpoint)
def safe_repr(self, obj):
"""Like a repr but without exception"""
try:
return repr(obj)
except Exception as e:
return '??? Broken repr (%s: %s)' % (type(e).__name__, e)
def safe_better_repr(
self, obj, context=None, html=True, level=0, full=False
):
"""Repr with inspect links on objects"""
context = context and dict(context) or {}
recursion = id(obj) in context
if not recursion:
context[id(obj)] = obj
try:
rv = self.better_repr(obj, context, html, level + 1, full)
except Exception:
rv = None
if rv:
return rv
self.obj_cache[id(obj)] = obj
if html:
return '<a href="%d" class="inspect">%s%s</a>' % (
id(obj),
'Recursion of ' if recursion else '',
escape(self.safe_repr(obj)),
)
return '%s%s' % (
'Recursion of ' if recursion else '',
self.safe_repr(obj),
)
def better_repr(self, obj, context=None, html=True, level=1, full=False):
"""Repr with html decorations or indentation"""
abbreviate = (lambda x, level, **kw: x) if full else cut_if_too_long
def get_too_long_repr(ie):
r = '[%d more…]' % ie.size
if html:
self.obj_cache[id(obj)] = obj
return '<a href="dump/%d" class="inspect">%s</a>' % (
id(obj),
r,
)
return r
if isinstance(obj, dict):
if isinstance(obj, OrderedDict):
dict_sorted = lambda it, f: it
else:
dict_sorted = sorted
dict_repr = ' ' * (level - 1)
if type(obj) != dict:
dict_repr = type(obj).__name__ + '({'
closer = '})'
else:
dict_repr = '{'
closer = '}'
if len(obj) > 2:
dict_repr += '\n' + ' ' * level
if html:
dict_repr += '''<table class="
mdl-data-table mdl-js-data-table
mdl-data-table--selectable mdl-shadow--2dp">'''
dict_repr += ''.join(
[
(
'<tr><td class="key">'
+ self.safe_repr(key)
+ ':'
+ '</td>'
'<td class="val '
+ 'mdl-data-table__cell--non-numeric">'
+ self.safe_better_repr(
val, context, html, level, full
)
+ '</td></tr>'
)
if not isinstance(key, IterableEllipsis)
else (
'<tr><td colspan="2" class="ellipse">'
+ get_too_long_repr(key)
+ '</td></tr>'
)
for key, val in abbreviate(
dict_sorted(obj.items(), key=lambda x: x[0]),
level,
tuple_=True,
)
]
)
dict_repr += '</table>'
else:
dict_repr += ('\n' + ' ' * level).join(
[
self.safe_repr(key)
+ ': '
+ self.safe_better_repr(
val, context, html, level, full
)
if not isinstance(key, IterableEllipsis)
else get_too_long_repr(key)
for key, val in abbreviate(
dict_sorted(obj.items(), key=lambda x: x[0]),
level,
tuple_=True,
)
]
)
closer = '\n' + ' ' * (level - 1) + closer
else:
dict_repr += ', '.join(
[
self.safe_repr(key)
+ ': '
+ self.safe_better_repr(
val, context, html, level, full
)
for key, val in dict_sorted(
obj.items(), key=lambda x: x[0]
)
]
)
dict_repr += closer
return dict_repr
if any(
[
isinstance(obj, list),
isinstance(obj, set),
isinstance(obj, tuple),
]
):
iter_repr = ' ' * (level - 1)
if type(obj) == list:
iter_repr = '['
closer = ']'
elif type(obj) == set:
iter_repr = '{'
closer = '}'
elif type(obj) == tuple:
iter_repr = '('
closer = ')'
else:
iter_repr = escape(obj.__class__.__name__) + '(['
closer = '])'
splitter = ', '
if len(obj) > 2 and html:
splitter += '\n' + ' ' * level
iter_repr += '\n' + ' ' * level
closer = '\n' + ' ' * (level - 1) + closer
iter_repr += splitter.join(
[
self.safe_better_repr(val, context, html, level, full)
if not isinstance(val, IterableEllipsis)
else get_too_long_repr(val)
for val in abbreviate(obj, level)
]
)
iter_repr += closer
return iter_repr
@contextmanager
def capture_output(self, with_hook=True):
"""Steal stream output, return them in string, restore them"""
self.hooked = ''
def display_hook(obj):
# That's some dirty hack
self.hooked += self.safe_better_repr(obj)
self.last_obj = obj
stdout, stderr = sys.stdout, sys.stderr
if with_hook:
d_hook = sys.displayhook
sys.displayhook = display_hook
sys.stdout, sys.stderr = StringIO(), StringIO()
out, err = [], []
try:
yield out, err
finally:
out.extend(sys.stdout.getvalue().splitlines())
err.extend(sys.stderr.getvalue().splitlines())
if with_hook:
sys.displayhook = d_hook
sys.stdout, sys.stderr = stdout, stderr
def dmp(self, thing):
"""Dump the content of an object in a dict for wdb.js"""
def safe_getattr(key):
"""Avoid crash on getattr"""
try:
return getattr(thing, key)
except Exception as e:
return 'Error getting attr "%s" from "%s" (%s: %s)' % (
key,
thing,
type(e).__name__,
e,
)
return dict(
(
escape(key),
{
'val': self.safe_better_repr(safe_getattr(key)),
'type': type(safe_getattr(key)).__name__,
},
)
for key in dir(thing)
)
def get_file(self, filename):
"""Get file source from cache"""
import linecache
# Hack for frozen importlib bootstrap
if filename == '<frozen importlib._bootstrap>':
filename = os.path.join(
os.path.dirname(linecache.__file__),
'importlib',
'_bootstrap.py',
)
return to_unicode_string(
''.join(linecache.getlines(filename)), filename
)
def get_stack(self, f, t):
"""Build the stack from frame and traceback"""
stack = []
if t and t.tb_frame == f:
t = t.tb_next
while f is not None:
stack.append((f, f.f_lineno))
f = f.f_back
stack.reverse()
i = max(0, len(stack) - 1)
while t is not None:
stack.append((t.tb_frame, t.tb_lineno))
t = t.tb_next
if f is None:
i = max(0, len(stack) - 1)
return stack, i
def get_trace(self, frame, tb):
"""Get a dict of the traceback for wdb.js use"""
import linecache
frames = []
stack, _ = self.get_stack(frame, tb)
current = 0
for i, (stack_frame, lno) in enumerate(stack):
code = stack_frame.f_code
filename = code.co_filename or '<unspecified>'
line = None
if filename[0] == '<' and filename[-1] == '>':
line = get_source_from_byte_code(code)
fn = filename
else:
fn = os.path.abspath(filename)
if not line:
linecache.checkcache(filename)
line = linecache.getline(filename, lno, stack_frame.f_globals)
if not line:
line = self.compile_cache.get(id(code), '')
line = to_unicode_string(line, filename)
line = line and line.strip()
startlnos = dis.findlinestarts(code)
lastlineno = list(startlnos)[-1][1]
if frame == stack_frame:
current = i
frames.append(
{
'file': fn,
'function': code.co_name,
'flno': code.co_firstlineno,
'llno': lastlineno,
'lno': lno,
'code': line,
'level': i,
'current': frame == stack_frame,
}
)
# While in exception always put the context to the top
return stack, frames, current
def send(self, data):
"""Send data through websocket"""
log.debug('Sending %s' % data)
if not self._socket:
log.warn('No connection')
return
self._socket.send_bytes(data.encode('utf-8'))
def receive(self, timeout=None):
"""Receive data through websocket"""
log.debug('Receiving')
if not self._socket:
log.warn('No connection')
return
try:
if timeout:
rv = self._socket.poll(timeout)
if not rv:
log.info('Connection timeouted')
return 'Quit'
data = self._socket.recv_bytes()
except Exception:
log.error('Connection lost')
return 'Quit'
log.debug('Got %s' % data)
return data.decode('utf-8')
def open_browser(self, type_='debug'):
if not self.connected:
log.debug('Launching browser and wait for connection')
web_url = 'http://%s:%d/%s/session/%s' % (
WEB_SERVER or 'localhost',
WEB_PORT or 1984,
type_,
self.uuid,
)
server = WEB_SERVER or '[wdb.server]'
if WEB_PORT:
server += ':%s' % WEB_PORT
if WDB_NO_BROWSER_AUTO_OPEN:
log.warning(
'You can now launch your browser at '
'http://%s/%s/session/%s' % (server, type_, self.uuid)
)
elif not webbrowser.open(web_url):
log.warning(
'Unable to open browser, '
'please go to http://%s/%s/session/%s'
% (server, type_, self.uuid)
)
self.connected = True
def shell(self, source=None, vars=None):
self.interaction(
sys._getframe(),
exception_description='Shell',
shell=True,
shell_vars=vars,
source=source,
)
def interaction(
self,
frame,
tb=None,
exception='Wdb',
exception_description='Stepping',
init=None,
shell=False,
shell_vars=None,
source=None,
iframe_mode=False,
timeout=None,
post_mortem=False,
):
"""User interaction handling blocking on socket receive"""
log.info(
'Interaction %r %r %r %r'
% (frame, tb, exception, exception_description)
)
self.reconnect_if_needed()
self.stepping = not shell
if not iframe_mode:
opts = {}
if shell:
opts['type_'] = 'shell'
if post_mortem:
opts['type_'] = 'pm'
self.open_browser(**opts)
lvl = len(self.interaction_stack)
if lvl:
exception_description += ' [recursive%s]' % (
'^%d' % lvl if lvl > 1 else ''
)
interaction = Interaction(
self,
frame,
tb,
exception,
exception_description,
init=init,
shell=shell,
shell_vars=shell_vars,
source=source,
timeout=timeout,
)
self.interaction_stack.append(interaction)
# For meta debugging purpose
self._ui = interaction
if self.begun:
# Each new state sends the trace and selects a frame
interaction.init()
else:
self.begun = True
interaction.loop()
self.interaction_stack.pop()
if lvl:
self.interaction_stack[-1].init()
def handle_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
fun = frame.f_code.co_name
log.info('Calling: %r' % fun)
init = 'Echo|%s' % dump(
{
'for': '__call__',
'val': '%s(%s)'
% (
fun,
', '.join(
[
'%s=%s' % (key, self.safe_better_repr(value))
for key, value in get_args(frame).items()
]
),
),
}
)
self.interaction(
frame, init=init, exception_description='Calling %s' % fun
)
def handle_line(self, frame, arg):
"""This function is called when we stop or break at this line."""
log.info('Stopping at line %s' % pretty_frame(frame))
self.interaction(frame)
def handle_return(self, frame, return_value):
"""This function is called when a return trap is set here."""
self.obj_cache[id(return_value)] = return_value
self.extra_vars['__return__'] = return_value
fun = frame.f_code.co_name
log.info('Returning from %r with value: %r' % (fun, return_value))
init = 'Echo|%s' % dump(
{'for': '__return__', 'val': self.safe_better_repr(return_value)}
)
self.interaction(
frame,
init=init,
exception_description='Returning from %s with value %s'
% (fun, return_value),
)
def handle_exception(self, frame, exc_info):
"""This function is called if an exception occurs,
but only if we are to stop at or just below this level."""
type_, value, tb = exc_info
# Python 3 is broken see http://bugs.python.org/issue17413
_value = value
if not isinstance(_value, BaseException):
_value = type_(value)
fake_exc_info = type_, _value, tb
log.error('Exception during trace', exc_info=fake_exc_info)
self.obj_cache[id(exc_info)] = exc_info
self.extra_vars['__exception__'] = exc_info
exception = type_.__name__
exception_description = str(value)
init = 'Echo|%s' % dump(
{
'for': '__exception__',
'val': escape('%s: %s') % (exception, exception_description),
}
)
# User exception is 4 frames away from exception
frame = frame or sys._getframe().f_back.f_back.f_back.f_back
self.interaction(
frame, tb, exception, exception_description, init=init
)
def breaks(self, frame, no_remove=False):
"""Return True if there's a breakpoint at frame"""
for breakpoint in set(self.breakpoints):
if breakpoint.breaks(frame):
if breakpoint.temporary and not no_remove:
self.breakpoints.remove(breakpoint)
return True
return False
def get_file_breaks(self, filename):
"""List all file `filename` breakpoints"""
return [
breakpoint
for breakpoint in self.breakpoints
if breakpoint.on_file(filename)
]
def get_breaks_lno(self, filename):
"""List all line numbers that have a breakpoint"""
return list(
filter(
lambda x: x is not None,
[
getattr(breakpoint, 'line', None)
for breakpoint in self.breakpoints
if breakpoint.on_file(filename)
],
)
)
def die(self):
"""Time to quit"""
log.info('Time to die')
if self.connected:
try:
self.send('Die')
except Exception:
pass
if self._socket:
self._socket.close()
self.pop()
|
class Wdb(object):
'''Wdb debugger main class'''
@staticmethod
def get(no_create=False, server=None, port=None, force_uuid=None):
'''Get the thread local singleton'''
pass
@staticmethod
def pop():
'''Remove instance from instance list'''
pass
def __new__(cls, server=None, port=None):
pass
def __init__(self, server=None, port=None, force_uuid=None):
pass
def run_file(self, filename):
'''Run the file `filename` with trace'''
pass
def run_file(self, filename):
'''Run the cmd `cmd` with trace'''
pass
def reset(self):
'''Refresh linecache'''
pass
def reconnect_if_needed(self):
pass
def connect(self):
'''Connect to wdb server'''
pass
def get_breakpoints(self):
pass
def index_imports(self):
pass
def index_imports(self):
pass
def breakpoints_to_json(self):
pass
def _get_under_code_ref(self):
pass
def _walk_frame_ancestry(self, frame):
pass
def check_below(self, frame):
pass
def trace_dispatch(self, frame, event, arg):
'''This function is called every line,
function call, function return and exception during trace'''
pass
def trace_debug_dispatch(self, frame, event, arg):
'''Utility function to add debug to tracing'''
pass
def start_trace(self, full=False, frame=None, below=0, under=None):
'''Start tracing from here'''
pass
def set_trace(self, frame=None, break_=True):
'''Break at current state'''
pass
def stop_trace(self, frame=None):
'''Stop tracing from here'''
pass
def set_until(self, frame, lineno=None):
'''Stop on the next line number.'''
pass
def set_step(self, frame):
'''Stop on the next line.'''
pass
def set_next(self, frame):
'''Stop on the next line in current frame.'''
pass
def set_return(self, frame):
'''Stop when returning from the given frame.'''
pass
def set_continue(self, frame):
'''Don't stop anymore'''
pass
def get_breakpoints(self):
pass
def set_break(
self, filename, lineno=None, temporary=False, cond=None, funcname=None
):
'''Put a breakpoint for filename'''
pass
def clear_break(
self, filename, lineno=None, temporary=False, cond=None, funcname=None
):
'''Remove a breakpoint'''
pass
def safe_repr(self, obj):
'''Like a repr but without exception'''
pass
def safe_better_repr(
self, obj, context=None, html=True, level=0, full=False
):
'''Repr with inspect links on objects'''
pass
def better_repr(self, obj, context=None, html=True, level=1, full=False):
'''Repr with html decorations or indentation'''
pass
def get_too_long_repr(ie):
pass
@contextmanager
def capture_output(self, with_hook=True):
'''Steal stream output, return them in string, restore them'''
pass
def display_hook(obj):
pass
def dmp(self, thing):
'''Dump the content of an object in a dict for wdb.js'''
pass
def safe_getattr(key):
'''Avoid crash on getattr'''
pass
def get_file(self, filename):
'''Get file source from cache'''
pass
def get_stack(self, f, t):
'''Build the stack from frame and traceback'''
pass
def get_trace(self, frame, tb):
'''Get a dict of the traceback for wdb.js use'''
pass
def send(self, data):
'''Send data through websocket'''
pass
def receive(self, timeout=None):
'''Receive data through websocket'''
pass
def open_browser(self, type_='debug'):
pass
def shell(self, source=None, vars=None):
pass
def interaction(
self,
frame,
tb=None,
exception='Wdb',
exception_description='Stepping',
init=None,
shell=False,
shell_vars=None,
source=None,
iframe_mode=False,
timeout=None,
post_mortem=False,
):
'''User interaction handling blocking on socket receive'''
pass
def handle_call(self, frame, argument_list):
'''This method is called when there is the remote possibility
that we ever need to stop in this function.'''
pass
def handle_line(self, frame, arg):
'''This function is called when we stop or break at this line.'''
pass
def handle_return(self, frame, return_value):
'''This function is called when a return trap is set here.'''
pass
def handle_exception(self, frame, exc_info):
'''This function is called if an exception occurs,
but only if we are to stop at or just below this level.'''
pass
def breaks(self, frame, no_remove=False):
'''Return True if there's a breakpoint at frame'''
pass
def get_file_breaks(self, filename):
'''List all file `filename` breakpoints'''
pass
def get_breaks_lno(self, filename):
'''List all line numbers that have a breakpoint'''
pass
def die(self):
'''Time to quit'''
pass
| 57 | 39 | 18 | 1 | 16 | 1 | 3 | 0.08 | 1 | 25 | 12 | 0 | 47 | 23 | 49 | 49 | 979 | 105 | 813 | 182 | 731 | 63 | 515 | 157 | 455 | 15 | 1 | 3 | 158 |
143,878 | Kozea/wdb | Kozea_wdb/client/wdb/breakpoint.py | wdb.breakpoint.Breakpoint |
class Breakpoint(object):
"""Simple breakpoint that breaks if in file"""
def __init__(self, file, temporary=False):
self.fn = file
if not file.endswith(('.py', '.pyc', '.pyo')):
file = file_from_import(file)
self.file = canonic(file)
self.temporary = temporary
def on_file(self, filename):
return canonic(filename) == self.file
def breaks(self, frame):
return self.on_file(frame.f_code.co_filename)
def __repr__(self):
s = 'Temporary ' if self.temporary else ''
s += self.__class__.__name__
s += ' on file %s' % self.file
return s
def __eq__(self, other):
return self.file == other.file and self.temporary == other.temporary
def __hash__(self):
s = sha1()
s.update(repr(self).encode('utf-8'))
return int(s.hexdigest(), 16)
def to_dict(self):
return {
'fn': self.file,
'lno': getattr(self, 'line', None),
'cond': getattr(self, 'condition', None),
'fun': getattr(self, 'function', None),
'temporary': self.temporary,
}
|
class Breakpoint(object):
'''Simple breakpoint that breaks if in file'''
def __init__(self, file, temporary=False):
pass
def on_file(self, filename):
pass
def breaks(self, frame):
pass
def __repr__(self):
pass
def __eq__(self, other):
pass
def __hash__(self):
pass
def to_dict(self):
pass
| 8 | 1 | 4 | 0 | 4 | 0 | 1 | 0.03 | 1 | 1 | 0 | 3 | 7 | 3 | 7 | 7 | 38 | 7 | 30 | 13 | 22 | 1 | 24 | 13 | 16 | 2 | 1 | 1 | 9 |
143,879 | Kozea/wdb | Kozea_wdb/client/wdb/utils.py | wdb.utils.Html5Diff |
class Html5Diff(HtmlDiff):
_table_template = """
<table class="diff">
%(header_row)s
<tbody>
%(data_rows)s
</tbody>
</table>"""
def _format_line(self, side, flag, linenum, text):
"""Returns HTML markup of "from" / "to" text lines
side -- 0 or 1 indicating "from" or "to" text
flag -- indicates if difference on line
linenum -- line number (used for line number column)
text -- line text to be marked up
"""
try:
linenum = '%d' % linenum
id = ' id="%s%s"' % (self._prefix[side], linenum)
except TypeError:
# handle blank lines where linenum is '>' or ''
id = ''
# replace those things that would get confused with HTML symbols
text = (
text.replace("&", "&amp;")
.replace(">", "&gt;")
.replace("<", "&lt;")
)
type_ = 'neutral'
if '\0+' in text:
type_ = 'add'
if '\0-' in text:
if type_ == 'add':
type_ = 'chg'
type_ = 'sub'
if '\0^' in text:
type_ = 'chg'
# make space non-breakable so they don't get compressed or line wrapped
text = text.replace(' ', '&nbsp;').rstrip()
return (
'<td class="diff_lno"%s>%s</td>'
'<td class="diff_line diff_line_%s">%s</td>'
% (id, linenum, type_, text)
)
def make_table(
self,
fromlines,
tolines,
fromdesc='',
todesc='',
context=False,
numlines=5,
):
"""Returns HTML table of side by side comparison with change highlights
Arguments:
fromlines -- list of "from" lines
tolines -- list of "to" lines
fromdesc -- "from" file column header string
todesc -- "to" file column header string
context -- set to True for contextual differences (defaults to False
which shows full differences).
numlines -- number of context lines. When context is set True,
controls number of lines displayed before and after the change.
When context is False, controls the number of lines to place
the "next" link anchors before the next change (so click of
"next" link jumps to just before the change).
"""
# make unique anchor prefixes so that multiple tables may exist
# on the same page without conflict.
self._make_prefix()
# change tabs to spaces before it gets more difficult after we insert
# markup
fromlines, tolines = self._tab_newline_replace(fromlines, tolines)
# create diffs iterator which generates side by side from/to data
if context:
context_lines = numlines
else:
context_lines = None
diffs = _mdiff(
fromlines,
tolines,
context_lines,
linejunk=self._linejunk,
charjunk=self._charjunk,
)
# set up iterator to wrap lines that exceed desired width
if self._wrapcolumn:
diffs = self._line_wrapper(diffs)
# collect up from/to lines and flags into lists (also format the lines)
fromlist, tolist, flaglist = self._collect_lines(diffs)
# process change flags, generating middle column of next anchors/links
fromlist, tolist, flaglist, next_href, next_id = self._convert_flags(
fromlist, tolist, flaglist, context, numlines
)
s = []
fmt = ' <tr>%s%s</tr>\n'
for i in range(len(flaglist)):
if flaglist[i] is None:
# mdiff yields None on separator lines skip the bogus ones
# generated for the first line
if i > 0:
s.append(' </tbody> \n <tbody>\n')
else:
s.append(fmt % (fromlist[i], tolist[i]))
if fromdesc or todesc:
header_row = '<thead><tr>%s%s</tr></thead>' % (
'<th colspan="2" class="diff_header">%s</th>' % fromdesc,
'<th colspan="2" class="diff_header">%s</th>' % todesc,
)
else:
header_row = ''
table = self._table_template % dict(
data_rows=''.join(s), header_row=header_row, prefix=self._prefix[1]
)
return (
table.replace('\0+', '<span class="diff_add">')
.replace('\0-', '<span class="diff_sub">')
.replace('\0^', '<span class="diff_chg">')
.replace('\1', '</span>')
.replace('\t', ' ')
)
|
class Html5Diff(HtmlDiff):
def _format_line(self, side, flag, linenum, text):
'''Returns HTML markup of "from" / "to" text lines
side -- 0 or 1 indicating "from" or "to" text
flag -- indicates if difference on line
linenum -- line number (used for line number column)
text -- line text to be marked up
'''
pass
def make_table(
self,
fromlines,
tolines,
fromdesc='',
todesc='',
context=False,
numlines=5,
):
'''Returns HTML table of side by side comparison with change highlights
Arguments:
fromlines -- list of "from" lines
tolines -- list of "to" lines
fromdesc -- "from" file column header string
todesc -- "to" file column header string
context -- set to True for contextual differences (defaults to False
which shows full differences).
numlines -- number of context lines. When context is set True,
controls number of lines displayed before and after the change.
When context is False, controls the number of lines to place
the "next" link anchors before the next change (so click of
"next" link jumps to just before the change).
'''
pass
| 3 | 2 | 64 | 8 | 40 | 17 | 7 | 0.38 | 1 | 3 | 0 | 0 | 2 | 1 | 2 | 12 | 137 | 17 | 87 | 23 | 76 | 33 | 43 | 14 | 40 | 7 | 2 | 3 | 13 |
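Usage sketch for the record above: Html5Diff only overrides the cell and table markup of the standard-library difflib.HtmlDiff, so the make_table interface documented in its docstring can be exercised with the stdlib base class alone; the snippet below is illustrative and not taken from the wdb sources.

import difflib

fromlines = ["a = 1\n", "b = 2\n", "print(a + b)\n"]
tolines = ["a = 1\n", "b = 3\n", "print(a + b)\n"]

# make_table returns an HTML <table> fragment, one row per line pair,
# with additions, deletions and intra-line changes highlighted.
table = difflib.HtmlDiff(wrapcolumn=60).make_table(
    fromlines, tolines, fromdesc="before", todesc="after", context=True, numlines=1
)
print(table[:80])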
143,880 |
Kozea/wdb
|
Kozea_wdb/client/wdb/breakpoint.py
|
wdb.breakpoint.FunctionBreakpoint
|
class FunctionBreakpoint(Breakpoint):
"""Breakpoint that breaks if in file in function"""
def __init__(self, file, function, temporary=False):
self.function = function
if not file.endswith(('.py', '.pyc', '.pyo')):
file = file_from_import(file, function)
self.file = canonic(file)
self.temporary = temporary
def breaks(self, frame):
return (
super(FunctionBreakpoint, self).breaks(frame)
and frame.f_code.co_name == self.function
)
def __repr__(self):
return (
super(FunctionBreakpoint, self).__repr__()
+ ' in function %s' % self.function
)
def __eq__(self, other):
return (
super(FunctionBreakpoint, self).__eq__(other)
and self.function == other.function
)
def __hash__(self):
return super(FunctionBreakpoint, self).__hash__()
|
class FunctionBreakpoint(Breakpoint):
'''Breakpoint that breaks if in file in function'''
def __init__(self, file, function, temporary=False):
pass
def breaks(self, frame):
pass
def __repr__(self):
pass
def __eq__(self, other):
pass
def __hash__(self):
pass
| 6 | 1 | 5 | 0 | 5 | 0 | 1 | 0.04 | 1 | 1 | 0 | 0 | 5 | 3 | 5 | 12 | 30 | 5 | 24 | 9 | 18 | 1 | 15 | 9 | 9 | 2 | 2 | 1 | 6 |
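Matching sketch for the record above: FunctionBreakpoint.breaks only inspects the frame's code object, so the self-contained helper below (hypothetical name breaks_in, no wdb imports, canonic() normalisation omitted) reproduces the same file-plus-function test.

import sys

def breaks_in(frame, filename, function):
    # Same test as FunctionBreakpoint.breaks: the frame must come from the
    # given file and the given function.
    return (
        frame.f_code.co_filename == filename
        and frame.f_code.co_name == function
    )

def demo():
    frame = sys._getframe()
    print(breaks_in(frame, frame.f_code.co_filename, "demo"))   # True
    print(breaks_in(frame, frame.f_code.co_filename, "other"))  # False

demo()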
143,881 |
Kozea/wdb
|
Kozea_wdb/client/wdb/breakpoint.py
|
wdb.breakpoint.ConditionalBreakpoint
|
class ConditionalBreakpoint(Breakpoint):
"""Breakpoint that breaks if condition is True at line in file"""
def __init__(self, file, line, condition, temporary=False):
self.line = line
self.condition = condition
super(ConditionalBreakpoint, self).__init__(file, temporary)
def breaks(self, frame):
try:
return (
super(ConditionalBreakpoint, self).breaks(frame)
and (self.line is None or frame.f_lineno == self.line)
and eval(self.condition, frame.f_globals, frame.f_locals)
)
except Exception:
            # Break anyway in case of an error in the condition
log.warning('Error in conditional break', exc_info=True)
return True
def __repr__(self):
return (
super(ConditionalBreakpoint, self).__repr__()
+ ' under the condition %s' % self.condition
)
def __eq__(self, other):
return (
super(ConditionalBreakpoint, self).__eq__(other)
and self.condition == other.condition
)
def __hash__(self):
return super(ConditionalBreakpoint, self).__hash__()
|
class ConditionalBreakpoint(Breakpoint):
'''Breakpoint that breaks if condition is True at line in file'''
def __init__(self, file, line, condition, temporary=False):
pass
def breaks(self, frame):
pass
def __repr__(self):
pass
def __eq__(self, other):
pass
def __hash__(self):
pass
| 6 | 1 | 5 | 0 | 5 | 0 | 1 | 0.07 | 1 | 2 | 0 | 0 | 5 | 2 | 5 | 12 | 34 | 5 | 27 | 8 | 21 | 2 | 17 | 8 | 11 | 2 | 2 | 1 | 6 |
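Evaluation sketch for the record above: the interesting part of ConditionalBreakpoint is evaluating the condition inside the paused frame's namespace and breaking anyway if that evaluation fails; the standalone helper below (hypothetical name condition_holds) isolates just that step.

import sys

def condition_holds(frame, condition):
    # Evaluate the expression with the frame's globals/locals; on error,
    # return True so the debugger still breaks, as ConditionalBreakpoint does.
    try:
        return bool(eval(condition, frame.f_globals, frame.f_locals))
    except Exception:
        return True

def demo():
    x = 41
    frame = sys._getframe()
    print(condition_holds(frame, "x > 40"))   # True
    print(condition_holds(frame, "x > 50"))   # False
    print(condition_holds(frame, "oops ??"))  # True (evaluation error)

demo()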
143,882 |
Kozea/wdb
|
Kozea_wdb/client/wdb/ext.py
|
wdb.ext.WdbMiddleware
|
class WdbMiddleware(object):
def __init__(self, app, start_disabled=False):
_patch_tcpserver()
self.app = app
Wdb.enabled = not start_disabled
def __call__(self, environ, start_response):
path = environ.get('PATH_INFO', '')
if path == '/__wdb/on':
# Enable wdb
Wdb.enabled = True
start_response('200 OK', [('Content-Type', 'text/html')])
return (to_bytes('Wdb is now on'),)
if path == '/__wdb/shell':
def f():
# Enable wdb
wdb = Wdb.get()
Wdb.enabled = True
start_response(
'200 OK',
[('Content-Type', 'text/html'), ('X-Thing', wdb.uuid)],
)
yield to_bytes(' ' * 4096)
wdb = set_trace()
wdb.die()
yield to_bytes('Exited')
return f()
if Wdb.enabled:
def trace_wsgi(environ, start_response):
wdb = Wdb.get()
wdb.closed = False
appiter = None
try:
with trace(close_on_exit=True, under=self.app):
appiter = self.app(environ, start_response)
for item in appiter:
yield item
except Exception:
exc_info = sys.exc_info()
try:
start_response(
'500 INTERNAL SERVER ERROR',
[('Content-Type', 'text/html')],
)
except AssertionError:
log.exception(
'Exception with wdb off and headers already set',
exc_info=exc_info,
)
yield '\n'.join(
traceback.format_exception(*exc_info)
).replace('\n', '\n<br>\n').encode('utf-8')
else:
yield _handle_off()
finally:
hasattr(appiter, 'close') and appiter.close()
wdb.closed = False
return trace_wsgi(environ, start_response)
def catch(environ, start_response):
appiter = None
try:
appiter = self.app(environ, start_response)
for item in appiter:
yield item
except Exception:
exc_info = sys.exc_info()
try:
start_response(
'500 INTERNAL SERVER ERROR',
[('Content-Type', 'text/html')],
)
except AssertionError:
log.exception(
'Exception with wdb off and headers already set',
exc_info=exc_info,
)
yield '\n'.join(
traceback.format_exception(*exc_info)
).replace('\n', '\n<br>\n').encode('utf-8')
else:
yield _handle_off()
finally:
# Close set_trace debuggers
stop_trace(close_on_exit=True)
hasattr(appiter, 'close') and appiter.close()
return catch(environ, start_response)
|
class WdbMiddleware(object):
def __init__(self, app, start_disabled=False):
pass
def __call__(self, environ, start_response):
pass
def f():
pass
def trace_wsgi(environ, start_response):
pass
def catch(environ, start_response):
pass
| 6 | 0 | 32 | 2 | 29 | 1 | 3 | 0.04 | 1 | 4 | 2 | 0 | 2 | 1 | 2 | 2 | 94 | 9 | 82 | 16 | 76 | 3 | 61 | 16 | 55 | 4 | 1 | 3 | 14 |
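Hook-up sketch for the record above: WdbMiddleware wraps a WSGI callable so every request runs under wdb, or at least gets its exceptions caught; assuming the wdb package is installed, wiring it in looks roughly like this.

from wdb.ext import WdbMiddleware

def application(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello from a traced app\n']

# start_disabled=True keeps tracing off until the /__wdb/on URL is visited.
application = WdbMiddleware(application, start_disabled=True)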
143,883 |
Kozea/wdb
|
Kozea_wdb/client/wdb/state.py
|
wdb.state.Next
|
class Next(State):
"""Nexting state: stop if same frame"""
def stops(self, frame, event):
return self.frame == frame
|
class Next(State):
'''Nexting state: stop if same frame'''
def stops(self, frame, event):
pass
| 2 | 1 | 2 | 0 | 2 | 0 | 1 | 0.33 | 1 | 0 | 0 | 1 | 1 | 0 | 1 | 4 | 5 | 1 | 3 | 2 | 1 | 1 | 3 | 2 | 1 | 1 | 2 | 0 | 1 |
143,884 |
Kozea/wdb
|
Kozea_wdb/client/wdb/state.py
|
wdb.state.Return
|
class Return(Next):
"""Returning state: Stop on return event if same frame"""
def stops(self, frame, event):
return self.frame == frame and event == 'return'
|
class Return(Next):
'''Returning state: Stop on return event if same frame'''
def stops(self, frame, event):
pass
| 2 | 1 | 2 | 0 | 2 | 0 | 1 | 0.33 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 5 | 5 | 1 | 3 | 2 | 1 | 1 | 3 | 2 | 1 | 1 | 3 | 0 | 1 |
143,885 |
Kozea/wdb
|
Kozea_wdb/client/wdb/state.py
|
wdb.state.State
|
class State(object):
def __init__(self, frame):
self.frame = frame
def up(self):
"""Go up in stack and return True if top frame"""
if self.frame:
self.frame = self.frame.f_back
return self.frame is None
def __repr__(self):
return '<State is %s for %s>' % (
self.__class__.__name__,
pretty_frame(self.frame),
)
|
class State(object):
def __init__(self, frame):
pass
def up(self):
'''Go up in stack and return True if top frame'''
pass
def __repr__(self):
pass
| 4 | 1 | 4 | 0 | 4 | 0 | 1 | 0.08 | 1 | 0 | 0 | 4 | 3 | 1 | 3 | 3 | 15 | 2 | 12 | 5 | 8 | 1 | 9 | 5 | 5 | 2 | 1 | 1 | 4 |
143,886 |
Kozea/wdb
|
Kozea_wdb/client/wdb/state.py
|
wdb.state.Step
|
class Step(State):
"""Stepping state: always stopping"""
def stops(self, frame, event):
return True
|
class Step(State):
'''Stepping state: always stopping'''
def stops(self, frame, event):
pass
| 2 | 1 | 2 | 0 | 2 | 0 | 1 | 0.33 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 4 | 5 | 1 | 3 | 2 | 1 | 1 | 3 | 2 | 1 | 1 | 2 | 0 | 1 |
143,887 |
Kozea/wdb
|
Kozea_wdb/client/wdb/state.py
|
wdb.state.Until
|
class Until(State):
"""Nexting until state: stop if same frame and is next line"""
def __init__(self, frame, lno):
self.frame = frame
self.lno = lno + 1
def stops(self, frame, event):
return self.frame == frame and frame.f_lineno >= self.lno
|
class Until(State):
'''Nexting until state: stop if same frame and is next line'''
def __init__(self, frame, lno):
pass
def stops(self, frame, event):
pass
| 3 | 1 | 3 | 0 | 3 | 0 | 1 | 0.17 | 1 | 0 | 0 | 0 | 2 | 2 | 2 | 5 | 9 | 2 | 6 | 5 | 3 | 1 | 6 | 5 | 3 | 1 | 2 | 0 | 2 |
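Sketch for the state records above: Step, Next, Until and Return all expose a stops(frame, event) predicate that the tracer consults on every event; the simplified standalone tracer below (not the real wdb trace loop) shows how such a predicate plugs into sys.settrace.

import sys

class Step:
    # Mirrors wdb.state.Step: stop on every traced event.
    def stops(self, frame, event):
        return True

def make_tracer(state):
    def tracer(frame, event, arg):
        if state.stops(frame, event):
            print('stop:', event, 'in', frame.f_code.co_name, 'line', frame.f_lineno)
        return tracer  # keep tracing inside the called frame
    return tracer

def traced():
    a = 1
    return a + 1

sys.settrace(make_tracer(Step()))
traced()
sys.settrace(None)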
143,888 |
Kozea/wdb
|
Kozea_wdb/client/wdb/__init__.py
|
wdb.trace
|
class trace(object):
def __init__(self, **kwargs):
"""Make a tracing context with `with trace():`"""
self.kwargs = kwargs
def __enter__(self):
# 2 calls to get here
kwargs = dict(self.kwargs)
if 'close_on_exit' in kwargs:
kwargs.pop('close_on_exit')
kwargs.setdefault('frame', sys._getframe().f_back)
start_trace(**kwargs)
def __exit__(self, *args):
kwargs = {}
kwargs['frame'] = self.kwargs.get('frame', sys._getframe().f_back)
kwargs['close_on_exit'] = self.kwargs.get('close_on_exit', False)
stop_trace(**kwargs)
|
class trace(object):
def __init__(self, **kwargs):
'''Make a tracing context with `with trace():`'''
pass
def __enter__(self):
pass
def __exit__(self, *args):
pass
| 4 | 1 | 5 | 0 | 4 | 1 | 1 | 0.14 | 1 | 1 | 0 | 0 | 3 | 1 | 3 | 3 | 18 | 2 | 14 | 7 | 10 | 2 | 14 | 7 | 10 | 2 | 1 | 1 | 4 |
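Usage sketch for the record above: the trace class simply brackets a block with start_trace/stop_trace; assuming wdb is installed and a wdb server is reachable, the canonical use is a with-block around the code you want to step through.

from wdb import trace

def compute(values):
    total = 0
    with trace():          # everything inside this block runs under the wdb tracer
        for v in values:
            total += v
    return total

print(compute([1, 2, 3]))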
143,889 |
Kozea/wdb
|
Kozea_wdb/client/wdb/ui.py
|
wdb.ui.Interaction
|
class Interaction(object):
hooks = {
'update_watchers': [
'start',
'eval',
'watch',
'init',
'select',
'unwatch',
]
}
def __init__(
self,
db,
frame,
tb,
exception,
exception_description,
init=None,
shell=False,
shell_vars=None,
source=None,
timeout=None,
):
self.db = db
self.shell = shell
self.init_message = init
self.stack, self.trace, self.index = self.db.get_trace(frame, tb)
self.exception = exception
self.exception_description = exception_description
# Copy locals to avoid strange cpython behaviour
self.locals = list(map(lambda x: x[0].f_locals, self.stack))
self.htmldiff = Html5Diff(4)
self.timeout = timeout
if self.shell:
self.locals[self.index] = shell_vars or {}
if source:
with open(source) as f:
compiled_code = compile(f.read(), '<source>', 'exec')
# Executing in locals to keep local scope
# (http://bugs.python.org/issue16781)
execute(compiled_code, self.current_locals, self.current_locals)
def hook(self, kind):
for hook, events in self.hooks.items():
if kind in events:
getattr(self, hook)()
@property
def current(self):
return self.trace[self.index]
@property
def current_frame(self):
return self.stack[self.index][0]
@property
def current_locals(self):
return self.locals[self.index]
@property
def current_file(self):
return self.current['file']
def get_globals(self):
"""Get enriched globals"""
if self.shell:
globals_ = dict(_initial_globals)
else:
globals_ = dict(self.current_frame.f_globals)
globals_['_'] = self.db.last_obj
if cut is not None:
globals_.setdefault('cut', cut)
        # For meta debugging purposes
globals_['___wdb'] = self.db
# Hack for function scope eval
globals_.update(self.current_locals)
for var, val in self.db.extra_vars.items():
globals_[var] = val
self.db.extra_items = {}
return globals_
def init(self):
self.db.send(
'Title|%s'
% dump(
{
'title': self.exception,
'subtitle': self.exception_description,
}
)
)
if self.shell:
self.db.send('Shell')
else:
self.db.send(
'Trace|%s' % dump({'trace': self.trace, 'cwd': os.getcwd()})
)
self.db.send(
'SelectCheck|%s'
% dump({'frame': self.current, 'name': self.current_file})
)
if self.init_message:
self.db.send(self.init_message)
self.init_message = None
self.hook('init')
def parse_command(self, message):
# Parse received message
if '|' in message:
return message.split('|', 1)
return message, ''
def loop(self):
stop = False
while not stop:
self.db.send('UPDATE_FILENAME|%s' % self.current_file)
try:
stop = self.interact()
except Exception:
log.exception('Error in loop')
try:
exc = self.handle_exc()
type_, value = sys.exc_info()[:2]
link = (
'<a href="https://github.com/Kozea/wdb/issues/new?'
'title=%s&body=%s&labels=defect" class="nogood">'
'Please click here to report it on Github</a>'
) % (
quote('%s: %s' % (type_.__name__, str(value))),
quote('```\n%s\n```\n' % traceback.format_exc()),
)
self.db.send(
'Echo|%s'
% dump(
{
'for': 'Error in Wdb, this is bad',
'val': exc + '<br>' + link,
}
)
)
except Exception:
log.exception('Error in loop exception handling')
self.db.send(
'Echo|%s'
% dump(
{
'for': 'Too many errors',
'val': (
"Don't really know what to say. "
"Maybe it will work tomorrow."
),
}
)
)
def interact(self):
try:
message = self.db.receive(self.timeout)
# Only timeout at first request
self.timeout = None
except KeyboardInterrupt:
# Quit on KeyboardInterrupt
message = 'Quit'
cmd, data = self.parse_command(message)
cmd = cmd.lower()
log.debug('Cmd %s #Data %d' % (cmd, len(data)))
fun = getattr(self, 'do_' + cmd, None)
if fun:
rv = fun(data)
self.hook(cmd)
return rv
log.warning('Unknown command %s' % cmd)
def update_watchers(self):
watched = {}
for watcher in self.db.watchers[self.current_file]:
try:
watched[watcher] = self.db.safe_better_repr(
eval_(watcher, self.get_globals(), self.current_locals)
)
except Exception as e:
watched[watcher] = type(e).__name__
self.db.send('Watched|%s' % dump(watched))
def notify_exc(self, msg):
log.info(msg, exc_info=True)
self.db.send(
'Log|%s'
% dump({'message': '%s\n%s' % (msg, traceback.format_exc())})
)
def do_start(self, data):
self.started = True
# Getting breakpoints
log.debug('Getting breakpoints')
self.db.send(
'Init|%s'
% dump(
{
'cwd': os.getcwd(),
'version': __version__,
'breaks': self.db.breakpoints_to_json(),
}
)
)
self.db.send(
'Title|%s'
% dump(
{
'title': self.exception,
'subtitle': self.exception_description,
}
)
)
if self.shell:
self.db.send('Shell')
else:
self.db.send('Trace|%s' % dump({'trace': self.trace}))
# In case of exception always be at top frame to start
self.index = len(self.stack) - 1
self.db.send(
'SelectCheck|%s'
% dump({'frame': self.current, 'name': self.current_file})
)
if self.init_message:
self.db.send(self.init_message)
self.init_message = None
def do_select(self, data):
self.index = int(data)
self.db.send(
'SelectCheck|%s'
% dump({'frame': self.current, 'name': self.current_file})
)
def do_file(self, data):
fn = data
file = self.db.get_file(fn)
self.db.send(
'Select|%s'
% dump({'frame': self.current, 'name': fn, 'file': file})
)
def do_inspect(self, data):
if '/' in data:
mode, data = data.split('/', 1)
else:
mode = 'inspect'
try:
thing = self.db.obj_cache.get(int(data))
except Exception:
self.fail('Inspect')
return
if mode == 'dump':
self.db.send(
'Print|%s'
% dump(
{
'for': self.db.safe_better_repr(thing, html=False),
'result': self.db.safe_better_repr(thing, full=True),
}
)
)
return
if isinstance(thing, tuple) and len(thing) == 3:
type_, value, tb = thing
iter_tb = tb
while iter_tb.tb_next is not None:
iter_tb = iter_tb.tb_next
self.db.extra_vars['__recursive_exception__'] = value
self.db.interaction(
iter_tb.tb_frame, tb, type_.__name__, str(value)
)
return
self.db.send(
'Dump|%s'
% dump(
{
'for': self.db.safe_repr(thing),
'val': self.db.dmp(thing),
'doc': get_doc(thing),
'source': get_source(thing),
}
)
)
def do_dump(self, data):
try:
thing = eval_(data, self.get_globals(), self.current_locals)
except Exception:
self.fail('Dump')
return
self.db.send(
'Dump|%s'
% dump(
{
'for': u('%s ⟶ %s ') % (data, self.db.safe_repr(thing)),
'val': self.db.dmp(thing),
'doc': get_doc(thing),
'source': get_source(thing),
}
)
)
def do_trace(self, data):
self.db.send('Trace|%s' % dump({'trace': self.trace}))
def do_eval(self, data):
redir = None
imports = []
raw_data = data.strip()
if raw_data.startswith('!<'):
filename = raw_data[2:].strip()
try:
with open(filename, 'r') as f:
raw_data = f.read()
except Exception:
self.fail('Eval', 'Unable to read from file %s' % filename)
return
lines = raw_data.split('\n')
if '>!' in lines[-1]:
try:
last_line, redir, append = tokenize_redir(raw_data)
except TokenError:
last_line = redir = None
append = False
if redir and last_line:
indent = len(lines[-1]) - len(lines[-1].lstrip())
                lines[-1] = indent * u('&nbsp;') + last_line
raw_data = '\n'.join(lines)
data = raw_data
# Keep spaces
        raw_data = raw_data.replace(' ', u('&nbsp;'))
        # Compensate prompt for multi line
        raw_data = raw_data.replace('\n', '\n' + u('&nbsp;' * 4))
duration = None
with self.db.capture_output(with_hook=redir is None) as (out, err):
compiled_code = None
try:
compiled_code = compile(data, '<stdin>', 'single')
except Exception:
try:
compiled_code = compile(data, '<stdin>', 'exec')
except Exception:
maybe_hook = self.handle_exc()
# Hack from codeop
e1 = e2 = None
try:
compiled_code = compile(data + '\n', '<stdin>', 'exec')
except Exception as e:
e1 = e
try:
compile(data + '\n\n', '<stdin>', 'exec')
except Exception as e:
e2 = e
if not compiled_code:
if repr(e1) != repr(e2):
# Multiline not terminated
self.db.send('NewLine')
return
else:
self.db.hooked = maybe_hook
loc = self.current_locals
start = time.time()
if compiled_code is not None:
self.db.compile_cache[id(compiled_code)] = data
try:
execute(compiled_code, self.get_globals(), loc)
except NameError as e:
m = re.match("name '(.+)' is not defined", str(e))
if m:
name = m.groups()[0]
if self.db._importmagic_index:
scores = self.db._importmagic_index.symbol_scores(
name
)
for _, module, variable in scores:
if variable is None:
imports.append('import %s' % module)
else:
imports.append(
'from %s import %s'
% (module, variable)
)
elif importable_module(name):
imports.append('import %s' % name)
self.db.hooked = self.handle_exc()
except Exception:
self.db.hooked = self.handle_exc()
duration = int((time.time() - start) * 1000 * 1000)
if redir and not self.db.hooked:
try:
with open(redir, 'a' if append else 'w') as f:
f.write('\n'.join(out) + '\n'.join(err) + '\n')
except Exception:
self.fail('Eval', 'Unable to write to file %s' % redir)
return
self.db.send(
'Print|%s'
% dump(
{
'for': raw_data,
'result': escape(
'%s to file %s'
% ('Appended' if append else 'Written', redir)
),
}
)
)
else:
rv = escape('\n'.join(out) + '\n'.join(err))
try:
dump(rv)
except Exception:
rv = rv.decode('ascii', 'ignore')
if rv and self.db.hooked:
result = self.db.hooked + '\n' + rv
elif rv:
result = rv
else:
result = self.db.hooked
self.db.send(
'Print|%s'
% dump(
{'for': raw_data, 'result': result, 'duration': duration}
)
)
if imports:
self.db.send('Suggest|%s' % dump({'imports': imports}))
def do_ping(self, data):
self.db.send('Pong')
def do_step(self, data):
self.db.set_step(self.current_frame)
return True
def do_next(self, data):
self.db.set_next(self.current_frame)
return True
def do_continue(self, data):
self.db.stepping = False
self.db.set_continue(self.current_frame)
return True
def do_return(self, data):
self.db.set_return(self.current_frame)
return True
def do_until(self, data):
self.db.set_until(self.current_frame)
return True
def do_close(self, data):
self.db.stepping = False
if self.db.closed is not None:
# Ignore set_trace till end
self.db.closed = True
self.db.set_continue(self.current_frame)
return True
def do_break(self, data):
from linecache import getline
brk = loads(data)
def break_fail(x):
return self.fail(
'Break',
'Break on %s failed' % ('%s:%s' % (brk['fn'], brk['lno'])),
message=x,
)
if not brk.get('fn'):
break_fail('Can’t break with no current file')
return
if brk['lno'] is not None:
try:
lno = int(brk['lno'])
except Exception:
break_fail(
'Wrong breakpoint format must be '
'[file][:lineno][#function][,condition].'
)
return
line = getline(brk['fn'], lno, self.current_frame.f_globals)
if not line:
for path in sys.path:
line = getline(
os.path.join(path, brk['fn']),
brk['lno'],
self.current_frame.f_globals,
)
if line:
break
if not line:
break_fail('Line does not exist')
return
if not executable_line(line):
break_fail('Blank line or comment')
return
breakpoint = self.db.set_break(
brk['fn'], brk['lno'], brk['temporary'], brk['cond'], brk['fun']
)
break_set = breakpoint.to_dict()
break_set['temporary'] = brk['temporary']
self.db.send('BreakSet|%s' % dump(break_set))
def do_unbreak(self, data):
brk = loads(data)
lno = brk['lno'] and int(brk['lno'])
self.db.clear_break(
brk['fn'], lno, brk['temporary'], brk['cond'], brk['fun']
)
self.db.send('BreakUnset|%s' % data)
def do_breakpoints(self, data):
self.db.send(
'Print|%s'
% dump({'for': 'Breakpoints', 'result': self.db.breakpoints})
)
def do_watch(self, data):
self.db.watchers[self.current_file].add(data)
self.db.send('Ack')
def do_unwatch(self, data):
self.db.watchers[self.current_file].remove(data)
def do_jump(self, data):
lno = int(data)
if self.index != len(self.trace) - 1:
log.error('Must be at bottom frame')
return
try:
self.current_frame.f_lineno = lno
except ValueError:
self.fail('Unbreak')
return
self.current['lno'] = lno
self.db.send('Trace|%s' % dump({'trace': self.trace}))
self.db.send(
'SelectCheck|%s'
% dump({'frame': self.current, 'name': self.current_file})
)
def do_complete(self, data):
completion = loads(data)
manual = completion.pop('manual', False)
if manual:
timeout = 5
else:
timeout = 0.1
source = completion.pop('source')
pos = completion.pop('pos')
if not Interpreter:
self.db.send('Suggest')
return
try:
script = Interpreter(
source, [self.current_locals, self.get_globals()], **completion
)
with timeout_of(timeout, not manual):
completions = script.completions()
except Exception:
self.db.send('Suggest')
if log.level < WARNING:
self.notify_exc('Completion failed for %s' % data)
return
try:
with timeout_of(timeout / 2, not manual):
funs = script.call_signatures() or []
except Exception:
self.db.send('Suggest')
if log.level < WARNING:
self.notify_exc('Completion of function failed for %s' % data)
return
before = source[:pos]
after = source[pos:]
like = ''
if len(completions):
completion = completions[0]
base = completion.name[
: len(completion.name) - len(completion.complete)
]
if len(base):
like = before[-len(base) :]
if len(like):
before = before[: -len(like)]
try:
suggest_obj = {
'data': {'start': before, 'end': after, 'like': like},
'params': [
{
'params': [
p.description.replace('\n', '') for p in fun.params
],
'index': fun.index,
'module': fun.module_name,
'call_name': fun.name,
}
for fun in funs
],
'completions': [
{
'base': comp.name[
: len(comp.name) - len(comp.complete)
],
'complete': comp.complete,
'description': comp.description,
}
for comp in completions
if comp.name.endswith(comp.complete)
],
}
self.db.send('Suggest|%s' % dump(suggest_obj))
except Exception:
self.db.send('Suggest')
self.notify_exc('Completion generation failed for %s' % data)
def do_save(self, data):
fn, src = data.split('|', 1)
if not os.path.exists(fn):
return
try:
encoding = _detect_lines_encoding(src.splitlines())
with inplace(fn, encoding=encoding) as (_, w):
w.write(src)
except Exception as e:
self.db.send(
'Echo|%s' % dump({'for': 'Error during save', 'val': str(e)})
)
else:
self.db.send(
'Echo|%s'
                % dump({'for': 'Save successful', 'val': 'Wrote %s' % fn})
)
def do_external(self, data):
default = {'linux': 'xdg-open', 'win32': '', 'darwin': 'open'}.get(
sys.platform, 'open'
)
editor = os.getenv('EDITOR', os.getenv('VISUAL', default))
if editor:
cmd = editor.split(' ')
else:
cmd = []
try:
Popen(cmd + [data])
except Exception:
self.fail('External open')
def do_display(self, data):
if ';' in data:
mime, data = data.split(';', 1)
forced = True
else:
mime = 'text/html'
forced = False
try:
thing = eval_(data, self.get_globals(), self.current_locals)
except Exception:
self.fail('Display')
return
else:
thing = force_bytes(thing)
if magic and not forced:
mime = magic.from_buffer(thing, mime=True)
self.db.send(
'Display|%s'
% dump(
{
'for': u('%s (%s)') % (data, mime),
'val': from_bytes(b64encode(thing)),
'type': mime,
}
)
)
def do_disable(self, data):
self.db.__class__.enabled = False
self.db.stepping = False
self.db.stop_trace()
self.db.die()
return True
def do_quit(self, data):
self.db.stepping = False
self.db.stop_trace()
return True
def do_restart(self, data):
try:
# Try re-execing as-is
os.execvp(sys.argv[0], sys.argv)
except Exception:
# Put the python executable in front
python = sys.executable
os.execl(python, python, *sys.argv)
def do_diff(self, data):
if '?' not in data and '<>' not in data:
self.fail(
'Diff',
'Diff error',
'You must provide two expression '
'separated by "?" or "<>" to make a diff',
)
return
pretty = '?' in data
expressions = [
expression.strip()
for expression in (
data.split('?') if '?' in data else data.split('<>')
)
]
strings = []
for expression in expressions:
try:
strings.append(
eval_(expression, self.get_globals(), self.current_locals)
)
except Exception:
self.fail(
'Diff',
"Diff failed: Expression %s "
"failed to evaluate to a string" % expression,
)
return
render = (
(
(
lambda x: self.db.better_repr(x, html=False)
or self.db.safe_repr(x)
)
)
if pretty
else str
)
strings = [
render(string) if not is_str(string) else string
for string in strings
]
self.db.send(
'RawHTML|%s'
% dump(
{
'for': u('Difference between %s')
% (' and '.join(expressions)),
'val': self.htmldiff.make_table(
strings[0].splitlines(keepends=True),
strings[1].splitlines(keepends=True),
expressions[0],
expressions[1],
),
}
)
)
def do_find(self, data):
if ' in ' not in data and ' of ' not in data:
self.fail(
'Find',
'Find error',
'Syntax for find is: "key in expression" '
'or "value testing function of expression"',
)
if ' in ' in data:
key, expr = data.split(' in ')
else:
key, expr = data.split(' of ')
try:
value = eval_(expr, self.get_globals(), self.current_locals)
except Exception:
self.fail('Find')
return
if ' in ' in data:
matches = search_key_in_obj(key, value, path='%s.' % expr)
else:
matches = search_value_in_obj(key, value, path='%s.' % expr)
self.db.send(
'Print|%s'
% dump(
{
'for': 'Finding %s in %s' % (key, expr),
'result': 'Found:\n%s'
% '\n'.join(
[
'%s: -> %s' % (k, escape(self.db.safe_repr(val)))
for k, val in matches
]
)
if matches
else 'Not found',
}
)
)
def handle_exc(self):
"""Return a formated exception traceback for wdb.js use"""
exc_info = sys.exc_info()
type_, value = exc_info[:2]
self.db.obj_cache[id(exc_info)] = exc_info
return '<a href="%d" class="inspect">%s: %s</a>' % (
id(exc_info),
escape(type_.__name__),
escape(repr(value)),
)
def fail(self, cmd, title=None, message=None):
"""Send back captured exceptions"""
if message is None:
message = self.handle_exc()
else:
message = escape(message)
self.db.send(
'Echo|%s'
% dump({'for': escape(title or '%s failed' % cmd), 'val': message})
)
|
class Interaction(object):
def __init__(
self,
db,
frame,
tb,
exception,
exception_description,
init=None,
shell=False,
shell_vars=None,
source=None,
timeout=None,
):
pass
def hook(self, kind):
pass
@property
def current(self):
pass
@property
def current_frame(self):
pass
@property
def current_locals(self):
pass
@property
def current_file(self):
pass
def get_globals(self):
'''Get enriched globals'''
pass
def init(self):
pass
def parse_command(self, message):
pass
def loop(self):
pass
def interact(self):
pass
def update_watchers(self):
pass
def notify_exc(self, msg):
pass
def do_start(self, data):
pass
def do_select(self, data):
pass
def do_file(self, data):
pass
def do_inspect(self, data):
pass
def do_dump(self, data):
pass
def do_trace(self, data):
pass
def do_eval(self, data):
pass
def do_ping(self, data):
pass
def do_step(self, data):
pass
def do_next(self, data):
pass
def do_continue(self, data):
pass
def do_return(self, data):
pass
def do_until(self, data):
pass
def do_close(self, data):
pass
def do_break(self, data):
pass
def break_fail(x):
pass
def do_unbreak(self, data):
pass
def do_breakpoints(self, data):
pass
def do_watch(self, data):
pass
def do_unwatch(self, data):
pass
def do_jump(self, data):
pass
def do_complete(self, data):
pass
def do_save(self, data):
pass
def do_external(self, data):
pass
def do_display(self, data):
pass
def do_disable(self, data):
pass
def do_quit(self, data):
pass
def do_restart(self, data):
pass
def do_diff(self, data):
pass
def do_find(self, data):
pass
def handle_exc(self):
        '''Return a formatted exception traceback for wdb.js use'''
pass
def fail(self, cmd, title=None, message=None):
'''Send back captured exceptions'''
pass
| 50 | 3 | 18 | 1 | 17 | 0 | 3 | 0.03 | 1 | 16 | 2 | 0 | 44 | 12 | 44 | 44 | 861 | 86 | 755 | 161 | 692 | 22 | 458 | 138 | 411 | 28 | 1 | 7 | 138 |
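Dispatch sketch for the record above: Interaction.interact parses messages of the form 'Cmd|data' and routes them to do_* methods with getattr; the miniature class below (hypothetical Echo/Quit commands, no sockets) isolates that pattern.

class MiniInteraction:
    def parse_command(self, message):
        # Same convention as Interaction.parse_command: "Cmd|data".
        if '|' in message:
            return message.split('|', 1)
        return message, ''

    def interact(self, message):
        cmd, data = self.parse_command(message)
        fun = getattr(self, 'do_' + cmd.lower(), None)
        if fun is None:
            print('Unknown command', cmd)
            return False
        return fun(data)

    def do_echo(self, data):
        print('Echo:', data)
        return False

    def do_quit(self, data):
        return True  # a truthy return ends the loop, as in Interaction.loop

ui = MiniInteraction()
ui.interact('Echo|hello')    # prints "Echo: hello"
print(ui.interact('Quit'))   # True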
143,890 |
Kozea/wdb
|
Kozea_wdb/client/wdb/breakpoint.py
|
wdb.breakpoint.LineBreakpoint
|
class LineBreakpoint(Breakpoint):
"""Simple breakpoint that breaks if in file at line"""
def __init__(self, file, line, temporary=False):
self.line = line
super(LineBreakpoint, self).__init__(file, temporary)
def breaks(self, frame):
return (
super(LineBreakpoint, self).breaks(frame)
and frame.f_lineno == self.line
)
def __repr__(self):
return (
super(LineBreakpoint, self).__repr__() + ' on line %d' % self.line
)
def __eq__(self, other):
return (
super(LineBreakpoint, self).__eq__(other)
and self.line == other.line
)
def __hash__(self):
return super(LineBreakpoint, self).__hash__()
|
class LineBreakpoint(Breakpoint):
'''Simple breakpoint that breaks if in file at line'''
def __init__(self, file, line, temporary=False):
pass
def breaks(self, frame):
pass
def __repr__(self):
pass
def __eq__(self, other):
pass
def __hash__(self):
pass
| 6 | 1 | 4 | 0 | 4 | 0 | 1 | 0.05 | 1 | 1 | 0 | 0 | 5 | 1 | 5 | 12 | 26 | 5 | 20 | 7 | 14 | 1 | 12 | 7 | 6 | 1 | 2 | 0 | 5 |
143,891 |
KrishnaswamyLab/PHATE
|
Python/phate/phate.py
|
phate.phate.PHATE
|
class PHATE(BaseEstimator):
"""PHATE operator which performs dimensionality reduction.
Potential of Heat-diffusion for Affinity-based Trajectory Embedding
(PHATE) embeds high dimensional single-cell data into two or three
dimensions for visualization of biological progressions as described
in Moon et al, 2019 [1]_.
Parameters
----------
n_components : int, optional, default: 2
number of dimensions in which the data will be embedded
knn : int, optional, default: 5
number of nearest neighbors on which to build kernel
decay : int, optional, default: 40
sets decay rate of kernel tails.
If None, alpha decaying kernel is not used
n_landmark : int, optional, default: 2000
number of landmarks to use in fast PHATE
t : int, optional, default: 'auto'
power to which the diffusion operator is powered.
This sets the level of diffusion. If 'auto', t is selected
according to the knee point in the Von Neumann Entropy of
the diffusion operator
gamma : float, optional, default: 1
Informational distance constant between -1 and 1.
`gamma=1` gives the PHATE log potential, `gamma=0` gives
a square root potential.
n_pca : int, optional, default: 100
Number of principal components to use for calculating
neighborhoods. For extremely large datasets, using
n_pca < 20 allows neighborhoods to be calculated in
roughly log(n_samples) time.
mds_solver : {'sgd', 'smacof'}, optional (default: 'sgd')
which solver to use for metric MDS. SGD is substantially faster,
but produces slightly less optimal results. Note that SMACOF was used
for all figures in the PHATE paper.
knn_dist : string, optional, default: 'euclidean'
recommended values: 'euclidean', 'cosine', 'precomputed'
Any metric from `scipy.spatial.distance` can be used
distance metric for building kNN graph. Custom distance
functions of form `f(x, y) = d` are also accepted. If 'precomputed',
`data` should be an n_samples x n_samples distance or
affinity matrix. Distance matrices are assumed to have zeros
down the diagonal, while affinity matrices are assumed to have
non-zero values down the diagonal. This is detected automatically using
`data[0,0]`. You can override this detection with
`knn_dist='precomputed_distance'` or `knn_dist='precomputed_affinity'`.
knn_max : int, optional, default: None
Maximum number of neighbors for which alpha decaying kernel
is computed for each point. For very large datasets, setting `knn_max`
to a small multiple of `knn` can speed up computation significantly.
mds_dist : string, optional, default: 'euclidean'
Distance metric for MDS. Recommended values: 'euclidean' and 'cosine'
Any metric from `scipy.spatial.distance` can be used. Custom distance
functions of form `f(x, y) = d` are also accepted
mds : string, optional, default: 'metric'
choose from ['classic', 'metric', 'nonmetric'].
Selects which MDS algorithm is used for dimensionality reduction
n_jobs : integer, optional, default: 1
The number of jobs to use for the computation.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging.
For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for
n_jobs = -2, all CPUs but one are used
random_state : integer or numpy.RandomState, optional, default: None
The generator used to initialize SMACOF (metric, nonmetric) MDS
If an integer is given, it fixes the seed
Defaults to the global `numpy` random number generator
verbose : `int` or `boolean`, optional (default: 1)
If `True` or `> 0`, print status messages
potential_method : deprecated.
Use `gamma=1` for log transformation and `gamma=0` for square root
transformation.
kwargs : additional arguments for `graphtools.Graph`
Attributes
----------
X : array-like, shape=[n_samples, n_dimensions]
embedding : array-like, shape=[n_samples, n_components]
Stores the position of the dataset in the embedding space
graph : graphtools.base.BaseGraph
The graph built on the input data
optimal_t : int
The automatically selected t, when t = 'auto'.
When t is given, optimal_t is None.
Examples
--------
>>> import phate
>>> import matplotlib.pyplot as plt
>>> tree_data, tree_clusters = phate.tree.gen_dla(n_dim=100, n_branch=20,
... branch_length=100)
>>> tree_data.shape
(2000, 100)
>>> phate_operator = phate.PHATE(knn=5, decay=20, t=150)
>>> tree_phate = phate_operator.fit_transform(tree_data)
>>> tree_phate.shape
(2000, 2)
>>> phate.plot.scatter2d(tree_phate, c=tree_clusters)
References
----------
.. [1] Moon KR, van Dijk D, Zheng W, *et al.* (2019),
*Visualizing transitions and structure for biological data exploration*,
`Nature Biotechnology <https://doi.org/10.1038/s41587-019-0336-3>`_.
"""
def __init__(
self,
n_components=2,
knn=5,
decay=40,
n_landmark=2000,
t="auto",
gamma=1,
n_pca=100,
mds_solver="sgd",
knn_dist="euclidean",
knn_max=None,
mds_dist="euclidean",
mds="metric",
n_jobs=1,
random_state=None,
verbose=1,
**kwargs
):
if "k" in kwargs:
warnings.warn("k is deprecated. Please use knn in future.", FutureWarning)
knn = kwargs["k"]
del kwargs["k"]
if "a" in kwargs:
warnings.warn("a is deprecated. Please use decay in future.", FutureWarning)
decay = kwargs["a"]
del kwargs["a"]
self.n_components = n_components
self.decay = decay
self.knn = knn
self.t = t
self.n_landmark = n_landmark
self.mds = mds
self.n_pca = n_pca
self.knn_dist = knn_dist
self.knn_max = knn_max
self.mds_dist = mds_dist
self.mds_solver = mds_solver
self.random_state = random_state
self.kwargs = kwargs
self.graph = None
self._diff_potential = None
self.embedding = None
self.X = None
self.optimal_t = None
if "alpha_decay" in kwargs:
warnings.warn(
"alpha_decay is deprecated. Use `decay=None`"
" to disable alpha decay in future.",
FutureWarning,
)
if not kwargs["alpha_decay"]:
self.decay = None
del kwargs["alpha_decay"]
if "njobs" in kwargs:
warnings.warn(
"njobs is deprecated. Please use n_jobs in future.", FutureWarning
)
n_jobs = kwargs["njobs"]
del kwargs["njobs"]
self.n_jobs = n_jobs
if "potential_method" in kwargs:
if kwargs["potential_method"] == "log":
gamma = 1
elif kwargs["potential_method"] == "sqrt":
gamma = 0
else:
raise ValueError(
"potential_method {} not recognized. Please "
"use gamma between -1 and 1".format(kwargs["potential_method"])
)
warnings.warn(
"potential_method is deprecated. "
"Setting gamma to {} to achieve"
" {} transformation.".format(gamma, kwargs["potential_method"]),
FutureWarning,
)
del kwargs["potential_method"]
elif gamma > 0.99 and gamma < 1:
warnings.warn(
"0.99 < gamma < 1 is numerically unstable. " "Setting gamma to 0.99",
RuntimeWarning,
)
gamma = 0.99
self.gamma = gamma
if verbose is True:
verbose = 1
elif verbose is False:
verbose = 0
self.verbose = verbose
self._check_params()
_logger.set_level(verbose)
@property
def diff_op(self):
"""diff_op : array-like, shape=[n_samples, n_samples] or [n_landmark, n_landmark]
The diffusion operator built from the graph
"""
if self.graph is not None:
if isinstance(self.graph, graphtools.graphs.LandmarkGraph):
diff_op = self.graph.landmark_op
else:
diff_op = self.graph.diff_op
if sparse.issparse(diff_op):
diff_op = diff_op.toarray()
return diff_op
else:
raise NotFittedError(
"This PHATE instance is not fitted yet. Call "
"'fit' with appropriate arguments before "
"using this method."
)
@property
def diff_potential(self):
"""Interpolates the PHATE potential to one entry per cell
This is equivalent to calculating infinite-dimensional PHATE,
or running PHATE without the MDS step.
Returns
-------
diff_potential : ndarray, shape=[n_samples, min(n_landmark, n_samples)]
"""
diff_potential = self._calculate_potential()
if isinstance(self.graph, graphtools.graphs.LandmarkGraph):
diff_potential = self.graph.interpolate(diff_potential)
return diff_potential
def _check_params(self):
"""Check PHATE parameters
This allows us to fail early - otherwise certain unacceptable
parameter choices, such as mds='mmds', would only fail after
minutes of runtime.
Raises
------
ValueError : unacceptable choice of parameters
"""
utils.check_positive(n_components=self.n_components, knn=self.knn)
utils.check_int(
n_components=self.n_components, knn=self.knn, n_jobs=self.n_jobs
)
utils.check_between(-1, 1, gamma=self.gamma)
utils.check_if_not(None, utils.check_positive, decay=self.decay)
utils.check_if_not(
None,
utils.check_positive,
utils.check_int,
n_landmark=self.n_landmark,
n_pca=self.n_pca,
knn_max=self.knn_max,
)
utils.check_if_not("auto", utils.check_positive, utils.check_int, t=self.t)
if not callable(self.knn_dist):
utils.check_in(
[
"euclidean",
"precomputed",
"cosine",
"correlation",
"cityblock",
"l1",
"l2",
"manhattan",
"braycurtis",
"canberra",
"chebyshev",
"dice",
"hamming",
"jaccard",
"kulsinski",
"mahalanobis",
"matching",
"minkowski",
"rogerstanimoto",
"russellrao",
"seuclidean",
"sokalmichener",
"sokalsneath",
"sqeuclidean",
"yule",
"precomputed_affinity",
"precomputed_distance",
],
knn_dist=self.knn_dist,
)
if not callable(self.mds_dist):
utils.check_in(
[
"euclidean",
"cosine",
"correlation",
"braycurtis",
"canberra",
"chebyshev",
"cityblock",
"dice",
"hamming",
"jaccard",
"kulsinski",
"mahalanobis",
"matching",
"minkowski",
"rogerstanimoto",
"russellrao",
"seuclidean",
"sokalmichener",
"sokalsneath",
"sqeuclidean",
"yule",
],
mds_dist=self.mds_dist,
)
utils.check_in(["classic", "metric", "nonmetric"], mds=self.mds)
utils.check_in(["sgd", "smacof"], mds_solver=self.mds_solver)
def _set_graph_params(self, **params):
try:
self.graph.set_params(**params)
except AttributeError:
# graph not defined
pass
def _reset_graph(self):
self.graph = None
self._reset_potential()
def _reset_potential(self):
self._diff_potential = None
self._reset_embedding()
def _reset_embedding(self):
self.embedding = None
def set_params(self, **params):
"""Set the parameters on this estimator.
Any parameters not given as named arguments will be left at their
current value.
Parameters
----------
n_components : int, optional, default: 2
number of dimensions in which the data will be embedded
knn : int, optional, default: 5
number of nearest neighbors on which to build kernel
decay : int, optional, default: 40
sets decay rate of kernel tails.
If None, alpha decaying kernel is not used
n_landmark : int, optional, default: 2000
number of landmarks to use in fast PHATE
t : int, optional, default: 'auto'
power to which the diffusion operator is powered.
This sets the level of diffusion. If 'auto', t is selected
according to the knee point in the Von Neumann Entropy of
the diffusion operator
gamma : float, optional, default: 1
Informational distance constant between -1 and 1.
`gamma=1` gives the PHATE log potential, `gamma=0` gives
a square root potential.
n_pca : int, optional, default: 100
Number of principal components to use for calculating
neighborhoods. For extremely large datasets, using
n_pca < 20 allows neighborhoods to be calculated in
roughly log(n_samples) time.
mds_solver : {'sgd', 'smacof'}, optional (default: 'sgd')
which solver to use for metric MDS. SGD is substantially faster,
but produces slightly less optimal results. Note that SMACOF was used
for all figures in the PHATE paper.
knn_dist : string, optional, default: 'euclidean'
recommended values: 'euclidean', 'cosine', 'precomputed'
Any metric from `scipy.spatial.distance` can be used
distance metric for building kNN graph. Custom distance
functions of form `f(x, y) = d` are also accepted. If 'precomputed',
`data` should be an n_samples x n_samples distance or
affinity matrix. Distance matrices are assumed to have zeros
down the diagonal, while affinity matrices are assumed to have
non-zero values down the diagonal. This is detected automatically
using `data[0,0]`. You can override this detection with
`knn_dist='precomputed_distance'` or `knn_dist='precomputed_affinity'`.
knn_max : int, optional, default: None
Maximum number of neighbors for which alpha decaying kernel
is computed for each point. For very large datasets, setting `knn_max`
to a small multiple of `knn` can speed up computation significantly.
mds_dist : string, optional, default: 'euclidean'
recommended values: 'euclidean' and 'cosine'
Any metric from `scipy.spatial.distance` can be used
distance metric for MDS
mds : string, optional, default: 'metric'
choose from ['classic', 'metric', 'nonmetric'].
Selects which MDS algorithm is used for dimensionality reduction
n_jobs : integer, optional, default: 1
The number of jobs to use for the computation.
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging.
For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for
n_jobs = -2, all CPUs but one are used
random_state : integer or numpy.RandomState, optional, default: None
The generator used to initialize SMACOF (metric, nonmetric) MDS
If an integer is given, it fixes the seed
Defaults to the global `numpy` random number generator
verbose : `int` or `boolean`, optional (default: 1)
If `True` or `> 0`, print status messages
Examples
--------
>>> import phate
>>> import matplotlib.pyplot as plt
>>> tree_data, tree_clusters = phate.tree.gen_dla(n_dim=50, n_branch=5,
... branch_length=50)
>>> tree_data.shape
(250, 50)
>>> phate_operator = phate.PHATE(knn=5, decay=20, t=150)
>>> tree_phate = phate_operator.fit_transform(tree_data)
>>> tree_phate.shape
(250, 2)
>>> phate_operator.set_params(n_components=10)
PHATE(decay=20, knn=5, knn_dist='euclidean', mds='metric',
mds_dist='euclidean', n_components=10, n_jobs=1, n_landmark=2000,
n_pca=100, potential_method='log', random_state=None, t=150,
verbose=1)
>>> tree_phate = phate_operator.transform()
>>> tree_phate.shape
(250, 10)
>>> # plt.scatter(tree_phate[:,0], tree_phate[:,1], c=tree_clusters)
>>> # plt.show()
Returns
-------
self
"""
reset_kernel = False
reset_potential = False
reset_embedding = False
# mds parameters
if "n_components" in params and params["n_components"] != self.n_components:
self.n_components = params["n_components"]
reset_embedding = True
del params["n_components"]
if "mds" in params and params["mds"] != self.mds:
self.mds = params["mds"]
reset_embedding = True
del params["mds"]
if "mds_solver" in params and params["mds_solver"] != self.mds_solver:
self.mds_solver = params["mds_solver"]
reset_embedding = True
del params["mds_solver"]
if "mds_dist" in params and params["mds_dist"] != self.mds_dist:
self.mds_dist = params["mds_dist"]
reset_embedding = True
del params["mds_dist"]
# diff potential parameters
if "t" in params and params["t"] != self.t:
self.t = params["t"]
reset_potential = True
del params["t"]
if "potential_method" in params:
if params["potential_method"] == "log":
params["gamma"] = 1
elif params["potential_method"] == "sqrt":
params["gamma"] = 0
else:
raise ValueError(
"potential_method {} not recognized. Please "
"use gamma between -1 and 1".format(params["potential_method"])
)
warnings.warn(
"potential_method is deprecated. Setting gamma to {} to "
"achieve {} transformation.".format(
params["gamma"], params["potential_method"]
),
FutureWarning,
)
del params["potential_method"]
if "gamma" in params and params["gamma"] != self.gamma:
self.gamma = params["gamma"]
reset_potential = True
del params["gamma"]
# kernel parameters
if "k" in params and params["k"] != self.knn:
self.knn = params["k"]
reset_kernel = True
del params["k"]
if "a" in params and params["a"] != self.decay:
self.decay = params["a"]
reset_kernel = True
del params["a"]
if "knn" in params and params["knn"] != self.knn:
self.knn = params["knn"]
reset_kernel = True
del params["knn"]
if "knn_max" in params and params["knn_max"] != self.knn_max:
self.knn_max = params["knn_max"]
reset_kernel = True
del params["knn_max"]
if "decay" in params and params["decay"] != self.decay:
self.decay = params["decay"]
reset_kernel = True
del params["decay"]
if "n_pca" in params:
if self.X is not None and params["n_pca"] >= np.min(self.X.shape):
params["n_pca"] = None
if params["n_pca"] != self.n_pca:
self.n_pca = params["n_pca"]
reset_kernel = True
del params["n_pca"]
if "knn_dist" in params and params["knn_dist"] != self.knn_dist:
self.knn_dist = params["knn_dist"]
reset_kernel = True
del params["knn_dist"]
if "n_landmark" in params and params["n_landmark"] != self.n_landmark:
if self.n_landmark is None or params["n_landmark"] is None:
# need a different type of graph, reset entirely
self._reset_graph()
else:
self._set_graph_params(n_landmark=params["n_landmark"])
self.n_landmark = params["n_landmark"]
del params["n_landmark"]
# parameters that don't change the embedding
if "n_jobs" in params:
self.n_jobs = params["n_jobs"]
self._set_graph_params(n_jobs=params["n_jobs"])
del params["n_jobs"]
if "random_state" in params:
self.random_state = params["random_state"]
self._set_graph_params(random_state=params["random_state"])
del params["random_state"]
if "verbose" in params:
self.verbose = params["verbose"]
_logger.set_level(self.verbose)
self._set_graph_params(verbose=params["verbose"])
del params["verbose"]
if reset_kernel:
# can't reset the graph kernel without making a new graph
self._reset_graph()
if reset_potential:
self._reset_potential()
if reset_embedding:
self._reset_embedding()
self._set_graph_params(**params)
self._check_params()
return self
def reset_mds(self, **kwargs):
"""
Deprecated. Reset parameters related to multidimensional scaling
Parameters
----------
n_components : int, optional, default: None
If given, sets number of dimensions in which the data
will be embedded
mds : string, optional, default: None
choose from ['classic', 'metric', 'nonmetric']
If given, sets which MDS algorithm is used for
dimensionality reduction
mds_dist : string, optional, default: None
recommended values: 'euclidean' and 'cosine'
Any metric from scipy.spatial.distance can be used
If given, sets the distance metric for MDS
"""
warnings.warn(
"PHATE.reset_mds is deprecated. " "Please use PHATE.set_params in future.",
FutureWarning,
)
self.set_params(**kwargs)
def reset_potential(self, **kwargs):
"""
Deprecated. Reset parameters related to the diffusion potential
Parameters
----------
t : int or 'auto', optional, default: None
Power to which the diffusion operator is powered
If given, sets the level of diffusion
potential_method : string, optional, default: None
choose from ['log', 'sqrt']
If given, sets which transformation of the diffusional
operator is used to compute the diffusion potential
"""
warnings.warn(
"PHATE.reset_potential is deprecated. "
"Please use PHATE.set_params in future.",
FutureWarning,
)
self.set_params(**kwargs)
def _parse_input(self, X):
# passing graphs to PHATE
if isinstance(X, graphtools.graphs.LandmarkGraph) or (
isinstance(X, graphtools.base.BaseGraph) and self.n_landmark is None
):
self.graph = X
X = X.data
n_pca = self.graph.n_pca
update_graph = False
if isinstance(self.graph, graphtools.graphs.TraditionalGraph):
precomputed = self.graph.precomputed
else:
precomputed = None
return X, n_pca, precomputed, update_graph
elif isinstance(X, graphtools.base.BaseGraph):
self.graph = None
X = X.kernel
precomputed = "affinity"
n_pca = None
update_graph = False
return X, n_pca, precomputed, update_graph
else:
try:
if isinstance(X, pygsp.graphs.Graph):
self.graph = None
X = X.W
precomputed = "adjacency"
update_graph = False
n_pca = None
return X, n_pca, precomputed, update_graph
except NameError:
# pygsp not installed
pass
# checks on regular data
update_graph = True
try:
if isinstance(X, anndata.AnnData):
X = X.X
except NameError:
# anndata not installed
pass
if not callable(self.knn_dist) and self.knn_dist.startswith("precomputed"):
if self.knn_dist == "precomputed":
# automatic detection
if isinstance(X, sparse.coo_matrix):
X = X.tocsr()
if X[0, 0] == 0:
precomputed = "distance"
else:
precomputed = "affinity"
elif self.knn_dist in ["precomputed_affinity", "precomputed_distance"]:
precomputed = self.knn_dist.split("_")[1]
else:
raise ValueError(
"knn_dist {} not recognized. Did you mean "
"'precomputed_distance', "
"'precomputed_affinity', or 'precomputed' "
"(automatically detects distance or affinity)?"
)
n_pca = None
else:
precomputed = None
if self.n_pca is None or self.n_pca >= np.min(X.shape):
n_pca = None
else:
n_pca = self.n_pca
return X, n_pca, precomputed, update_graph
def _update_graph(self, X, precomputed, n_pca, n_landmark):
if self.X is not None and not utils.matrix_is_equivalent(X, self.X):
"""
If the same data is used, we can reuse existing kernel and
diffusion matrices. Otherwise we have to recompute.
"""
self._reset_graph()
else:
try:
self.graph.set_params(
decay=self.decay,
knn=self.knn,
knn_max=self.knn_max,
distance=self.knn_dist,
precomputed=precomputed,
n_jobs=self.n_jobs,
verbose=self.verbose,
n_pca=n_pca,
n_landmark=n_landmark,
random_state=self.random_state,
)
_logger.info("Using precomputed graph and diffusion operator...")
except ValueError as e:
# something changed that should have invalidated the graph
_logger.debug("Reset graph due to {}".format(str(e)))
self._reset_graph()
def fit(self, X):
"""Computes the diffusion operator
Parameters
----------
X : array, shape=[n_samples, n_features]
input data with `n_samples` samples and `n_dimensions`
dimensions. Accepted data types: `numpy.ndarray`,
`scipy.sparse.spmatrix`, `pd.DataFrame`, `anndata.AnnData`. If
`knn_dist` is 'precomputed', `data` should be a n_samples x
n_samples distance or affinity matrix
Returns
-------
phate_operator : PHATE
The estimator object
"""
X, n_pca, precomputed, update_graph = self._parse_input(X)
if precomputed is None:
_logger.info(
"Running PHATE on {} observations and {} variables.".format(
X.shape[0], X.shape[1]
)
)
else:
_logger.info(
"Running PHATE on precomputed {} matrix with {} observations.".format(
precomputed, X.shape[0]
)
)
if self.n_landmark is None or X.shape[0] <= self.n_landmark:
n_landmark = None
else:
n_landmark = self.n_landmark
if self.graph is not None and update_graph:
self._update_graph(X, precomputed, n_pca, n_landmark)
self.X = X
if self.graph is None:
with _logger.log_task("graph and diffusion operator"):
self.graph = graphtools.Graph(
X,
n_pca=n_pca,
n_landmark=n_landmark,
distance=self.knn_dist,
precomputed=precomputed,
knn=self.knn,
knn_max=self.knn_max,
decay=self.decay,
thresh=1e-4,
n_jobs=self.n_jobs,
verbose=self.verbose,
random_state=self.random_state,
**(self.kwargs)
)
# landmark op doesn't build unless forced
self.diff_op
return self
def transform(self, X=None, t_max=100, plot_optimal_t=False, ax=None):
"""Computes the position of the cells in the embedding space
Parameters
----------
X : array, optional, shape=[n_samples, n_features]
input data with `n_samples` samples and `n_dimensions`
dimensions. Not required, since PHATE does not currently embed
cells not given in the input matrix to `PHATE.fit()`.
Accepted data types: `numpy.ndarray`,
`scipy.sparse.spmatrix`, `pd.DataFrame`, `anndata.AnnData`. If
`knn_dist` is 'precomputed', `data` should be a n_samples x
n_samples distance or affinity matrix
t_max : int, optional, default: 100
maximum t to test if `t` is set to 'auto'
plot_optimal_t : boolean, optional, default: False
If true and `t` is set to 'auto', plot the Von Neumann
entropy used to select t
ax : matplotlib.axes.Axes, optional
If given and `plot_optimal_t` is true, plot will be drawn
on the given axis.
Returns
-------
embedding : array, shape=[n_samples, n_dimensions]
The cells embedded in a lower dimensional space using PHATE
"""
if self.graph is None:
raise NotFittedError(
"This PHATE instance is not fitted yet. Call "
"'fit' with appropriate arguments before "
"using this method."
)
elif X is not None and not utils.matrix_is_equivalent(X, self.X):
# fit to external data
warnings.warn(
"Pre-fit PHATE should not be used to transform a "
"new data matrix. Please fit PHATE to the new"
" data by running 'fit' with the new data.",
RuntimeWarning,
)
if (
isinstance(self.graph, graphtools.graphs.TraditionalGraph)
and self.graph.precomputed is not None
):
raise ValueError(
"Cannot transform additional data using a "
"precomputed distance matrix."
)
else:
if self.embedding is None:
self.transform()
transitions = self.graph.extend_to_data(X)
return self.graph.interpolate(self.embedding, transitions)
else:
diff_potential = self._calculate_potential(
t_max=t_max, plot_optimal_t=plot_optimal_t, ax=ax
)
if self.embedding is None:
with _logger.log_task("{} MDS".format(self.mds)):
self.embedding = mds.embed_MDS(
diff_potential,
ndim=self.n_components,
how=self.mds,
solver=self.mds_solver,
distance_metric=self.mds_dist,
n_jobs=self.n_jobs,
seed=self.random_state,
verbose=max(self.verbose - 1, 0),
)
if isinstance(self.graph, graphtools.graphs.LandmarkGraph):
_logger.debug("Extending to original data...")
return self.graph.interpolate(self.embedding)
else:
return self.embedding
def fit_transform(self, X, **kwargs):
"""Computes the diffusion operator and the position of the cells in the
embedding space
Parameters
----------
X : array, shape=[n_samples, n_features]
input data with `n_samples` samples and `n_dimensions`
dimensions. Accepted data types: `numpy.ndarray`,
`scipy.sparse.spmatrix`, `pd.DataFrame`, `anndata.AnnData` If
`knn_dist` is 'precomputed', `data` should be a n_samples x
n_samples distance or affinity matrix
kwargs : further arguments for `PHATE.transform()`
Keyword arguments as specified in :func:`~phate.PHATE.transform`
Returns
-------
embedding : array, shape=[n_samples, n_dimensions]
The cells embedded in a lower dimensional space using PHATE
"""
with _logger.log_task("PHATE"):
self.fit(X)
embedding = self.transform(**kwargs)
return embedding
def _calculate_potential(self, t=None, t_max=100, plot_optimal_t=False, ax=None):
"""Calculates the diffusion potential
Parameters
----------
t : int
power to which the diffusion operator is powered
sets the level of diffusion
t_max : int, default: 100
Maximum value of `t` to test
plot_optimal_t : boolean, default: False
If true, plots the Von Neumann Entropy and knee point
ax : matplotlib.Axes, default: None
If plot=True and ax is not None, plots the VNE on the given axis
Otherwise, creates a new axis and displays the plot
Returns
-------
diff_potential : array-like, shape=[n_samples, n_samples]
The diffusion potential fit on the input data
"""
if t is None:
t = self.t
if self._diff_potential is None:
if t == "auto":
t = self._find_optimal_t(t_max=t_max, plot=plot_optimal_t, ax=ax)
else:
t = self.t
with _logger.log_task("diffusion potential"):
# diffused diffusion operator
diff_op_t = np.linalg.matrix_power(self.diff_op, t)
if self.gamma == 1:
# handling small values
diff_op_t = diff_op_t + 1e-7
self._diff_potential = -1 * np.log(diff_op_t)
elif self.gamma == -1:
self._diff_potential = diff_op_t
else:
c = (1 - self.gamma) / 2
self._diff_potential = ((diff_op_t) ** c) / c
elif plot_optimal_t:
self._find_optimal_t(t_max=t_max, plot=plot_optimal_t, ax=ax)
return self._diff_potential
def _von_neumann_entropy(self, t_max=100):
"""Calculate Von Neumann Entropy
Determines the Von Neumann entropy of the diffusion affinities
at varying levels of `t`. The user should select a value of `t`
around the "knee" of the entropy curve.
We require that 'fit' stores the value of `PHATE.diff_op`
in order to calculate the Von Neumann entropy.
Parameters
----------
t_max : int, default: 100
Maximum value of `t` to test
Returns
-------
entropy : array, shape=[t_max]
The entropy of the diffusion affinities for each value of `t`
"""
t = np.arange(t_max)
return t, vne.compute_von_neumann_entropy(self.diff_op, t_max=t_max)
def _find_optimal_t(self, t_max=100, plot=False, ax=None):
"""Find the optimal value of t
Selects the optimal value of t based on the knee point of the
Von Neumann Entropy of the diffusion operator.
Parameters
----------
t_max : int, default: 100
Maximum value of t to test
plot : boolean, default: False
If true, plots the Von Neumann Entropy and knee point
ax : matplotlib.Axes, default: None
If plot=True and ax is not None, plots the VNE on the given axis
Otherwise, creates a new axis and displays the plot
Returns
-------
t_opt : int
The optimal value of t
"""
with _logger.log_task("optimal t"):
t, h = self._von_neumann_entropy(t_max=t_max)
t_opt = vne.find_knee_point(y=h, x=t)
_logger.info("Automatically selected t = {}".format(t_opt))
if plot:
if ax is None:
fig, ax = plt.subplots()
show = True
else:
show = False
ax.plot(t, h)
ax.scatter(t_opt, h[t == t_opt], marker="*", c="k", s=50)
ax.set_xlabel("t")
ax.set_ylabel("Von Neumann Entropy")
ax.set_title("Optimal t = {}".format(t_opt))
if show:
plt.show()
self.optimal_t = t_opt
return t_opt
|
class PHATE(BaseEstimator):
'''PHATE operator which performs dimensionality reduction.
Potential of Heat-diffusion for Affinity-based Trajectory Embedding
(PHATE) embeds high dimensional single-cell data into two or three
dimensions for visualization of biological progressions as described
in Moon et al, 2019 [1]_.
Parameters
----------
n_components : int, optional, default: 2
number of dimensions in which the data will be embedded
knn : int, optional, default: 5
number of nearest neighbors on which to build kernel
decay : int, optional, default: 40
sets decay rate of kernel tails.
If None, alpha decaying kernel is not used
n_landmark : int, optional, default: 2000
number of landmarks to use in fast PHATE
t : int, optional, default: 'auto'
power to which the diffusion operator is powered.
This sets the level of diffusion. If 'auto', t is selected
according to the knee point in the Von Neumann Entropy of
the diffusion operator
gamma : float, optional, default: 1
Informational distance constant between -1 and 1.
`gamma=1` gives the PHATE log potential, `gamma=0` gives
a square root potential.
n_pca : int, optional, default: 100
Number of principal components to use for calculating
neighborhoods. For extremely large datasets, using
n_pca < 20 allows neighborhoods to be calculated in
roughly log(n_samples) time.
mds_solver : {'sgd', 'smacof'}, optional (default: 'sgd')
which solver to use for metric MDS. SGD is substantially faster,
but produces slightly less optimal results. Note that SMACOF was used
for all figures in the PHATE paper.
knn_dist : string, optional, default: 'euclidean'
recommended values: 'euclidean', 'cosine', 'precomputed'
Any metric from `scipy.spatial.distance` can be used
distance metric for building kNN graph. Custom distance
functions of form `f(x, y) = d` are also accepted. If 'precomputed',
`data` should be an n_samples x n_samples distance or
affinity matrix. Distance matrices are assumed to have zeros
down the diagonal, while affinity matrices are assumed to have
non-zero values down the diagonal. This is detected automatically using
`data[0,0]`. You can override this detection with
`knn_dist='precomputed_distance'` or `knn_dist='precomputed_affinity'`.
knn_max : int, optional, default: None
Maximum number of neighbors for which alpha decaying kernel
is computed for each point. For very large datasets, setting `knn_max`
to a small multiple of `knn` can speed up computation significantly.
mds_dist : string, optional, default: 'euclidean'
Distance metric for MDS. Recommended values: 'euclidean' and 'cosine'
Any metric from `scipy.spatial.distance` can be used. Custom distance
functions of form `f(x, y) = d` are also accepted
mds : string, optional, default: 'metric'
choose from ['classic', 'metric', 'nonmetric'].
Selects which MDS algorithm is used for dimensionality reduction
n_jobs : integer, optional, default: 1
The number of jobs to use for the computation.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging.
For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for
n_jobs = -2, all CPUs but one are used
random_state : integer or numpy.RandomState, optional, default: None
The generator used to initialize SMACOF (metric, nonmetric) MDS
If an integer is given, it fixes the seed
Defaults to the global `numpy` random number generator
verbose : `int` or `boolean`, optional (default: 1)
If `True` or `> 0`, print status messages
potential_method : deprecated.
Use `gamma=1` for log transformation and `gamma=0` for square root
transformation.
kwargs : additional arguments for `graphtools.Graph`
Attributes
----------
X : array-like, shape=[n_samples, n_dimensions]
embedding : array-like, shape=[n_samples, n_components]
Stores the position of the dataset in the embedding space
graph : graphtools.base.BaseGraph
The graph built on the input data
optimal_t : int
The automatically selected t, when t = 'auto'.
When t is given, optimal_t is None.
Examples
--------
>>> import phate
>>> import matplotlib.pyplot as plt
>>> tree_data, tree_clusters = phate.tree.gen_dla(n_dim=100, n_branch=20,
... branch_length=100)
>>> tree_data.shape
(2000, 100)
>>> phate_operator = phate.PHATE(knn=5, decay=20, t=150)
>>> tree_phate = phate_operator.fit_transform(tree_data)
>>> tree_phate.shape
(2000, 2)
>>> phate.plot.scatter2d(tree_phate, c=tree_clusters)
References
----------
.. [1] Moon KR, van Dijk D, Zheng W, *et al.* (2019),
*Visualizing transitions and structure for biological data exploration*,
`Nature Biotechnology <https://doi.org/10.1038/s41587-019-0336-3>`_.
'''
def __init__(
self,
n_components=2,
knn=5,
decay=40,
n_landmark=2000,
t="auto",
gamma=1,
n_pca=100,
mds_solver="sgd",
knn_dist="euclidean",
knn_max=None,
mds_dist="euclidean",
mds="metric",
n_jobs=1,
random_state=None,
verbose=1,
**kwargs
):
pass
@property
def diff_op(self):
'''diff_op : array-like, shape=[n_samples, n_samples] or [n_landmark, n_landmark]
The diffusion operator built from the graph
'''
pass
@property
def diff_potential(self):
'''Interpolates the PHATE potential to one entry per cell
This is equivalent to calculating infinite-dimensional PHATE,
or running PHATE without the MDS step.
Returns
-------
diff_potential : ndarray, shape=[n_samples, min(n_landmark, n_samples)]
'''
pass
def _check_params(self):
'''Check PHATE parameters
This allows us to fail early - otherwise certain unacceptable
parameter choices, such as mds='mmds', would only fail after
minutes of runtime.
Raises
------
ValueError : unacceptable choice of parameters
'''
pass
def _set_graph_params(self, **params):
pass
def _reset_graph(self):
pass
def _reset_potential(self):
pass
def _reset_embedding(self):
pass
def set_params(self, **params):
'''Set the parameters on this estimator.
Any parameters not given as named arguments will be left at their
current value.
Parameters
----------
n_components : int, optional, default: 2
number of dimensions in which the data will be embedded
knn : int, optional, default: 5
number of nearest neighbors on which to build kernel
decay : int, optional, default: 40
sets decay rate of kernel tails.
If None, alpha decaying kernel is not used
n_landmark : int, optional, default: 2000
number of landmarks to use in fast PHATE
t : int, optional, default: 'auto'
power to which the diffusion operator is powered.
This sets the level of diffusion. If 'auto', t is selected
according to the knee point in the Von Neumann Entropy of
the diffusion operator
gamma : float, optional, default: 1
Informational distance constant between -1 and 1.
`gamma=1` gives the PHATE log potential, `gamma=0` gives
a square root potential.
n_pca : int, optional, default: 100
Number of principal components to use for calculating
neighborhoods. For extremely large datasets, using
n_pca < 20 allows neighborhoods to be calculated in
roughly log(n_samples) time.
mds_solver : {'sgd', 'smacof'}, optional (default: 'sgd')
which solver to use for metric MDS. SGD is substantially faster,
but produces slightly less optimal results. Note that SMACOF was used
for all figures in the PHATE paper.
knn_dist : string, optional, default: 'euclidean'
recommended values: 'euclidean', 'cosine', 'precomputed'
Any metric from `scipy.spatial.distance` can be used
distance metric for building kNN graph. Custom distance
functions of form `f(x, y) = d` are also accepted. If 'precomputed',
`data` should be an n_samples x n_samples distance or
affinity matrix. Distance matrices are assumed to have zeros
down the diagonal, while affinity matrices are assumed to have
non-zero values down the diagonal. This is detected automatically
using `data[0,0]`. You can override this detection with
`knn_dist='precomputed_distance'` or `knn_dist='precomputed_affinity'`.
knn_max : int, optional, default: None
Maximum number of neighbors for which alpha decaying kernel
is computed for each point. For very large datasets, setting `knn_max`
to a small multiple of `knn` can speed up computation significantly.
mds_dist : string, optional, default: 'euclidean'
recommended values: 'euclidean' and 'cosine'
Any metric from `scipy.spatial.distance` can be used
distance metric for MDS
mds : string, optional, default: 'metric'
choose from ['classic', 'metric', 'nonmetric'].
Selects which MDS algorithm is used for dimensionality reduction
n_jobs : integer, optional, default: 1
The number of jobs to use for the computation.
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging.
For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for
n_jobs = -2, all CPUs but one are used
random_state : integer or numpy.RandomState, optional, default: None
The generator used to initialize SMACOF (metric, nonmetric) MDS
If an integer is given, it fixes the seed
Defaults to the global `numpy` random number generator
verbose : `int` or `boolean`, optional (default: 1)
If `True` or `> 0`, print status messages
Examples
--------
>>> import phate
>>> import matplotlib.pyplot as plt
>>> tree_data, tree_clusters = phate.tree.gen_dla(n_dim=50, n_branch=5,
... branch_length=50)
>>> tree_data.shape
(250, 50)
>>> phate_operator = phate.PHATE(knn=5, decay=20, t=150)
>>> tree_phate = phate_operator.fit_transform(tree_data)
>>> tree_phate.shape
(250, 2)
>>> phate_operator.set_params(n_components=10)
PHATE(decay=20, knn=5, knn_dist='euclidean', mds='metric',
mds_dist='euclidean', n_components=10, n_jobs=1, n_landmark=2000,
n_pca=100, potential_method='log', random_state=None, t=150,
verbose=1)
>>> tree_phate = phate_operator.transform()
>>> tree_phate.shape
(250, 10)
>>> # plt.scatter(tree_phate[:,0], tree_phate[:,1], c=tree_clusters)
>>> # plt.show()
Returns
-------
self
'''
pass
def reset_mds(self, **kwargs):
'''
Deprecated. Reset parameters related to multidimensional scaling
Parameters
----------
n_components : int, optional, default: None
If given, sets number of dimensions in which the data
will be embedded
mds : string, optional, default: None
choose from ['classic', 'metric', 'nonmetric']
If given, sets which MDS algorithm is used for
dimensionality reduction
mds_dist : string, optional, default: None
recommended values: 'euclidean' and 'cosine'
Any metric from scipy.spatial.distance can be used
If given, sets the distance metric for MDS
'''
pass
def reset_potential(self, **kwargs):
'''
Deprecated. Reset parameters related to the diffusion potential
Parameters
----------
t : int or 'auto', optional, default: None
Power to which the diffusion operator is powered
If given, sets the level of diffusion
potential_method : string, optional, default: None
choose from ['log', 'sqrt']
If given, sets which transformation of the diffusional
operator is used to compute the diffusion potential
'''
pass
def _parse_input(self, X):
pass
def _update_graph(self, X, precomputed, n_pca, n_landmark):
pass
def fit(self, X):
'''Computes the diffusion operator
Parameters
----------
X : array, shape=[n_samples, n_features]
input data with `n_samples` samples and `n_dimensions`
dimensions. Accepted data types: `numpy.ndarray`,
`scipy.sparse.spmatrix`, `pd.DataFrame`, `anndata.AnnData`. If
`knn_dist` is 'precomputed', `data` should be a n_samples x
n_samples distance or affinity matrix
Returns
-------
phate_operator : PHATE
The estimator object
'''
pass
def transform(self, X=None, t_max=100, plot_optimal_t=False, ax=None):
'''Computes the position of the cells in the embedding space
Parameters
----------
X : array, optional, shape=[n_samples, n_features]
input data with `n_samples` samples and `n_dimensions`
dimensions. Not required, since PHATE does not currently embed
cells not given in the input matrix to `PHATE.fit()`.
Accepted data types: `numpy.ndarray`,
`scipy.sparse.spmatrix`, `pd.DataFrame`, `anndata.AnnData`. If
`knn_dist` is 'precomputed', `data` should be a n_samples x
n_samples distance or affinity matrix
t_max : int, optional, default: 100
maximum t to test if `t` is set to 'auto'
plot_optimal_t : boolean, optional, default: False
If true and `t` is set to 'auto', plot the Von Neumann
entropy used to select t
ax : matplotlib.axes.Axes, optional
If given and `plot_optimal_t` is true, plot will be drawn
on the given axis.
Returns
-------
embedding : array, shape=[n_samples, n_dimensions]
The cells embedded in a lower dimensional space using PHATE
'''
pass
def fit_transform(self, X, **kwargs):
'''Computes the diffusion operator and the position of the cells in the
embedding space
Parameters
----------
X : array, shape=[n_samples, n_features]
input data with `n_samples` samples and `n_dimensions`
dimensions. Accepted data types: `numpy.ndarray`,
`scipy.sparse.spmatrix`, `pd.DataFrame`, `anndata.AnnData`. If
`knn_dist` is 'precomputed', `data` should be a n_samples x
n_samples distance or affinity matrix
kwargs : further arguments for `PHATE.transform()`
Keyword arguments as specified in :func:`~phate.PHATE.transform`
Returns
-------
embedding : array, shape=[n_samples, n_dimensions]
The cells embedded in a lower dimensional space using PHATE
'''
pass
def _calculate_potential(self, t=None, t_max=100, plot_optimal_t=False, ax=None):
'''Calculates the diffusion potential
Parameters
----------
t : int
power to which the diffusion operator is powered
sets the level of diffusion
t_max : int, default: 100
Maximum value of `t` to test
plot_optimal_t : boolean, default: False
If true, plots the Von Neumann Entropy and knee point
ax : matplotlib.Axes, default: None
If plot=True and ax is not None, plots the VNE on the given axis
Otherwise, creates a new axis and displays the plot
Returns
-------
diff_potential : array-like, shape=[n_samples, n_samples]
The diffusion potential fit on the input data
'''
pass
def _von_neumann_entropy(self, t_max=100):
'''Calculate Von Neumann Entropy
Determines the Von Neumann entropy of the diffusion affinities
at varying levels of `t`. The user should select a value of `t`
around the "knee" of the entropy curve.
We require that 'fit' stores the value of `PHATE.diff_op`
in order to calculate the Von Neumann entropy.
Parameters
----------
t_max : int, default: 100
Maximum value of `t` to test
Returns
-------
entropy : array, shape=[t_max]
The entropy of the diffusion affinities for each value of `t`
'''
pass
def _find_optimal_t(self, t_max=100, plot=False, ax=None):
'''Find the optimal value of t
Selects the optimal value of t based on the knee point of the
Von Neumann Entropy of the diffusion operator.
Parameters
----------
t_max : int, default: 100
Maximum value of t to test
plot : boolean, default: False
If true, plots the Von Neumann Entropy and knee point
ax : matplotlib.Axes, default: None
If plot=True and ax is not None, plots the VNE on the given axis
Otherwise, creates a new axis and displays the plot
Returns
-------
t_opt : int
The optimal value of t
'''
pass
| 22 | 13 | 46 | 4 | 29 | 14 | 5 | 0.67 | 1 | 6 | 0 | 0 | 19 | 21 | 19 | 19 | 1,032 | 122 | 545 | 82 | 505 | 365 | 328 | 61 | 308 | 27 | 1 | 3 | 97 |
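For reference, the potential step described in `_calculate_potential` above boils down to a few numpy operations. A minimal sketch assuming a small, hypothetical row-stochastic diffusion operator `P` (the variable names are illustrative and not part of the phate API):

import numpy as np

# hypothetical 3-state diffusion operator; rows sum to 1
P = np.array([[0.8, 0.1, 0.1],
              [0.1, 0.8, 0.1],
              [0.1, 0.1, 0.8]])
t, gamma = 10, 1.0

P_t = np.linalg.matrix_power(P, t)      # diffuse for t steps
if gamma == 1:
    potential = -np.log(P_t + 1e-7)     # log potential; 1e-7 guards against log(0)
elif gamma == -1:
    potential = P_t                     # raw diffusion probabilities
else:
    c = (1 - gamma) / 2
    potential = (P_t ** c) / c          # intermediate gamma transformation

PHATE then embeds the pairwise distances between the rows of this potential with MDS, which is the `transform` step shown earlier.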
143,892 |
KrishnaswamyLab/graphtools
|
KrishnaswamyLab_graphtools/graphtools/graphs.py
|
graphtools.graphs.kNNPyGSPGraph
|
class kNNPyGSPGraph(kNNGraph, PyGSPGraph):
pass
|
class kNNPyGSPGraph(kNNGraph, PyGSPGraph):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,893 |
KrishnaswamyLab/graphtools
|
KrishnaswamyLab_graphtools/graphtools/graphs.py
|
graphtools.graphs.kNNLandmarkPyGSPGraph
|
class kNNLandmarkPyGSPGraph(kNNGraph, LandmarkGraph, PyGSPGraph):
pass
|
class kNNLandmarkPyGSPGraph(kNNGraph, LandmarkGraph, PyGSPGraph):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,894 |
KrishnaswamyLab/graphtools
|
KrishnaswamyLab_graphtools/graphtools/base.py
|
graphtools.base.Data
|
class Data(Base):
"""Parent class that handles the import and dimensionality reduction of data
Parameters
----------
data : array-like, shape=[n_samples,n_features]
accepted types: `numpy.ndarray`, `scipy.sparse.spmatrix`,
`pandas.DataFrame`, `pandas.SparseDataFrame`.
n_pca : {`int`, `None`, `bool`, 'auto'}, optional (default: `None`)
number of PC dimensions to retain for graph building.
If n_pca in `[None, False, 0]`, uses the original data.
If 'auto' or `True` then estimate using a singular value threshold
Note: if data is sparse, uses SVD instead of PCA
TODO: should we subtract and store the mean?
rank_threshold : `float`, 'auto', optional (default: 'auto')
threshold to use when estimating rank for
`n_pca in [True, 'auto']`.
If 'auto', this threshold is
s_max * eps * max(n_samples, n_features)
where s_max is the maximum singular value of the data matrix
and eps is numerical precision. [press2007]_.
random_state : `int` or `None`, optional (default: `None`)
Random state for random PCA
Attributes
----------
data : array-like, shape=[n_samples,n_features]
Original data matrix
n_pca : int or `None`
data_nu : array-like, shape=[n_samples,n_pca]
Reduced data matrix
data_pca : sklearn.decomposition.PCA or sklearn.decomposition.TruncatedSVD
sklearn PCA operator
"""
def __init__(
self, data, n_pca=None, rank_threshold=None, random_state=None, **kwargs
):
self._check_data(data)
n_pca, rank_threshold = self._parse_n_pca_threshold(data, n_pca, rank_threshold)
if utils.is_SparseDataFrame(data):
data = data.to_coo()
elif utils.is_DataFrame(data):
try:
# sparse data
data = data.sparse.to_coo()
except AttributeError:
# dense data
data = np.array(data)
elif utils.is_Anndata(data):
data = data.X
self.data = data
self.n_pca = n_pca
self.rank_threshold = rank_threshold
self.random_state = random_state
self.data_nu = self._reduce_data()
super().__init__(**kwargs)
def _parse_n_pca_threshold(self, data, n_pca, rank_threshold):
if isinstance(n_pca, str):
n_pca = n_pca.lower()
if n_pca != "auto":
raise ValueError(
"n_pca must be an integer "
"0 <= n_pca < min(n_samples,n_features), "
"or in [None, False, True, 'auto']."
)
if isinstance(n_pca, numbers.Number):
if not float(n_pca).is_integer(): # cast it to integer
n_pcaR = np.round(n_pca).astype(int)
warnings.warn(
"Cannot perform PCA to fractional {} dimensions. "
"Rounding to {}".format(n_pca, n_pcaR),
RuntimeWarning,
)
n_pca = n_pcaR
if n_pca < 0:
raise ValueError(
"n_pca cannot be negative. "
"Please supply an integer "
"0 <= n_pca < min(n_samples,n_features) or None"
)
elif np.min(data.shape) <= n_pca:
warnings.warn(
"Cannot perform PCA to {} dimensions on "
"data with min(n_samples, n_features) = {}".format(
n_pca, np.min(data.shape)
),
RuntimeWarning,
)
n_pca = 0
if n_pca in [0, False, None]: # cast 0, False to None.
n_pca = None
elif n_pca is True: # notify that we're going to estimate rank.
n_pca = "auto"
_logger.log_info(
"Estimating n_pca from matrix rank. "
"Supply an integer n_pca "
"for fixed amount."
)
if not any([isinstance(n_pca, numbers.Number), n_pca is None, n_pca == "auto"]):
raise ValueError(
"n_pca was not an instance of numbers.Number, "
"could not be cast to False, and not None. "
"Please supply an integer "
"0 <= n_pca < min(n_samples,n_features) or None"
)
if rank_threshold is not None and n_pca != "auto":
warnings.warn(
"n_pca = {}, therefore rank_threshold of {} "
"will not be used. To use rank thresholding, "
"set n_pca = True".format(n_pca, rank_threshold),
RuntimeWarning,
)
if n_pca == "auto":
if isinstance(rank_threshold, str):
rank_threshold = rank_threshold.lower()
if rank_threshold is None:
rank_threshold = "auto"
if isinstance(rank_threshold, numbers.Number):
if rank_threshold <= 0:
raise ValueError(
"rank_threshold must be positive float or 'auto'. "
)
else:
if rank_threshold != "auto":
raise ValueError(
"rank_threshold must be positive float or 'auto'. "
)
return n_pca, rank_threshold
def _check_data(self, data):
if len(data.shape) != 2:
msg = "Expected 2D array, got {}D array " "instead (shape: {}.) ".format(
len(data.shape), data.shape
)
if len(data.shape) < 2:
msg += (
"\nReshape your data either using array.reshape(-1, 1) "
"if your data has a single feature or array.reshape(1, -1) if "
"it contains a single sample."
)
raise ValueError(msg)
def _reduce_data(self):
"""Private method to reduce data dimension.
If data is dense, uses randomized PCA. If data is sparse, uses
randomized SVD.
TODO: should we subtract and store the mean?
TODO: Fix the rank estimation so we do not compute the full SVD.
Returns
-------
Reduced data matrix
"""
if self.n_pca is not None and (
self.n_pca == "auto" or self.n_pca < self.data.shape[1]
):
with _logger.log_task("PCA"):
n_pca = self.data.shape[1] - 1 if self.n_pca == "auto" else self.n_pca
if sparse.issparse(self.data):
if (
isinstance(self.data, sparse.coo_matrix)
or isinstance(self.data, sparse.lil_matrix)
or isinstance(self.data, sparse.dok_matrix)
):
self.data = self.data.tocsr()
self.data_pca = TruncatedSVD(n_pca, random_state=self.random_state)
else:
self.data_pca = PCA(
n_pca, svd_solver="randomized", random_state=self.random_state
)
self.data_pca.fit(self.data)
if self.n_pca == "auto":
s = self.data_pca.singular_values_
smax = s.max()
if self.rank_threshold == "auto":
threshold = (
smax * np.finfo(self.data.dtype).eps * max(self.data.shape)
)
self.rank_threshold = threshold
threshold = self.rank_threshold
gate = np.where(s >= threshold)[0]
self.n_pca = gate.shape[0]
if self.n_pca == 0:
raise ValueError(
"Supplied threshold {} was greater than "
"maximum singular value {} "
"for the data matrix".format(threshold, smax)
)
_logger.log_info(
"Using rank estimate of {} as n_pca".format(self.n_pca)
)
# reset the sklearn operator
op = self.data_pca # for line-width brevity..
op.components_ = op.components_[gate, :]
op.explained_variance_ = op.explained_variance_[gate]
op.explained_variance_ratio_ = op.explained_variance_ratio_[gate]
op.singular_values_ = op.singular_values_[gate]
self.data_pca = (
op  # I'm not sure this reassignment is needed; op already references self.data_pca
)
data_nu = self.data_pca.transform(self.data)
return data_nu
else:
data_nu = self.data
if sparse.issparse(data_nu) and not isinstance(
data_nu, (sparse.csr_matrix, sparse.csc_matrix, sparse.bsr_matrix)
):
data_nu = data_nu.tocsr()
return data_nu
def get_params(self):
"""Get parameters from this object"""
return {"n_pca": self.n_pca, "random_state": self.random_state}
def set_params(self, **params):
"""Set parameters on this object
Safe setter method - attributes should not be modified directly as some
changes are not valid.
Valid parameters:
- n_pca
- random_state
Parameters
----------
params : key-value pairs of parameter name and new values
Returns
-------
self
"""
if "n_pca" in params and params["n_pca"] != self.n_pca:
raise ValueError("Cannot update n_pca. Please create a new graph")
if "random_state" in params:
self.random_state = params["random_state"]
super().set_params(**params)
return self
def transform(self, Y):
"""Transform input data `Y` to reduced data space defined by `self.data`
Takes data in the same ambient space as `self.data` and transforms it
to be in the same reduced space as `self.data_nu`.
Parameters
----------
Y : array-like, shape=[n_samples_y, n_features]
n_features must be the same as `self.data`.
Returns
-------
Transformed data, shape=[n_samples_y, n_pca]
Raises
------
ValueError : if Y.shape[1] != self.data.shape[1]
"""
try:
# try PCA first
return self.data_pca.transform(Y)
except ValueError:
# shape is wrong
raise ValueError(
"data of shape {0} cannot be transformed"
" to graph built on data of shape {1}. "
"Expected shape ({2}, {3})".format(
Y.shape, self.data.shape, Y.shape[0], self.data.shape[1]
)
)
except AttributeError: # no pca, try to return data
if len(Y.shape) < 2 or Y.shape[1] != self.data.shape[1]:
# shape is wrong
raise ValueError(
"data of shape {0} cannot be transformed"
" to graph built on data of shape {1}. "
"Expected shape ({2}, {3})".format(
Y.shape, self.data.shape, Y.shape[0], self.data.shape[1]
)
)
else:
return Y
def inverse_transform(self, Y, columns=None):
"""Transform input data `Y` to ambient data space defined by `self.data`
Takes data in the same reduced space as `self.data_nu` and transforms
it to be in the same ambient space as `self.data`.
Parameters
----------
Y : array-like, shape=[n_samples_y, n_pca]
n_features must be the same as `self.data_nu`.
columns : list-like
list of integers referring to column indices in the original data
space to be returned. Avoids recomputing the full matrix where only
a few dimensions of the ambient space are of interest
Returns
-------
Inverse transformed data, shape=[n_samples_y, n_features]
Raises
------
ValueError : if Y.shape[1] != self.data_nu.shape[1]
"""
try:
if not hasattr(self, "data_pca"):
# no pca performed
try:
if Y.shape[1] != self.data_nu.shape[1]:
# shape is wrong
raise ValueError
except IndexError:
# len(Y.shape) < 2
raise ValueError
if columns is None:
return Y
else:
columns = np.array([columns]).flatten()
return Y[:, columns]
else:
if columns is None:
return self.data_pca.inverse_transform(Y)
else:
# only return specific columns
columns = np.array([columns]).flatten()
Y_inv = np.dot(Y, self.data_pca.components_[:, columns])
if hasattr(self.data_pca, "mean_"):
Y_inv += self.data_pca.mean_[columns]
return Y_inv
except ValueError:
# more informative error
raise ValueError(
"data of shape {0} cannot be inverse transformed"
" from graph built on reduced data of shape ({1}, {2}). Expected shape ({3}, {2})".format(
Y.shape, self.data_nu.shape[0], self.data_nu.shape[1], Y.shape[0]
)
)
|
class Data(Base):
'''Parent class that handles the import and dimensionality reduction of data
Parameters
----------
data : array-like, shape=[n_samples,n_features]
accepted types: `numpy.ndarray`, `scipy.sparse.spmatrix`,
`pandas.DataFrame`, `pandas.SparseDataFrame`.
n_pca : {`int`, `None`, `bool`, 'auto'}, optional (default: `None`)
number of PC dimensions to retain for graph building.
If n_pca in `[None, False, 0]`, uses the original data.
If 'auto' or `True` then estimate using a singular value threshold
Note: if data is sparse, uses SVD instead of PCA
TODO: should we subtract and store the mean?
rank_threshold : `float`, 'auto', optional (default: 'auto')
threshold to use when estimating rank for
`n_pca in [True, 'auto']`.
If 'auto', this threshold is
s_max * eps * max(n_samples, n_features)
where s_max is the maximum singular value of the data matrix
and eps is numerical precision. [press2007]_.
random_state : `int` or `None`, optional (default: `None`)
Random state for random PCA
Attributes
----------
data : array-like, shape=[n_samples,n_features]
Original data matrix
n_pca : int or `None`
data_nu : array-like, shape=[n_samples,n_pca]
Reduced data matrix
data_pca : sklearn.decomposition.PCA or sklearn.decomposition.TruncatedSVD
sklearn PCA operator
'''
def __init__(
self, data, n_pca=None, rank_threshold=None, random_state=None, **kwargs
):
pass
def _parse_n_pca_threshold(self, data, n_pca, rank_threshold):
pass
def _check_data(self, data):
pass
def _reduce_data(self):
'''Private method to reduce data dimension.
If data is dense, uses randomized PCA. If data is sparse, uses
randomized SVD.
TODO: should we subtract and store the mean?
TODO: Fix the rank estimation so we do not compute the full SVD.
Returns
-------
Reduced data matrix
'''
pass
def get_params(self):
'''Get parameters from this object'''
pass
def set_params(self, **params):
'''Set parameters on this object
Safe setter method - attributes should not be modified directly as some
changes are not valid.
Valid parameters:
- n_pca
- random_state
Parameters
----------
params : key-value pairs of parameter name and new values
Returns
-------
self
'''
pass
def transform(self, Y):
'''Transform input data `Y` to reduced data space defined by `self.data`
Takes data in the same ambient space as `self.data` and transforms it
to be in the same reduced space as `self.data_nu`.
Parameters
----------
Y : array-like, shape=[n_samples_y, n_features]
n_features must be the same as `self.data`.
Returns
-------
Transformed data, shape=[n_samples_y, n_pca]
Raises
------
ValueError : if Y.shape[1] != self.data.shape[1]
'''
pass
def inverse_transform(self, Y, columns=None):
'''Transform input data `Y` to ambient data space defined by `self.data`
Takes data in the same reduced space as `self.data_nu` and transforms
it to be in the same ambient space as `self.data`.
Parameters
----------
Y : array-like, shape=[n_samples_y, n_pca]
n_features must be the same as `self.data_nu`.
columns : list-like
list of integers referring to column indices in the original data
space to be returned. Avoids recomputing the full matrix where only
a few dimensions of the ambient space are of interest
Returns
-------
Inverse transformed data, shape=[n_samples_y, n_features]
Raises
------
ValueError : if Y.shape[1] != self.data_nu.shape[1]
'''
pass
| 9 | 6 | 38 | 2 | 28 | 9 | 6 | 0.47 | 1 | 10 | 0 | 0 | 8 | 6 | 8 | 11 | 353 | 35 | 221 | 27 | 210 | 103 | 129 | 25 | 120 | 17 | 2 | 4 | 50 |
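The `n_pca='auto'` behaviour of `_reduce_data` above keeps only singular values above a noise threshold. The estimate can be reproduced directly, as in this minimal sketch (the matrix and names are illustrative, not part of graphtools):

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 20)) @ rng.normal(size=(20, 50))   # rank-20 data

s = np.linalg.svd(X, compute_uv=False)                       # singular values
threshold = s.max() * np.finfo(X.dtype).eps * max(X.shape)   # press2007-style cutoff
n_pca = int((s >= threshold).sum())                          # should recover 20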
143,895 |
KrishnaswamyLab/graphtools
|
KrishnaswamyLab_graphtools/graphtools/base.py
|
graphtools.base.Base
|
class Base(object):
"""Class that deals with key-word arguments but is otherwise
just an object.
"""
def __init__(self):
super().__init__()
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, "deprecated_original", cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [
p
for p in init_signature.parameters.values()
if p.name != "self" and p.kind != p.VAR_KEYWORD
]
# Extract and sort argument names excluding 'self'
parameters = set([p.name for p in parameters])
# recurse
for superclass in cls.__bases__:
try:
parameters.update(superclass._get_param_names())
except AttributeError:
# object and pygsp.graphs.Graph don't have this method
pass
return parameters
def set_params(self, **kwargs):
# for k in kwargs:
# raise TypeError("set_params() got an unexpected "
# "keyword argument '{}'".format(k))
return self
|
class Base(object):
'''Class that deals with key-word arguments but is otherwise
just an object.
'''
def __init__(self):
pass
@classmethod
def _get_param_names(cls):
'''Get parameter names for the estimator'''
pass
def set_params(self, **kwargs):
pass
| 5 | 2 | 12 | 1 | 7 | 4 | 2 | 0.7 | 1 | 3 | 0 | 1 | 2 | 0 | 3 | 3 | 45 | 6 | 23 | 9 | 18 | 16 | 18 | 8 | 14 | 4 | 1 | 2 | 6 |
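`_get_param_names` above is ordinary `inspect` machinery: read the constructor signature, drop `self` and `**kwargs`, and merge the result with whatever the base classes report. A standalone sketch of the same idea using made-up classes (not graphtools types):

from inspect import signature

class A:
    def __init__(self, alpha=1, **kwargs):
        pass

class B(A):
    def __init__(self, beta=2, **kwargs):
        super().__init__(**kwargs)

def param_names(cls):
    params = {
        p.name
        for p in signature(cls.__init__).parameters.values()
        if p.name != "self" and p.kind != p.VAR_KEYWORD
    }
    for base in cls.__bases__:          # recurse into parents, as Base does
        if base is not object:
            params |= param_names(base)
    return params

print(param_names(B))  # {'alpha', 'beta'} (set order may vary)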
143,896 |
KrishnaswamyLab/graphtools
|
KrishnaswamyLab_graphtools/graphtools/graphs.py
|
graphtools.graphs.LandmarkGraph
|
class LandmarkGraph(DataGraph):
"""Landmark graph
Adds landmarking feature to any data graph by taking spectral clusters
and building a 'landmark operator' from clusters to samples and back to
clusters.
Any transformation on the landmark kernel is trivially extended to the
data space by multiplying by the transition matrix.
Parameters
----------
data : array-like, shape=[n_samples,n_features]
accepted types: `numpy.ndarray`, `scipy.sparse.spmatrix`,
`pandas.DataFrame`, `pandas.SparseDataFrame`.
n_landmark : `int`, optional (default: 2000)
number of landmarks to use
n_svd : `int`, optional (default: 100)
number of SVD components to use for spectral clustering
Attributes
----------
landmark_op : array-like, shape=[n_landmark, n_landmark]
Landmark operator.
Can be treated as a diffusion operator between landmarks.
transitions : array-like, shape=[n_samples, n_landmark]
Transition probabilities between samples and landmarks.
clusters : array-like, shape=[n_samples]
Private attribute. Cluster assignments for each sample.
Examples
--------
>>> G = graphtools.Graph(data, n_landmark=1000)
>>> X_landmark = transform(G.landmark_op)
>>> X_full = G.interpolate(X_landmark)
"""
def __init__(self, data, n_landmark=2000, n_svd=100, **kwargs):
"""Initialize a landmark graph.
Raises
------
RuntimeWarning : if too many SVD dimensions or
too few landmarks are used
"""
if n_landmark >= data.shape[0]:
raise ValueError(
"n_landmark ({}) >= n_samples ({}). Use "
"kNNGraph instead".format(n_landmark, data.shape[0])
)
if n_svd >= data.shape[0]:
warnings.warn(
"n_svd ({}) >= n_samples ({}) Consider "
"using kNNGraph or lower n_svd".format(n_svd, data.shape[0]),
RuntimeWarning,
)
self.n_landmark = n_landmark
self.n_svd = n_svd
super().__init__(data, **kwargs)
def get_params(self):
"""Get parameters from this object"""
params = super().get_params()
params.update({"n_landmark": self.n_landmark, "n_pca": self.n_pca})
return params
def set_params(self, **params):
"""Set parameters on this object
Safe setter method - attributes should not be modified directly as some
changes are not valid.
Valid parameters:
- n_landmark
- n_svd
Parameters
----------
params : key-value pairs of parameter name and new values
Returns
-------
self
"""
# update parameters
reset_landmarks = False
if "n_landmark" in params and params["n_landmark"] != self.n_landmark:
self.n_landmark = params["n_landmark"]
reset_landmarks = True
if "n_svd" in params and params["n_svd"] != self.n_svd:
self.n_svd = params["n_svd"]
reset_landmarks = True
# update superclass parameters
super().set_params(**params)
# reset things that changed
if reset_landmarks:
self._reset_landmarks()
return self
def _reset_landmarks(self):
"""Reset landmark data
Landmarks can be recomputed without recomputing the kernel
"""
try:
del self._landmark_op
del self._transitions
del self._clusters
except AttributeError:
# landmarks aren't currently defined
pass
@property
def landmark_op(self):
"""Landmark operator
Compute or return the landmark operator
Returns
-------
landmark_op : array-like, shape=[n_landmark, n_landmark]
Landmark operator. Can be treated as a diffusion operator between
landmarks.
"""
try:
return self._landmark_op
except AttributeError:
self.build_landmark_op()
return self._landmark_op
@property
def clusters(self):
"""Cluster assignments for each sample.
Compute or return the cluster assignments
Returns
-------
clusters : list-like, shape=[n_samples]
Cluster assignments for each sample.
"""
try:
return self._clusters
except AttributeError:
self.build_landmark_op()
return self._clusters
@property
def transitions(self):
"""Transition matrix from samples to landmarks
Compute the landmark operator if necessary, then return the
transition matrix.
Returns
-------
transitions : array-like, shape=[n_samples, n_landmark]
Transition probabilities between samples and landmarks.
"""
try:
return self._transitions
except AttributeError:
self.build_landmark_op()
return self._transitions
def _landmarks_to_data(self):
landmarks = np.unique(self.clusters)
if sparse.issparse(self.kernel):
pmn = sparse.vstack(
[
sparse.csr_matrix(self.kernel[self.clusters == i, :].sum(axis=0))
for i in landmarks
]
)
else:
pmn = np.array(
[np.sum(self.kernel[self.clusters == i, :], axis=0) for i in landmarks]
)
return pmn
def _data_transitions(self):
return normalize(self._landmarks_to_data(), "l1", axis=1)
def build_landmark_op(self):
"""Build the landmark operator
Calculates spectral clusters on the kernel, and calculates transition
probabilities between cluster centers by using transition probabilities
between samples assigned to each cluster.
"""
with _logger.log_task("landmark operator"):
is_sparse = sparse.issparse(self.kernel)
# spectral clustering
with _logger.log_task("SVD"):
_, _, VT = randomized_svd(
self.diff_aff,
n_components=self.n_svd,
random_state=self.random_state,
)
with _logger.log_task("KMeans"):
kmeans = MiniBatchKMeans(
self.n_landmark,
init_size=3 * self.n_landmark,
n_init=1,
batch_size=10000,
random_state=self.random_state,
)
self._clusters = kmeans.fit_predict(self.diff_op.dot(VT.T))
# transition matrices
pmn = self._landmarks_to_data()
# row normalize
pnm = pmn.transpose()
pmn = normalize(pmn, norm="l1", axis=1)
pnm = normalize(pnm, norm="l1", axis=1)
# sparsity agnostic matrix multiplication
landmark_op = pmn.dot(pnm)
if is_sparse:
# no need to have a sparse landmark operator
landmark_op = landmark_op.toarray()
# store output
self._landmark_op = landmark_op
self._transitions = pnm
def extend_to_data(self, data, **kwargs):
"""Build transition matrix from new data to the graph
Creates a transition matrix such that `Y` can be approximated by
a linear combination of landmarks. Any
transformation of the landmarks can be trivially applied to `Y` by
performing
`transform_Y = transitions.dot(transform)`
Parameters
----------
Y: array-like, [n_samples_y, n_features]
new data for which an affinity matrix is calculated
to the existing data. `n_features` must match
either the ambient or PCA dimensions
Returns
-------
transitions : array-like, [n_samples_y, self.data.shape[0]]
Transition matrix from `Y` to `self.data`
"""
kernel = self.build_kernel_to_data(data, **kwargs)
if sparse.issparse(kernel):
pnm = sparse.hstack(
[
sparse.csr_matrix(kernel[:, self.clusters == i].sum(axis=1))
for i in np.unique(self.clusters)
]
)
else:
pnm = np.array(
[
np.sum(kernel[:, self.clusters == i], axis=1).T
for i in np.unique(self.clusters)
]
).transpose()
pnm = normalize(pnm, norm="l1", axis=1)
return pnm
def interpolate(self, transform, transitions=None, Y=None):
"""Interpolate new data onto a transformation of the graph data
One of either transitions or Y should be provided
Parameters
----------
transform : array-like, shape=[n_samples, n_transform_features]
transitions : array-like, optional, shape=[n_samples_y, n_samples]
Transition matrix from `Y` (not provided) to `self.data`
Y: array-like, optional, shape=[n_samples_y, n_features]
new data for which an affinity matrix is calculated
to the existing data. `n_features` must match
either the ambient or PCA dimensions
Returns
-------
Y_transform : array-like, [n_samples_y, n_features or n_pca]
Transition matrix from `Y` to `self.data`
"""
if transitions is None and Y is None:
# assume Y is self.data and use standard landmark transitions
transitions = self.transitions
return super().interpolate(transform, transitions=transitions, Y=Y)
|
class LandmarkGraph(DataGraph):
'''Landmark graph
Adds landmarking feature to any data graph by taking spectral clusters
and building a 'landmark operator' from clusters to samples and back to
clusters.
Any transformation on the landmark kernel is trivially extended to the
data space by multiplying by the transition matrix.
Parameters
----------
data : array-like, shape=[n_samples,n_features]
accepted types: `numpy.ndarray`, `scipy.sparse.spmatrix`,
`pandas.DataFrame`, `pandas.SparseDataFrame`.
n_landmark : `int`, optional (default: 2000)
number of landmarks to use
n_svd : `int`, optional (default: 100)
number of SVD components to use for spectral clustering
Attributes
----------
landmark_op : array-like, shape=[n_landmark, n_landmark]
Landmark operator.
Can be treated as a diffusion operator between landmarks.
transitions : array-like, shape=[n_samples, n_landmark]
Transition probabilities between samples and landmarks.
clusters : array-like, shape=[n_samples]
Private attribute. Cluster assignments for each sample.
Examples
--------
>>> G = graphtools.Graph(data, n_landmark=1000)
>>> X_landmark = transform(G.landmark_op)
>>> X_full = G.interpolate(X_landmark)
'''
def __init__(self, data, n_landmark=2000, n_svd=100, **kwargs):
'''Initialize a landmark graph.
Raises
------
RuntimeWarning : if too many SVD dimensions or
too few landmarks are used
'''
pass
def get_params(self):
'''Get parameters from this object'''
pass
def set_params(self, **params):
'''Set parameters on this object
Safe setter method - attributes should not be modified directly as some
changes are not valid.
Valid parameters:
- n_landmark
- n_svd
Parameters
----------
params : key-value pairs of parameter name and new values
Returns
-------
self
'''
pass
def _reset_landmarks(self):
'''Reset landmark data
Landmarks can be recomputed without recomputing the kernel
'''
pass
@property
def landmark_op(self):
'''Landmark operator
Compute or return the landmark operator
Returns
-------
landmark_op : array-like, shape=[n_landmark, n_landmark]
Landmark operator. Can be treated as a diffusion operator between
landmarks.
'''
pass
@property
def clusters(self):
'''Cluster assignments for each sample.
Compute or return the cluster assignments
Returns
-------
clusters : list-like, shape=[n_samples]
Cluster assignments for each sample.
'''
pass
@property
def transitions(self):
'''Transition matrix from samples to landmarks
Compute the landmark operator if necessary, then return the
transition matrix.
Returns
-------
transitions : array-like, shape=[n_samples, n_landmark]
Transition probabilities between samples and landmarks.
'''
pass
def _landmarks_to_data(self):
pass
def _data_transitions(self):
pass
def build_landmark_op(self):
'''Build the landmark operator
Calculates spectral clusters on the kernel, and calculates transition
probabilities between cluster centers by using transition probabilities
between samples assigned to each cluster.
'''
pass
def extend_to_data(self, data, **kwargs):
'''Build transition matrix from new data to the graph
Creates a transition matrix such that `Y` can be approximated by
a linear combination of landmarks. Any
transformation of the landmarks can be trivially applied to `Y` by
performing
`transform_Y = transitions.dot(transform)`
Parameters
----------
Y: array-like, [n_samples_y, n_features]
new data for which an affinity matrix is calculated
to the existing data. `n_features` must match
either the ambient or PCA dimensions
Returns
-------
transitions : array-like, [n_samples_y, self.data.shape[0]]
Transition matrix from `Y` to `self.data`
'''
pass
def interpolate(self, transform, transitions=None, Y=None):
'''Interpolate new data onto a transformation of the graph data
One of either transitions or Y should be provided
Parameters
----------
transform : array-like, shape=[n_samples, n_transform_features]
transitions : array-like, optional, shape=[n_samples_y, n_samples]
Transition matrix from `Y` (not provided) to `self.data`
Y: array-like, optional, shape=[n_samples_y, n_features]
new data for which an affinity matrix is calculated
to the existing data. `n_features` must match
either the ambient or PCA dimensions
Returns
-------
Y_transform : array-like, [n_samples_y, n_features or n_pca]
Transition matrix from `Y` to `self.data`
'''
pass
| 16 | 11 | 20 | 2 | 10 | 8 | 2 | 1 | 1 | 4 | 0 | 6 | 12 | 6 | 12 | 19 | 298 | 48 | 125 | 34 | 109 | 125 | 86 | 30 | 73 | 4 | 2 | 2 | 25 |
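The heart of `build_landmark_op` above is cluster aggregation followed by row normalization. A toy dense sketch with made-up cluster labels (illustrative only; the real code also runs SVD and MiniBatchKMeans to obtain the clusters):

import numpy as np

rng = np.random.default_rng(0)
K = rng.random((6, 6))
K = (K + K.T) / 2                              # symmetric toy kernel
clusters = np.array([0, 0, 1, 1, 2, 2])        # hypothetical landmark assignments

# landmark-to-sample affinities: sum kernel rows within each cluster
pmn = np.array([K[clusters == i, :].sum(axis=0) for i in np.unique(clusters)])
pnm = pmn.T
pmn = pmn / pmn.sum(axis=1, keepdims=True)     # l1 row normalization
pnm = pnm / pnm.sum(axis=1, keepdims=True)

landmark_op = pmn @ pnm                        # landmark-to-landmark diffusion
transitions = pnm                              # sample-to-landmark transitions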
143,897 |
KrishnaswamyLab/graphtools
|
KrishnaswamyLab_graphtools/graphtools/graphs.py
|
graphtools.graphs.TraditionalGraph
|
class TraditionalGraph(DataGraph):
"""Traditional weighted adjacency graph
Parameters
----------
data : array-like, shape=[n_samples,n_features]
accepted types: `numpy.ndarray`, `scipy.sparse.spmatrix`,
`pandas.DataFrame`, `pandas.SparseDataFrame`.
If `precomputed` is not `None`, data should be an
[n_samples, n_samples] matrix denoting pairwise distances,
affinities, or edge weights.
knn : `int`, optional (default: 5)
Number of nearest neighbors (including self) to use to build the graph
decay : `int` or `None`, optional (default: 40)
Rate of alpha decay to use. If `None`, alpha decay is not used.
bandwidth : `float`, list-like,`callable`, or `None`, optional (default: `None`)
Fixed bandwidth to use. If given, overrides `knn`. Can be a single
bandwidth, list-like (shape=[n_samples]) of bandwidths for each
sample, or a `callable` that takes in an `n x m` matrix and returns
a single value or list-like of length n (shape=[n_samples])
bandwidth_scale : `float`, optional (default : 1.0)
Rescaling factor for bandwidth.
distance : `str`, optional (default: `'euclidean'`)
Any metric from `scipy.spatial.distance` can be used
distance metric for building kNN graph.
TODO: actually sklearn.neighbors has even more choices
n_pca : {`int`, `None`, `bool`, 'auto'}, optional (default: `None`)
number of PC dimensions to retain for graph building.
If n_pca in `[None,False,0]`, uses the original data.
If `True` then estimate using a singular value threshold
Note: if data is sparse, uses SVD instead of PCA
TODO: should we subtract and store the mean?
rank_threshold : `float`, 'auto', optional (default: 'auto')
threshold to use when estimating rank for
`n_pca in [True, 'auto']`.
Note that the default kwarg is `None` for this parameter.
It is subsequently parsed to 'auto' if necessary.
If 'auto', this threshold is
smax * np.finfo(data.dtype).eps * max(data.shape)
where smax is the maximum singular value of the data matrix.
For reference, see, e.g.
W. Press, S. Teukolsky, W. Vetterling and B. Flannery,
“Numerical Recipes (3rd edition)”,
Cambridge University Press, 2007, page 795.
thresh : `float`, optional (default: `1e-4`)
Threshold above which to calculate alpha decay kernel.
All affinities below `thresh` will be set to zero in order to save
on time and memory constraints.
precomputed : {'distance', 'affinity', 'adjacency', `None`},
optional (default: `None`)
If the graph is precomputed, this variable denotes which graph
matrix is provided as `data`.
Only one of `precomputed` and `n_pca` can be set.
"""
def __init__(
self,
data,
knn=5,
decay=40,
bandwidth=None,
bandwidth_scale=1.0,
distance="euclidean",
n_pca=None,
thresh=1e-4,
precomputed=None,
**kwargs,
):
if decay is None and precomputed not in ["affinity", "adjacency"]:
# decay high enough is basically a binary kernel
raise ValueError(
"`decay` must be provided for a "
"TraditionalGraph. For kNN kernel, use kNNGraph."
)
if precomputed is not None and n_pca not in [None, 0, False]:
# the data itself is a matrix of distances / affinities
n_pca = None
warnings.warn(
"n_pca cannot be given on a precomputed graph." " Setting n_pca=None",
RuntimeWarning,
)
if knn is None and bandwidth is None:
raise ValueError("Either `knn` or `bandwidth` must be provided.")
if knn is not None and knn > data.shape[0] - 2:
warnings.warn(
"Cannot set knn ({k}) to be greater than "
" n_samples - 2 ({n}). Setting knn={n}".format(
k=knn, n=data.shape[0] - 2
)
)
knn = data.shape[0] - 2
if precomputed is not None:
if precomputed not in ["distance", "affinity", "adjacency"]:
raise ValueError(
"Precomputed value {} not recognized. "
"Choose from ['distance', 'affinity', "
"'adjacency']".format(precomputed)
)
elif data.shape[0] != data.shape[1]:
raise ValueError(
"Precomputed {} must be a square matrix. "
"{} was given".format(precomputed, data.shape)
)
elif (data < 0).sum() > 0:
raise ValueError(
"Precomputed {} should be " "non-negative".format(precomputed)
)
self.knn = knn
self.decay = decay
self.bandwidth = bandwidth
self.bandwidth_scale = bandwidth_scale
self.distance = distance
self.thresh = thresh
self.precomputed = precomputed
super().__init__(data, n_pca=n_pca, **kwargs)
def get_params(self):
"""Get parameters from this object"""
params = super().get_params()
params.update(
{
"knn": self.knn,
"decay": self.decay,
"bandwidth": self.bandwidth,
"bandwidth_scale": self.bandwidth_scale,
"distance": self.distance,
"precomputed": self.precomputed,
}
)
return params
def set_params(self, **params):
"""Set parameters on this object
Safe setter method - attributes should not be modified directly as some
changes are not valid.
Invalid parameters: (these would require modifying the kernel matrix)
- precomputed
- distance
- knn
- decay
- bandwidth
- bandwidth_scale
Parameters
----------
params : key-value pairs of parameter name and new values
Returns
-------
self
"""
if "precomputed" in params and params["precomputed"] != self.precomputed:
raise ValueError("Cannot update precomputed. " "Please create a new graph")
if (
"distance" in params
and params["distance"] != self.distance
and self.precomputed is None
):
raise ValueError("Cannot update distance. " "Please create a new graph")
if "knn" in params and params["knn"] != self.knn and self.precomputed is None:
raise ValueError("Cannot update knn. Please create a new graph")
if (
"decay" in params
and params["decay"] != self.decay
and self.precomputed is None
):
raise ValueError("Cannot update decay. Please create a new graph")
if (
"bandwidth" in params
and params["bandwidth"] != self.bandwidth
and self.precomputed is None
):
raise ValueError("Cannot update bandwidth. Please create a new graph")
if (
"bandwidth_scale" in params
and params["bandwidth_scale"] != self.bandwidth_scale
):
raise ValueError("Cannot update bandwidth_scale. Please create a new graph")
# update superclass parameters
super().set_params(**params)
return self
def build_kernel(self):
"""Build the KNN kernel.
Build a k nearest neighbors kernel, optionally with alpha decay.
If `precomputed` is not `None`, the appropriate steps in the kernel
building process are skipped.
Must return a symmetric matrix
Returns
-------
K : kernel matrix, shape=[n_samples, n_samples]
symmetric matrix with ones down the diagonal
with no negative entries.
Raises
------
ValueError: if `precomputed` is not an acceptable value
"""
if self.precomputed == "affinity":
# already done
# TODO: should we check that precomputed matrices look okay?
# e.g. check the diagonal
K = self.data_nu
elif self.precomputed == "adjacency":
# need to set diagonal to one to make it an affinity matrix
K = self.data_nu
if sparse.issparse(K) and not (
isinstance(K, sparse.dok_matrix) or isinstance(K, sparse.lil_matrix)
):
K = K.tolil()
K = matrix.set_diagonal(K, 1)
else:
with _logger.log_task("affinities"):
if sparse.issparse(self.data_nu):
self.data_nu = self.data_nu.toarray()
if self.precomputed == "distance":
pdx = self.data_nu
elif self.precomputed is None:
pdx = pdist(self.data_nu, metric=self.distance)
if np.any(pdx == 0):
pdx = squareform(pdx)
duplicate_ids = np.array(
[i for i in np.argwhere(pdx == 0) if i[1] > i[0]]
)
if len(duplicate_ids) < 20:
duplicate_names = ", ".join(
["{} and {}".format(i[0], i[1]) for i in duplicate_ids]
)
warnings.warn(
"Detected zero distance between samples {}. "
"Consider removing duplicates to avoid errors in "
"downstream processing.".format(duplicate_names),
RuntimeWarning,
)
else:
warnings.warn(
"Detected zero distance between {} pairs of samples. "
"Consider removing duplicates to avoid errors in "
"downstream processing.".format(len(duplicate_ids)),
RuntimeWarning,
)
else:
pdx = squareform(pdx)
else:
raise ValueError(
"precomputed='{}' not recognized. "
"Choose from ['affinity', 'adjacency', 'distance', "
"None]".format(self.precomputed)
)
if self.bandwidth is None:
knn_dist = np.partition(pdx, self.knn + 1, axis=1)[
:, : self.knn + 1
]
bandwidth = np.max(knn_dist, axis=1)
elif callable(self.bandwidth):
bandwidth = self.bandwidth(pdx)
else:
bandwidth = self.bandwidth
bandwidth = bandwidth * self.bandwidth_scale
pdx = (pdx.T / bandwidth).T
K = np.exp(-1 * np.power(pdx, self.decay))
# handle nan
K = np.where(np.isnan(K), 1, K)
# truncate
if sparse.issparse(K):
if not (
isinstance(K, sparse.csr_matrix)
or isinstance(K, sparse.csc_matrix)
or isinstance(K, sparse.bsr_matrix)
):
K = K.tocsr()
K.data[K.data < self.thresh] = 0
K = K.tocoo()
K.eliminate_zeros()
K = K.tocsr()
else:
K[K < self.thresh] = 0
return K
def build_kernel_to_data(self, Y, knn=None, bandwidth=None, bandwidth_scale=None):
"""Build transition matrix from new data to the graph
Creates a transition matrix such that `Y` can be approximated by
a linear combination of landmarks. Any
transformation of the landmarks can be trivially applied to `Y` by
performing
`transform_Y = transitions.dot(transform)`
Parameters
----------
Y: array-like, [n_samples_y, n_features]
new data for which an affinity matrix is calculated
to the existing data. `n_features` must match
either the ambient or PCA dimensions
Returns
-------
transitions : array-like, [n_samples_y, self.data.shape[0]]
Transition matrix from `Y` to `self.data`
Raises
------
ValueError: if `precomputed` is not `None`, then the graph cannot
be extended.
"""
if knn is None:
knn = self.knn
if bandwidth is None:
bandwidth = self.bandwidth
if bandwidth_scale is None:
bandwidth_scale = self.bandwidth_scale
if self.precomputed is not None:
raise ValueError("Cannot extend kernel on precomputed graph")
else:
with _logger.log_task("affinities"):
Y = self._check_extension_shape(Y)
pdx = cdist(Y, self.data_nu, metric=self.distance)
if bandwidth is None:
knn_dist = np.partition(pdx, knn, axis=1)[:, :knn]
bandwidth = np.max(knn_dist, axis=1)
elif callable(bandwidth):
bandwidth = bandwidth(pdx)
bandwidth = bandwidth_scale * bandwidth
pdx = (pdx.T / bandwidth).T
K = np.exp(-1 * pdx**self.decay)
# handle nan
K = np.where(np.isnan(K), 1, K)
K[K < self.thresh] = 0
return K
@property
def weighted(self):
if self.precomputed is not None:
return not matrix.nonzero_discrete(self.K, [0.5, 1])
else:
return super().weighted
def _check_shortest_path_distance(self, distance):
if self.precomputed is not None:
if distance == "data":
raise ValueError(
"Graph shortest path with data distance not "
"valid for precomputed graphs. For precomputed graphs, "
"use `distance='constant'` for unweighted graphs and "
"`distance='affinity'` for weighted graphs."
)
super()._check_shortest_path_distance(distance)
def _default_shortest_path_distance(self):
if self.precomputed is not None and not self.weighted:
distance = "constant"
_logger.log_info("Using constant distances.")
else:
distance = super()._default_shortest_path_distance()
return distance
|
class TraditionalGraph(DataGraph):
'''Traditional weighted adjacency graph
Parameters
----------
data : array-like, shape=[n_samples,n_features]
accepted types: `numpy.ndarray`, `scipy.sparse.spmatrix`,
`pandas.DataFrame`, `pandas.SparseDataFrame`.
If `precomputed` is not `None`, data should be an
[n_samples, n_samples] matrix denoting pairwise distances,
affinities, or edge weights.
knn : `int`, optional (default: 5)
Number of nearest neighbors (including self) to use to build the graph
decay : `int` or `None`, optional (default: 40)
Rate of alpha decay to use. If `None`, alpha decay is not used.
bandwidth : `float`, list-like,`callable`, or `None`, optional (default: `None`)
Fixed bandwidth to use. If given, overrides `knn`. Can be a single
bandwidth, list-like (shape=[n_samples]) of bandwidths for each
sample, or a `callable` that takes in an `n x m` matrix and returns
a single value or list-like of length n (shape=[n_samples])
bandwidth_scale : `float`, optional (default : 1.0)
Rescaling factor for bandwidth.
distance : `str`, optional (default: `'euclidean'`)
Any metric from `scipy.spatial.distance` can be used
distance metric for building kNN graph.
TODO: actually sklearn.neighbors has even more choices
n_pca : {`int`, `None`, `bool`, 'auto'}, optional (default: `None`)
number of PC dimensions to retain for graph building.
If n_pca in `[None,False,0]`, uses the original data.
If `True` then estimate using a singular value threshold
Note: if data is sparse, uses SVD instead of PCA
TODO: should we subtract and store the mean?
rank_threshold : `float`, 'auto', optional (default: 'auto')
threshold to use when estimating rank for
`n_pca in [True, 'auto']`.
Note that the default kwarg is `None` for this parameter.
It is subsequently parsed to 'auto' if necessary.
If 'auto', this threshold is
smax * np.finfo(data.dtype).eps * max(data.shape)
where smax is the maximum singular value of the data matrix.
For reference, see, e.g.
W. Press, S. Teukolsky, W. Vetterling and B. Flannery,
“Numerical Recipes (3rd edition)”,
Cambridge University Press, 2007, page 795.
thresh : `float`, optional (default: `1e-4`)
Threshold above which to calculate alpha decay kernel.
All affinities below `thresh` will be set to zero in order to save
on time and memory constraints.
precomputed : {'distance', 'affinity', 'adjacency', `None`},
optional (default: `None`)
If the graph is precomputed, this variable denotes which graph
matrix is provided as `data`.
Only one of `precomputed` and `n_pca` can be set.
'''
def __init__(
self,
data,
knn=5,
decay=40,
bandwidth=None,
bandwidth_scale=1.0,
distance="euclidean",
n_pca=None,
thresh=1e-4,
precomputed=None,
**kwargs,
):
pass
def get_params(self):
'''Get parameters from this object'''
pass
def set_params(self, **params):
'''Set parameters on this object
Safe setter method - attributes should not be modified directly as some
changes are not valid.
Invalid parameters: (these would require modifying the kernel matrix)
- precomputed
- distance
- knn
- decay
- bandwidth
- bandwidth_scale
Parameters
----------
params : key-value pairs of parameter name and new values
Returns
-------
self
'''
pass
def build_kernel(self):
'''Build the KNN kernel.
Build a k nearest neighbors kernel, optionally with alpha decay.
If `precomputed` is not `None`, the appropriate steps in the kernel
building process are skipped.
Must return a symmetric matrix
Returns
-------
K : kernel matrix, shape=[n_samples, n_samples]
symmetric matrix with ones down the diagonal
with no negative entries.
Raises
------
ValueError: if `precomputed` is not an acceptable value
'''
pass
def build_kernel_to_data(self, Y, knn=None, bandwidth=None, bandwidth_scale=None):
'''Build transition matrix from new data to the graph
Creates a transition matrix such that `Y` can be approximated by
a linear combination of landmarks. Any
transformation of the landmarks can be trivially applied to `Y` by
performing
`transform_Y = transitions.dot(transform)`
Parameters
----------
Y: array-like, [n_samples_y, n_features]
new data for which an affinity matrix is calculated
to the existing data. `n_features` must match
either the ambient or PCA dimensions
Returns
-------
transitions : array-like, [n_samples_y, self.data.shape[0]]
Transition matrix from `Y` to `self.data`
Raises
------
ValueError: if `precomputed` is not `None`, then the graph cannot
be extended.
'''
pass
@property
def weighted(self):
pass
def _check_shortest_path_distance(self, distance):
pass
def _default_shortest_path_distance(self):
pass
| 10 | 5 | 38 | 2 | 28 | 8 | 6 | 0.51 | 1 | 3 | 0 | 3 | 8 | 8 | 8 | 15 | 373 | 34 | 224 | 41 | 202 | 115 | 121 | 28 | 112 | 13 | 2 | 5 | 44 |
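The kernel step visible at the top of this record (per-point bandwidth scaling, exponential alpha decay, NaN handling, thresholding) can be reproduced in isolation. A minimal NumPy sketch under stated assumptions: pdx is a dense pairwise-distance matrix, the bandwidth is taken as the distance to the knn-th neighbour, and the function name is illustrative rather than graphtools API.

import numpy as np

def alpha_decay_kernel(pdx, knn=5, decay=40, bandwidth_scale=1.0, thresh=1e-4):
    # Per-point bandwidth: distance to the knn-th nearest neighbour (self included).
    bandwidth = np.partition(pdx, knn, axis=1)[:, knn] * bandwidth_scale
    scaled = (pdx.T / bandwidth).T        # divide each row by its own bandwidth
    K = np.exp(-1 * scaled ** decay)      # alpha-decay affinity, as in the lines above
    K = np.where(np.isnan(K), 1, K)       # handle nan (zero-bandwidth edge case)
    K[K < thresh] = 0                     # drop tiny affinities to keep K sparse
    return (K + K.T) / 2                  # build_kernel must return a symmetric matrix

# usage on random data
X = np.random.rand(20, 3)
pdx = np.linalg.norm(X[:, None, :] - X[None, :, :], axis=-1)
K = alpha_decay_kernel(pdx)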
143,898 |
KrishnaswamyLab/graphtools
|
KrishnaswamyLab_graphtools/graphtools/graphs.py
|
graphtools.graphs.MNNLandmarkPyGSPGraph
|
class MNNLandmarkPyGSPGraph(MNNGraph, LandmarkGraph, PyGSPGraph):
pass
|
class MNNLandmarkPyGSPGraph(MNNGraph, LandmarkGraph, PyGSPGraph):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,899 |
KrishnaswamyLab/graphtools
|
KrishnaswamyLab_graphtools/graphtools/graphs.py
|
graphtools.graphs.MNNPyGSPGraph
|
class MNNPyGSPGraph(MNNGraph, PyGSPGraph):
pass
|
class MNNPyGSPGraph(MNNGraph, PyGSPGraph):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,900 |
KrishnaswamyLab/graphtools
|
KrishnaswamyLab_graphtools/graphtools/graphs.py
|
graphtools.graphs.TraditionalLandmarkGraph
|
class TraditionalLandmarkGraph(TraditionalGraph, LandmarkGraph):
pass
|
class TraditionalLandmarkGraph(TraditionalGraph, LandmarkGraph):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,901 |
KrishnaswamyLab/graphtools
|
KrishnaswamyLab_graphtools/graphtools/graphs.py
|
graphtools.graphs.TraditionalLandmarkPyGSPGraph
|
class TraditionalLandmarkPyGSPGraph(TraditionalGraph, LandmarkGraph, PyGSPGraph):
pass
|
class TraditionalLandmarkPyGSPGraph(TraditionalGraph, LandmarkGraph, PyGSPGraph):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,902 |
KrishnaswamyLab/graphtools
|
KrishnaswamyLab_graphtools/graphtools/graphs.py
|
graphtools.graphs.TraditionalPyGSPGraph
|
class TraditionalPyGSPGraph(TraditionalGraph, PyGSPGraph):
pass
|
class TraditionalPyGSPGraph(TraditionalGraph, PyGSPGraph):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,903 |
KrishnaswamyLab/graphtools
|
KrishnaswamyLab_graphtools/graphtools/graphs.py
|
graphtools.graphs.MNNLandmarkGraph
|
class MNNLandmarkGraph(MNNGraph, LandmarkGraph):
pass
|
class MNNLandmarkGraph(MNNGraph, LandmarkGraph):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 25 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,904 |
KrishnaswamyLab/graphtools
|
KrishnaswamyLab_graphtools/graphtools/graphs.py
|
graphtools.graphs.kNNLandmarkGraph
|
class kNNLandmarkGraph(kNNGraph, LandmarkGraph):
pass
|
class kNNLandmarkGraph(kNNGraph, LandmarkGraph):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
143,905 |
KrishnaswamyLab/graphtools
|
KrishnaswamyLab_graphtools/test/test_estimator.py
|
test_estimator.Estimator
|
class Estimator(graphtools.estimator.GraphEstimator):
def _reset_graph(self):
self.reset = True
|
class Estimator(graphtools.estimator.GraphEstimator):
def _reset_graph(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 1 | 35 | 3 | 0 | 3 | 3 | 1 | 0 | 3 | 3 | 1 | 1 | 4 | 0 | 1 |
143,906 |
KristianOellegaard/django-health-check
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/KristianOellegaard_django-health-check/tests/test_views.py
|
tests.test_views.TestMainView.test_non_critical.MyBackend
|
class MyBackend(BaseHealthCheckBackend):
critical_service = False
def check_status(self):
self.add_error("Super Fail!")
|
class MyBackend(BaseHealthCheckBackend):
def check_status(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 8 | 5 | 1 | 4 | 3 | 2 | 0 | 4 | 3 | 2 | 1 | 1 | 0 | 1 |
143,907 |
KristianOellegaard/django-health-check
|
KristianOellegaard_django-health-check/health_check/contrib/redis/apps.py
|
health_check.contrib.redis.apps.HealthCheckConfig
|
class HealthCheckConfig(AppConfig):
name = "health_check.contrib.redis"
def ready(self):
from .backends import RedisHealthCheck
plugin_dir.register(RedisHealthCheck)
|
class HealthCheckConfig(AppConfig):
def ready(self):
pass
| 2 | 0 | 4 | 1 | 3 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 7 | 2 | 5 | 4 | 2 | 0 | 5 | 4 | 2 | 1 | 1 | 0 | 1 |
143,908 |
KristianOellegaard/django-health-check
|
KristianOellegaard_django-health-check/health_check/contrib/s3boto3_storage/apps.py
|
health_check.contrib.s3boto3_storage.apps.HealthCheckConfig
|
class HealthCheckConfig(AppConfig):
name = "health_check.contrib.s3boto3_storage"
def ready(self):
from .backends import S3Boto3StorageHealthCheck
plugin_dir.register(S3Boto3StorageHealthCheck)
|
class HealthCheckConfig(AppConfig):
def ready(self):
pass
| 2 | 0 | 4 | 1 | 3 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 7 | 2 | 5 | 4 | 2 | 0 | 5 | 4 | 2 | 1 | 1 | 0 | 1 |
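The two HealthCheckConfig records above (redis and s3boto3_storage) share one pattern: the contrib app registers its backend with plugin_dir inside AppConfig.ready(). A hedged sketch of registering a custom backend the same way; the my_project module names are hypothetical and the plugin_dir import path is assumed from the library's usual layout.

# my_project/health/apps.py  (hypothetical app mirroring the contrib pattern above)
from django.apps import AppConfig

from health_check.plugins import plugin_dir


class MyHealthCheckConfig(AppConfig):
    name = "my_project.health"

    def ready(self):
        from .backends import MyBackend  # deferred import, as in the contrib apps

        plugin_dir.register(MyBackend)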
143,909 |
KristianOellegaard/django-health-check
|
KristianOellegaard_django-health-check/health_check/contrib/s3boto3_storage/backends.py
|
health_check.contrib.s3boto3_storage.backends.S3Boto3StorageHealthCheck
|
class S3Boto3StorageHealthCheck(StorageHealthCheck):
"""
Tests the status of a `S3BotoStorage` file storage backend.
S3BotoStorage is included in the `django-storages` package
and recommended by for example Amazon and Heroku for Django
static and media file storage on cloud platforms.
``django-storages`` can be found at https://git.io/v1lGx
``S3Boto3Storage`` can be found at
https://github.com/jschneier/django-storages/blob/master/storages/backends/s3boto3.py
"""
logger = logging.getLogger(__name__)
storage = "storages.backends.s3boto3.S3Boto3Storage"
storage_alias = "default"
def check_delete(self, file_name):
storage = self.get_storage()
if not storage.exists(file_name):
raise ServiceUnavailable("File does not exist")
storage.delete(file_name)
|
class S3Boto3StorageHealthCheck(StorageHealthCheck):
'''
Tests the status of a `S3BotoStorage` file storage backend.
S3BotoStorage is included in the `django-storages` package
and recommended by for example Amazon and Heroku for Django
static and media file storage on cloud platforms.
``django-storages`` can be found at https://git.io/v1lGx
``S3Boto3Storage`` can be found at
https://github.com/jschneier/django-storages/blob/master/storages/backends/s3boto3.py
'''
def check_delete(self, file_name):
pass
| 2 | 1 | 5 | 0 | 5 | 0 | 2 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 14 | 22 | 4 | 9 | 6 | 7 | 9 | 9 | 6 | 7 | 2 | 2 | 1 | 2 |
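S3Boto3StorageHealthCheck above only sets a dotted storage path, a storage_alias, and a check_delete override; everything else comes from StorageHealthCheck. A hedged sketch of a custom storage check in the same shape; the my_project dotted path is a placeholder, and the StorageHealthCheck and ServiceUnavailable import paths are assumed from the patch targets used elsewhere in these records.

import logging

from health_check.exceptions import ServiceUnavailable
from health_check.storage.backends import StorageHealthCheck


class MyBucketStorageHealthCheck(StorageHealthCheck):
    logger = logging.getLogger(__name__)
    storage = "my_project.storages.MyBucketStorage"  # placeholder dotted path
    storage_alias = "default"

    def check_delete(self, file_name):
        storage = self.get_storage()
        if not storage.exists(file_name):
            raise ServiceUnavailable("File does not exist")
        storage.delete(file_name)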
143,910 |
KristianOellegaard/django-health-check
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/KristianOellegaard_django-health-check/tests/test_commands.py
|
tests.test_commands.TestCommand
|
class TestCommand:
@pytest.fixture(autouse=True)
def setup(self):
plugin_dir.reset()
plugin_dir.register(FailPlugin)
plugin_dir.register(OkPlugin)
yield
plugin_dir.reset()
def test_command(self):
stdout = StringIO()
with pytest.raises(SystemExit):
call_command("health_check", stdout=stdout)
stdout.seek(0)
assert stdout.read() == (
"FailPlugin ... unknown error: Oops\n"
"OkPlugin ... working\n"
)
def test_command_with_subset(self):
SUBSET_NAME_1 = "subset-1"
SUBSET_NAME_2 = "subset-2"
HEALTH_CHECK["SUBSETS"] = {
SUBSET_NAME_1: ["OkPlugin"],
SUBSET_NAME_2: ["OkPlugin", "FailPlugin"],
}
stdout = StringIO()
call_command("health_check",
f"--subset={SUBSET_NAME_1}", stdout=stdout)
stdout.seek(0)
assert stdout.read() == ("OkPlugin ... working\n")
def test_command_with_failed_check_subset(self):
SUBSET_NAME = "subset-2"
HEALTH_CHECK["SUBSETS"] = {SUBSET_NAME: ["OkPlugin", "FailPlugin"]}
stdout = StringIO()
with pytest.raises(SystemExit):
call_command("health_check",
f"--subset={SUBSET_NAME}", stdout=stdout)
stdout.seek(0)
assert stdout.read() == (
"FailPlugin ... unknown error: Oops\n"
"OkPlugin ... working\n"
)
def test_command_with_non_existence_subset(self):
SUBSET_NAME = "subset-2"
NON_EXISTENCE_SUBSET_NAME = "abcdef12"
HEALTH_CHECK["SUBSETS"] = {SUBSET_NAME: ["OkPlugin"]}
stdout = StringIO()
with pytest.raises(SystemExit):
call_command(
"health_check", f"--subset={NON_EXISTENCE_SUBSET_NAME}", stdout=stdout
)
stdout.seek(0)
assert stdout.read() == (
f"Subset: '{NON_EXISTENCE_SUBSET_NAME}' does not exist.\n"
)
|
class TestCommand:
@pytest.fixture(autouse=True)
def setup(self):
pass
def test_command(self):
pass
def test_command_with_subset(self):
pass
def test_command_with_failed_check_subset(self):
pass
def test_command_with_non_existence_subset(self):
pass
| 7 | 0 | 11 | 1 | 10 | 0 | 1 | 0 | 0 | 3 | 2 | 0 | 5 | 0 | 5 | 5 | 59 | 7 | 52 | 16 | 45 | 0 | 38 | 15 | 32 | 1 | 0 | 1 | 5 |
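TestCommand above exercises the health_check management command and drives subsets by mutating HEALTH_CHECK["SUBSETS"] at runtime. In a project the equivalent configuration would normally sit in settings; a hedged sketch, with example subset names and backend identifiers:

# settings.py -- hedged example of the SUBSETS configuration exercised above
HEALTH_CHECK = {
    "SUBSETS": {
        "startup-probe": ["DatabaseBackend", "DefaultFileStorageHealthCheck"],
        "liveness-probe": ["DatabaseBackend"],
    },
}

# then, mirroring the tests:  python manage.py health_check --subset=liveness-probe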
143,911 |
KristianOellegaard/django-health-check
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/KristianOellegaard_django-health-check/tests/test_db.py
|
tests.test_db.HealthCheckDatabaseTests
|
class HealthCheckDatabaseTests(TestCase):
"""
Tests health check behavior with a mocked database backend.
Ensures check_status returns/raises the expected result when the database works or raises exceptions.
"""
@patch(
"health_check.db.backends.TestModel.objects.create",
lambda title=None: MockDBModel(),
)
def test_check_status_works(self):
db_backend = DatabaseBackend()
db_backend.check_status()
self.assertFalse(db_backend.errors)
@patch(
"health_check.db.backends.TestModel.objects.create",
lambda title=None: raise_(IntegrityError),
)
def test_raise_integrity_error(self):
db_backend = DatabaseBackend()
db_backend.run_check()
self.assertTrue(db_backend.errors)
self.assertIn("unexpected result: Integrity Error",
db_backend.pretty_status())
@patch(
"health_check.db.backends.TestModel.objects.create",
lambda title=None: MockDBModel(error_thrown=DatabaseError),
)
def test_raise_database_error(self):
db_backend = DatabaseBackend()
db_backend.run_check()
self.assertTrue(db_backend.errors)
self.assertIn("unavailable: Database error",
db_backend.pretty_status())
@patch(
"health_check.db.backends.TestModel.objects.create",
lambda title=None: MockDBModel(error_thrown=Exception),
)
def test_raise_exception(self):
db_backend = DatabaseBackend()
with self.assertRaises(Exception):
db_backend.run_check()
|
class HealthCheckDatabaseTests(TestCase):
'''
Tests health check behavior with a mocked database backend.
Ensures check_status returns/raises the expected result when the database works or raises exceptions.
'''
@patch(
"health_check.db.backends.TestModel.objects.create",
lambda title=None: MockDBModel(),
)
def test_check_status_works(self):
pass
@patch(
"health_check.db.backends.TestModel.objects.create",
lambda title=None: raise_(IntegrityError),
)
def test_raise_integrity_error(self):
pass
@patch(
"health_check.db.backends.TestModel.objects.create",
lambda title=None: MockDBModel(error_thrown=DatabaseError),
)
def test_raise_database_error(self):
pass
@patch(
"health_check.db.backends.TestModel.objects.create",
lambda title=None: MockDBModel(error_thrown=Exception),
)
def test_raise_exception(self):
pass
| 9 | 1 | 5 | 0 | 5 | 0 | 1 | 0.11 | 1 | 3 | 2 | 0 | 4 | 0 | 4 | 4 | 43 | 4 | 35 | 25 | 14 | 4 | 19 | 9 | 14 | 1 | 1 | 1 | 4 |
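HealthCheckDatabaseTests patches TestModel.objects.create with two helpers, raise_ and MockDBModel, that live elsewhere in the test module and are not part of this record. A hedged sketch of what such helpers could look like; the names match the tests, the bodies are assumptions:

class MockDBModel:
    # Stand-in for a saved model instance; optionally raises when deleted.
    def __init__(self, error_thrown=None):
        self.error_thrown = error_thrown

    def delete(self):
        if self.error_thrown is not None:
            raise self.error_thrown


def raise_(exception):
    # lambdas cannot contain a raise statement, so the patched create() calls this instead
    raise exception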
143,912 |
KristianOellegaard/django-health-check
|
KristianOellegaard_django-health-check/health_check/db/apps.py
|
health_check.db.apps.HealthCheckConfig
|
class HealthCheckConfig(AppConfig):
default_auto_field = "django.db.models.AutoField"
name = "health_check.db"
def ready(self):
from .backends import DatabaseBackend
plugin_dir.register(DatabaseBackend)
|
class HealthCheckConfig(AppConfig):
def ready(self):
pass
| 2 | 0 | 4 | 1 | 3 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 8 | 2 | 6 | 5 | 3 | 0 | 6 | 5 | 3 | 1 | 1 | 0 | 1 |
143,913 |
KristianOellegaard/django-health-check
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/KristianOellegaard_django-health-check/tests/test_rabbitmq.py
|
tests.test_rabbitmq.TestRabbitMQHealthCheck
|
class TestRabbitMQHealthCheck:
"""Test RabbitMQ health check."""
@mock.patch("health_check.contrib.rabbitmq.backends.getattr")
@mock.patch("health_check.contrib.rabbitmq.backends.Connection")
def test_broker_refused_connection(self, mocked_connection, mocked_getattr):
"""Test when the connection to RabbitMQ is refused."""
mocked_getattr.return_value = "broker_url"
conn_exception = ConnectionRefusedError("Refused connection")
# mock returns
mocked_conn = mock.MagicMock()
mocked_connection.return_value.__enter__.return_value = mocked_conn
mocked_conn.connect.side_effect = conn_exception
# instantiates the class
rabbitmq_healthchecker = RabbitMQHealthCheck()
# invokes the method check_status()
rabbitmq_healthchecker.check_status()
assert len(rabbitmq_healthchecker.errors), 1
# mock assertions
mocked_connection.assert_called_once_with("broker_url")
@mock.patch("health_check.contrib.rabbitmq.backends.getattr")
@mock.patch("health_check.contrib.rabbitmq.backends.Connection")
def test_broker_auth_error(self, mocked_connection, mocked_getattr):
"""Test that the connection to RabbitMQ has an authentication error."""
mocked_getattr.return_value = "broker_url"
conn_exception = AccessRefused("Refused connection")
# mock returns
mocked_conn = mock.MagicMock()
mocked_connection.return_value.__enter__.return_value = mocked_conn
mocked_conn.connect.side_effect = conn_exception
# instantiates the class
rabbitmq_healthchecker = RabbitMQHealthCheck()
# invokes the method check_status()
rabbitmq_healthchecker.check_status()
assert len(rabbitmq_healthchecker.errors), 1
# mock assertions
mocked_connection.assert_called_once_with("broker_url")
@mock.patch("health_check.contrib.rabbitmq.backends.getattr")
@mock.patch("health_check.contrib.rabbitmq.backends.Connection")
def test_broker_connection_upon_none_url(self, mocked_connection, mocked_getattr):
"""Thest when the connection to RabbitMQ has no ``broker_url``."""
mocked_getattr.return_value = None
# if the variable BROKER_URL is not set, AccessRefused exception is raised
conn_exception = AccessRefused("Refused connection")
# mock returns
mocked_conn = mock.MagicMock()
mocked_connection.return_value.__enter__.return_value = mocked_conn
mocked_conn.connect.side_effect = conn_exception
# instantiates the class
rabbitmq_healthchecker = RabbitMQHealthCheck()
# invokes the method check_status()
rabbitmq_healthchecker.check_status()
assert len(rabbitmq_healthchecker.errors), 1
# mock assertions
mocked_connection.assert_called_once_with(None)
|
class TestRabbitMQHealthCheck:
'''Test RabbitMQ health check.'''
@mock.patch("health_check.contrib.rabbitmq.backends.getattr")
@mock.patch("health_check.contrib.rabbitmq.backends.Connection")
def test_broker_refused_connection(self, mocked_connection, mocked_getattr):
'''Test when the connection to RabbitMQ is refused.'''
pass
@mock.patch("health_check.contrib.rabbitmq.backends.getattr")
@mock.patch("health_check.contrib.rabbitmq.backends.Connection")
def test_broker_auth_error(self, mocked_connection, mocked_getattr):
'''Test that the connection to RabbitMQ has an authentication error.'''
pass
@mock.patch("health_check.contrib.rabbitmq.backends.getattr")
@mock.patch("health_check.contrib.rabbitmq.backends.Connection")
def test_broker_connection_upon_none_url(self, mocked_connection, mocked_getattr):
'''Test when the connection to RabbitMQ has no ``broker_url``.'''
pass
| 10 | 4 | 20 | 5 | 10 | 5 | 1 | 0.46 | 0 | 3 | 1 | 0 | 3 | 0 | 3 | 3 | 71 | 17 | 37 | 16 | 27 | 17 | 31 | 13 | 27 | 1 | 0 | 0 | 3 |
143,914 |
KristianOellegaard/django-health-check
|
KristianOellegaard_django-health-check/health_check/contrib/rabbitmq/backends.py
|
health_check.contrib.rabbitmq.backends.RabbitMQHealthCheck
|
class RabbitMQHealthCheck(BaseHealthCheckBackend):
"""Health check for RabbitMQ."""
namespace = None
def check_status(self):
"""Check RabbitMQ service by opening and closing a broker channel."""
logger.debug("Checking for a broker_url on django settings...")
broker_url_setting_key = (
f"{self.namespace}_BROKER_URL" if self.namespace else "BROKER_URL"
)
broker_url = getattr(settings, broker_url_setting_key, None)
logger.debug("Got %s as the broker_url. Connecting to rabbit...", broker_url)
logger.debug("Attempting to connect to rabbit...")
try:
# conn is used as a context to release opened resources later
with Connection(broker_url) as conn:
conn.connect() # exceptions may be raised upon calling connect
except ConnectionRefusedError as e:
self.add_error(
ServiceUnavailable(
"Unable to connect to RabbitMQ: Connection was refused."
),
e,
)
except AccessRefused as e:
self.add_error(
ServiceUnavailable(
"Unable to connect to RabbitMQ: Authentication error."
),
e,
)
except IOError as e:
self.add_error(ServiceUnavailable("IOError"), e)
except BaseException as e:
self.add_error(ServiceUnavailable("Unknown error"), e)
else:
logger.debug("Connection established. RabbitMQ is healthy.")
|
class RabbitMQHealthCheck(BaseHealthCheckBackend):
'''Health check for RabbitMQ.'''
def check_status(self):
'''Check RabbitMQ service by opening and closing a broker channel.'''
pass
| 2 | 2 | 39 | 6 | 31 | 3 | 6 | 0.12 | 1 | 3 | 1 | 0 | 1 | 0 | 1 | 8 | 44 | 8 | 33 | 7 | 31 | 4 | 21 | 5 | 19 | 6 | 1 | 2 | 6 |
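RabbitMQHealthCheck reads <namespace>_BROKER_URL (or plain BROKER_URL) from Django settings and opens a kombu Connection. A hedged sketch of enabling it; the AMQP URL is a placeholder:

# settings.py -- hedged wiring for the RabbitMQ contrib check above
INSTALLED_APPS = [
    # ...
    "health_check",
    "health_check.contrib.rabbitmq",
]

# read via getattr(settings, "BROKER_URL", None) in check_status above
BROKER_URL = "amqp://guest:guest@localhost:5672//"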
143,915 |
KristianOellegaard/django-health-check
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/KristianOellegaard_django-health-check/tests/test_plugins.py
|
tests.test_plugins.TestPlugin
|
class TestPlugin:
@pytest.fixture(autouse=True)
def setup(self):
plugin_dir.reset()
plugin_dir.register(FakePlugin)
yield
plugin_dir.reset()
def test_register_plugin(self):
assert len(plugin_dir._registry) == 1
|
class TestPlugin:
@pytest.fixture(autouse=True)
def setup(self):
pass
def test_register_plugin(self):
pass
| 4 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 0 | 1 | 1 | 0 | 2 | 0 | 2 | 2 | 10 | 1 | 9 | 4 | 5 | 0 | 8 | 3 | 5 | 1 | 0 | 0 | 2 |
143,916 |
KristianOellegaard/django-health-check
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/KristianOellegaard_django-health-check/tests/test_views.py
|
tests.test_views.TestMainView.test_error_param_json.JSONErrorBackend
|
class JSONErrorBackend(BaseHealthCheckBackend):
def run_check(self):
self.add_error("JSON Error")
|
class JSONErrorBackend(BaseHealthCheckBackend):
def run_check(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 8 | 3 | 0 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
143,917 |
KristianOellegaard/django-health-check
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/KristianOellegaard_django-health-check/tests/test_redis.py
|
tests.test_redis.TestRedisHealthCheck
|
class TestRedisHealthCheck:
"""Test Redis health check."""
@mock.patch("health_check.contrib.redis.backends.getattr")
@mock.patch("health_check.contrib.redis.backends.from_url", autospec=True)
def test_redis_refused_connection(self, mocked_connection, mocked_getattr):
"""Test when the connection to Redis is refused."""
mocked_getattr.return_value = "redis_url"
# mock returns
mocked_connection.return_value = mock.MagicMock()
mocked_connection.return_value.__enter__.side_effect = ConnectionRefusedError(
"Refused connection"
)
# instantiates the class
redis_healthchecker = RedisHealthCheck()
# invokes the method check_status()
redis_healthchecker.check_status()
assert len(redis_healthchecker.errors), 1
# mock assertions
mocked_connection.assert_called_once_with("redis://localhost/1", **{})
@mock.patch("health_check.contrib.redis.backends.getattr")
@mock.patch("health_check.contrib.redis.backends.from_url")
def test_redis_timeout_error(self, mocked_connection, mocked_getattr):
"""Test Redis TimeoutError."""
mocked_getattr.return_value = "redis_url"
# mock returns
mocked_connection.return_value = mock.MagicMock()
mocked_connection.return_value.__enter__.side_effect = TimeoutError(
"Timeout Error"
)
# instantiates the class
redis_healthchecker = RedisHealthCheck()
# invokes the method check_status()
redis_healthchecker.check_status()
assert len(redis_healthchecker.errors), 1
# mock assertions
mocked_connection.assert_called_once_with("redis://localhost/1", **{})
@mock.patch("health_check.contrib.redis.backends.getattr")
@mock.patch("health_check.contrib.redis.backends.from_url")
def test_redis_con_limit_exceeded(self, mocked_connection, mocked_getattr):
"""Test Connection Limit Exceeded error."""
mocked_getattr.return_value = "redis_url"
# mock returns
mocked_connection.return_value = mock.MagicMock()
mocked_connection.return_value.__enter__.side_effect = ConnectionError(
"Connection Error"
)
# instantiates the class
redis_healthchecker = RedisHealthCheck()
# invokes the method check_status()
redis_healthchecker.check_status()
assert len(redis_healthchecker.errors), 1
# mock assertions
mocked_connection.assert_called_once_with("redis://localhost/1", **{})
@mock.patch("health_check.contrib.redis.backends.getattr")
@mock.patch("health_check.contrib.redis.backends.from_url")
def test_redis_conn_ok(self, mocked_connection, mocked_getattr):
"""Test everything is OK."""
mocked_getattr.return_value = "redis_url"
# mock returns
mocked_connection.return_value = mock.MagicMock()
mocked_connection.return_value.__enter__.side_effect = True
# instantiates the class
redis_healthchecker = RedisHealthCheck()
# invokes the method check_status()
redis_healthchecker.check_status()
assert len(redis_healthchecker.errors), 0
# mock assertions
mocked_connection.assert_called_once_with("redis://localhost/1", **{})
|
class TestRedisHealthCheck:
'''Test Redis health check.'''
@mock.patch("health_check.contrib.redis.backends.getattr")
@mock.patch("health_check.contrib.redis.backends.from_url", autospec=True)
def test_redis_refused_connection(self, mocked_connection, mocked_getattr):
'''Test when the connection to Redis is refused.'''
pass
@mock.patch("health_check.contrib.redis.backends.getattr")
@mock.patch("health_check.contrib.redis.backends.from_url")
def test_redis_timeout_error(self, mocked_connection, mocked_getattr):
'''Test Redis TimeoutError.'''
pass
@mock.patch("health_check.contrib.redis.backends.getattr")
@mock.patch("health_check.contrib.redis.backends.from_url")
def test_redis_con_limit_exceeded(self, mocked_connection, mocked_getattr):
'''Test Connection Limit Exceeded error.'''
pass
@mock.patch("health_check.contrib.redis.backends.getattr")
@mock.patch("health_check.contrib.redis.backends.from_url")
def test_redis_conn_ok(self, mocked_connection, mocked_getattr):
'''Test everything is OK.'''
pass
| 13 | 5 | 19 | 4 | 10 | 5 | 1 | 0.45 | 0 | 5 | 1 | 0 | 4 | 0 | 4 | 4 | 88 | 20 | 47 | 13 | 34 | 21 | 33 | 9 | 28 | 1 | 0 | 0 | 4 |
143,918 |
KristianOellegaard/django-health-check
|
KristianOellegaard_django-health-check/health_check/contrib/s3boto_storage/apps.py
|
health_check.contrib.s3boto_storage.apps.HealthCheckConfig
|
class HealthCheckConfig(AppConfig):
name = "health_check.contrib.s3boto_storage"
def ready(self):
from .backends import S3BotoStorageHealthCheck
plugin_dir.register(S3BotoStorageHealthCheck)
|
class HealthCheckConfig(AppConfig):
def ready(self):
pass
| 2 | 0 | 4 | 1 | 3 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 7 | 2 | 5 | 4 | 2 | 0 | 5 | 4 | 2 | 1 | 1 | 0 | 1 |
143,919 |
KristianOellegaard/django-health-check
|
KristianOellegaard_django-health-check/health_check/contrib/s3boto_storage/backends.py
|
health_check.contrib.s3boto_storage.backends.S3BotoStorageHealthCheck
|
class S3BotoStorageHealthCheck(StorageHealthCheck):
"""
Tests the status of a `S3BotoStorage` file storage backend.
S3BotoStorage is included in the `django-storages` package
and recommended by for example Amazon and Heroku for Django
static and media file storage on cloud platforms.
``django-storages`` can be found at https://git.io/v1lGx
``S3BotoStorage`` can be found at https://git.io/v1lGF
"""
logger = logging.getLogger(__name__)
storage = "storages.backends.s3boto.S3BotoStorage"
def check_delete(self, file_name):
storage = self.get_storage()
storage.delete(file_name)
|
class S3BotoStorageHealthCheck(StorageHealthCheck):
'''
Tests the status of a `S3BotoStorage` file storage backend.
S3BotoStorage is included in the `django-storages` package
and recommended by for example Amazon and Heroku for Django
static and media file storage on cloud platforms.
``django-storages`` can be found at https://git.io/v1lGx
``S3BotoStorage`` can be found at https://git.io/v1lGF
'''
def check_delete(self, file_name):
pass
| 2 | 1 | 3 | 0 | 3 | 0 | 1 | 1.33 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 14 | 18 | 4 | 6 | 5 | 4 | 8 | 6 | 5 | 4 | 1 | 2 | 0 | 1 |
143,920 |
KristianOellegaard/django-health-check
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/KristianOellegaard_django-health-check/tests/test_storage.py
|
tests.test_storage.HealthCheckStorageTests
|
class HealthCheckStorageTests(TestCase):
"""
Tests health check behavior with a mocked storage backend.
Ensures check_status returns/raises the expected result when the storage works or raises exceptions.
"""
def test_get_storage(self):
"""Test get_storage method returns None on the base class, but a Storage instance on default."""
base_storage = StorageHealthCheck()
self.assertIsNone(base_storage.get_storage())
default_storage = DefaultFileStorageHealthCheck()
self.assertIsInstance(default_storage.get_storage(), Storage)
@unittest.skipUnless((4, 2) <= django.VERSION < (5, 0), "Only for Django 4.2 - 5.0")
def test_get_storage_django_between_42_and_50(self):
"""Check that the old DEFAULT_FILE_STORAGE setting keeps being supported."""
# Note: this test doesn't work on Django<4.2 because the setting value is
# evaluated when the class attribute DefaultFileStorageHealthCheck.store is
# read, which is at import time, before we can mock the setting.
with self.settings(DEFAULT_FILE_STORAGE="tests.test_storage.CustomStorage"):
default_storage = DefaultFileStorageHealthCheck()
self.assertIsInstance(default_storage.get_storage(), CustomStorage)
@unittest.skipUnless((4, 2) <= django.VERSION, "Django 4.2+ required")
def test_get_storage_django_42_plus(self):
"""Check that the new STORAGES setting is supported."""
with self.settings(
STORAGES={"default": {"BACKEND": "tests.test_storage.CustomStorage"}}
):
default_storage = DefaultFileStorageHealthCheck()
self.assertIsInstance(default_storage.get_storage(), CustomStorage)
@mock.patch(
"health_check.storage.backends.DefaultFileStorageHealthCheck.storage",
MockStorage(),
)
def test_check_status_working(self):
"""Test check_status returns True when storage is working properly."""
default_storage_health = DefaultFileStorageHealthCheck()
default_storage = default_storage_health.get_storage()
default_storage_open = "{}.{}.open".format(
default_storage.__module__, default_storage.__class__.__name__
)
with mock.patch(
default_storage_open,
mock.mock_open(
read_data=default_storage_health.get_file_content()),
):
self.assertTrue(default_storage_health.check_status())
@unittest.skipUnless(django.VERSION <= (4, 1), "Only for Django 4.1 and earlier")
@mock.patch(
"health_check.storage.backends.DefaultFileStorageHealthCheck.storage",
MockStorage(saves=False),
)
def test_file_does_not_exist_django_41_earlier(self):
"""Test check_status raises ServiceUnavailable when file is not saved."""
default_storage_health = DefaultFileStorageHealthCheck()
with self.assertRaises(ServiceUnavailable):
default_storage_health.check_status()
@unittest.skipUnless((4, 2) <= django.VERSION, "Only for Django 4.2+")
@mock.patch(
"health_check.storage.backends.storages",
{"default": MockStorage(saves=False)},
)
def test_file_does_not_exist_django_42_plus(self):
"""Test check_status raises ServiceUnavailable when file is not saved."""
default_storage_health = DefaultFileStorageHealthCheck()
with self.assertRaises(ServiceUnavailable):
default_storage_health.check_status()
@unittest.skipUnless(django.VERSION <= (4, 1), "Only for Django 4.1 and earlier")
@mock.patch(
"health_check.storage.backends.DefaultFileStorageHealthCheck.storage",
MockStorage(deletes=False),
)
def test_file_not_deleted_django_41_earlier(self):
"""Test check_status raises ServiceUnavailable when file is not deleted."""
default_storage_health = DefaultFileStorageHealthCheck()
with self.assertRaises(ServiceUnavailable):
default_storage_health.check_status()
@unittest.skipUnless((4, 2) <= django.VERSION, "Only for Django 4.2+")
@mock.patch(
"health_check.storage.backends.storages",
{"default": MockStorage(deletes=False)},
)
def test_file_not_deleted_django_42_plus(self):
"""Test check_status raises ServiceUnavailable when file is not deleted."""
default_storage_health = DefaultFileStorageHealthCheck()
with self.assertRaises(ServiceUnavailable):
default_storage_health.check_status()
|
class HealthCheckStorageTests(TestCase):
'''
Tests health check behavior with a mocked storage backend.
Ensures check_status returns/raises the expected result when the storage works or raises exceptions.
'''
def test_get_storage(self):
'''Test get_storage method returns None on the base class, but a Storage instance on default.'''
pass
@unittest.skipUnless((4, 2) <= django.VERSION < (5, 0), "Only for Django 4.2 - 5.0")
def test_get_storage_django_between_42_and_50(self):
'''Check that the old DEFAULT_FILE_STORAGE setting keeps being supported.'''
pass
@unittest.skipUnless((4, 2) <= django.VERSION, "Django 4.2+ required")
def test_get_storage_django_42_plus(self):
'''Check that the new STORAGES setting is supported.'''
pass
@mock.patch(
"health_check.storage.backends.DefaultFileStorageHealthCheck.storage",
MockStorage(),
)
def test_check_status_working(self):
'''Test check_status returns True when storage is working properly.'''
pass
@unittest.skipUnless(django.VERSION <= (4, 1), "Only for Django 4.1 and earlier")
@mock.patch(
"health_check.storage.backends.DefaultFileStorageHealthCheck.storage",
MockStorage(saves=False),
)
def test_file_does_not_exist_django_41_earlier(self):
'''Test check_status raises ServiceUnavailable when file is not saved.'''
pass
@unittest.skipUnless((4, 2) <= django.VERSION, "Only for Django 4.2+")
@mock.patch(
"health_check.storage.backends.storages",
{"default": MockStorage(saves=False)},
)
def test_file_does_not_exist_django_42_plus(self):
'''Test check_status raises ServiceUnavailable when file is not saved.'''
pass
@unittest.skipUnless(django.VERSION <= (4, 1), "Only for Django 4.1 and earlier")
@mock.patch(
"health_check.storage.backends.DefaultFileStorageHealthCheck.storage",
MockStorage(deletes=False),
)
def test_file_not_deleted_django_41_earlier(self):
'''Test check_status raises ServiceUnavailable when file is not deleted.'''
pass
@unittest.skipUnless((4, 2) <= django.VERSION, "Only for Django 4.2+")
@mock.patch(
"health_check.storage.backends.storages",
{"default": MockStorage(deletes=False)},
)
def test_file_not_deleted_django_42_plus(self):
'''Test check_status raises ServiceUnavailable when file is not deleted.'''
pass
| 20 | 9 | 7 | 1 | 5 | 1 | 1 | 0.22 | 1 | 4 | 4 | 0 | 8 | 0 | 8 | 8 | 96 | 12 | 69 | 30 | 34 | 15 | 36 | 20 | 27 | 1 | 1 | 1 | 8 |
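HealthCheckStorageTests switches between the legacy DEFAULT_FILE_STORAGE setting and the Django 4.2+ STORAGES dict. The two equivalent forms used by the tests, shown side by side; the CustomStorage path is taken straight from the tests, use whichever matches your Django version:

# Django 4.1 and earlier (still supported on 4.2 up to 5.0 per the test above)
DEFAULT_FILE_STORAGE = "tests.test_storage.CustomStorage"

# Django 4.2 and later
STORAGES = {
    "default": {"BACKEND": "tests.test_storage.CustomStorage"},
}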
143,921 |
KristianOellegaard/django-health-check
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/KristianOellegaard_django-health-check/tests/test_views.py
|
tests.test_views.TestMainView.test_warning.MyBackend
|
class MyBackend(BaseHealthCheckBackend):
def check_status(self):
raise ServiceWarning("so so")
|
class MyBackend(BaseHealthCheckBackend):
def check_status(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 8 | 3 | 0 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
143,922 |
KristianOellegaard/django-health-check
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/KristianOellegaard_django-health-check/tests/test_views.py
|
tests.test_views.TestMainView.test_error.MyBackend
|
class MyBackend(BaseHealthCheckBackend):
def check_status(self):
self.add_error("Super Fail!")
|
class MyBackend(BaseHealthCheckBackend):
def check_status(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 8 | 3 | 0 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
143,923 |
KristianOellegaard/django-health-check
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/KristianOellegaard_django-health-check/tests/test_views.py
|
tests.test_views.TestMainView.test_success_unsupported_and_supported_accept.SuccessBackend
|
class SuccessBackend(BaseHealthCheckBackend):
def run_check(self):
pass
|
class SuccessBackend(BaseHealthCheckBackend):
def run_check(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 8 | 3 | 0 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
143,924 |
KristianOellegaard/django-health-check
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/KristianOellegaard_django-health-check/tests/test_views.py
|
tests.test_views.TestMainView.test_success_subset_define.SuccessTwoBackend
|
class SuccessTwoBackend(BaseHealthCheckBackend):
def run_check(self):
pass
|
class SuccessTwoBackend(BaseHealthCheckBackend):
def run_check(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 8 | 3 | 0 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
143,925 |
KristianOellegaard/django-health-check
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/KristianOellegaard_django-health-check/tests/test_mixins.py
|
tests.test_mixins.TestCheckMixin
|
class TestCheckMixin:
@pytest.fixture(autouse=True)
def setup(self):
plugin_dir.reset()
plugin_dir.register(FailPlugin)
plugin_dir.register(OkPlugin)
yield
plugin_dir.reset()
@pytest.mark.parametrize("disable_threading", [(True,), (False,)])
def test_plugins(self, monkeypatch, disable_threading):
monkeypatch.setitem(
HEALTH_CHECK, "DISABLE_THREADING", disable_threading)
assert len(Checker().plugins) == 2
@pytest.mark.parametrize("disable_threading", [(True,), (False,)])
def test_errors(self, monkeypatch, disable_threading):
monkeypatch.setitem(
HEALTH_CHECK, "DISABLE_THREADING", disable_threading)
assert len(Checker().errors) == 1
@pytest.mark.parametrize("disable_threading", [(True,), (False,)])
def test_run_check(self, monkeypatch, disable_threading):
monkeypatch.setitem(
HEALTH_CHECK, "DISABLE_THREADING", disable_threading)
assert len(Checker().run_check()) == 1
def test_run_check_threading_enabled(self, monkeypatch):
"""Ensure threading used when not disabled."""
# Ensure threading is enabled.
monkeypatch.setitem(HEALTH_CHECK, "DISABLE_THREADING", False)
# Ensure ThreadPoolExecutor is used
with patch("health_check.mixins.ThreadPoolExecutor") as tpe:
Checker().run_check()
tpe.assert_called()
def test_run_check_threading_disabled(self, monkeypatch):
"""Ensure threading not used when disabled."""
# Ensure threading is disabled.
monkeypatch.setitem(HEALTH_CHECK, "DISABLE_THREADING", True)
# Ensure ThreadPoolExecutor is not used
with patch("health_check.mixins.ThreadPoolExecutor") as tpe:
Checker().run_check()
tpe.assert_not_called()
|
class TestCheckMixin:
@pytest.fixture(autouse=True)
def setup(self):
pass
@pytest.mark.parametrize("disable_threading", [(True,), (False,)])
def test_plugins(self, monkeypatch, disable_threading):
pass
@pytest.mark.parametrize("disable_threading", [(True,), (False,)])
def test_errors(self, monkeypatch, disable_threading):
pass
@pytest.mark.parametrize("disable_threading", [(True,), (False,)])
def test_run_check(self, monkeypatch, disable_threading):
pass
def test_run_check_threading_enabled(self, monkeypatch):
'''Ensure threading used when not disabled.'''
pass
def test_run_check_threading_disabled(self, monkeypatch):
'''Ensure threading not used when disabled.'''
pass
| 11 | 2 | 6 | 1 | 4 | 1 | 1 | 0.2 | 0 | 3 | 3 | 0 | 6 | 0 | 6 | 6 | 48 | 12 | 30 | 13 | 19 | 6 | 26 | 7 | 19 | 1 | 0 | 1 | 6 |
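TestCheckMixin drives a bare Checker(CheckMixin) directly through .plugins, .errors and .run_check(), toggling HEALTH_CHECK["DISABLE_THREADING"] to choose between threaded and sequential execution. A hedged sketch of using the mixin outside a view in the same way; the health_check.mixins import path is assumed from the patch targets above:

from health_check.mixins import CheckMixin


class Checker(CheckMixin):
    pass


def healthy():
    errors = Checker().run_check()  # runs every registered plugin, returns collected errors
    return len(errors) == 0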
143,926 |
KristianOellegaard/django-health-check
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/KristianOellegaard_django-health-check/tests/test_views.py
|
tests.test_views.TestMainView.test_success_subset_define.SuccessOneBackend
|
class SuccessOneBackend(BaseHealthCheckBackend):
def run_check(self):
pass
|
class SuccessOneBackend(BaseHealthCheckBackend):
def run_check(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 8 | 3 | 0 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
143,927 |
KristianOellegaard/django-health-check
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/KristianOellegaard_django-health-check/tests/test_views.py
|
tests.test_views.TestMainView.test_success_prefer_json.JSONSuccessBackend
|
class JSONSuccessBackend(BaseHealthCheckBackend):
def run_check(self):
pass
|
class JSONSuccessBackend(BaseHealthCheckBackend):
def run_check(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 8 | 3 | 0 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
143,928 |
KristianOellegaard/django-health-check
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/KristianOellegaard_django-health-check/tests/test_storage.py
|
tests.test_storage.HealthCheckS3Boto3StorageTests
|
class HealthCheckS3Boto3StorageTests(TestCase):
"""
Tests health check behavior with a mocked S3Boto3Storage backend.
"""
@unittest.skipUnless(django.VERSION <= (4, 1), "Only for Django 4.1 and earlier")
@mock.patch(
"storages.backends.s3boto3.S3Boto3Storage",
MockS3Boto3Storage(deletes=False),
)
def test_check_delete_success_django_41_earlier(self):
"""Test that check_delete correctly deletes a file when S3Boto3Storage is working."""
health_check = S3Boto3StorageHealthCheck()
mock_storage = health_check.get_storage()
file_name = "testfile.txt"
content = BytesIO(b"Test content")
mock_storage.save(file_name, content)
with self.assertRaises(ServiceUnavailable):
health_check.check_delete(file_name)
@unittest.skipUnless((4, 2) <= django.VERSION, "Only for Django 4.2+")
def test_check_delete_success(self):
"""Test that check_delete correctly deletes a file when S3Boto3Storage is working."""
health_check = S3Boto3StorageHealthCheck()
mock_storage = health_check.get_storage()
file_name = "testfile.txt"
content = BytesIO(b"Test content")
mock_storage.save(file_name, content)
health_check.check_delete(file_name)
self.assertFalse(mock_storage.exists(file_name))
def test_check_delete_failure(self):
"""Test that check_delete raises ServiceUnavailable when deletion fails."""
with mock.patch.object(
MockS3Boto3Storage,
"delete",
side_effect=Exception("Failed to delete file."),
):
health_check = S3Boto3StorageHealthCheck()
with self.assertRaises(ServiceUnavailable):
health_check.check_delete("testfile.txt")
@unittest.skipUnless(django.VERSION <= (4, 1), "Only for Django 4.1 and earlier")
@mock.patch(
"storages.backends.s3boto3.S3Boto3Storage",
MockS3Boto3Storage(deletes=False),
)
def test_check_status_working_django_41_earlier(self):
"""Test check_status returns True when S3Boto3Storage can save and delete files."""
health_check = S3Boto3StorageHealthCheck()
with self.assertRaises(ServiceUnavailable):
self.assertTrue(health_check.check_status())
@unittest.skipUnless((4, 2) <= django.VERSION, "Only for Django 4.2+")
def test_check_status_working(self):
"""Test check_status returns True when S3Boto3Storage can save and delete files."""
health_check = S3Boto3StorageHealthCheck()
self.assertTrue(health_check.check_status())
def test_check_status_failure_on_save(self):
"""Test check_status raises ServiceUnavailable when file cannot be saved."""
with mock.patch.object(
MockS3Boto3Storage, "save", side_effect=Exception("Failed to save file.")
):
health_check = S3Boto3StorageHealthCheck()
with self.assertRaises(ServiceUnavailable):
health_check.check_status()
def test_check_status_failure_on_delete(self):
"""Test check_status raises ServiceUnavailable when file cannot be deleted."""
with mock.patch.object(
MockS3Boto3Storage, "exists", new_callable=mock.PropertyMock
) as mock_exists:
mock_exists.return_value = False
health_check = S3Boto3StorageHealthCheck()
with self.assertRaises(ServiceUnavailable):
health_check.check_status()
|
class HealthCheckS3Boto3StorageTests(TestCase):
'''
Tests health check behavior with a mocked S3Boto3Storage backend.
'''
@unittest.skipUnless(django.VERSION <= (4, 1), "Only for Django 4.1 and earlier")
@mock.patch(
"storages.backends.s3boto3.S3Boto3Storage",
MockS3Boto3Storage(deletes=False),
)
def test_check_delete_success_django_41_earlier(self):
'''Test that check_delete correctly deletes a file when S3Boto3Storage is working.'''
pass
@unittest.skipUnless((4, 2) <= django.VERSION, "Only for Django 4.2+")
def test_check_delete_success(self):
'''Test that check_delete correctly deletes a file when S3Boto3Storage is working.'''
pass
def test_check_delete_failure(self):
'''Test that check_delete raises ServiceUnavailable when deletion fails.'''
pass
@unittest.skipUnless(django.VERSION <= (4, 1), "Only for Django 4.1 and earlier")
@mock.patch(
"storages.backends.s3boto3.S3Boto3Storage",
MockS3Boto3Storage(deletes=False),
)
def test_check_status_working_django_41_earlier(self):
'''Test check_status returns True when S3Boto3Storage can save and delete files.'''
pass
@unittest.skipUnless((4, 2) <= django.VERSION, "Only for Django 4.2+")
def test_check_status_working(self):
'''Test check_status returns True when S3Boto3Storage can save and delete files.'''
pass
def test_check_status_failure_on_save(self):
'''Test check_status raises ServiceUnavailable when file cannot be saved.'''
pass
def test_check_status_failure_on_delete(self):
'''Test check_status raises ServiceUnavailable when file cannot be deleted.'''
pass
| 14 | 8 | 8 | 0 | 7 | 1 | 1 | 0.17 | 1 | 5 | 3 | 0 | 7 | 0 | 7 | 7 | 79 | 9 | 60 | 26 | 40 | 10 | 40 | 21 | 32 | 1 | 1 | 2 | 7 |
143,929 |
KristianOellegaard/django-health-check
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/KristianOellegaard_django-health-check/tests/test_views.py
|
tests.test_views.TestMediaType
|
class TestMediaType:
def test_lt(self):
assert not MediaType("*/*") < MediaType("*/*")
assert not MediaType("*/*") < MediaType("*/*", 0.9)
assert MediaType("*/*", 0.9) < MediaType("*/*")
def test_str(self):
assert str(MediaType("*/*")) == "*/*; q=1.0"
assert str(MediaType("image/*", 0.6)) == "image/*; q=0.6"
def test_repr(self):
assert repr(MediaType("*/*")) == "MediaType: */*; q=1.0"
def test_eq(self):
assert MediaType("*/*") == MediaType("*/*")
assert MediaType("*/*", 0.9) != MediaType("*/*")
valid_strings = [
("*/*", MediaType("*/*")),
("*/*; q=0.9", MediaType("*/*", 0.9)),
("*/*; q=0", MediaType("*/*", 0.0)),
("*/*; q=0.0", MediaType("*/*", 0.0)),
("*/*; q=0.1", MediaType("*/*", 0.1)),
("*/*; q=0.12", MediaType("*/*", 0.12)),
("*/*; q=0.123", MediaType("*/*", 0.123)),
("*/*; q=1.000", MediaType("*/*", 1.0)),
("*/*; q=1", MediaType("*/*", 1.0)),
("*/*;q=0.9", MediaType("*/*", 0.9)),
("*/* ;q=0.9", MediaType("*/*", 0.9)),
("*/* ; q=0.9", MediaType("*/*", 0.9)),
("*/* ; q=0.9", MediaType("*/*", 0.9)),
("*/*;v=b3", MediaType("*/*")),
("*/*; q=0.5; v=b3", MediaType("*/*", 0.5)),
]
@pytest.mark.parametrize("type, expected", valid_strings)
def test_from_valid_strings(self, type, expected):
assert MediaType.from_string(type) == expected
invalid_strings = [
"*/*;0.9",
'text/html;z=""',
"text/html; xxx",
"text/html; =a",
]
@pytest.mark.parametrize("type", invalid_strings)
def test_from_invalid_strings(self, type):
with pytest.raises(ValueError) as e:
MediaType.from_string(type)
expected_error = '"%s" is not a valid media type' % type
assert expected_error in str(e.value)
def test_parse_header(self):
assert list(MediaType.parse_header()) == [
MediaType("*/*"),
]
assert list(
MediaType.parse_header(
"text/html; q=0.1, application/xhtml+xml; q=0.1 ,application/json"
)
) == [
MediaType("application/json"),
MediaType("text/html", 0.1),
MediaType("application/xhtml+xml", 0.1),
]
|
class TestMediaType:
def test_lt(self):
pass
def test_str(self):
pass
def test_repr(self):
pass
def test_eq(self):
pass
@pytest.mark.parametrize("type, expected", valid_strings)
def test_from_valid_strings(self, type, expected):
pass
@pytest.mark.parametrize("type", invalid_strings)
def test_from_invalid_strings(self, type):
pass
def test_parse_header(self):
pass
| 10 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 0 | 4 | 1 | 0 | 7 | 0 | 7 | 7 | 66 | 8 | 58 | 14 | 48 | 0 | 25 | 11 | 17 | 1 | 0 | 1 | 7 |
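TestMediaType pins down the Accept-header parsing used by the main view: q-values order the entries and MediaType.parse_header yields the best match first. A hedged usage sketch; the import path is assumed (the test module imports MediaType alongside the views):

from health_check.views import MediaType

accept = "text/html; q=0.1, application/xhtml+xml; q=0.1, application/json"
for media_type in MediaType.parse_header(accept):
    print(media_type)
# application/json; q=1.0 comes first, then the two q=0.1 entries, matching test_parse_header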
143,930 |
KristianOellegaard/django-health-check
|
KristianOellegaard_django-health-check/tests/test_mixins.py
|
tests.test_mixins.Checker
|
class Checker(CheckMixin):
pass
|
class Checker(CheckMixin):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 1 | 0 | 0 |
143,931 |
KristianOellegaard/django-health-check
|
KristianOellegaard_django-health-check/health_check/contrib/psutil/backends.py
|
health_check.contrib.psutil.backends.MemoryUsage
|
class MemoryUsage(BaseHealthCheckBackend):
def check_status(self):
try:
memory = psutil.virtual_memory()
if MEMORY_MIN and memory.available < (MEMORY_MIN * 1024 * 1024):
locale.setlocale(locale.LC_ALL, "")
avail = "{:n}".format(int(memory.available / 1024 / 1024))
threshold = "{:n}".format(MEMORY_MIN)
raise ServiceWarning(
"{host} {avail} MB available RAM below {threshold} MB".format(
host=host, avail=avail, threshold=threshold
)
)
except ValueError as e:
self.add_error(ServiceReturnedUnexpectedResult("ValueError"), e)
|
class MemoryUsage(BaseHealthCheckBackend):
def check_status(self):
pass
| 2 | 0 | 14 | 0 | 14 | 0 | 3 | 0 | 1 | 4 | 2 | 0 | 1 | 0 | 1 | 8 | 15 | 0 | 15 | 6 | 13 | 0 | 11 | 5 | 9 | 3 | 1 | 2 | 3 |
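MemoryUsage compares psutil.virtual_memory().available against a MEMORY_MIN threshold (in MB) and raises ServiceWarning below it. A hedged configuration sketch; MEMORY_MIN is grounded in the backend above, while DISK_USAGE_MAX is assumed to be its companion disk-usage setting:

# settings.py -- hedged example for the psutil contrib checks
INSTALLED_APPS = [
    # ...
    "health_check",
    "health_check.contrib.psutil",
]

HEALTH_CHECK = {
    "MEMORY_MIN": 100,      # MB; ServiceWarning below this much available RAM
    "DISK_USAGE_MAX": 90,   # percent; assumed companion setting, not shown in this record
}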
143,932 |
KristianOellegaard/django-health-check
|
KristianOellegaard_django-health-check/tests/test_plugins.py
|
tests.test_plugins.Plugin
|
class Plugin(BaseHealthCheckBackend):
def check_status(self):
pass
|
class Plugin(BaseHealthCheckBackend):
def check_status(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 8 | 3 | 0 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
143,933 |
KristianOellegaard/django-health-check
|
KristianOellegaard_django-health-check/tests/test_storage.py
|
tests.test_storage.CustomStorage
|
class CustomStorage(Storage):
pass
|
class CustomStorage(Storage):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 1 | 0 | 0 |
143,934 |
KristianOellegaard/django-health-check
|
KristianOellegaard_django-health-check/tests/test_storage.py
|
tests.test_storage.MockS3Boto3Storage
|
class MockS3Boto3Storage:
"""
A mock S3Boto3Storage backend to simulate interactions with AWS S3.
"""
def __init__(self, saves=True, deletes=True):
self.saves = saves
self.deletes = deletes
self.files = {}
def open(self, name, mode="rb"):
"""
Simulates opening a file from the mocked S3 storage.
For simplicity, this doesn't differentiate between read and write modes.
"""
if name in self.files:
# Assuming file content is stored as bytes
file_content = self.files[name]
if isinstance(file_content, bytes):
return File(BytesIO(file_content))
else:
raise ValueError("File content must be bytes.")
else:
raise FileNotFoundError(f"The file {name} does not exist.")
def save(self, name, content):
"""
Overriding save to ensure content is stored as bytes in a way compatible with open method.
Assumes content is either a ContentFile, bytes, or a string that needs conversion.
"""
if self.saves:
# Check if content is a ContentFile or similar and read bytes
if hasattr(content, "read"):
file_content = content.read()
elif isinstance(content, bytes):
file_content = content
elif isinstance(content, str):
file_content = content.encode() # Convert string to bytes
else:
raise ValueError("Unsupported file content type.")
self.files[name] = file_content
return name
raise Exception("Failed to save file.")
def delete(self, name):
if self.deletes:
self.files.pop(name, None)
else:
raise Exception("Failed to delete file.")
def exists(self, name):
return name in self.files
|
class MockS3Boto3Storage:
'''
A mock S3Boto3Storage backend to simulate interactions with AWS S3.
'''
def __init__(self, saves=True, deletes=True):
pass
def open(self, name, mode="rb"):
'''
Simulates opening a file from the mocked S3 storage.
For simplicity, this doesn't differentiate between read and write modes.
'''
pass
def save(self, name, content):
'''
Overriding save to ensure content is stored as bytes in a way compatible with open method.
Assumes content is either a ContentFile, bytes, or a string that needs conversion.
'''
pass
def delete(self, name):
pass
def exists(self, name):
pass
| 6 | 3 | 9 | 0 | 7 | 2 | 2 | 0.41 | 0 | 5 | 0 | 0 | 5 | 3 | 5 | 5 | 53 | 6 | 34 | 11 | 28 | 14 | 28 | 11 | 22 | 5 | 0 | 2 | 12 |
143,935 |
KristianOellegaard/django-health-check
|
KristianOellegaard_django-health-check/tests/test_storage.py
|
tests.test_storage.MockStorage
|
class MockStorage(Storage):
"""
A Mock Storage backend used for testing.
saves - Determines whether save will mock a successful or unsuccessful save
deletes - Determines whether save will mock a successful or unsuccessful deletion
"""
MOCK_FILE_COUNT = 0
saves = None
deletes = None
def __init__(self, saves=True, deletes=True):
super(MockStorage, self).__init__()
self.MOCK_FILE_COUNT = 0
self.saves = saves
self.deletes = deletes
def exists(self, file_name):
return self.MOCK_FILE_COUNT != 0
def delete(self, name):
if self.deletes:
self.MOCK_FILE_COUNT -= 1
def save(self, name, content, max_length=None):
if self.saves:
self.MOCK_FILE_COUNT += 1
|
class MockStorage(Storage):
'''
A Mock Storage backend used for testing.
saves - Determines whether save will mock a successful or unsuccessful save
deletes - Determines whether save will mock a successful or unsuccessful deletion
'''
def __init__(self, saves=True, deletes=True):
pass
def exists(self, file_name):
pass
def delete(self, name):
pass
def save(self, name, content, max_length=None):
pass
| 5 | 1 | 3 | 0 | 3 | 0 | 2 | 0.29 | 1 | 1 | 0 | 0 | 4 | 0 | 4 | 4 | 27 | 5 | 17 | 8 | 12 | 5 | 17 | 8 | 12 | 2 | 1 | 1 | 6 |
143,936 |
KristianOellegaard/django-health-check
|
KristianOellegaard_django-health-check/tests/test_views.py
|
tests.test_views.TestMainView
|
class TestMainView:
url = reverse("health_check:health_check_home")
def test_success(self, client):
response = client.get(self.url)
assert response.status_code == 200, response.content.decode("utf-8")
assert response["content-type"] == "text/html; charset=utf-8"
def test_error(self, client):
class MyBackend(BaseHealthCheckBackend):
def check_status(self):
self.add_error("Super Fail!")
plugin_dir.reset()
plugin_dir.register(MyBackend)
response = client.get(self.url)
assert response.status_code == 500, response.content.decode("utf-8")
assert response["content-type"] == "text/html; charset=utf-8"
assert b"Super Fail!" in response.content
def test_warning(self, client):
class MyBackend(BaseHealthCheckBackend):
def check_status(self):
raise ServiceWarning("so so")
plugin_dir.reset()
        plugin_dir.register(MyBackend)
        response = client.get(self.url)
        assert response.status_code == 500, response.content.decode("utf-8")
        assert b"so so" in response.content, response.content
        HEALTH_CHECK["WARNINGS_AS_ERRORS"] = False
        response = client.get(self.url)
        assert response.status_code == 200, response.content.decode("utf-8")
        assert response["content-type"] == "text/html; charset=utf-8"
        assert b"so so" in response.content, response.content
    def test_non_critical(self, client):
        class MyBackend(BaseHealthCheckBackend):
            critical_service = False
            def check_status(self):
                self.add_error("Super Fail!")
        plugin_dir.reset()
        plugin_dir.register(MyBackend)
        response = client.get(self.url)
        assert response.status_code == 200, response.content.decode("utf-8")
        assert response["content-type"] == "text/html; charset=utf-8"
        assert b"Super Fail!" in response.content
    def test_success_accept_json(self, client):
        class JSONSuccessBackend(BaseHealthCheckBackend):
            def run_check(self):
                pass
        plugin_dir.reset()
        plugin_dir.register(JSONSuccessBackend)
        response = client.get(self.url, HTTP_ACCEPT="application/json")
        assert response["content-type"] == "application/json"
        assert response.status_code == 200
    def test_success_prefer_json(self, client):
        class JSONSuccessBackend(BaseHealthCheckBackend):
            def run_check(self):
                pass
        plugin_dir.reset()
        plugin_dir.register(JSONSuccessBackend)
        response = client.get(
            self.url, HTTP_ACCEPT="application/json; q=0.8, text/html; q=0.5"
        )
        assert response["content-type"] == "application/json"
        assert response.status_code == 200
    def test_success_accept_xhtml(self, client):
        class SuccessBackend(BaseHealthCheckBackend):
            def run_check(self):
                pass
        plugin_dir.reset()
        plugin_dir.register(SuccessBackend)
        response = client.get(self.url, HTTP_ACCEPT="application/xhtml+xml")
        assert response["content-type"] == "text/html; charset=utf-8"
        assert response.status_code == 200
    def test_success_unsupported_accept(self, client):
        class SuccessBackend(BaseHealthCheckBackend):
            def run_check(self):
                pass
        plugin_dir.reset()
        plugin_dir.register(SuccessBackend)
        response = client.get(self.url, HTTP_ACCEPT="application/octet-stream")
        assert response["content-type"] == "text/plain"
        assert response.status_code == 406
        assert (
            response.content
            == b"Not Acceptable: Supported content types: text/html, application/json"
        )
    def test_success_unsupported_and_supported_accept(self, client):
        class SuccessBackend(BaseHealthCheckBackend):
            def run_check(self):
                pass
        plugin_dir.reset()
        plugin_dir.register(SuccessBackend)
        response = client.get(
            self.url, HTTP_ACCEPT="application/octet-stream, application/json; q=0.9"
        )
        assert response["content-type"] == "application/json"
        assert response.status_code == 200
    def test_success_accept_order(self, client):
        class JSONSuccessBackend(BaseHealthCheckBackend):
            def run_check(self):
                pass
        plugin_dir.reset()
        plugin_dir.register(JSONSuccessBackend)
        response = client.get(
            self.url,
            HTTP_ACCEPT="text/html, application/xhtml+xml, application/json; q=0.9, */*; q=0.1",
        )
        assert response["content-type"] == "text/html; charset=utf-8"
        assert response.status_code == 200
    def test_success_accept_order__reverse(self, client):
        class JSONSuccessBackend(BaseHealthCheckBackend):
            def run_check(self):
                pass
        plugin_dir.reset()
        plugin_dir.register(JSONSuccessBackend)
        response = client.get(
            self.url,
            HTTP_ACCEPT="text/html; q=0.1, application/xhtml+xml; q=0.1, application/json",
        )
        assert response["content-type"] == "application/json"
        assert response.status_code == 200
    def test_format_override(self, client):
        class JSONSuccessBackend(BaseHealthCheckBackend):
            def run_check(self):
                pass
        plugin_dir.reset()
        plugin_dir.register(JSONSuccessBackend)
        response = client.get(self.url + "?format=json", HTTP_ACCEPT="text/html")
        assert response["content-type"] == "application/json"
        assert response.status_code == 200
    def test_format_no_accept_header(self, client):
        class JSONSuccessBackend(BaseHealthCheckBackend):
            def run_check(self):
                pass
        plugin_dir.reset()
        plugin_dir.register(JSONSuccessBackend)
        response = client.get(self.url)
        assert response.status_code == 200, response.content.decode("utf-8")
        assert response["content-type"] == "text/html; charset=utf-8"
    def test_error_accept_json(self, client):
        class JSONErrorBackend(BaseHealthCheckBackend):
            def run_check(self):
                self.add_error("JSON Error")
        plugin_dir.reset()
        plugin_dir.register(JSONErrorBackend)
        response = client.get(self.url, HTTP_ACCEPT="application/json")
        assert response.status_code == 500, response.content.decode("utf-8")
        assert response["content-type"] == "application/json"
        assert (
            "JSON Error"
            in json.loads(response.content.decode("utf-8"))[
                JSONErrorBackend().identifier()
            ]
        )
    def test_success_param_json(self, client):
        class JSONSuccessBackend(BaseHealthCheckBackend):
            def run_check(self):
                pass
        plugin_dir.reset()
        plugin_dir.register(JSONSuccessBackend)
        response = client.get(self.url, {"format": "json"})
        assert response.status_code == 200, response.content.decode("utf-8")
        assert response["content-type"] == "application/json"
        assert json.loads(response.content.decode("utf-8")) == {
            JSONSuccessBackend().identifier(): JSONSuccessBackend().pretty_status()
        }
    def test_success_subset_define(self, client):
        class SuccessOneBackend(BaseHealthCheckBackend):
            def run_check(self):
                pass
        class SuccessTwoBackend(BaseHealthCheckBackend):
            def run_check(self):
                pass
        plugin_dir.reset()
        plugin_dir.register(SuccessOneBackend)
        plugin_dir.register(SuccessTwoBackend)
        HEALTH_CHECK["SUBSETS"] = {
            "startup-probe": ["SuccessOneBackend", "SuccessTwoBackend"],
            "liveness-probe": ["SuccessTwoBackend"],
        }
        response_startup_probe = client.get(
            self.url + "startup-probe/", {"format": "json"}
        )
        assert (
            response_startup_probe.status_code == 200
        ), response_startup_probe.content.decode("utf-8")
        assert response_startup_probe["content-type"] == "application/json"
        assert json.loads(response_startup_probe.content.decode("utf-8")) == {
            SuccessOneBackend().identifier(): SuccessOneBackend().pretty_status(),
            SuccessTwoBackend().identifier(): SuccessTwoBackend().pretty_status(),
        }
        response_liveness_probe = client.get(
            self.url + "liveness-probe/", {"format": "json"}
        )
        assert (
            response_liveness_probe.status_code == 200
        ), response_liveness_probe.content.decode("utf-8")
        assert response_liveness_probe["content-type"] == "application/json"
        assert json.loads(response_liveness_probe.content.decode("utf-8")) == {
            SuccessTwoBackend().identifier(): SuccessTwoBackend().pretty_status(),
        }
    def test_error_subset_not_found(self, client):
        plugin_dir.reset()
        response = client.get(self.url + "liveness-probe/", {"format": "json"})
        print(f"content: {response.content}")
        print(f"code: {response.status_code}")
        assert response.status_code == 404, response.content.decode("utf-8")
    def test_error_param_json(self, client):
        class JSONErrorBackend(BaseHealthCheckBackend):
            def run_check(self):
                self.add_error("JSON Error")
        plugin_dir.reset()
        plugin_dir.register(JSONErrorBackend)
        response = client.get(self.url, {"format": "json"})
        assert response.status_code == 500, response.content.decode("utf-8")
        assert response["content-type"] == "application/json"
        assert (
            "JSON Error"
            in json.loads(response.content.decode("utf-8"))[
                JSONErrorBackend().identifier()
            ]
        )
|
class TestMainView:
    def test_success(self, client):
        pass
    def test_error(self, client):
        pass
    class MyBackend(BaseHealthCheckBackend):
        def check_status(self):
            pass
    def test_warning(self, client):
        pass
    class MyBackend(BaseHealthCheckBackend):
        def check_status(self):
            pass
    def test_non_critical(self, client):
        pass
    class MyBackend(BaseHealthCheckBackend):
        def check_status(self):
            pass
    def test_success_accept_json(self, client):
        pass
    class JSONSuccessBackend(BaseHealthCheckBackend):
        def run_check(self):
            pass
    def test_success_prefer_json(self, client):
        pass
    class JSONSuccessBackend(BaseHealthCheckBackend):
        def run_check(self):
            pass
    def test_success_accept_xhtml(self, client):
        pass
    class SuccessBackend(BaseHealthCheckBackend):
        def run_check(self):
            pass
    def test_success_unsupported_accept(self, client):
        pass
    class SuccessBackend(BaseHealthCheckBackend):
        def run_check(self):
            pass
    def test_success_unsupported_and_supported_accept(self, client):
        pass
    class SuccessBackend(BaseHealthCheckBackend):
        def run_check(self):
            pass
    def test_success_accept_order(self, client):
        pass
    class JSONSuccessBackend(BaseHealthCheckBackend):
        def run_check(self):
            pass
    def test_success_accept_order__reverse(self, client):
        pass
    class JSONSuccessBackend(BaseHealthCheckBackend):
        def run_check(self):
            pass
    def test_format_override(self, client):
        pass
    class JSONSuccessBackend(BaseHealthCheckBackend):
        def run_check(self):
            pass
    def test_format_no_accept_header(self, client):
        pass
    class JSONSuccessBackend(BaseHealthCheckBackend):
        def run_check(self):
            pass
    def test_error_accept_json(self, client):
        pass
    class JSONErrorBackend(BaseHealthCheckBackend):
        def run_check(self):
            pass
    def test_success_param_json(self, client):
        pass
    class JSONSuccessBackend(BaseHealthCheckBackend):
        def run_check(self):
            pass
    def test_success_subset_define(self, client):
        pass
    class SuccessOneBackend(BaseHealthCheckBackend):
        def run_check(self):
            pass
    class SuccessTwoBackend(BaseHealthCheckBackend):
        def run_check(self):
            pass
    def test_error_subset_not_found(self, client):
        pass
    def test_error_param_json(self, client):
        pass
    class JSONErrorBackend(BaseHealthCheckBackend):
        def run_check(self):
            pass
| 53 | 0 | 8 | 1 | 7 | 0 | 1 | 0 | 0 | 17 | 17 | 0 | 18 | 0 | 18 | 18 | 260 | 41 | 219 | 74 | 166 | 0 | 178 | 74 | 125 | 1 | 0 | 0 | 35 |
143,937 |
KristianOellegaard/django-health-check
|
KristianOellegaard_django-health-check/health_check/cache/apps.py
|
health_check.cache.apps.HealthCheckConfig
|
class HealthCheckConfig(AppConfig):
    name = "health_check.cache"
    def ready(self):
        from .backends import CacheBackend
        for backend in settings.CACHES:
            plugin_dir.register(CacheBackend, backend=backend)
|
class HealthCheckConfig(AppConfig):
    def ready(self):
        pass
| 2 | 0 | 5 | 1 | 4 | 0 | 2 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 8 | 2 | 6 | 5 | 3 | 0 | 6 | 5 | 3 | 2 | 1 | 1 | 2 |
143,938 |
KristianOellegaard/django-health-check
|
KristianOellegaard_django-health-check/health_check/cache/backends.py
|
health_check.cache.backends.CacheBackend
|
class CacheBackend(BaseHealthCheckBackend):
    def __init__(self, backend="default"):
        super().__init__()
        self.backend = backend
        self.cache_key = getattr(
            settings, "HEALTHCHECK_CACHE_KEY", "djangohealthcheck_test"
        )
    def identifier(self):
        return f"Cache backend: {self.backend}"
    def check_status(self):
        cache = caches[self.backend]
        try:
            cache.set(self.cache_key, "itworks")
            if not cache.get(self.cache_key) == "itworks":
                raise ServiceUnavailable(f"Cache key {self.cache_key} does not match")
        except CacheKeyWarning as e:
            self.add_error(ServiceReturnedUnexpectedResult("Cache key warning"), e)
        except ValueError as e:
            self.add_error(ServiceReturnedUnexpectedResult("ValueError"), e)
        except (ConnectionError, RedisError) as e:
            self.add_error(ServiceReturnedUnexpectedResult("Connection Error"), e)
|
class CacheBackend(BaseHealthCheckBackend):
    def __init__(self, backend="default"):
        pass
    def identifier(self):
        pass
    def check_status(self):
        pass
| 4 | 0 | 7 | 0 | 7 | 0 | 2 | 0 | 1 | 5 | 2 | 0 | 3 | 2 | 3 | 10 | 24 | 3 | 21 | 8 | 17 | 0 | 19 | 7 | 15 | 5 | 1 | 2 | 7 |
143,939 |
KristianOellegaard/django-health-check
|
KristianOellegaard_django-health-check/health_check/contrib/celery/apps.py
|
health_check.contrib.celery.apps.HealthCheckConfig
|
class HealthCheckConfig(AppConfig):
    name = "health_check.contrib.celery"
    def ready(self):
        from .backends import CeleryHealthCheck
        if hasattr(settings, "HEALTHCHECK_CELERY_TIMEOUT"):
            warnings.warn(
                "HEALTHCHECK_CELERY_TIMEOUT is deprecated and may be removed in the "
                "future. Please use HEALTHCHECK_CELERY_RESULT_TIMEOUT and "
                "HEALTHCHECK_CELERY_QUEUE_TIMEOUT instead.",
                DeprecationWarning,
            )
        for queue in current_app.amqp.queues:
            celery_class_name = "CeleryHealthCheck" + queue.title()
            celery_class = type(
                celery_class_name, (CeleryHealthCheck,), {"queue": queue}
            )
            plugin_dir.register(celery_class)
|
class HealthCheckConfig(AppConfig):
    def ready(self):
        pass
| 2 | 0 | 18 | 3 | 15 | 0 | 3 | 0 | 1 | 3 | 1 | 0 | 1 | 0 | 1 | 1 | 21 | 4 | 17 | 7 | 14 | 0 | 10 | 7 | 7 | 3 | 1 | 1 | 3 |
143,940 |
KristianOellegaard/django-health-check
|
KristianOellegaard_django-health-check/health_check/contrib/celery/backends.py
|
health_check.contrib.celery.backends.CeleryHealthCheck
|
class CeleryHealthCheck(BaseHealthCheckBackend):
    def check_status(self):
        timeout = getattr(settings, "HEALTHCHECK_CELERY_TIMEOUT", 3)
        result_timeout = getattr(settings, "HEALTHCHECK_CELERY_RESULT_TIMEOUT", timeout)
        queue_timeout = getattr(settings, "HEALTHCHECK_CELERY_QUEUE_TIMEOUT", timeout)
        priority = getattr(settings, "HEALTHCHECK_CELERY_PRIORITY", None)
        try:
            result = add.apply_async(
                args=[4, 4], expires=queue_timeout, queue=self.queue, priority=priority
            )
            result.get(timeout=result_timeout)
            if result.result != 8:
                self.add_error(
                    ServiceReturnedUnexpectedResult("Celery returned wrong result")
                )
        except IOError as e:
            self.add_error(ServiceUnavailable("IOError"), e)
        except NotImplementedError as e:
            self.add_error(
                ServiceUnavailable(
                    "NotImplementedError: Make sure CELERY_RESULT_BACKEND is set"
                ),
                e,
            )
        except TaskRevokedError as e:
            self.add_error(
                ServiceUnavailable(
                    "TaskRevokedError: The task was revoked, likely because it spent "
                    "too long in the queue"
                ),
                e,
            )
        except TimeoutError as e:
            self.add_error(
                ServiceUnavailable(
                    "TimeoutError: The task took too long to return a result"
                ),
                e,
            )
        except BaseException as e:
            self.add_error(ServiceUnavailable("Unknown error"), e)
|
class CeleryHealthCheck(BaseHealthCheckBackend):
    def check_status(self):
        pass
| 2 | 0 | 41 | 1 | 40 | 0 | 7 | 0 | 1 | 5 | 2 | 0 | 1 | 1 | 1 | 8 | 42 | 1 | 41 | 9 | 39 | 0 | 21 | 7 | 19 | 7 | 1 | 2 | 7 |
143,941 |
KristianOellegaard/django-health-check
|
KristianOellegaard_django-health-check/health_check/contrib/celery_ping/apps.py
|
health_check.contrib.celery_ping.apps.HealthCheckConfig
|
class HealthCheckConfig(AppConfig):
    name = "health_check.contrib.celery_ping"
    def ready(self):
        from .backends import CeleryPingHealthCheck
        plugin_dir.register(CeleryPingHealthCheck)
|
class HealthCheckConfig(AppConfig):
    def ready(self):
        pass
| 2 | 0 | 4 | 1 | 3 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 7 | 2 | 5 | 4 | 2 | 0 | 5 | 4 | 2 | 1 | 1 | 0 | 1 |
143,942 |
KristianOellegaard/django-health-check
|
KristianOellegaard_django-health-check/health_check/contrib/celery_ping/backends.py
|
health_check.contrib.celery_ping.backends.CeleryPingHealthCheck
|
class CeleryPingHealthCheck(BaseHealthCheckBackend):
    CORRECT_PING_RESPONSE = {"ok": "pong"}
    def check_status(self):
        timeout = getattr(settings, "HEALTHCHECK_CELERY_PING_TIMEOUT", 1)
        try:
            ping_result = app.control.ping(timeout=timeout)
        except IOError as e:
            self.add_error(ServiceUnavailable("IOError"), e)
        except NotImplementedError as exc:
            self.add_error(
                ServiceUnavailable(
                    "NotImplementedError: Make sure CELERY_RESULT_BACKEND is set"
                ),
                exc,
            )
        except BaseException as exc:
            self.add_error(ServiceUnavailable("Unknown error"), exc)
        else:
            if not ping_result:
                self.add_error(
                    ServiceUnavailable("Celery workers unavailable"),
                )
            else:
                self._check_ping_result(ping_result)
    def _check_ping_result(self, ping_result):
        active_workers = []
        for result in ping_result:
            worker, response = list(result.items())[0]
            if response != self.CORRECT_PING_RESPONSE:
                self.add_error(
                    ServiceUnavailable(
                        f"Celery worker {worker} response was incorrect"
                    ),
                )
                continue
            active_workers.append(worker)
        if not self.errors:
            self._check_active_queues(active_workers)
    def _check_active_queues(self, active_workers):
        defined_queues = getattr(app.conf, "task_queues", None) or getattr(
            app.conf, "CELERY_QUEUES", None
        )
        if not defined_queues:
            return
        defined_queues = set([queue.name for queue in defined_queues])
        active_queues = set()
        for queues in app.control.inspect(active_workers).active_queues().values():
            active_queues.update([queue.get("name") for queue in queues])
        for queue in defined_queues.difference(active_queues):
            self.add_error(
                ServiceUnavailable(f"No worker for Celery task queue {queue}"),
            )
|
class CeleryPingHealthCheck(BaseHealthCheckBackend):
    def check_status(self):
        pass
    def _check_ping_result(self, ping_result):
        pass
    def _check_active_queues(self, active_workers):
        pass
| 4 | 0 | 19 | 2 | 17 | 0 | 4 | 0 | 1 | 5 | 1 | 0 | 3 | 0 | 3 | 10 | 62 | 10 | 52 | 15 | 48 | 0 | 36 | 13 | 32 | 5 | 1 | 2 | 13 |
143,943 |
KristianOellegaard/django-health-check
|
KristianOellegaard_django-health-check/health_check/contrib/migrations/apps.py
|
health_check.contrib.migrations.apps.HealthCheckConfig
|
class HealthCheckConfig(AppConfig):
    name = "health_check.contrib.migrations"
    def ready(self):
        from .backends import MigrationsHealthCheck
        plugin_dir.register(MigrationsHealthCheck)
|
class HealthCheckConfig(AppConfig):
    def ready(self):
        pass
| 2 | 0 | 4 | 1 | 3 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 7 | 2 | 5 | 4 | 2 | 0 | 5 | 4 | 2 | 1 | 1 | 0 | 1 |
143,944 |
KristianOellegaard/django-health-check
|
KristianOellegaard_django-health-check/health_check/contrib/migrations/backends.py
|
health_check.contrib.migrations.backends.MigrationsHealthCheck
|
class MigrationsHealthCheck(BaseHealthCheckBackend):
    def get_migration_plan(self, executor):
        return executor.migration_plan(executor.loader.graph.leaf_nodes())
    def check_status(self):
        db_alias = getattr(settings, "HEALTHCHECK_MIGRATIONS_DB", DEFAULT_DB_ALIAS)
        try:
            executor = MigrationExecutor(connections[db_alias])
            plan = self.get_migration_plan(executor)
            if plan:
                self.add_error(ServiceUnavailable("There are migrations to apply"))
        except DatabaseError as e:
            self.add_error(ServiceUnavailable("Database is not ready"), e)
        except Exception as e:
            self.add_error(ServiceUnavailable("Unexpected error"), e)
|
class MigrationsHealthCheck(BaseHealthCheckBackend):
    def get_migration_plan(self, executor):
        pass
    def check_status(self):
        pass
| 3 | 0 | 7 | 0 | 7 | 0 | 3 | 0 | 1 | 2 | 1 | 0 | 2 | 0 | 2 | 9 | 15 | 1 | 14 | 7 | 11 | 0 | 14 | 6 | 11 | 4 | 1 | 2 | 5 |
143,945 |
KristianOellegaard/django-health-check
|
KristianOellegaard_django-health-check/health_check/contrib/psutil/apps.py
|
health_check.contrib.psutil.apps.HealthCheckConfig
|
class HealthCheckConfig(AppConfig):
    name = "health_check.contrib.psutil"
    def ready(self):
        from .backends import DiskUsage, MemoryUsage
        # Ensure checks haven't been explicitly disabled before registering
        if (
            hasattr(settings, "HEALTH_CHECK")
            and ("DISK_USAGE_MAX" in settings.HEALTH_CHECK)
            and (settings.HEALTH_CHECK["DISK_USAGE_MAX"] is None)
        ):
            pass
        else:
            plugin_dir.register(DiskUsage)
        if (
            hasattr(settings, "HEALTH_CHECK")
            and ("DISK_USAGE_MAX" in settings.HEALTH_CHECK)
            and (settings.HEALTH_CHECK["MEMORY_MIN"] is None)
        ):
            pass
        else:
            plugin_dir.register(MemoryUsage)
|
class HealthCheckConfig(AppConfig):
    def ready(self):
        pass
| 2 | 0 | 20 | 1 | 18 | 1 | 3 | 0.05 | 1 | 2 | 2 | 0 | 1 | 0 | 1 | 1 | 23 | 2 | 20 | 4 | 17 | 1 | 10 | 4 | 7 | 3 | 1 | 1 | 3 |
143,946 |
KristianOellegaard/django-health-check
|
KristianOellegaard_django-health-check/health_check/contrib/psutil/backends.py
|
health_check.contrib.psutil.backends.DiskUsage
|
class DiskUsage(BaseHealthCheckBackend):
    def check_status(self):
        try:
            du = psutil.disk_usage("/")
            if DISK_USAGE_MAX and du.percent >= DISK_USAGE_MAX:
                raise ServiceWarning(
                    "{host} {percent}% disk usage exceeds {disk_usage}%".format(
                        host=host, percent=du.percent, disk_usage=DISK_USAGE_MAX
                    )
                )
        except ValueError as e:
            self.add_error(ServiceReturnedUnexpectedResult("ValueError"), e)
|
class DiskUsage(BaseHealthCheckBackend):
    def check_status(self):
        pass
| 2 | 0 | 11 | 0 | 11 | 0 | 3 | 0 | 1 | 3 | 2 | 0 | 1 | 0 | 1 | 8 | 12 | 0 | 12 | 4 | 10 | 0 | 8 | 3 | 6 | 3 | 1 | 2 | 3 |
143,947 |
KristianOellegaard/django-health-check
|
KristianOellegaard_django-health-check/tests/test_celery_ping.py
|
tests.test_celery_ping.TestCeleryPingHealthCheck
|
class TestCeleryPingHealthCheck:
    CELERY_APP_CONTROL_PING = (
        "health_check.contrib.celery_ping.backends.app.control.ping"
    )
    CELERY_APP_CONTROL_INSPECT_ACTIVE_QUEUES = (
        "health_check.contrib.celery_ping.backends.app.control.inspect.active_queues"
    )
    @pytest.fixture
    def health_check(self):
        return CeleryPingHealthCheck()
    def test_check_status_doesnt_add_errors_when_ping_successful(self, health_check):
        celery_worker = "celery@4cc150a7b49b"
        with patch(
            self.CELERY_APP_CONTROL_PING,
            return_value=[
                {celery_worker: CeleryPingHealthCheck.CORRECT_PING_RESPONSE},
                {f"{celery_worker}-2": CeleryPingHealthCheck.CORRECT_PING_RESPONSE},
            ],
        ), patch(
            self.CELERY_APP_CONTROL_INSPECT_ACTIVE_QUEUES,
            return_value={
                celery_worker: [
                    {"name": queue.name} for queue in settings.CELERY_QUEUES
                ]
            },
        ):
            health_check.check_status()
        assert not health_check.errors
    def test_check_status_reports_errors_if_ping_responses_are_incorrect(
        self, health_check
    ):
        with patch(
            self.CELERY_APP_CONTROL_PING,
            return_value=[
                {"celery1@4cc150a7b49b": CeleryPingHealthCheck.CORRECT_PING_RESPONSE},
                {"celery2@4cc150a7b49b": {}},
                {"celery3@4cc150a7b49b": {"error": "pong"}},
            ],
        ):
            health_check.check_status()
        assert len(health_check.errors) == 2
    def test_check_status_adds_errors_when_ping_successfull_but_not_all_defined_queues_have_consumers(
        self,
        health_check,
    ):
        celery_worker = "celery@4cc150a7b49b"
        queues = list(settings.CELERY_QUEUES)
        with patch(
            self.CELERY_APP_CONTROL_PING,
            return_value=[
                {celery_worker: CeleryPingHealthCheck.CORRECT_PING_RESPONSE}],
        ), patch(
            self.CELERY_APP_CONTROL_INSPECT_ACTIVE_QUEUES,
            return_value={celery_worker: [{"name": queues.pop().name}]},
        ):
            health_check.check_status()
        assert len(health_check.errors) == len(queues)
    @pytest.mark.parametrize(
        "exception_to_raise",
        [
            IOError,
            TimeoutError,
        ],
    )
    def test_check_status_add_error_when_io_error_raised_from_ping(
        self, exception_to_raise, health_check
    ):
        with patch(self.CELERY_APP_CONTROL_PING, side_effect=exception_to_raise):
            health_check.check_status()
        assert len(health_check.errors) == 1
        assert "ioerror" in health_check.errors[0].message.lower()
    @pytest.mark.parametrize(
        "exception_to_raise", [ValueError,
            SystemError, IndexError, MemoryError]
    )
    def test_check_status_add_error_when_any_exception_raised_from_ping(
        self, exception_to_raise, health_check
    ):
        with patch(self.CELERY_APP_CONTROL_PING, side_effect=exception_to_raise):
            health_check.check_status()
        assert len(health_check.errors) == 1
        assert health_check.errors[0].message.lower() == "unknown error"
    def test_check_status_when_raised_exception_notimplementederror(self, health_check):
        expected_error_message = (
            "notimplementederror: make sure celery_result_backend is set"
        )
        with patch(self.CELERY_APP_CONTROL_PING, side_effect=NotImplementedError):
            health_check.check_status()
        assert len(health_check.errors) == 1
        assert health_check.errors[0].message.lower(
        ) == expected_error_message
    @pytest.mark.parametrize("ping_result", [None, list()])
    def test_check_status_add_error_when_ping_result_failed(
        self, ping_result, health_check
    ):
        with patch(self.CELERY_APP_CONTROL_PING, return_value=ping_result):
            health_check.check_status()
        assert len(health_check.errors) == 1
        assert "workers unavailable" in health_check.errors[0].message.lower(
        )
|
class TestCeleryPingHealthCheck:
    @pytest.fixture
    def health_check(self):
        pass
    def test_check_status_doesnt_add_errors_when_ping_successful(self, health_check):
        pass
    def test_check_status_reports_errors_if_ping_responses_are_incorrect(
        self, health_check
    ):
        pass
    def test_check_status_adds_errors_when_ping_successfull_but_not_all_defined_queues_have_consumers(
        self,
        health_check,
    ):
        pass
    @pytest.mark.parametrize(
        "exception_to_raise",
        [
            IOError,
            TimeoutError,
        ],
    )
    def test_check_status_add_error_when_io_error_raised_from_ping(
        self, exception_to_raise, health_check
    ):
        pass
    @pytest.mark.parametrize(
        "exception_to_raise", [ValueError,
            SystemError, IndexError, MemoryError]
    )
    def test_check_status_add_error_when_any_exception_raised_from_ping(
        self, exception_to_raise, health_check
    ):
        pass
    def test_check_status_when_raised_exception_notimplementederror(self, health_check):
        pass
    @pytest.mark.parametrize("ping_result", [None, list()])
    def test_check_status_add_error_when_ping_result_failed(
        self, ping_result, health_check
    ):
        pass
| 13 | 0 | 11 | 1 | 10 | 0 | 1 | 0 | 0 | 3 | 1 | 0 | 8 | 0 | 8 | 8 | 114 | 18 | 96 | 38 | 64 | 0 | 41 | 15 | 32 | 1 | 0 | 1 | 8 |