id
int64 0
843k
| repository_name
stringlengths 7
55
| file_path
stringlengths 9
332
| class_name
stringlengths 3
290
| human_written_code
stringlengths 12
4.36M
| class_skeleton
stringlengths 19
2.2M
| total_program_units
int64 1
9.57k
| total_doc_str
int64 0
4.2k
| AvgCountLine
float64 0
7.89k
| AvgCountLineBlank
float64 0
300
| AvgCountLineCode
float64 0
7.89k
| AvgCountLineComment
float64 0
7.89k
| AvgCyclomatic
float64 0
130
| CommentToCodeRatio
float64 0
176
| CountClassBase
float64 0
48
| CountClassCoupled
float64 0
589
| CountClassCoupledModified
float64 0
581
| CountClassDerived
float64 0
5.37k
| CountDeclInstanceMethod
float64 0
4.2k
| CountDeclInstanceVariable
float64 0
299
| CountDeclMethod
float64 0
4.2k
| CountDeclMethodAll
float64 0
4.2k
| CountLine
float64 1
115k
| CountLineBlank
float64 0
9.01k
| CountLineCode
float64 0
94.4k
| CountLineCodeDecl
float64 0
46.1k
| CountLineCodeExe
float64 0
91.3k
| CountLineComment
float64 0
27k
| CountStmt
float64 1
93.2k
| CountStmtDecl
float64 0
46.1k
| CountStmtExe
float64 0
90.2k
| MaxCyclomatic
float64 0
759
| MaxInheritanceTree
float64 0
16
| MaxNesting
float64 0
34
| SumCyclomatic
float64 0
6k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
200 |
101Loop/drf-addons
|
101Loop_drf-addons/drfaddons/admin.py
|
drfaddons.admin.HideModelAdminMixin
|
class HideModelAdminMixin:
"""
Hide a model from Application page but allows addition of object
from inside of other models.
Author: Himanshu Shankar (https://himanshus.com)
"""
def get_model_perms(self, request):
return {}
|
class HideModelAdminMixin:
'''
Hide a model from Application page but allows addition of object
from inside of other models.
Author: Himanshu Shankar (https://himanshus.com)
'''
def get_model_perms(self, request):
pass
| 2 | 1 | 2 | 0 | 2 | 0 | 1 | 1.67 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 1 | 10 | 2 | 3 | 2 | 1 | 5 | 3 | 2 | 1 | 1 | 0 | 0 | 1 |
201 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/tests/test_sharded_clusters.py
|
tests.test_sharded_clusters.ShardsTestCase
|
class ShardsTestCase(unittest.TestCase):
def setUp(self):
self.sh = ShardedClusters()
PortPool().change_range()
def tearDown(self):
self.sh.cleanup()
def test_singleton(self):
self.assertEqual(id(self.sh), id(ShardedClusters()))
def test_bool(self):
self.assertEqual(False, bool(self.sh))
self.sh.create(create_shard())
self.assertEqual(True, bool(self.sh))
def test_operations(self):
config = {'shards': [create_shard(i) for i in range(3)]}
cluster = ShardedCluster(config)
self.assertEqual(len(self.sh), 0)
operator.setitem(self.sh, 1, cluster)
self.assertEqual(len(self.sh), 1)
self.assertEqual(operator.getitem(self.sh, 1)['id'], cluster.id)
operator.delitem(self.sh, 1)
self.assertEqual(len(self.sh), 0)
self.assertRaises(KeyError, operator.getitem, self.sh, 1)
cluster.cleanup()
def test_operations(self):
self.assertTrue(len(self.sh) == 0)
config1 = create_shard(1)
config2 = create_shard(2)
self.sh.create(config1)
self.sh.create(config2)
self.assertTrue(len(self.sh) == 2)
for key in self.sh:
self.assertTrue(key in ('sh01', 'sh02'))
for key in ('sh01', 'sh02'):
self.assertTrue(key in self.sh)
def test_cleanup(self):
config1 = create_shard(1)
config2 = create_shard(2)
self.assertTrue(len(self.sh) == 0)
self.sh.create(config1)
self.sh.create(config2)
self.assertTrue(len(self.sh) == 2)
self.sh.cleanup()
self.assertTrue(len(self.sh) == 0)
def test_sh_new(self):
port = PortPool().port(check=True)
config = {
'id': 'shard_cluster_1',
'configsvrs': [{}],
'routers': [{"port": port}],
'shards': [create_shard(1), create_shard(2),
{'id': 'sh-rs-01', 'shardParams': {'id': 'rs1', 'members': [{}, {}]}},
]
}
cluster_id = self.sh.create(config)
self.assertEqual(cluster_id, 'shard_cluster_1')
host = "{hostname}:{port}".format(hostname=HOSTNAME, port=port)
c = pymongo.MongoClient(host)
result = c.admin.command("listShards")
for shard in result['shards']:
shard['_id'] in ('sh01', 'sh02', 'sh-rs-01')
c.close()
def test_sh_new_with_auth(self):
port = PortPool().port(check=True)
config = {
'id': 'shard_cluster_1',
'auth_key': 'secret',
'login': 'admin',
'password': 'adminpass',
'configsvrs': [{}],
'routers': [{"port": port}],
'shards': [create_shard(1), create_shard(2)]
}
self.sh.create(config)
host = "{hostname}:{port}".format(hostname=HOSTNAME, port=port)
c = pymongo.MongoClient(host)
self.assertRaises(pymongo.errors.OperationFailure, c.admin.command, "listShards")
c.close()
c = pymongo.MongoClient(host, username='admin', password='adminpass')
self.assertTrue(isinstance(c.admin.command("listShards"), dict))
c.close()
def test_sh_del(self):
sh1_id = self.sh.create(create_shard(1))
sh2_id = self.sh.create(create_shard(2))
self.assertEqual(len(self.sh), 2)
self.sh.remove(sh1_id)
self.assertEqual(len(self.sh), 1)
self.sh.remove(sh2_id)
self.assertEqual(len(self.sh), 0)
def test_info3(self):
config = {
'configsvrs': [{}],
'routers': [{}, {}, {}],
'shards': [create_shard(1), create_shard(2)]
}
sh_id = self.sh.create(config)
info = self.sh.info(sh_id)
self.assertTrue(isinstance(info, dict))
for item in ("shards", "configsvrs", "routers",
"mongodb_uri", "orchestration"):
self.assertTrue(item in info)
self.assertEqual(len(info['shards']), 2)
self.assertEqual(len(info['configsvrs']), 1)
self.assertEqual(len(info['routers']), 3)
mongodb_uri = info['mongodb_uri']
for router in info['routers']:
self.assertIn(Servers().hostname(router['id']), mongodb_uri)
self.assertTrue(mongodb_uri.find('mongodb://') == 0)
self.assertEqual(info['orchestration'], 'sharded_clusters')
def test_configsvrs(self):
config = {}
sh_id = self.sh.create(config)
self.assertEqual(len(self.sh.configsvrs(sh_id)), 1)
self.sh.cleanup()
config = {'configsvrs': [{}]}
sh_id = self.sh.create(config)
self.assertEqual(len(self.sh.configsvrs(sh_id)), 1)
def test_routers(self):
config = create_shard()
sh_id = self.sh.create(config)
self.assertEqual(len(self.sh.routers(sh_id)), 1)
self.sh.cleanup()
config = {'routers': [{}, {}, {}]}
sh_id = self.sh.create(config)
self.assertEqual(len(self.sh.routers(sh_id)), 3)
def test_router_add(self):
config = {}
sh_id = self.sh.create(config)
self.assertEqual(len(self.sh.routers(sh_id)), 1)
self.sh.router_add(sh_id, {})
self.assertEqual(len(self.sh.routers(sh_id)), 2)
self.sh.router_add(sh_id, {})
self.assertEqual(len(self.sh.routers(sh_id)), 3)
self.sh.cleanup()
def test_members(self):
port = PortPool().port(check=True)
config = {'routers': [{'port': port}]}
sh_id = self.sh.create(config)
self.assertEqual(len(self.sh.members(sh_id)), 0)
self.sh.cleanup()
config = {'routers': [{'port': port}], 'shards': [create_shard(i) for i in range(3)]}
sh_id = self.sh.create(config)
self.assertEqual(len(self.sh.members(sh_id)), 3)
def test_member_info(self):
config = {'shards': [create_shard(), {'id': 'sh-rs-01', 'shardParams': {'id': 'rs1', 'members': [{}, {}]}}]}
sh_id = self.sh.create(config)
info = self.sh.member_info(sh_id, 'sh00')
self.assertEqual(info['id'], 'sh00')
self.assertTrue(info['isReplicaSet'])
self.assertTrue('_id' in info)
info = self.sh.member_info(sh_id, 'sh-rs-01')
self.assertEqual(info['id'], 'sh-rs-01')
self.assertTrue(info['isReplicaSet'])
self.assertTrue('_id' in info)
def test_member_info_with_auth(self):
config = {'auth_key': 'secret', 'login': 'admin', 'password': 'admin', 'shards': [create_shard(), {'id': 'sh-rs-01', 'shardParams': {'id': 'rs1', 'members': [{}, {}]}}]}
sh_id = self.sh.create(config)
info = self.sh.member_info(sh_id, 'sh00')
self.assertEqual(info['id'], 'sh00')
self.assertTrue(info['isReplicaSet'])
self.assertTrue('_id' in info)
info = self.sh.member_info(sh_id, 'sh-rs-01')
self.assertEqual(info['id'], 'sh-rs-01')
self.assertTrue(info['isReplicaSet'])
self.assertTrue('_id' in info)
def test_member_del(self):
port = PortPool().port(check=True)
config = {'routers': [{'port': port}], 'shards': [create_shard(1), create_shard(2), {'id': 'sh-rs-01', 'shardParams': {'id': 'rs1', 'members': [{}, {}]}}]}
sh_id = self.sh.create(config)
host = "{hostname}:{port}".format(hostname=HOSTNAME, port=port)
c = pymongo.MongoClient(host)
result = c.admin.command("listShards")
self.assertEqual(len(result['shards']), 3)
# remove member-host
result = self.sh.member_del(sh_id, 'sh01')
self.assertEqual(len(c.admin.command("listShards")['shards']), 3)
self.assertEqual(result['state'], 'started')
self.assertEqual(result['shard'], 'sh01')
time.sleep(5)
result = self.sh.member_del(sh_id, 'sh01')
self.assertEqual(result['state'], 'completed')
self.assertEqual(len(c.admin.command("listShards")['shards']), 2)
self.assertEqual(result['shard'], 'sh01')
# remove member-replicaset
result = self.sh.member_del(sh_id, 'sh-rs-01')
self.assertEqual(len(c.admin.command("listShards")['shards']), 2)
self.assertEqual(result['state'], 'started')
self.assertEqual(result['shard'], 'sh-rs-01')
time.sleep(7)
result = self.sh.member_del(sh_id, 'sh-rs-01')
self.assertEqual(result['state'], 'completed')
self.assertEqual(len(c.admin.command("listShards")['shards']), 1)
self.assertEqual(result['shard'], 'sh-rs-01')
def test_member_add(self):
port = PortPool().port(check=True)
config = {'routers': [{'port': port}]}
sh_id = self.sh.create(config)
host = "{hostname}:{port}".format(hostname=HOSTNAME, port=port)
c = pymongo.MongoClient(host)
self.assertEqual(len(c.admin.command("listShards")['shards']), 0)
result = self.sh.member_add(sh_id, create_shard(1))
self.assertTrue(result.get('isReplicaSet', False))
self.assertEqual(result['id'], 'sh01')
self.assertEqual(len(c.admin.command("listShards")['shards']), 1)
result = self.sh.member_add(sh_id, {'id': 'test2', 'shardParams': {'id': 'rs1', 'members': [{}, {}]}})
self.assertFalse(result.get('isServer', False))
self.assertTrue(result.get('isReplicaSet', False))
self.assertEqual(result['id'], 'test2')
self.assertEqual(len(c.admin.command("listShards")['shards']), 2)
def test_require_api_version_auth(self):
port = PortPool().port(check=True)
config = {
'login': 'luke', 'password': 'ekul',
'routers': [{'port': port}],
"requireApiVersion": "1"
}
self.sh.create(config)
host = "{hostname}:{port}".format(hostname=HOSTNAME, port=port)
client = pymongo.MongoClient(host, server_api=ServerApi("1"))
server_params = client.admin.command("getParameter", "*")
assert server_params['requireApiVersion'] is True
self.sh.cleanup()
def test_require_api_version_noauth(self):
port = PortPool().port(check=True)
config = {
'routers': [{'port': port}],
"requireApiVersion": "1"
}
self.sh.create(config)
host = "{hostname}:{port}".format(hostname=HOSTNAME, port=port)
client = pymongo.MongoClient(host, server_api=ServerApi("1"))
server_params = client.admin.command("getParameter", "*")
assert server_params['requireApiVersion'] is True
self.sh.cleanup()
|
class ShardsTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_singleton(self):
pass
def test_bool(self):
pass
def test_operations(self):
pass
def test_operations(self):
pass
def test_cleanup(self):
pass
def test_sh_new(self):
pass
def test_sh_new_with_auth(self):
pass
def test_sh_del(self):
pass
def test_info3(self):
pass
def test_configsvrs(self):
pass
def test_routers(self):
pass
def test_router_add(self):
pass
def test_members(self):
pass
def test_member_info(self):
pass
def test_member_info_with_auth(self):
pass
def test_member_del(self):
pass
def test_member_add(self):
pass
def test_require_api_version_auth(self):
pass
def test_require_api_version_noauth(self):
pass
| 22 | 0 | 12 | 1 | 11 | 0 | 1 | 0.01 | 1 | 8 | 4 | 0 | 21 | 1 | 21 | 93 | 269 | 36 | 231 | 86 | 209 | 2 | 204 | 86 | 182 | 3 | 2 | 1 | 26 |
202 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/tests/test_launch.py
|
tests.test_launch.TestLaunch
|
class TestLaunch(unittest.TestCase):
def test_launch_single(self):
if os.name != 'posix':
raise unittest.SkipTest('Only works on posix!')
run('mongo-orchestration start')
proc = pexpect.spawn('mongo-launch', ['single'])
proc.expect('Type "q" to quit:')
proc.send('q\n')
proc.wait()
self.assertEqual(proc.exitstatus, 0)
run('mongo-orchestration stop')
def test_launch_replica_set(self):
if os.name != 'posix':
raise unittest.SkipTest('Only works on posix!')
run('mongo-orchestration start')
proc = pexpect.spawn('mongo-launch', ['replicaset', 'ssl'])
proc.expect('"r" to shutdown and restart the primary')
proc.send('q\n')
proc.wait()
self.assertEqual(proc.exitstatus, 0)
run('mongo-orchestration stop')
def test_launch_sharded(self):
if os.name != 'posix':
raise unittest.SkipTest('Only works on posix!')
run('mongo-orchestration start')
proc = pexpect.spawn('mongo-launch', ['shard', 'auth'])
proc.expect('Type "q" to quit:')
proc.send('q\n')
proc.wait()
self.assertEqual(proc.exitstatus, 0)
run('mongo-orchestration stop')
|
class TestLaunch(unittest.TestCase):
def test_launch_single(self):
pass
def test_launch_replica_set(self):
pass
def test_launch_sharded(self):
pass
| 4 | 0 | 10 | 0 | 10 | 0 | 2 | 0 | 1 | 2 | 0 | 0 | 3 | 0 | 3 | 75 | 34 | 3 | 31 | 7 | 27 | 0 | 31 | 7 | 27 | 2 | 2 | 1 | 6 |
203 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/tests/test_process.py
|
tests.test_process.PortPoolTestCase
|
class PortPoolTestCase(unittest.TestCase):
def setUp(self):
self.hostname = HOSTNAME
self.pp = process.PortPool()
self.pp.change_range(min_port=1025, max_port=1080)
self.sockets = {}
def tearDown(self):
for s in self.sockets:
self.sockets[s].close()
def listen_port(self, port, max_connection=0):
if self.sockets.get(port, None):
self.sockets[port].close()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOSTNAME, port))
s.listen(max_connection)
self.sockets[port] = s
def test_singleton(self):
pp2 = process.PortPool(min_port=1025, max_port=1038)
self.assertEqual(id(self.pp), id(pp2))
def test_port_sequence(self):
ports = set([1025, 1026, 1027, 1028, 30, 28, 22, 45])
self.pp.change_range(port_sequence=ports)
_ports = self.pp._PortPool__closed.union(self.pp._PortPool__ports)
self.assertEqual(ports, _ports)
def test_find_port(self):
port = self.pp.port()
self.pp.change_range(port, port)
port = self.pp.port()
self.assertTrue(port > 0)
self.listen_port(port)
self.assertRaises(IndexError, self.pp.port)
def test_port_with_check(self):
self.pp.change_range(min_port=1100, max_port=1200)
port1, port2 = self.pp.port(check=True), self.pp.port(check=True)
self.pp.change_range(port_sequence=[port1, port2])
self.listen_port(port1, 0)
self.assertTrue(port2 == self.pp.port(check=True))
def test_check_port(self):
port = self.pp.port(check=True)
self.assertTrue(self.pp._PortPool__check_port(port))
self.listen_port(port)
self.assertFalse(self.pp._PortPool__check_port(port))
def test_release_port(self):
port = self.pp.port(check=True)
self.assertTrue(port in self.pp._PortPool__closed)
self.pp.release_port(port)
self.assertFalse(port in self.pp._PortPool__closed)
def test_refresh(self):
ports = set([random.randint(1025, 2000) for i in range(15)])
self.pp.change_range(port_sequence=ports)
ports_opened = self.pp._PortPool__ports.copy()
test_port = ports_opened.pop()
self.assertTrue(test_port in self.pp._PortPool__ports)
self.assertTrue(len(self.pp._PortPool__ports) > 1)
for port in ports:
if port != test_port:
try:
self.listen_port(port)
except (socket.error):
pass
self.pp.refresh()
self.assertTrue(len(self.pp._PortPool__ports) == 1)
def test_refresh_only_closed(self):
ports = set([random.randint(1025, 2000) for _ in range(15)])
self.pp.change_range(port_sequence=ports)
closed_num = len(self.pp._PortPool__closed)
self.pp.port(), self.pp.port()
self.assertTrue(closed_num + 2 == len(self.pp._PortPool__closed))
ports_opened = self.pp._PortPool__ports.copy()
test_port = ports_opened.pop()
self.listen_port(test_port)
self.pp.refresh(only_closed=True)
self.assertTrue(closed_num == len(self.pp._PortPool__closed))
self.pp.refresh()
self.assertTrue(closed_num + 1 == len(self.pp._PortPool__closed))
def test_change_range(self):
self.pp.change_range(min_port=1025, max_port=1033)
ports = self.pp._PortPool__closed.union(self.pp._PortPool__ports)
self.assertTrue(ports == set(range(1025, 1033 + 1)))
random_ports = set([random.randint(1025, 2000) for i in range(15)])
self.pp.change_range(port_sequence=random_ports)
ports = self.pp._PortPool__closed.union(self.pp._PortPool__ports)
self.assertTrue(ports == random_ports)
|
class PortPoolTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def listen_port(self, port, max_connection=0):
pass
def test_singleton(self):
pass
def test_port_sequence(self):
pass
def test_find_port(self):
pass
def test_port_with_check(self):
pass
def test_check_port(self):
pass
def test_release_port(self):
pass
def test_refresh(self):
pass
def test_refresh_only_closed(self):
pass
def test_change_range(self):
pass
| 13 | 0 | 7 | 0 | 7 | 0 | 1 | 0 | 1 | 5 | 1 | 0 | 12 | 3 | 12 | 84 | 100 | 17 | 83 | 35 | 70 | 0 | 83 | 35 | 70 | 4 | 2 | 3 | 17 |
204 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/tests/test_process.py
|
tests.test_process.ProcessTestCase
|
class ProcessTestCase(unittest.TestCase):
def setUp(self):
self.hostname = HOSTNAME
self.s = None
self.executable = sys.executable
self.pp = process.PortPool(min_port=1025, max_port=2000)
self.sockets = {}
self.tmp_files = list()
self.bin_path = os.path.join(os.environ.get('MONGOBIN', ''), 'mongod')
self.db_path = tempfile.mkdtemp()
self.cfg = {"oplogSize": 10, 'dbpath': self.db_path}
def tearDown(self):
for s in self.sockets:
self.sockets[s].close()
if self.cfg:
process.cleanup_mprocess('', self.cfg)
for item in self.tmp_files:
if os.path.exists(item):
os.remove(item)
def listen_port(self, port, max_connection=0):
if self.sockets.get(port, None):
self.sockets[port].close()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOSTNAME, port))
s.listen(max_connection)
self.sockets[port] = s
def test_repair(self):
port = self.pp.port(check=True)
# Assume we're testing on 64-bit machines.
self.cfg['nojournal'] = True
lock_file = os.path.join(self.cfg['dbpath'], 'mongod.lock')
config_path = process.write_config(self.cfg)
self.tmp_files.append(config_path)
proc, host = process.mprocess(self.bin_path, config_path, port=port, timeout=60)
self.assertTrue(os.path.exists(lock_file))
if platform.system() == 'Windows':
# mongod.lock cannot be read by any external process on Windows.
with self.assertRaises(IOError):
open(lock_file, 'r')
else:
with open(lock_file, 'r') as fd:
self.assertGreater(len(fd.read()), 0)
proc.terminate()
proc.communicate()
process.repair_mongo(self.bin_path, self.cfg['dbpath'])
with open(lock_file, 'r') as fd:
contents = fd.read()
self.assertEqual(len(contents), 0,
"lock_file contains: " + contents)
def test_mprocess_fail(self):
fd_cfg, config_path = tempfile.mkstemp()
os.close(fd_cfg)
self.tmp_files.append(config_path)
self.assertRaises(OSError, process.mprocess,
'fake-process_', config_path, None, 30)
process.write_config({"fake": True}, config_path)
self.assertRaises(OSError, process.mprocess,
self.bin_path, config_path, None, 30)
def test_mprocess(self):
port = self.pp.port(check=True)
config_path = process.write_config(self.cfg)
self.tmp_files.append(config_path)
result = process.mprocess(self.bin_path, config_path, port=port)
self.assertTrue(isinstance(result, tuple))
proc, host = result
self.assertTrue(isinstance(proc, subprocess.Popen))
self.assertTrue(isinstance(host, str))
process.kill_mprocess(proc)
def test_mprocess_busy_port(self):
config_path = process.write_config(self.cfg)
self.tmp_files.append(config_path)
port = self.pp.port()
self.listen_port(port, max_connection=0)
proc, host = process.mprocess(self.executable, config_path,
port=port, timeout=2)
self.assertTrue(proc.pid > 0)
self.assertEqual(host, self.hostname + ':' + str(port))
self.sockets.pop(port).close()
self.assertRaises(OSError, process.mprocess,
self.executable, '', port, 1)
def test_kill_mprocess(self):
p = subprocess.Popen([self.executable])
self.assertTrue(process.proc_alive(p))
process.kill_mprocess(p)
self.assertFalse(process.proc_alive(p))
def test_cleanup_process(self):
fd_cfg, config_path = tempfile.mkstemp()
fd_key, key_file = tempfile.mkstemp()
fd_log, log_path = tempfile.mkstemp()
db_path = tempfile.mkdtemp()
self.assertTrue(os.path.exists(config_path))
self.assertTrue(os.path.exists(key_file))
self.assertTrue(os.path.exists(log_path))
self.assertTrue(os.path.exists(db_path))
with os.fdopen(fd_cfg, 'w') as fd:
fd.write('keyFile={key_file}\n'
'logPath={log_path}\n'
'dbpath={db_path}'.format(**locals()))
for fd in (fd_cfg, fd_key, fd_log):
try:
os.close(fd)
except OSError:
# fd_cfg may be closed already if fdopen() didn't raise
pass
cfg = {'keyFile': key_file, 'logpath': log_path, 'dbpath': db_path}
process.cleanup_mprocess(config_path, cfg)
self.assertFalse(os.path.exists(config_path))
self.assertFalse(os.path.exists(key_file))
self.assertTrue(os.path.exists(log_path))
self.assertFalse(os.path.exists(db_path))
process.remove_path(log_path)
self.assertFalse(os.path.exists(log_path))
def test_remove_path(self):
fd, file_path = tempfile.mkstemp()
os.close(fd)
self.assertTrue(os.path.exists(file_path))
process.remove_path(file_path)
self.assertFalse(os.path.exists(file_path))
dir_path = tempfile.mkdtemp()
fd, file_path = tempfile.mkstemp(dir=dir_path)
os.close(fd)
process.remove_path(dir_path)
self.assertFalse(os.path.exists(file_path))
self.assertFalse(os.path.exists(dir_path))
def test_write_config(self):
cfg = {'port': 27017, 'objcheck': 'true'}
config_path = process.write_config(cfg)
self.assertTrue(os.path.exists(config_path))
with open(config_path, 'r') as fd:
config_data = fd.read()
self.assertTrue('port=27017' in config_data)
self.assertTrue('objcheck=true' in config_data)
process.cleanup_mprocess(config_path, cfg)
def test_write_config_with_specify_config_path(self):
cfg = {'port': 27017, 'objcheck': 'true'}
fd_key, file_path = tempfile.mkstemp()
os.close(fd_key)
config_path = process.write_config(cfg, file_path)
self.assertEqual(file_path, config_path)
process.cleanup_mprocess(config_path, cfg)
def test_proc_alive(self):
p = subprocess.Popen([self.executable])
self.assertTrue(process.proc_alive(p))
p.terminate()
p.wait()
self.assertFalse(process.proc_alive(p))
self.assertFalse(process.proc_alive(None))
def test_read_config(self):
cfg = {"oplogSize": 10, "other": "some string"}
config_path = process.write_config(cfg)
self.tmp_files.append(config_path)
self.assertEqual(process.read_config(config_path), cfg)
|
class ProcessTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def listen_port(self, port, max_connection=0):
pass
def test_repair(self):
pass
def test_mprocess_fail(self):
pass
def test_mprocess_fail(self):
pass
def test_mprocess_busy_port(self):
pass
def test_kill_mprocess(self):
pass
def test_cleanup_process(self):
pass
def test_remove_path(self):
pass
def test_write_config(self):
pass
def test_write_config_with_specify_config_path(self):
pass
def test_proc_alive(self):
pass
def test_read_config(self):
pass
| 15 | 0 | 11 | 0 | 11 | 0 | 2 | 0.02 | 1 | 7 | 1 | 0 | 14 | 9 | 14 | 86 | 167 | 15 | 149 | 60 | 134 | 3 | 141 | 57 | 126 | 5 | 2 | 2 | 22 |
205 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/tests/test_replica_set.py
|
tests.test_replica_set.ReplicaSetAuthTestCase
|
class ReplicaSetAuthTestCase(unittest.TestCase):
def setUp(self):
PortPool().change_range()
self.servers = Servers()
self.servers.set_settings(*TEST_RELEASES)
self.repl_cfg = {'auth_key': 'secret', 'login': 'admin', 'password': 'admin', 'members': [{}, {}]}
self.repl = ReplicaSet(self.repl_cfg)
def tearDown(self):
if len(self.repl) > 0:
self.repl.cleanup()
def test_auth_connection(self):
self.assertTrue(isinstance(self.repl.connection().admin.list_collection_names(), list))
c = pymongo.MongoClient(self.repl.primary(), replicaSet=self.repl.repl_id)
self.assertRaises(pymongo.errors.OperationFailure, c.admin.list_collection_names)
def test_auth_admin(self):
c = pymongo.MongoClient(self.repl.primary(), replicaSet=self.repl.repl_id)
self.assertRaises(pymongo.errors.OperationFailure, c.admin.list_collection_names)
c.close()
c = pymongo.MongoClient(self.repl.primary(), replicaSet=self.repl.repl_id, username='admin', password='admin')
self.assertTrue(isinstance(c.admin.list_collection_names(), list))
c.close()
def test_auth_collection(self):
raise SkipTest("test is not currently working")
c = pymongo.MongoClient(self.repl.primary(), replicaSet=self.repl.repl_id, username='admin', password='admin')
c.test_auth.command('createUser', 'user', pwd='userpass', roles=['readWrite'])
c.close()
c = pymongo.MongoClient(self.repl.primary(), replicaSet=self.repl.repl_id, username='user', password='userpass')
db = c.test_auth
#coll = db.foo.with_options(write_concern=pymongo.WriteConcern(2, 10000))
self.assertTrue(db.foo.insert_one({'foo': 'bar'}))
self.assertTrue(isinstance(db.foo.find_one({}), dict))
c.close()
def test_auth_arbiter_member_info(self):
self.repl.cleanup()
self.repl = ReplicaSet({'members': [
{}, {'rsParams': {'arbiterOnly': True}}]})
info = self.repl.member_info(1)
for key in ('procInfo', 'mongodb_uri', 'statuses', 'rsInfo'):
self.assertIn(key, info)
rs_info = info['rsInfo']
for key in ('primary', 'secondary', 'arbiterOnly'):
self.assertIn(key, rs_info)
self.assertFalse(rs_info['primary'])
self.assertFalse(rs_info['secondary'])
self.assertTrue(rs_info['arbiterOnly'])
def test_mongodb_auth_uri(self):
self.assertIn('mongodb_auth_uri', self.repl.info())
rs_auth_uri = self.repl.info()['mongodb_auth_uri']
hosts = ','.join(m['host'] for m in self.repl.members())
self.assertIn(hosts, rs_auth_uri)
self.assertIn('admin:admin', rs_auth_uri)
self.assertIn('authSource=admin', rs_auth_uri)
replset_param = 'replicaSet=' + self.repl.repl_id
self.assertIn(replset_param, rs_auth_uri)
def test_member_info_auth_uri(self):
for i in range(len(self.repl)):
member = self.repl.member_info(i)
self.assertIn('mongodb_auth_uri', member)
uri = member['mongodb_auth_uri']
host = Servers().hostname(member['server_id'])
self.assertIn(host, uri)
self.assertIn('admin:admin', uri)
self.assertIn('authSource=admin', uri)
|
class ReplicaSetAuthTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_auth_connection(self):
pass
def test_auth_admin(self):
pass
def test_auth_collection(self):
pass
def test_auth_arbiter_member_info(self):
pass
def test_mongodb_auth_uri(self):
pass
def test_member_info_auth_uri(self):
pass
| 9 | 0 | 8 | 0 | 8 | 0 | 2 | 0.02 | 1 | 7 | 3 | 0 | 8 | 3 | 8 | 80 | 71 | 8 | 62 | 26 | 53 | 1 | 61 | 26 | 52 | 3 | 2 | 1 | 12 |
206 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/tests/test_replica_set.py
|
tests.test_replica_set.ReplicaSetSSLTestCase
|
class ReplicaSetSSLTestCase(SSLTestCase):
def tearDown(self):
if hasattr(self, 'repl'):
self.repl.cleanup()
def test_ssl_auth(self):
if SERVER_VERSION < (2, 4):
raise SkipTest("Need to be able to set 'authenticationMechanisms' "
"parameter to test.")
member_params = {
'procParams': {
'clusterAuthMode': 'x509',
'setParameter': {'authenticationMechanisms': 'MONGODB-X509'}
}
}
self.repl_cfg = {
'login': TEST_SUBJECT,
'authSource': '$external',
'members': [member_params, member_params],
'sslParams': {
'tlsCAFile': certificate('ca.pem'),
'tlsCertificateKeyFile': certificate('server.pem'),
'tlsMode': 'requireTLS',
'tlsAllowInvalidCertificates': True
}
}
# Should not raise an Exception.
self.repl = ReplicaSet(self.repl_cfg)
# Should create an extra user. No raise on authenticate.
client = pymongo.MongoClient(
self.repl.primary(), tlsCertificateKeyFile=DEFAULT_CLIENT_CERT,
tlsAllowInvalidCertificates=True, username=DEFAULT_SUBJECT, mechanism='MONGODB-X509')
client['$external'].command('isMaster)')
# Should create the user we requested. No raise on authenticate.
client = pymongo.MongoClient(
self.repl.primary(), tlsCertificateKeyFile=certificate('client.pem'),
tlsAllowInvalidCertificates=True, username=TEST_SUBJECT, mechanism='MONGODB-X509')
client['$external'].command('isMaster)')
def test_scram_with_ssl(self):
member_params = {'procParams': {'clusterAuthMode': 'x509'}}
self.repl_cfg = {
'login': 'luke',
'password': 'ekul',
'members': [member_params, member_params],
'sslParams': {
'tlsCAFile': certificate('ca.pem'),
'tlsCertificateKeyFile': certificate('server.pem'),
'tlsMode': 'requireTLS',
'tlsAllowInvalidCertificates': True
}
}
# Should not raise an Exception.
self.repl = ReplicaSet(self.repl_cfg)
# Should create the user we requested. No raise on authenticate.
client = pymongo.MongoClient(
self.repl.primary(), tlsCertificateKeyFile=certificate('client.pem'),
tlsAllowInvalidCertificates=True, username='luke', password='ekul')
# This should be the only user.
self.assertEqual(len(client.admin.command('usersInfo')['users']), 1)
self.assertFalse(client['$external'].command('usersInfo')['users'])
def test_ssl(self):
member_params = {}
self.repl_cfg = {
'members': [member_params, member_params],
'sslParams': {
'tlsCAFile': certificate('ca.pem'),
'tlsCertificateKeyFile': certificate('server.pem'),
'tlsMode': 'requireTLS',
'tlsAllowInvalidCertificates': True
}
}
# Should not raise an Exception.
self.repl = ReplicaSet(self.repl_cfg)
# Server should require SSL.
with self.assertRaises(pymongo.errors.ConnectionFailure):
connected(pymongo.MongoClient(self.repl.primary()))
# This shouldn't raise.
connected(pymongo.MongoClient(
self.repl.primary(), tlsCertificateKeyFile=certificate('client.pem'),
tlsAllowInvalidCertificates=True))
def test_mongodb_auth_uri(self):
if SERVER_VERSION < (2, 4):
raise SkipTest("Need to be able to set 'authenticationMechanisms' "
"parameter to test.")
member_params = {
'procParams': {
'clusterAuthMode': 'x509',
'setParameter': {'authenticationMechanisms': 'MONGODB-X509'}
}
}
self.repl_cfg = {
'login': TEST_SUBJECT,
'authSource': '$external',
'members': [member_params, member_params],
'sslParams': {
'tlsCAFile': certificate('ca.pem'),
'tlsCertificateKeyFile': certificate('server.pem'),
'tlsMode': 'requireTLS',
'tlsAllowInvalidCertificates': True
}
}
self.repl = ReplicaSet(self.repl_cfg)
self.assertIn('mongodb_auth_uri', self.repl.info())
repl_auth_uri = self.repl.info()['mongodb_auth_uri']
hosts = ','.join(m['host'] for m in self.repl.members())
self.assertIn(hosts, repl_auth_uri)
self.assertIn(TEST_SUBJECT, repl_auth_uri)
self.assertIn('authSource=$external', repl_auth_uri)
self.assertIn('authMechanism=MONGODB-X509', repl_auth_uri)
replset_param = 'replicaSet=' + self.repl.repl_id
self.assertIn(replset_param, repl_auth_uri)
def test_member_info_auth_uri(self):
member_params = {
'procParams': {
'clusterAuthMode': 'x509',
'setParameter': {'authenticationMechanisms': 'MONGODB-X509'}
}
}
self.repl_cfg = {
'login': TEST_SUBJECT,
'authSource': '$external',
'members': [member_params, member_params],
'sslParams': {
'tlsCAFile': certificate('ca.pem'),
'tlsCertificateKeyFile': certificate('server.pem'),
'tlsMode': 'requireTLS',
'tlsAllowInvalidCertificates': True
}
}
self.repl = ReplicaSet(self.repl_cfg)
for i in range(len(self.repl)):
member = self.repl.member_info(i)
self.assertIn('mongodb_auth_uri', member)
uri = member['mongodb_auth_uri']
host = Servers().hostname(member['server_id'])
self.assertIn(host, uri)
self.assertIn(TEST_SUBJECT, uri)
self.assertIn('authSource=$external', uri)
self.assertIn('authMechanism=MONGODB-X509', uri)
|
class ReplicaSetSSLTestCase(SSLTestCase):
def tearDown(self):
pass
def test_ssl_auth(self):
pass
def test_scram_with_ssl(self):
pass
def test_ssl_auth(self):
pass
def test_mongodb_auth_uri(self):
pass
def test_member_info_auth_uri(self):
pass
| 7 | 0 | 24 | 2 | 21 | 2 | 2 | 0.07 | 1 | 4 | 2 | 0 | 6 | 2 | 6 | 79 | 153 | 15 | 129 | 23 | 122 | 9 | 56 | 23 | 49 | 2 | 3 | 1 | 10 |
207 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/tests/test_replica_set.py
|
tests.test_replica_set.ReplicaSetTestCase
|
class ReplicaSetTestCase(unittest.TestCase):
    """Integration tests for the ReplicaSet container.

    Each test spawns real mongod processes; tearDown shuts down whatever
    replica set the test created.
    """

    def setUp(self):
        # Fresh port range so concurrent runs don't collide.
        PortPool().change_range()
        self.servers = Servers()
        self.servers.set_settings(*TEST_RELEASES)
        # Default config: two data members, one hidden member, one arbiter.
        self.repl_cfg = {'members': [{}, {}, {'rsParams': {'priority': 0, 'hidden': True}}, {'rsParams': {'arbiterOnly': True}}]}
        # self.repl = ReplicaSet(self.repl_cfg)

    def tearDown(self):
        # Tests that never built a replica set have no self.repl.
        if hasattr(self, 'repl'):
            self.repl.cleanup()

    def test_len(self):
        raise SkipTest("test is not currently working")
        # NOTE(review): everything below is unreachable until the skip
        # above is removed.
        self.repl = ReplicaSet(self.repl_cfg)
        self.assertTrue(len(self.repl) == len(self.repl_cfg['members']))
        self.repl.member_del(3)
        self.assertTrue(len(self.repl) == len(self.repl_cfg['members']) - 1)
        self.repl.repl_member_add({'rsParams': {'arbiterOnly': True}})
        self.assertTrue(len(self.repl) == len(self.repl_cfg['members']))

    def test_cleanup(self):
        self.repl_cfg = {'members': [{}, {}]}
        self.repl = ReplicaSet(self.repl_cfg)
        self.assertTrue(len(self.repl) == len(self.repl_cfg['members']))
        self.repl.cleanup()
        # cleanup() must remove every member.
        self.assertTrue(len(self.repl) == 0)

    def test_member_id_to_host(self):
        self.repl_cfg = {'members': [{}, {}]}
        self.repl = ReplicaSet(self.repl_cfg)
        members = self.repl.config['members']
        for member in members:
            host = self.repl.member_id_to_host(member['_id'])
            self.assertEqual(member['host'], host)

    def test_host2id(self):
        # Inverse mapping of test_member_id_to_host.
        self.repl_cfg = {'members': [{}, {}]}
        self.repl = ReplicaSet(self.repl_cfg)
        members = self.repl.config['members']
        for member in members:
            self.assertEqual(member['_id'],
                             self.repl.host2id(member['host']))

    def test_update_server_map(self):
        self.repl_cfg = {'members': [{}, {}]}
        self.repl = ReplicaSet(self.repl_cfg)
        origin = self.repl.server_map.copy()
        # Rebuilding the map from the same config must be a no-op.
        self.repl.update_server_map(self.repl.config)
        self.assertEqual(self.repl.server_map, origin)

    def test_repl_update(self):
        self.repl_cfg = {'members': [{}, {}, {'rsParams': {'priority': 0, 'hidden': True}}]}
        self.repl = ReplicaSet(self.repl_cfg)
        config = self.repl.config
        # Hidden members must also carry priority 0.
        config['members'][1]['priority'] = 0
        config['members'][1]['hidden'] = True
        self.assertTrue(self.repl.repl_update(config))
        self.assertTrue(self.repl.config['members'][1]['hidden'])

    def test_info(self):
        self.repl_cfg = {'members': [{}, {}]}
        self.repl = ReplicaSet(self.repl_cfg)
        cfg = self.repl.config
        info = self.repl.info()
        self.assertEqual(info['auth_key'], self.repl.auth_key)
        self.assertEqual(info['id'], self.repl.repl_id)
        self.assertEqual(len(info['members']), len(cfg['members']))
        # Compare member lists pairwise after sorting by _id.
        members1 = sorted(cfg['members'], key=lambda item: item['_id'])
        members2 = sorted(info['members'], key=lambda item: item['_id'])
        for i in range(len(members1)):
            self.assertEqual(members1[i]['_id'], members2[i]['_id'])
            self.assertEqual(members1[i]['host'], members2[i]['host'])

    def test_repl_member_add(self):
        self.repl_cfg = {'members': [{}, {}]}
        self.repl = ReplicaSet(self.repl_cfg)
        member_id = self.repl.repl_member_add({"rsParams": {"priority": 0, "hidden": True}})
        self.assertTrue(member_id >= 0)
        member = [item for item in self.repl.config['members'] if item['_id'] == member_id][0]
        self.assertTrue(member['hidden'])

    def test_run_command(self):
        self.repl_cfg = {'members': [{}, {}]}
        self.repl = ReplicaSet(self.repl_cfg)
        result = self.repl.run_command('serverStatus', arg=None, is_eval=False, member_id=0)['repl']
        for key in ('me', 'setName', 'primary', 'hosts'):
            self.assertTrue(key in result)
        self.assertEqual(self.repl.run_command(command="replSetGetStatus", is_eval=False)['ok'], 1)

    def test_config(self):
        self.repl_cfg = {'members': [{}, {}]}
        self.repl = ReplicaSet(self.repl_cfg)
        config = self.repl.config
        self.assertTrue('_id' in config)
        self.assertTrue('members' in config)

    def test_member_create(self):
        self.repl_cfg = {'members': [{}, {}]}
        self.repl = ReplicaSet(self.repl_cfg)
        # member_create() starts a new server without reconfiguring the set.
        result = self.repl.member_create({}, 13)
        self.assertTrue('host' in result)
        self.assertTrue('_id' in result)
        h_id = Servers().host_to_server_id(result['host'])
        h_info = Servers().info(h_id)
        self.assertIn(result['host'], h_info['mongodb_uri'])
        self.assertTrue(h_info['procInfo']['alive'])
        # The extra server is not tracked by self.repl; remove it here.
        Servers().remove(h_id)

    def test_member_del(self):
        self.repl_cfg = {'members': [{}, {}, {}]}
        self.repl = ReplicaSet(self.repl_cfg)
        m_count = len(self.repl.config['members'])
        self.assertTrue(self.repl.member_del(2))
        self.assertEqual(len(self.repl.config['members']), m_count - 1)

    def test_member_del_no_reconfig(self):
        self.repl_cfg = {'members': [{}, {}, {}]}
        self.repl = ReplicaSet(self.repl_cfg)
        m_count = len(self.repl.config['members'])
        # With reconfig=False the process goes away but the replica set
        # config keeps the member entry.
        self.assertTrue(self.repl.member_del(2, reconfig=False))
        self.assertEqual(len(self.repl.config['members']), m_count)
        # Drop the stale map entry so cleanup doesn't touch the
        # already-removed server.
        self.repl.server_map.pop(2)

    def test_member_update(self):
        self.repl = ReplicaSet(self.repl_cfg)
        member = [item for item in self.repl.config['members'] if item['_id'] == 2][0]
        self.assertTrue(member.get('hidden', False))
        # Promoting the member out of hidden requires non-zero priority.
        self.assertTrue(self.repl.member_update(2, {"rsParams": {"priority": 1, "hidden": False}}))
        member = [item for item in self.repl.config['members'] if item['_id'] == 2][0]
        self.assertFalse(member.get('hidden', False))

    def test_member_info(self):
        self.repl_cfg = {'members': [{}, {}]}
        self.repl = ReplicaSet(self.repl_cfg)
        member = [item for item in self.repl.config['members'] if item['_id'] == 1][0]
        result = self.repl.member_info(1)
        self.assertTrue(result['procInfo']['alive'])
        self.assertIn(member['host'], result['mongodb_uri'])
        self.assertTrue(len(result['rsInfo']) > 0)

    def test_member_command(self):
        # stop/start/restart must be reflected in procInfo['alive'].
        self.repl_cfg = {'members': [{}, {}]}
        self.repl = ReplicaSet(self.repl_cfg)
        _id = 1
        self.assertTrue(self.repl.member_info(_id)['procInfo']['alive'])
        self.repl.member_command(_id, 'stop')
        self.assertFalse(self.repl.member_info(_id)['procInfo']['alive'])
        self.repl.member_command(_id, 'start')
        self.assertTrue(self.repl.member_info(_id)['procInfo']['alive'])
        self.repl.member_command(_id, 'restart')
        self.assertTrue(self.repl.member_info(_id)['procInfo']['alive'])

    def test_members(self):
        self.repl_cfg = {'members': [{}, {}]}
        self.repl = ReplicaSet(self.repl_cfg)
        members1 = sorted(self.repl.config['members'], key=lambda item: item['_id'])
        members2 = sorted(self.repl.members(), key=lambda item: item['_id'])
        self.assertEqual(len(members1), len(members2))
        for i in range(len(members1)):
            self.assertEqual(members1[i]['host'], members2[i]['host'])
            self.assertEqual(members1[i]['_id'], members2[i]['_id'])

    def test_primary(self):
        self.repl_cfg = {'members': [{}, {}]}
        self.repl = ReplicaSet(self.repl_cfg)
        primary = self.repl.primary()
        server_id = Servers().host_to_server_id(primary)
        self.assertTrue(Servers().info(server_id)['statuses']['primary'])

    def test_get_members_in_state(self):
        self.repl_cfg = {'members': [{}, {}]}
        self.repl = ReplicaSet(self.repl_cfg)
        # State 1 is PRIMARY; exactly one member may hold it.
        primaries = self.repl.get_members_in_state(1)
        self.assertEqual(len(primaries), 1)
        self.assertEqual(primaries[0], self.repl.primary())

    def test_connection(self):
        self.repl_cfg = {'members': [{}, {}]}
        self.repl = ReplicaSet(self.repl_cfg)
        _id = 1
        hostname = self.repl.member_id_to_host(_id)
        self.assertTrue(self.repl.connection(timeout=5))
        self.assertTrue(self.repl.connection(hostname=hostname, timeout=5))
        # Connecting to a stopped member must fail.
        self.repl.member_command(_id, 'stop')
        self.assertRaises(pymongo.errors.AutoReconnect, lambda: self.repl.connection(hostname=hostname, timeout=5))

    def test_secondaries(self):
        self.repl_cfg = {'members': [{}, {}]}
        self.repl = ReplicaSet(self.repl_cfg)
        # secondaries() must match members in state 2 (SECONDARY).
        secondaries = [item['host'] for item in self.repl.secondaries()]
        self.assertEqual(secondaries, self.repl.get_members_in_state(2))

    def test_arbiters(self):
        self.repl = ReplicaSet(self.repl_cfg)
        # arbiters() must match members in state 7 (ARBITER).
        arbiters = [item['host'] for item in self.repl.arbiters()]
        self.assertEqual(arbiters, self.repl.get_members_in_state(7))

    def test_hidden(self):
        self.repl = ReplicaSet(self.repl_cfg)
        # Member 2 is the hidden member from setUp's default config.
        for _ in self.repl.hidden():
            self.assertTrue(self.repl.run_command('serverStatus', arg=None, is_eval=False, member_id=2)['repl']['hidden'])

    def test_passives(self):
        self.repl = ReplicaSet(self.repl_cfg)
        self.repl.repl_member_add({"rsParams": {"priority": 0}})
        # Priority-0 members must show up in isMaster's 'passives'.
        for member in self.repl.passives():
            self.assertTrue(member['host'] in self.repl.run_command('isMaster', is_eval=False).get('passives'))

    def test_servers(self):
        self.repl_cfg = {'members': [{}, {}]}
        self.repl = ReplicaSet(self.repl_cfg)
        self.repl.repl_member_add({"rsParams": {"priority": 0}})
        for member in self.repl.servers():
            self.assertTrue(member['host'] in self.repl.run_command('isMaster', is_eval=False).get('hosts'))

    def test_compare_servers_passives(self):
        self.repl = ReplicaSet(self.repl_cfg)
        self.repl.repl_member_add({"rsParams": {"priority": 0}})
        self.repl.repl_member_add({})
        # servers() and passives() must be disjoint.
        servers = self.repl.servers()
        passives = self.repl.passives()
        for item in servers:
            self.assertTrue(item not in passives)
        for item in passives:
            self.assertTrue(item not in servers)

    def test_wait_while_reachable(self):
        self.repl_cfg = {'members': [{}, {}]}
        self.repl = ReplicaSet(self.repl_cfg)
        servers = [member['host'] for member in self.repl.members()]
        self.assertTrue(self.repl.wait_while_reachable(servers, timeout=10))
        self.repl.member_command(1, 'stop')
        self.assertFalse(self.repl.wait_while_reachable(servers, timeout=10))

    def test_reset(self):
        self.repl_cfg = {'members': [{}, {}, {}]}
        self.repl = ReplicaSet(self.repl_cfg)
        server_ids = [m['server_id'] for m in self.repl.members()]
        all_hosts = [Servers().hostname(server_id) for server_id in server_ids]
        # Shut down all members of the ReplicaSet.
        for server_id in server_ids:
            Servers().command(server_id, 'stop')
        # Reset the ReplicaSet. --- We should be able to connect to all members.
        self.repl.reset()
        for host in all_hosts:
            # No ConnectionFailure/AutoReconnect.
            connected(pymongo.MongoClient(host))

    def test_rs_settings(self):
        if SERVER_VERSION < (2, 4):
            raise SkipTest(
                "Need at least MongoDB >= 2.4 to test replica set settings.")
        self.repl_cfg = {
            'rsSettings': {'heartbeatTimeoutSecs': 20},
            'members': [{}]
        }
        self.repl = ReplicaSet(self.repl_cfg)
        conn = self.repl.connection()
        # Newer servers expose the config via replSetGetConfig; older
        # ones only via the local.system.replset collection.
        if SERVER_VERSION >= (2, 8):
            config = conn.admin.command('replSetGetConfig')['config']
        else:
            config = conn.local.system.replset.find_one()
        self.assertEqual(config['settings']['heartbeatTimeoutSecs'], 20)
|
class ReplicaSetTestCase(unittest.TestCase):
    """Stub skeleton mirroring the ReplicaSet test suite (no bodies)."""

    def setUp(self):
        """Stub."""

    def tearDown(self):
        """Stub."""

    def test_len(self):
        """Stub."""

    def test_cleanup(self):
        """Stub."""

    def test_member_id_to_host(self):
        """Stub."""

    def test_host2id(self):
        """Stub."""

    def test_update_server_map(self):
        """Stub."""

    def test_repl_update(self):
        """Stub."""

    def test_info(self):
        """Stub."""

    def test_repl_member_add(self):
        """Stub."""

    def test_run_command(self):
        """Stub."""

    def test_config(self):
        """Stub."""

    def test_member_create(self):
        """Stub."""

    def test_member_del(self):
        """Stub."""

    def test_member_del_no_reconfig(self):
        """Stub."""

    def test_member_update(self):
        """Stub."""

    def test_member_info(self):
        """Stub."""

    def test_member_command(self):
        """Stub."""

    def test_members(self):
        """Stub."""

    def test_primary(self):
        """Stub."""

    def test_get_members_in_state(self):
        """Stub."""

    def test_connection(self):
        """Stub."""

    def test_secondaries(self):
        """Stub."""

    def test_arbiters(self):
        """Stub."""

    def test_hidden(self):
        """Stub."""

    def test_passives(self):
        """Stub."""

    def test_servers(self):
        """Stub."""

    def test_compare_servers_passives(self):
        """Stub."""

    def test_wait_while_reachable(self):
        """Stub."""

    def test_reset(self):
        """Stub."""

    def test_rs_settings(self):
        """Stub."""
208 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/tests/test_replica_sets.py
|
tests.test_replica_sets.ReplicaSetsTestCase
|
class ReplicaSetsTestCase(unittest.TestCase):
    """Integration tests for the ReplicaSets singleton container."""

    def setUp(self):
        # Fresh port range so concurrent runs don't collide.
        PortPool().change_range()
        self.rs = ReplicaSets()

    def tearDown(self):
        self.rs.cleanup()

    def waiting(self, fn, timeout=300, sleep=10):
        # Poll fn() until truthy or `timeout` seconds elapse.
        # Returns True on success, False on timeout.
        t_start = time.time()
        while not fn():
            if time.time() - t_start > timeout:
                return False
            time.sleep(sleep)
        return True

    def test_singleton(self):
        # ReplicaSets() must always hand back the same object.
        self.assertEqual(id(self.rs), id(ReplicaSets()))

    def test_bool(self):
        self.assertEqual(False, bool(self.rs))
        self.rs.create({'id': 'test-rs-1', 'members': [{}, {}]})
        self.assertEqual(True, bool(self.rs))

    def test_operations(self):
        # Container protocol: __setitem__/__getitem__/__delitem__/__len__.
        repl_cfg = {'members': [{}, {}]}
        repl = ReplicaSet(repl_cfg)
        self.assertEqual(len(self.rs), 0)
        operator.setitem(self.rs, 1, repl)
        self.assertEqual(len(self.rs), 1)
        self.assertEqual(operator.getitem(self.rs, 1).repl_id, repl.repl_id)
        operator.delitem(self.rs, 1)
        self.assertEqual(len(self.rs), 0)
        self.assertRaises(KeyError, operator.getitem, self.rs, 1)

    def test_operations2(self):
        # Iteration and membership over replica set ids.
        self.assertTrue(len(self.rs) == 0)
        self.rs.create({'id': 'test-rs-1', 'members': [{}, {}]})
        self.rs.create({'id': 'test-rs-2', 'members': [{}, {}]})
        self.assertTrue(len(self.rs) == 2)
        for key in self.rs:
            self.assertTrue(key in ('test-rs-1', 'test-rs-2'))
        for key in ('test-rs-1', 'test-rs-2'):
            self.assertTrue(key in self.rs)

    def test_cleanup(self):
        self.assertTrue(len(self.rs) == 0)
        self.rs.create({'id': 'test-rs-1', 'members': [{}, {}]})
        self.rs.create({'id': 'test-rs-2', 'members': [{}, {}]})
        self.assertTrue(len(self.rs) == 2)
        self.rs.cleanup()
        self.assertTrue(len(self.rs) == 0)

    def test_rs_new(self):
        port1, port2 = PortPool().port(check=True), PortPool().port(check=True)
        repl_id = self.rs.create({'id': 'test-rs-1',
                                  'members': [{"procParams": {"port": port1}},
                                              {"procParams": {"port": port2}}
                                              ]})
        # NOTE(review): this early return skips every assertion below, so
        # the test currently only checks that create() doesn't raise.
        return
        self.assertEqual(repl_id, 'test-rs-1')
        server1 = "{hostname}:{port}".format(hostname=HOSTNAME, port=port1)
        server2 = "{hostname}:{port}".format(hostname=HOSTNAME, port=port2)
        c = pymongo.MongoClient([server1, server2], replicaSet=repl_id)
        self.assertEqual(c.admin.command("replSetGetConfig")['config']['_id'], repl_id)
        c.close()

    def test_rs_new_with_auth(self):
        port1, port2 = PortPool().port(check=True), PortPool().port(check=True)
        # ('sercret' typo kept: the auth key only needs to be consistent.)
        repl_id = self.rs.create({'id': 'test-rs-1',
                                  'auth_key': 'sercret', 'login': 'admin', 'password': 'admin',
                                  'members': [{"procParams": {"port": port1}},
                                              {"procParams": {"port": port2}}
                                              ]})
        self.assertEqual(repl_id, 'test-rs-1')
        server1 = "{hostname}:{port}".format(hostname=HOSTNAME, port=port1)
        server2 = "{hostname}:{port}".format(hostname=HOSTNAME, port=port2)
        c = pymongo.MongoClient([server1, server2], replicaSet=repl_id)
        # An unauthenticated client must be rejected.
        self.assertRaises(pymongo.errors.OperationFailure, c.admin.list_collection_names)
        c.close()
        c = pymongo.MongoClient([server1, server2], replicaSet=repl_id, username='admin', password='admin')
        self.assertTrue(isinstance(c.admin.list_collection_names(), list))
        c.close()

    def test_info(self):
        repl_id = self.rs.create({'id': 'test-rs-1', 'members': [{}, {}]})
        info = self.rs.info(repl_id)
        self.assertTrue(isinstance(info, dict))
        for item in ("id", "mongodb_uri", "members", "orchestration"):
            self.assertTrue(item in info)
        self.assertEqual(info['id'], repl_id)
        self.assertEqual(len(info['members']), 2)
        # Every member host must appear in the set-level URI.
        mongodb_uri = info['mongodb_uri']
        for member in self.rs.members(repl_id):
            self.assertIn(member['host'], mongodb_uri)
        self.assertTrue(mongodb_uri.find('mongodb://') == 0)
        self.assertEqual(info['orchestration'], 'replica_sets')

    def test_info_with_auth(self):
        repl_id = self.rs.create({'id': 'test-rs-1', 'login': 'admin', 'password': 'admin', 'members': [{}, {}]})
        info = self.rs.info(repl_id)
        self.assertTrue(isinstance(info, dict))
        self.assertEqual(info['id'], repl_id)
        self.assertEqual(len(info['members']), 2)

    def test_primary(self):
        repl_id = self.rs.create({'id': 'test-rs-1', 'members': [{}, {}]})
        primary = self.rs.primary(repl_id)['mongodb_uri']
        c = connected(pymongo.MongoClient(primary))
        self.assertTrue(c.is_primary)
        c.close()

    def test_primary_stepdown(self):
        # This tests Server,
        # but only makes sense in the context of a replica set.
        repl_id = self.rs.create(
            {'id': 'test-rs-stepdown', 'members': [{}, {}, {}]})
        primary = self.rs.primary(repl_id)
        primary_server = Servers()._storage[primary['server_id']]
        # Presumably gives secondaries time to catch up so stepdown can
        # succeed — TODO confirm the 20s is required.
        time.sleep(20)
        # No Exception.
        primary_server.stepdown()
        self.assertNotEqual(primary['mongodb_uri'],
                            self.rs.primary(repl_id)['mongodb_uri'])

    def test_rs_del(self):
        self.rs.create({'members': [{}, {}]})
        repl_id = self.rs.create({'members': [{}, {}]})
        self.assertEqual(len(self.rs), 2)
        primary = self.rs.primary(repl_id)['mongodb_uri']
        connected(pymongo.MongoClient(primary))  # No error.
        self.rs.remove(repl_id)
        self.assertEqual(len(self.rs), 1)
        # The removed set's members must no longer accept connections.
        with self.assertRaises(pymongo.errors.PyMongoError):
            connected(pymongo.MongoClient(primary))

    def test_members(self):
        port1, port2 = PortPool().port(check=True), PortPool().port(check=True)
        server1 = "{hostname}:{port}".format(hostname=HOSTNAME, port=port1)
        server2 = "{hostname}:{port}".format(hostname=HOSTNAME, port=port2)
        repl_id = self.rs.create({'members': [{"procParams": {"port": port1}}, {"procParams": {"port": port2}}]})
        members = self.rs.members(repl_id)
        self.assertEqual(len(members), 2)
        self.assertTrue(server1 in [member['host'] for member in members])
        self.assertTrue(server2 in [member['host'] for member in members])

    def test_secondaries(self):
        repl_id = self.rs.create({'members': [{"rsParams": {"priority": 1.5}}, {}, {}]})
        secondaries = self.rs.secondaries(repl_id)
        self.assertEqual(len(secondaries), 2)

    def test_arbiters(self):
        repl_id = self.rs.create({'members': [{"rsParams": {"priority": 1.5}}, {}, {"rsParams": {"arbiterOnly": True}}]})
        arbiters = self.rs.arbiters(repl_id)
        self.assertEqual(len(arbiters), 1)

    def test_hidden(self):
        repl_id = self.rs.create({'members': [{"rsParams": {"priority": 1.5}}, {}, {"rsParams": {"priority": 0, "hidden": True}}]})
        hidden = self.rs.hidden(repl_id)
        self.assertEqual(len(hidden), 1)

    def test_passives(self):
        raise SkipTest("test is not currently working")
        # NOTE(review): unreachable until the skip above is removed.
        config = {"members": [{},
                              {"rsParams": {"priority": 0}},
                              {"rsParams": {"arbiterOnly": True}},
                              {"rsParams": {"priority": 0, 'hidden': True}},
                              {"rsParams": {"priority": 0, 'secondaryDelaySecs': 5}}]}
        repl_id = self.rs.create(config)
        passives = self.rs.passives(repl_id)
        self.assertEqual(len(passives), 1)

    def test_servers(self):
        raise SkipTest("test is not currently working")
        # NOTE(review): unreachable until the skip above is removed.
        config = {"members": [{},
                              {"rsParams": {"priority": 0}},
                              {"rsParams": {"arbiterOnly": True}},
                              {"rsParams": {"priority": 0, 'hidden': True}},
                              {"rsParams": {"priority": 0, 'secondaryDelaySecs': 5}}]}
        repl_id = self.rs.create(config)
        servers = self.rs.servers(repl_id)
        self.assertEqual(len(servers), 1)

    def test_compare_passives_and_servers(self):
        raise SkipTest("test is not currently working")
        # NOTE(review): unreachable until the skip above is removed.
        config = {"members": [{},
                              {"rsParams": {"priority": 0}},
                              {"rsParams": {"arbiterOnly": True}},
                              {"rsParams": {"priority": 0, 'hidden': True}},
                              {"rsParams": {"priority": 0, 'secondaryDelaySecs': 5}}]}
        repl_id = self.rs.create(config)
        # passives() and servers() must be disjoint.
        passives = [server['host'] for server in self.rs.passives(repl_id)]
        servers = [server['host'] for server in self.rs.servers(repl_id)]
        for item in passives:
            self.assertTrue(item not in servers)
        for item in servers:
            self.assertTrue(item not in passives)

    def test_member_info(self):
        repl_id = self.rs.create({'members': [{"rsParams": {"priority": 1.5}}, {"rsParams": {"arbiterOnly": True}}, {"rsParams": {"priority": 0, "hidden": True}}]})
        # Member 0: highest priority, so the primary.
        info = self.rs.member_info(repl_id, 0)
        for key in ('procInfo', 'mongodb_uri', 'statuses', 'rsInfo'):
            self.assertTrue(key in info)
        self.assertEqual(info['_id'], 0)
        self.assertTrue(info['statuses']['primary'])
        # Member 1: the arbiter.
        info = self.rs.member_info(repl_id, 1)
        for key in ('procInfo', 'mongodb_uri', 'statuses', 'rsInfo'):
            self.assertTrue(key in info)
        self.assertEqual(info['_id'], 1)
        self.assertTrue(info['rsInfo']['arbiterOnly'])
        # Member 2: the hidden member.
        info = self.rs.member_info(repl_id, 2)
        for key in ('procInfo', 'mongodb_uri', 'statuses', 'rsInfo'):
            self.assertTrue(key in info)
        self.assertEqual(info['_id'], 2)
        self.assertTrue(info['rsInfo']['hidden'])

    def test_tagging(self):
        tags_0 = {"status": "primary"}
        tags_1 = {"status": "arbiter"}
        tags_2 = {"status": "hidden"}
        repl_id = self.rs.create({'members': [{"rsParams": {"priority": 1.5, "tags": tags_0}}, {"rsParams": {"arbiterOnly": True}, "tags": tags_1}, {"rsParams": {"priority": 0, "hidden": True, "tags": tags_2}}]})
        self.assertEqual(tags_0, self.rs.primary(repl_id)['rsInfo']['tags'])
        # The arbiter's tags were passed OUTSIDE rsParams, so they are
        # not applied; the hidden member's tags were inside rsParams.
        member_arbiter = self.rs.arbiters(repl_id)[0]['_id']
        self.assertFalse('tags' in self.rs.member_info(repl_id, member_arbiter)['rsInfo'])
        member_hidden = self.rs.hidden(repl_id)[0]['_id']
        self.assertTrue('tags' in self.rs.member_info(repl_id, member_hidden)['rsInfo'])

    def test_member_del(self):
        repl_id = self.rs.create(
            {'members': [{"rsParams": {"priority": 1.5}}, {}, {}]})
        self.assertEqual(len(self.rs.members(repl_id)), 3)
        # Wait for at least one member to reach SECONDARY state.
        assert_eventually(lambda: len(self.rs.secondaries(repl_id)) > 0)
        secondary = self.rs.secondaries(repl_id)[0]
        connected(pymongo.MongoClient(secondary['host']))  # No error.
        self.assertTrue(self.rs.member_del(repl_id, secondary['_id']))
        self.assertEqual(len(self.rs.members(repl_id)), 2)
        with self.assertRaises(pymongo.errors.PyMongoError):
            connected(pymongo.MongoClient(secondary['host']))

    def test_member_add(self):
        repl_id = self.rs.create({'members': [{"rsParams": {"priority": 1.5}}, {}]})
        self.assertEqual(len(self.rs.members(repl_id)), 2)
        member_id = self.rs.member_add(repl_id, {"rsParams": {"priority": 0, "hidden": True}})
        self.assertEqual(len(self.rs.members(repl_id)), 3)
        info = self.rs.member_info(repl_id, member_id)
        self.assertTrue(info['rsInfo']['hidden'])

    def test_member_command(self):
        # stop/start/restart must be reflected in procInfo['alive'].
        _id = 1
        repl_id = self.rs.create({'members': [{"rsParams": {"priority": 1.5}}, {}]})
        self.assertTrue(self.rs.member_info(repl_id, _id)['procInfo']['alive'])
        self.rs.member_command(repl_id, _id, 'stop')
        self.assertFalse(self.rs.member_info(repl_id, _id)['procInfo']['alive'])
        self.rs.member_command(repl_id, _id, 'start')
        self.assertTrue(self.rs.member_info(repl_id, _id)['procInfo']['alive'])
        self.rs.member_command(repl_id, _id, 'restart')
        self.assertTrue(self.rs.member_info(repl_id, _id)['procInfo']['alive'])

    def test_member_freeze(self):
        # This tests Server, but only makes sense in the context of a replica set.
        # Freeze member 1 (priority 5), then force the primary (member 0,
        # priority 19) down; member 2 must win the election, and member 1
        # must take over only after its freeze expires.
        repl_id = self.rs.create(
            {'members': [{"rsParams": {"priority": 19}},
                         {"rsParams": {"priority": 5}}, {}]})
        next_primary_info = self.rs.member_info(repl_id, 2)
        next_primary = next_primary_info['mongodb_uri']
        secondary_info = self.rs.member_info(repl_id, 1)
        secondary_server = Servers()._storage[secondary_info['server_id']]
        primary_info = self.rs.member_info(repl_id, 0)
        primary_server = Servers()._storage[primary_info['server_id']]
        assert_eventually(lambda: primary_server.connection.is_primary)

        def freeze_and_stop():
            self.assertTrue(secondary_server.freeze(100))
            try:
                # Call replSetStepDown before killing the primary's process.
                # This raises OperationFailure if no secondaries are capable
                # of taking over.
                primary_server.connection.admin.command('replSetStepDown', 10)
            except pymongo.errors.AutoReconnect:
                # Have to stop the server as well so it doesn't get reelected.
                primary_server.stop()
                return True
            except pymongo.errors.OperationFailure:
                # No secondaries within 10 seconds of my optime...
                return False
            # NOTE(review): if replSetStepDown raises neither exception,
            # this falls through returning None and assert_eventually
            # retries — TODO confirm that is intended.

        assert_eventually(freeze_and_stop, "Primary didn't step down.")
        assert_eventually(lambda: (
            self.rs.primary(repl_id)['mongodb_uri'] == next_primary),
            "Secondary did not freeze.",
            max_tries=120
        )
        assert_eventually(lambda: (
            self.rs.primary(repl_id)['mongodb_uri'] ==
            self.rs.member_info(repl_id, 1)['mongodb_uri']),
            "Higher priority secondary never promoted.",
            max_tries=120
        )

    def test_member_update(self):
        repl_id = self.rs.create({'members': [{"rsParams": {"priority": 1.5}}, {"rsParams": {"priority": 0, "hidden": True}}, {}]})
        hidden = self.rs.hidden(repl_id)[0]
        self.assertTrue(self.rs.member_info(repl_id, hidden['_id'])['rsInfo']['hidden'])
        # Un-hiding requires non-zero priority.
        self.rs.member_update(repl_id, hidden['_id'], {"rsParams": {"priority": 1, "hidden": False}})
        self.assertEqual(len(self.rs.hidden(repl_id)), 0)
        self.assertFalse(self.rs.member_info(repl_id, hidden['_id'])['rsInfo'].get('hidden', False))

    def test_member_update_with_auth(self):
        # Same as test_member_update but on a set created with auth.
        repl_id = self.rs.create({'login': 'admin', 'password': 'admin',
                                  'members': [{"rsParams": {"priority": 1.5}},
                                              {"rsParams": {"priority": 0, "hidden": True}},
                                              {}]})
        hidden = self.rs.hidden(repl_id)[0]
        self.assertTrue(self.rs.member_info(repl_id, hidden['_id'])['rsInfo']['hidden'])
        self.rs.member_update(repl_id, hidden['_id'], {"rsParams": {"priority": 1, "hidden": False}})
        self.assertEqual(len(self.rs.hidden(repl_id)), 0)
        self.assertFalse(self.rs.member_info(repl_id, hidden['_id'])['rsInfo'].get('hidden', False))
|
class ReplicaSetsTestCase(unittest.TestCase):
    """Stub skeleton mirroring the ReplicaSets test suite (no bodies)."""

    def setUp(self):
        """Stub."""

    def tearDown(self):
        """Stub."""

    def waiting(self, fn, timeout=300, sleep=10):
        """Stub."""

    def test_singleton(self):
        """Stub."""

    def test_bool(self):
        """Stub."""

    def test_operations(self):
        """Stub."""

    def test_operations2(self):
        """Stub."""

    def test_cleanup(self):
        """Stub."""

    def test_rs_new(self):
        """Stub."""

    def test_rs_new_with_auth(self):
        """Stub."""

    def test_info(self):
        """Stub."""

    def test_info_with_auth(self):
        """Stub."""

    def test_primary(self):
        """Stub."""

    def test_primary_stepdown(self):
        """Stub."""

    def test_rs_del(self):
        """Stub."""

    def test_members(self):
        """Stub."""

    def test_secondaries(self):
        """Stub."""

    def test_arbiters(self):
        """Stub."""

    def test_hidden(self):
        """Stub."""

    def test_passives(self):
        """Stub."""

    def test_servers(self):
        """Stub."""

    def test_compare_passives_and_servers(self):
        """Stub."""

    def test_member_info(self):
        """Stub."""

    def test_tagging(self):
        """Stub."""

    def test_member_del(self):
        """Stub."""

    def test_member_add(self):
        """Stub."""

    def test_member_command(self):
        """Stub."""

    def test_member_freeze(self):
        """Stub."""

        def freeze_and_stop():
            """Stub for the helper nested in the full implementation."""

    def test_member_update(self):
        """Stub."""

    def test_member_update_with_auth(self):
        """Stub."""
| 32 | 0 | 10 | 0 | 9 | 1 | 1 | 0.04 | 1 | 9 | 4 | 0 | 30 | 1 | 30 | 102 | 327 | 41 | 277 | 111 | 245 | 11 | 241 | 111 | 209 | 4 | 2 | 2 | 44 |
209 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/tests/test_servers.py
|
tests.test_servers.ServerAuthTestCase
|
class ServerAuthTestCase(unittest.TestCase):
    """Tests for a single Server started with keyfile auth and an admin user."""

    def setUp(self):
        Server.mongod_default['nojournal'] = True
        # Fresh port range so concurrent runs don't collide.
        PortPool().change_range()
        self.mongod = os.path.join(os.environ.get('MONGOBIN', ''), 'mongod')
        self.server = Server(self.mongod, {}, auth_key='secret', login='admin', password='admin')
        self.server.start()

    def tearDown(self):
        if hasattr(self, 'server'):
            self.server.stop()
            self.server.cleanup()

    def test_mongodb_auth_uri(self):
        self.assertIn('mongodb_auth_uri', self.server.info())
        auth_uri = self.server.info()['mongodb_auth_uri']
        self.assertIn(self.server.hostname, auth_uri)
        self.assertIn('admin:admin', auth_uri)
        self.assertIn('authSource=admin', auth_uri)

    def test_mongos(self):
        raise SkipTest("test is not currently working")
        # NOTE(review): everything below is unreachable until the skip is
        # removed; it also uses legacy PyMongo APIs (collection_names,
        # Database.authenticate) that will need updating when re-enabled.
        self.server.stop()
        self.server.cleanup()
        del Server.mongod_default['nojournal']
        self.server = Server(self.mongod, {'configsvr': True, 'replSet': True}, auth_key='secret')
        self.server.start(30)
        mongos = os.path.join(os.environ.get('MONGOBIN', ''), 'mongos')
        self.server2 = Server(
            mongos, {'configdb': self.server.hostname},
            auth_key='secret', login='admin', password='admin')
        self.server2.start()
        for server in (self.server, self.server2):
            c = pymongo.MongoClient(server.host, server.port)
            self.assertRaises(pymongo.errors.OperationFailure, c.admin.collection_names)
            self.assertTrue(c.admin.authenticate('admin', 'admin'))
            self.assertTrue(isinstance(c.admin.collection_names(), list))
            c.close()
        self.server2.stop()
        self.server2.cleanup()

    def test_auth_connection(self):
        # The server's own cached connection is authenticated...
        self.assertTrue(isinstance(self.server.connection.admin.list_collection_names(), list))
        # ...but an anonymous client is rejected.
        c = pymongo.MongoClient(self.server.host, self.server.port)
        self.assertRaises(pymongo.errors.OperationFailure, c.admin.list_collection_names)
        # Auth is still enforced after a restart.
        self.server.restart()
        c = pymongo.MongoClient(self.server.host, self.server.port)
        self.assertRaises(pymongo.errors.OperationFailure, c.admin.list_collection_names)

    def test_auth_admin(self):
        c = pymongo.MongoClient(self.server.host, self.server.port)
        self.assertRaises(pymongo.errors.OperationFailure, c.admin.list_collection_names)
        c.close()
        c = pymongo.MongoClient(self.server.host, self.server.port, username='admin', password='admin')
        self.assertTrue(c.admin.command('isMaster'))
        self.assertTrue(isinstance(c.admin.list_collection_names(), list))
        c.close()

    def test_auth_collection(self):
        c = pymongo.MongoClient(self.server.host, self.server.port, username='admin', password='admin')
        self.assertTrue(bool(c.admin.command('ping')['ok']))
        db = c.test_server_auth
        db.command('createUser', 'user', pwd='userpass', roles=['readWrite'])
        c.close()
        # Reconnect and write to the database the new user can access.
        c = pymongo.MongoClient(self.server.host, self.server.port, username='admin', password='admin')
        db = c.test_server_auth
        self.assertTrue(db.foo.insert_one({'foo': 'bar'}))
        self.assertTrue(isinstance(db.foo.find_one(), dict))
        c.close()
|
class ServerAuthTestCase(unittest.TestCase):
    """Stub skeleton mirroring the Server auth test suite (no bodies)."""

    def setUp(self):
        """Stub."""

    def tearDown(self):
        """Stub."""

    def test_mongodb_auth_uri(self):
        """Stub."""

    def test_mongos(self):
        """Stub."""

    def test_auth_connection(self):
        """Stub."""

    def test_auth_admin(self):
        """Stub."""

    def test_auth_collection(self):
        """Stub."""
| 8 | 0 | 9 | 0 | 9 | 0 | 1 | 0 | 1 | 6 | 2 | 0 | 7 | 3 | 7 | 79 | 72 | 9 | 63 | 19 | 55 | 0 | 61 | 19 | 53 | 2 | 2 | 1 | 9 |
210 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/tests/test_container.py
|
tests.test_container.ContainerTestCase
|
class ContainerTestCase(unittest.TestCase):
    """Unit tests for the generic Container storage class."""

    def setUp(self):
        self.container = Container()
        self.container.set_settings()

    def tearDown(self):
        self.container.cleanup()

    def test_set_settings(self):
        """set_settings() must replace both releases and the default release."""
        default_release = 'old-release'
        releases = {default_release: os.path.join(os.getcwd(), 'bin')}
        orig_releases = self.container.releases
        orig_default_release = self.container.default_release
        try:
            self.container.set_settings(releases, default_release)
            self.assertEqual(releases, self.container.releases)
            self.assertEqual(default_release, self.container.default_release)
        finally:
            # Container settings are shared state; always restore them.
            self.container.set_settings(orig_releases, orig_default_release)

    def test_bin_path(self):
        releases = SON([('20-release', '/path/to/20/release'),
                        ('24.9-release', '/path/to/24.9/release'),
                        ('24-release', '/path/to/24/release'),
                        ('26-release', '/path/to/26/release')])
        default_release = '26-release'
        self.container.set_settings(releases, default_release)
        # Unknown version is an error.
        self.assertRaises(MongoOrchestrationError,
                          self.container.bin_path, '27')
        self.assertEqual(self.container.bin_path('20'),
                         releases['20-release'])
        # A version prefix picks the first matching release in order.
        self.assertEqual(self.container.bin_path('24'),
                         releases['24.9-release'])
        self.assertEqual(self.container.bin_path(), releases[default_release])
        # Clear default release.
        self.container.set_settings(releases)
        self.assertEqual(self.container.bin_path(), releases['20-release'])
        # Clear all releases.
        self.container.set_settings({})
        self.assertEqual(self.container.bin_path(), '')

    def test_getitem(self):
        self.container['key'] = 'value'
        self.assertEqual('value', self.container['key'])
        self.assertRaises(KeyError, operator.getitem, self.container, 'error-key')

    def test_setitem(self):
        self.assertEqual(None, operator.setitem(self.container, 'key', 'value'))
        # Once _obj_type is set, values of other types must be rejected.
        self.container._obj_type = int
        self.assertEqual(None, operator.setitem(self.container, 'key2', 15))
        self.assertRaises(ValueError, operator.setitem, self.container, 'key3', 'value')

    def test_delitem(self):
        self.assertEqual(0, len(self.container))
        self.container['key'] = 'value'
        self.assertEqual(1, len(self.container))
        self.assertEqual(None, operator.delitem(self.container, 'key'))
        self.assertEqual(0, len(self.container))

    def test_operations(self):
        self.assertEqual(0, len(self.container))
        keys = ('key1', 'key2', 'key3')
        values = ('value1', 'value2', 'value3')
        for key, value in zip(keys, values):
            self.container[key] = value
        self.assertEqual(len(keys), len(self.container))
        # test contains
        for key in keys:
            self.assertTrue(key in self.container)
        # test iteration
        for key in self.container:
            self.assertTrue(key in keys)
            self.assertTrue(self.container[key] in values)
        # test cleanup
        self.container.cleanup()
        self.assertEqual(0, len(self.container))

    def test_bool(self):
        self.assertEqual(False, bool(self.container))
        self.container['key'] = 'value'
        # BUG FIX: was assertTrue(True, bool(...)), which always passed
        # because assertTrue's second argument is only a failure message.
        self.assertTrue(bool(self.container))

    def test_notimplemented(self):
        # The abstract container leaves create/remove/info to subclasses.
        self.assertRaises(NotImplementedError, self.container.create)
        self.assertRaises(NotImplementedError, self.container.remove)
        self.assertRaises(NotImplementedError, self.container.info)
|
class ContainerTestCase(unittest.TestCase):
    """Stub skeleton mirroring the Container test suite (no bodies)."""

    def setUp(self):
        """Stub."""

    def tearDown(self):
        """Stub."""

    def test_set_settings(self):
        """Stub."""

    def test_bin_path(self):
        """Stub."""

    def test_getitem(self):
        """Stub."""

    def test_setitem(self):
        """Stub."""

    def test_delitem(self):
        """Stub."""

    def test_operations(self):
        """Stub."""

    def test_bool(self):
        """Stub."""

    def test_notimplemented(self):
        """Stub."""
| 11 | 0 | 8 | 0 | 7 | 1 | 1 | 0.07 | 1 | 8 | 2 | 0 | 10 | 1 | 10 | 82 | 87 | 10 | 72 | 21 | 61 | 5 | 65 | 21 | 54 | 4 | 2 | 1 | 13 |
211 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/tests/test_servers.py
|
tests.test_servers.ServerTestCase
|
class ServerTestCase(unittest.TestCase):
def setUp(self):
PortPool().change_range()
self.mongod = os.path.join(os.environ.get('MONGOBIN', ''), 'mongod')
self.server = Server(self.mongod, {})
def tearDown(self):
if hasattr(self, 'server'):
self.server.stop()
self.server.cleanup()
def test_server(self):
self.assertTrue(isinstance(self.server, Server))
def test_init_path(self):
self.server.cleanup()
mongod = os.path.join(os.environ.get('MONGOBIN', ''), 'mongod')
log_dir = os.path.join(tempfile.gettempdir(), os.path.split(tempfile.mktemp())[-1])
log_path = tempfile.mktemp(dir=log_dir)
db_path = os.path.join(tempfile.gettempdir(), os.path.split(tempfile.mktemp())[-1])
self.assertFalse(os.path.exists(log_dir))
self.assertFalse(os.path.exists(db_path))
self.server = Server(mongod, {'logpath': log_path, 'dbpath': db_path})
self.assertTrue(os.path.exists(log_dir))
self.assertTrue(os.path.exists(db_path))
def test_mongos(self):
raise SkipTest("test is not currently working")
self.server.cleanup()
del Server.mongod_default['nojournal']
self.server = Server(self.mongod, {'configsvr': True, 'replSet': True})
self.server.start(30)
mongos = os.path.join(os.environ.get('MONGOBIN', ''), 'mongos')
self.server2 = Server(mongos, {'configdb': self.server.hostname})
self.assertTrue(self.server2.start())
self.assertTrue(self.server2.info()['statuses'].get('mongos', False))
self.server2.stop()
self.server2.cleanup()
def test_run_command(self):
self.server.start(30)
def test_info(self):
self.server.start(30)
info = self.server.info()
for item in ("mongodb_uri", "statuses", "serverInfo",
"procInfo", "orchestration"):
self.assertTrue(item in info)
self.assertTrue(info['mongodb_uri'].find(self.server.hostname))
self.assertTrue(info['mongodb_uri'].find('mongodb://') == 0)
fd_log, log_path = tempfile.mkstemp()
os.close(fd_log)
db_path = tempfile.mkdtemp()
params = {'logpath': log_path, 'dbpath': db_path}
server2 = Server(self.mongod, params)
server2.start()
info2 = server2.info()
for param, value in params.items():
self.assertTrue(info2['procInfo']['params'].get(param, value) == value)
server2.stop()
info = server2.info()
self.assertEqual(len(info['serverInfo']), 0)
self.assertEqual(len(info['statuses']), 0)
self.assertEqual(info['orchestration'], 'servers')
server2.cleanup()
def test_command(self):
self.assertRaises(pymongo.errors.PyMongoError, self.server.run_command, 'serverStatus', None, False)
self.server.start(30)
self.assertEqual(self.server.run_command('serverStatus', arg=None, is_eval=False).get('ok', -1), 1)
def test_require_api_version_auth(self):
server = Server(self.mongod, {}, require_api_version="1", login='luke', password='ekul')
server.start()
client = server.connection
client.test.test.insert_one({})
server_params = client.admin.command("getParameter", "*")
assert server_params['requireApiVersion'] is True
def test_require_api_version_noauth(self):
server = Server(self.mongod, {}, require_api_version="1")
server.start()
client = server.connection
client.test.test.insert_one({})
server_params = client.admin.command("getParameter", "*")
assert server_params['requireApiVersion'] is True
def test_start(self):
self.assertNotIn('pid', self.server.info()['procInfo'])
self.assertTrue(self.server.start(30))
self.assertTrue(self.server.info()['procInfo']['pid'] > 0)
fake_server = Server('fake_proc_', {})
self.assertRaises(mongo_orchestration.errors.TimeoutError, fake_server.start, 5)
fake_server.cleanup()
def test_start_with_repair(self):
self.server.cleanup()
self.server = Server(self.mongod, {"nojournal": True})
self.server.start(30)
os.kill(self.server.pid, 9)
self.assertTrue(self.server._is_locked)
self.assertTrue(self.server.start(20))
def test_stop(self):
self.assertTrue(self.server.start(60))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server = self.server.hostname.split(':')[0]
s.connect((server, self.server.cfg['port']))
self.assertTrue(self.server.stop())
self.assertRaises(socket.error, s.connect, (server, self.server.cfg['port']))
def test_restart(self):
self.assertTrue(self.server.start(30))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server = self.server.hostname.split(':')[0]
s.connect((server, self.server.cfg['port']))
s.shutdown(0)
s.close()
self.assertTrue(self.server.restart(30))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((server, self.server.cfg['port']))
s.shutdown(0)
s.close()
def test_is_alive(self):
self.server.start()
self.assertTrue(self.server.is_alive)
self.server.stop()
self.assertFalse(self.server.is_alive)
self.server.restart()
self.assertTrue(self.server.is_alive)
def test_set_parameter(self):
self.server.cleanup()
cfg = {"setParameter": {"enableTestCommands": 1}}
self.server = Server(self.mongod, cfg)
self.server.start()
c = pymongo.MongoClient(self.server.hostname)
c.foo.drop_collection('bar')
c.foo.bar.insert_one({"data": "text stuff"})
# No Exception.
c.foo.bar.create_index([("data", pymongo.TEXT)])
# No Exception.
c.admin.command("sleep", secs=1)
def test_cleanup(self):
self.server.start(80)
self.assertTrue(os.path.exists(self.server.cfg['dbpath']))
self.assertTrue(os.path.exists(self.server.config_path))
self.server.stop()
self.server.cleanup()
self.assertFalse(os.path.exists(self.server.cfg['dbpath']))
self.assertFalse(os.path.exists(self.server.config_path))
def test_reset(self):
self.server.stop()
self.server.reset()
# No ConnectionFailure.
connected(pymongo.MongoClient(self.server.hostname))
|
class ServerTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_server(self):
pass
def test_init_path(self):
pass
def test_mongos(self):
pass
def test_run_command(self):
pass
def test_info(self):
pass
def test_command(self):
pass
def test_require_api_version_auth(self):
pass
def test_require_api_version_noauth(self):
pass
def test_start(self):
pass
def test_start_with_repair(self):
pass
def test_stop(self):
pass
def test_restart(self):
pass
def test_is_alive(self):
pass
def test_set_parameter(self):
pass
def test_cleanup(self):
pass
def test_reset(self):
pass
| 19 | 0 | 8 | 0 | 8 | 0 | 1 | 0.02 | 1 | 5 | 3 | 0 | 18 | 3 | 18 | 90 | 161 | 19 | 139 | 48 | 120 | 3 | 138 | 48 | 119 | 3 | 2 | 1 | 21 |
212 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/tests/test_servers.py
|
tests.test_servers.ServerVersionTestCase
|
class ServerVersionTestCase(unittest.TestCase):
def _test_version_parse(self, version_str, expected_version):
match = Server.version_patt.search(version_str)
assert match is not None
self.assertEqual(expected_version, match.group('version'))
def test_mongod_2_4_9(self):
self._test_version_parse("""db version v2.4.9
Wed Aug 3 11:02:33.903 git version: 52fe0d21959e32a5bdbecdc62057db386e4e029c
""", "2.4.9")
def test_mongos_2_4_9(self):
self._test_version_parse("""MongoS version 2.4.9 starting: pid=22088 port=27017 64-bit host=As-MBP-2.fios-router.home (--help for usage)
git version: 52fe0d21959e32a5bdbecdc62057db386e4e029c
build sys info: Darwin bs-osx-106-x86-64-2.10gen.cc 10.8.0 Darwin Kernel Version 10.8.0: Tue Jun 7 16:32:41 PDT 2011; root:xnu-1504.15.3~1/RELEASE_X86_64 x86_64 BOOST_LIB_VERSION=1_49
""", "2.4.9")
def test_mongod_3_3_10(self):
self._test_version_parse("""db version v3.3.10-262-g2743e90
git version: 2743e906fef318763e753a67967d503b37fcdd07
allocator: system
modules: none
build environment:
distarch: x86_64
target_arch: x86_64
""", "3.3.10")
def test_mongos_3_3_10(self):
self._test_version_parse("""mongos version v3.3.10-239-g4caf167
git version: 4caf167d193b3b6b4a4cf584f1d903de631a13ef
allocator: system
modules: none
build environment:
distarch: x86_64
target_arch: x86_64
""", "3.3.10")
|
class ServerVersionTestCase(unittest.TestCase):
def _test_version_parse(self, version_str, expected_version):
pass
def test_mongod_2_4_9(self):
pass
def test_mongos_2_4_9(self):
pass
def test_mongod_3_3_10(self):
pass
def test_mongos_3_3_10(self):
pass
| 6 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 5 | 0 | 5 | 77 | 36 | 4 | 32 | 7 | 26 | 0 | 13 | 7 | 7 | 1 | 2 | 0 | 5 |
213 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/tests/test_sharded_clusters.py
|
tests.test_sharded_clusters.ShardSSLTestCase
|
class ShardSSLTestCase(SSLTestCase):
@classmethod
def setUpClass(cls):
cls.x509_configsvrs = [
{'members': [{'procParams': {'clusterAuthMode': 'x509'}}]}]
def setUp(self):
self.sh = None
PortPool().change_range()
def tearDown(self):
if self.sh is not None:
self.sh.cleanup()
def test_ssl_auth(self):
raise SkipTest("test is not currently working")
shard_params = {
'shardParams': {
'procParams': {
'clusterAuthMode': 'x509',
'setParameter': {'authenticationMechanisms': 'MONGODB-X509'}
},
'members': [{}]
}
}
config = {
'login': TEST_SUBJECT,
'authSource': '$external',
'configsvrs': self.x509_configsvrs,
'routers': [{'clusterAuthMode': 'x509'}],
'shards': [shard_params, shard_params],
'sslParams': {
'tlsCAFile': certificate('ca.pem'),
'tlsCertificateKeyFile': certificate('server.pem'),
'tlsMode': 'requireTLS',
'tlsAllowInvalidCertificates': True
}
}
# Should not raise an Exception.
self.sh = ShardedCluster(config)
# Should create an extra user. No raise on authenticate.
host = self.sh.router['hostname']
client = pymongo.MongoClient(
host, tlsCertificateKeyFile=DEFAULT_CLIENT_CERT,
tlsAllowInvalidCertificates=True, username=DEFAULT_SUBJECT, mechanism='MONGODB-X509')
client['$external'].command('isMaster')
client.close()
# Should create the user we requested. No raise on authenticate.
client = pymongo.MongoClient(
host, tlsCertificateKeyFile=certificate('client.pem'),
tlsAllowInvalidCertificates=True, username=TEST_SUBJECT, mechanism='MONGODB-X509')
client['$external'].command('isMaster')
client.close()
def test_scram_with_ssl(self):
proc_params = {'procParams': {'clusterAuthMode': 'x509'}}
config = {
'login': 'luke',
'password': 'ekul',
'configsvrs': self.x509_configsvrs,
'routers': [{'clusterAuthMode': 'x509'}],
'shards': [{'shardParams': {'members': [proc_params]}},
{'shardParams': {'members': [proc_params]}}],
'sslParams': {
'tlsCAFile': certificate('ca.pem'),
'tlsCertificateKeyFile': certificate('server.pem'),
'tlsMode': 'requireTLS',
'tlsAllowInvalidCertificates': True
}
}
# Should not raise an Exception.
self.sh = ShardedCluster(config)
time.sleep(1)
# Should create the user we requested. No raise on authenticate.
host = self.sh.router['hostname']
client = pymongo.MongoClient(
host, tlsCertificateKeyFile=certificate('client.pem'),
tlsAllowInvalidCertificates=True, username='luke', password='ekul')
# This should be the only user.
self.assertEqual(len(client.admin.command('usersInfo')['users']), 1)
self.assertFalse(client['$external'].command('usersInfo')['users'])
def test_ssl(self):
config = {
'configsvrs': [{}],
'routers': [{}],
'shards': [create_shard(1), create_shard(2)],
'sslParams': {
'tlsCAFile': certificate('ca.pem'),
'tlsCertificateKeyFile': certificate('server.pem'),
'tlsMode': 'requireTLS',
'tlsAllowInvalidCertificates': True
}
}
# Should not raise an Exception.
self.sh = ShardedCluster(config)
# Server should require SSL.
host = self.sh.router['hostname']
with self.assertRaises(pymongo.errors.ConnectionFailure):
connected(pymongo.MongoClient(host))
# This shouldn't raise.
connected(
pymongo.MongoClient(host, tlsCertificateKeyFile=certificate('client.pem'),
tlsAllowInvalidCertificates=True))
def test_mongodb_auth_uri(self):
raise SkipTest("test is not currently working")
if SERVER_VERSION < (2, 4):
raise SkipTest("Need to be able to set 'authenticationMechanisms' "
"parameter to test.")
shard_params = {
'shardParams': {
'procParams': {
'clusterAuthMode': 'x509',
'setParameter': {'authenticationMechanisms': 'MONGODB-X509'}
},
'members': [{}]
}
}
config = {
'login': TEST_SUBJECT,
'authSource': '$external',
'configsvrs': self.x509_configsvrs,
'routers': [{'clusterAuthMode': 'x509'}],
'shards': [shard_params, shard_params],
'sslParams': {
'tlsCAFile': certificate('ca.pem'),
'tlsCertificateKeyFile': certificate('server.pem'),
'tlsMode': 'requireTLS',
'tlsAllowInvalidCertificates': True
}
}
self.sh = ShardedCluster(config)
self.assertIn('mongodb_auth_uri', self.sh.info())
auth_uri = self.sh.info()['mongodb_auth_uri']
hosts = ','.join(r['hostname'] for r in self.sh.routers)
self.assertIn(hosts, auth_uri)
self.assertIn(TEST_SUBJECT, auth_uri)
self.assertIn('authSource=$external', auth_uri)
self.assertIn('authMechanism=MONGODB-X509', auth_uri)
|
class ShardSSLTestCase(SSLTestCase):
@classmethod
def setUpClass(cls):
pass
def setUpClass(cls):
pass
def tearDown(self):
pass
def test_ssl_auth(self):
pass
def test_scram_with_ssl(self):
pass
def test_ssl_auth(self):
pass
def test_mongodb_auth_uri(self):
pass
| 9 | 0 | 20 | 1 | 18 | 1 | 1 | 0.07 | 1 | 3 | 2 | 0 | 6 | 1 | 7 | 80 | 147 | 13 | 125 | 24 | 116 | 9 | 51 | 23 | 43 | 2 | 3 | 1 | 9 |
214 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/tests/test_sharded_clusters.py
|
tests.test_sharded_clusters.ShardTestCase
|
class ShardTestCase(unittest.TestCase):
def setUp(self):
PortPool().change_range()
def tearDown(self):
if hasattr(self, 'sh') and self.sh is not None:
self.sh.cleanup()
def test_len(self):
raise SkipTest("test is not currently working")
config = {}
self.sh = ShardedCluster(config)
self.assertEqual(len(self.sh), 0)
self.sh.member_add('test01', {})
self.assertEqual(len(self.sh), 1)
self.sh.member_add('test02', {})
self.assertEqual(len(self.sh), 2)
while self.sh.member_remove('test01')['state'] != 'completed':
time.sleep(1)
self.assertEqual(len(self.sh), 1)
def test_sh_new(self):
port = PortPool().port(check=True)
config = {
'id': 'shard_cluster_1',
'configsvrs': [{}],
'routers': [{"port": port}],
'shards': [create_shard(1), create_shard(2)]
}
self.sh = ShardedCluster(config)
c = pymongo.MongoClient(self.sh.router['hostname'])
for item in c.admin.command("listShards")['shards']:
self.assertTrue(item['_id'] in ('sh01', 'sh02'))
def test_sh_new_with_auth(self):
port = PortPool().port(check=True)
config = {
'id': 'shard_cluster_1',
'auth_key': 'secret',
'login': 'admin',
'password': 'adminpass',
'configsvrs': [{}],
'routers': [{"port": port}],
'shards': [create_shard(1), create_shard(2)]
}
self.sh = ShardedCluster(config)
c = pymongo.MongoClient(self.sh.router['hostname'])
self.assertRaises(pymongo.errors.OperationFailure, c.admin.command, "listShards")
c.close()
c = pymongo.MongoClient(self.sh.router['hostname'], username='admin', password='adminpass')
self.assertTrue(isinstance(c.admin.command("listShards"), dict))
for item in c.admin.command("listShards")['shards']:
self.assertTrue(item['_id'] in ('sh01', 'sh02'))
c.close()
def test_cleanup(self):
config = {
'id': 'shard_cluster_1',
'configsvrs': [{}],
'routers': [{}],
'shards': [create_shard(1),create_shard(2),
{'id': 'sh-rs-01', 'shardParams': {'id': 'rs1', 'members': [{}, {}]}},
]
}
self.sh = ShardedCluster(config)
self.assertTrue(len(self.sh) == len(config['shards']))
self.sh.cleanup()
self.assertTrue(len(self.sh) == 0)
def test_configsvrs(self):
config = {}
self.sh = ShardedCluster(config)
self.assertEqual(len(self.sh.configsvrs), 1)
self.sh.cleanup()
config = {'configsvrs': [{}]}
self.sh = ShardedCluster(config)
self.assertEqual(len(self.sh.configsvrs), 1)
self.sh.cleanup()
def test_routers(self):
config = {}
self.sh = ShardedCluster(config)
self.assertEqual(len(self.sh.routers), 1)
self.sh.cleanup()
config = {'routers': [{}, {}, {}]}
self.sh = ShardedCluster(config)
self.assertEqual(len(self.sh.routers), 3)
self.sh.cleanup()
def test_members(self):
config = {}
self.sh = ShardedCluster(config)
self.assertEqual(len(self.sh.members), 0)
self.sh.cleanup()
config = {'shards': [create_shard(i) for i in range(3)]}
self.sh = ShardedCluster(config)
self.assertEqual(len(self.sh.members), 3)
self.sh.cleanup()
def test_router(self):
config = {}
self.sh = ShardedCluster(config)
self.assertTrue(Servers().info(self.sh.router['id'])['statuses']['mongos'])
self.sh.cleanup()
config = {'routers': [{}, {}, {}]}
self.sh = ShardedCluster(config)
routers = self.sh.routers
hostname = routers[1]['hostname']
_id = routers[1]['id']
# stop routers 0 and 2
Servers().command(routers[0]['id'], 'stop')
Servers().command(routers[2]['id'], 'stop')
router = self.sh.router
self.assertEqual(router['id'], _id)
self.assertEqual(router['hostname'], hostname)
self.sh.cleanup()
def test_router_add(self):
config = {}
self.sh = ShardedCluster(config)
self.assertEqual(len(self.sh.routers), 1)
self.sh.router_add({})
self.assertEqual(len(self.sh.routers), 2)
self.sh.router_add({})
self.assertEqual(len(self.sh.routers), 3)
self.sh.cleanup()
def test_router_command(self):
config = {'shards': [create_shard(), create_shard(1)]}
self.sh = ShardedCluster(config)
result = self.sh.router_command('listShards', is_eval=False)
self.assertEqual(result['ok'], 1)
self.sh.cleanup()
def test_member_add(self):
raise SkipTest("test is not currently working")
config = {}
self.sh = ShardedCluster(config)
self.assertEqual(len(self.sh.members), 0)
result = self.sh.member_add('test1', {})
self.assertTrue(result.get('isReplicaSet', False))
self.assertEqual(result['id'], 'test1')
self.assertEqual(len(self.sh.members), 1)
result = self.sh.member_add('test2', {'id': 'rs1', 'members': [{}, {}]})
self.assertFalse(result.get('isServer', False))
self.assertTrue(result.get('isReplicaSet', False))
self.assertEqual(result['id'], 'test2')
self.assertEqual(len(self.sh.members), 2)
self.sh.cleanup()
def test_member_info(self):
config = {'shards': [create_shard(), {'id': 'sh-rs-01', 'shardParams': {'id': 'rs1', 'members': [{}, {}]}}]}
self.sh = ShardedCluster(config)
info = self.sh.member_info('sh00')
self.assertEqual(info['id'], 'sh00')
self.assertTrue(info['isReplicaSet'])
self.assertTrue('_id' in info)
info = self.sh.member_info('sh-rs-01')
self.assertEqual(info['id'], 'sh-rs-01')
self.assertTrue(info['isReplicaSet'])
self.assertTrue('_id' in info)
self.sh.cleanup()
def test_member_info_with_auth(self):
config = {'auth_key': 'secret', 'login': 'admin', 'password': 'adminpass', 'shards': [create_shard(), {'id': 'sh-rs-01', 'shardParams': {'id': 'rs1', 'members': [{}, {}]}}]}
self.sh = ShardedCluster(config)
info = self.sh.member_info('sh00')
self.assertEqual(info['id'], 'sh00')
self.assertTrue(info['isReplicaSet'])
self.assertTrue('_id' in info)
info = self.sh.member_info('sh-rs-01')
self.assertEqual(info['id'], 'sh-rs-01')
self.assertTrue(info['isReplicaSet'])
self.assertTrue('_id' in info)
self.sh.cleanup()
def test_member_remove(self):
config = {'shards': [create_shard(1), create_shard(2), {'id': 'sh-rs-01', 'shardParams': {'id': 'rs1', 'members': [{}, {}]}}]}
self.sh = ShardedCluster(config)
self.assertEqual(len(self.sh.members), 3)
# remove member-host
result = self.sh.member_remove('sh01')
self.assertEqual(len(self.sh.members), 3)
self.assertEqual(result['state'], 'started')
self.assertEqual(result['shard'], 'sh01')
time.sleep(5)
result = self.sh.member_remove('sh01')
self.assertEqual(result['state'], 'completed')
self.assertEqual(len(self.sh.members), 2)
self.assertEqual(result['shard'], 'sh01')
# remove member-replicaset
result = self.sh.member_remove('sh-rs-01')
self.assertEqual(len(self.sh.members), 2)
self.assertEqual(result['state'], 'started')
self.assertEqual(result['shard'], 'sh-rs-01')
time.sleep(7)
result = self.sh.member_remove('sh-rs-01')
self.assertEqual(result['state'], 'completed')
self.assertEqual(len(self.sh.members), 1)
self.assertEqual(result['shard'], 'sh-rs-01')
self.sh.cleanup()
def test_info(self):
config = {
'configsvrs': [{}],
'routers': [{}, {}, {}],
'shards': [create_shard(1), create_shard(2)]
}
self.sh = ShardedCluster(config)
info = self.sh.info()
self.assertTrue('shards' in info)
self.assertTrue('configsvrs' in info)
self.assertTrue('routers' in info)
self.assertEqual(len(info['shards']), 2)
self.assertEqual(len(info['configsvrs']), 1)
self.assertEqual(len(info['routers']), 3)
self.sh.cleanup()
def test_tagging(self):
raise SkipTest("test is not currently working")
if SERVER_VERSION < (2, 2, 0):
raise SkipTest("mongodb v{version} doesn't support shard tagging"
.format(version='.'.join(map(str, SERVER_VERSION))))
tags = ['tag1', 'tag2']
tags_repl = ['replTag']
config = {
'configsvrs': [{}], 'routers': [{}],
'shards': [{'id': 'sh01', 'shardParams': {'tags': tags, 'members': [{}]}},
create_shard(2),
{'id': 'sh03', 'shardParams': {'tags': tags_repl, 'members': [{}, {}]}}
]
}
self.sh = ShardedCluster(config)
self.assertEqual(tags, self.sh.member_info('sh01')['tags'])
self.assertEqual([], self.sh.member_info('sh02')['tags'])
self.assertEqual(tags_repl, self.sh.member_info('sh03')['tags'])
self.sh.cleanup()
def test_reset(self):
raise SkipTest("test is not currently working")
all_hosts = []
# Start a ShardedCluster with 1 router and 1 config server.
self.sh = ShardedCluster({})
# Add 1 Server shard and 1 ReplicaSet shard.
server_id = self.sh.member_add(params={})['_id']
all_hosts.append(Servers().hostname(server_id))
repl_id = self.sh.member_add(params={'members': [{}, {}, {}]})['_id']
# Shut down the standalone.
Servers().command(server_id, 'stop')
# Shut down each member of the replica set.
server_ids = [m['server_id'] for m in ReplicaSets().members(repl_id)]
for s_id in server_ids:
Servers().command(s_id, 'stop')
all_hosts.append(Servers().hostname(s_id))
# Shut down config server and router.
config_id = self.sh.configsvrs[0]['id']
print("config_id=%r" % config_id)
if self.sh.uses_rs_configdb:
all_hosts.append(ReplicaSets().info(config_id)['mongodb_uri'])
for member in ReplicaSets().members(config_id):
Servers().command(member['server_id'], 'stop')
else:
all_hosts.append(Servers().hostname(config_id))
Servers().command(config_id, 'stop')
router_id = self.sh.routers[0]['id']
print("router_id=%r" % router_id)
all_hosts.append(Servers().hostname(router_id))
Servers().command(router_id, 'stop')
# Reset the ShardedCluster.
self.sh.reset()
# Everything is up.
for host in all_hosts:
# No ConnectionFailure/AutoReconnect.
pymongo.MongoClient(host)
def test_mongodb_auth_uri(self):
self.sh = ShardedCluster({
'login': 'luke', 'password': 'ekul',
'routers': [{}, {}],
'shards': [create_shard()]
})
self.assertIn('mongodb_auth_uri', self.sh.info())
auth_uri = self.sh.info()['mongodb_auth_uri']
hosts = ','.join(r['hostname'] for r in self.sh.routers)
self.assertIn(hosts, auth_uri)
self.assertIn('luke:ekul', auth_uri)
self.assertIn('authSource=admin', auth_uri)
def test_auth_key_without_login(self):
self.sh = ShardedCluster({
'auth_key': 'secret',
'routers': [{}],
'shards': [create_shard()]
})
self.assertIsNotNone(self.sh.key_file)
def test_require_api_version_auth(self):
self.sh = ShardedCluster({
'login': 'luke', 'password': 'ekul',
'routers': [{}],
'shards': [create_shard()],
"requireApiVersion": "1"
})
client = self.sh.connection()
server_params = client.admin.command("getParameter", "*")
client.test.test.insert_one({})
assert server_params['requireApiVersion'] is True
def test_require_api_version_noauth(self):
self.sh = ShardedCluster({
'login': 'luke', 'password': 'ekul',
'routers': [{}],
'shards': [create_shard()],
"requireApiVersion": "1"
})
client = self.sh.connection()
server_params = client.admin.command("getParameter", "*")
client.test.test.insert_one({})
assert server_params['requireApiVersion'] is True
|
class ShardTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_len(self):
pass
def test_sh_new(self):
pass
def test_sh_new_with_auth(self):
pass
def test_cleanup(self):
pass
def test_configsvrs(self):
pass
def test_routers(self):
pass
def test_members(self):
pass
def test_routers(self):
pass
def test_router_add(self):
pass
def test_router_command(self):
pass
def test_member_add(self):
pass
def test_member_info(self):
pass
def test_member_info_with_auth(self):
pass
def test_member_remove(self):
pass
def test_info(self):
pass
def test_tagging(self):
pass
def test_reset(self):
pass
def test_mongodb_auth_uri(self):
pass
def test_auth_key_without_login(self):
pass
def test_require_api_version_auth(self):
pass
def test_require_api_version_noauth(self):
pass
| 24 | 0 | 14 | 1 | 12 | 0 | 1 | 0.04 | 1 | 9 | 4 | 0 | 23 | 1 | 23 | 95 | 340 | 43 | 286 | 74 | 262 | 11 | 236 | 74 | 212 | 5 | 2 | 2 | 32 |
215 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/tests/test_singleton.py
|
tests.test_singleton.SingletonTestCase
|
class SingletonTestCase(unittest.TestCase):
def test_singleton(self):
a = Singleton()
b = Singleton()
self.assertEqual(id(a), id(b))
c = Singleton()
self.assertEqual(id(c), id(b))
|
class SingletonTestCase(unittest.TestCase):
def test_singleton(self):
pass
| 2 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 73 | 8 | 1 | 7 | 5 | 5 | 0 | 7 | 5 | 5 | 1 | 2 | 0 | 1 |
216 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/tests/__init__.py
|
tests.SSLTestCase
|
class SSLTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
if not SSL_ENABLED:
raise SkipTest("SSL is not enabled on this server.")
|
class SSLTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
| 3 | 0 | 3 | 0 | 3 | 0 | 2 | 0 | 1 | 1 | 0 | 3 | 0 | 0 | 1 | 73 | 6 | 1 | 5 | 3 | 2 | 0 | 4 | 2 | 2 | 2 | 2 | 1 | 2 |
217 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/tests/test_servers.py
|
tests.test_servers.ServersTestCase
|
class ServersTestCase(unittest.TestCase):
def setUp(self):
PortPool().change_range()
self.path = tempfile.mktemp(prefix="test-storage")
self.servers = Servers()
self.servers.set_settings(*TEST_RELEASES)
def remove_path(self, path):
onerror = lambda func, filepath, exc_info: (os.chmod(filepath, stat.S_IWUSR), func(filepath))
if os.path.isfile(path):
try:
os.remove(path)
except OSError:
time.sleep(2)
onerror(os.remove, path, None)
def tearDown(self):
self.servers.cleanup()
self.remove_path(self.path)
def test_singleton(self):
self.assertEqual(id(self.servers), id(Servers()))
def test_bool(self):
self.assertEqual(False, bool(self.servers))
self.servers.create('mongod', {}, autostart=False)
self.assertTrue(True, bool(self.servers))
def test_operations(self):
server_id = self.servers.create('mongod', {}, autostart=False)
self.assertTrue(len(self.servers) == 1)
self.assertTrue(server_id in self.servers)
server_id2 = 'server-id2'
server2 = Server(os.path.join(os.environ.get('MONGOBIN', ''), 'mongod'), {})
server2.start(30)
server2_pid = server2.info()['procInfo']['pid']
self.servers[server_id2] = server2
self.assertTrue(self.servers[server_id2]['procInfo']['pid'] == server2_pid)
self.assertTrue(server_id2 in self.servers)
for h_id in self.servers:
self.assertTrue(h_id in (server_id, server_id2))
operator.delitem(self.servers, server_id2)
self.assertFalse(server_id2 in self.servers)
server2.stop()
server2.cleanup()
def test_cleanup(self):
self.servers.create('mongod', {}, autostart=False)
self.servers.create('mongod', {}, autostart=True)
self.assertTrue(len(self.servers) == 2)
self.servers.cleanup()
self.assertTrue(len(self.servers) == 0)
def test_new_server(self):
self.assertTrue(len(self.servers) == 0)
server_id = self.servers.create('mongod', {}, autostart=False)
info = self.servers.info(server_id)
self.assertTrue(len(self.servers) == 1)
self.assertNotIn('pid', info['procInfo'])
server_id2 = self.servers.create('mongod', {}, autostart=True)
info = self.servers.info(server_id2)
self.assertTrue(info['procInfo']['pid'] > 0)
self.assertRaises(mongo_orchestration.errors.TimeoutError, self.servers.create, 'fake_process_', {})
def test_new_server_with_auth(self):
server_id = self.servers.create('mongod', {}, login='adminko', password='password', autostart=True)
hostname = self.servers.hostname(server_id)
c = pymongo.MongoClient(hostname)
self.assertRaises(pymongo.errors.OperationFailure, c.admin.list_collection_names)
c.close()
c = pymongo.MongoClient(hostname, username='adminko', password='password')
self.assertTrue(c.admin.command('isMaster'))
self.assertTrue(isinstance(c.admin.list_collection_names(), list))
c.close()
def test_hdel(self):
self.assertEqual(len(self.servers), 0)
h_id = self.servers.create('mongod', {}, autostart=True)
self.assertEqual(len(self.servers), 1)
h_info = self.servers.info(h_id)['procInfo']
self.assertTrue(os.path.exists(h_info['params']['dbpath']))
self.assertTrue(os.path.exists(h_info['optfile']))
self.servers.remove(h_id)
self.assertEqual(len(self.servers), 0) # check length
# check cleanup
self.assertFalse(os.path.exists(h_info['params']['dbpath']))
self.assertFalse(os.path.exists(h_info['optfile']))
def test_hcommand(self):
h_id = self.servers.create('mongod', {}, autostart=False)
self.assertTrue(self.servers.command(h_id, 'start'))
self.assertTrue(self.servers.command(h_id, 'stop'))
self.assertTrue(self.servers.command(h_id, 'start'))
self.assertTrue(self.servers.command(h_id, 'restart'))
self.assertRaises(ValueError, self.servers.command, h_id, 'fake')
def test_hinfo(self):
h_id = self.servers.create('mongod', {}, autostart=False)
info = self.servers.info(h_id)
self.assertEqual(info['id'], h_id)
self.assertNotIn('pid', info['procInfo'])
self.assertEqual(info['statuses'], {})
self.assertEqual(info['serverInfo'], {})
def test_host_to_server_id(self):
h_id = self.servers.create('mongod', {}, autostart=True)
h_uri = self.servers.hostname(h_id)
h2_id = self.servers.create('mongod', {}, autostart=True)
h2_uri = self.servers.hostname(h2_id)
self.assertEqual(self.servers.host_to_server_id(h_uri), h_id)
self.assertEqual(self.servers.host_to_server_id(h2_uri), h2_id)
def test_hostname(self):
h_id = self.servers.create('mongod', {}, autostart=True)
h_uri = self.servers.hostname(h_id)
self.assertEqual(self.servers.hostname(h_id), h_uri)
def test_is_alive(self):
h_id = self.servers.create('mongod', {}, autostart=True)
self.assertEqual(self.servers.is_alive(h_id), True)
self.servers.command(h_id, 'stop')
self.assertEqual(self.servers.is_alive(h_id), False)
def test_db_command(self):
h_id = self.servers.create('mongod', {}, autostart=False)
self.assertRaises(pymongo.errors.PyMongoError, self.servers.db_command, h_id, 'serverStatus', None, False)
self.servers.command(h_id, 'start', 10)
self.assertEqual(self.servers.db_command(h_id, 'serverStatus', arg=None, is_eval=False).get('ok', -1), 1)
def test_require_api_version(self):
h_id = self.servers.create('mongod', {}, require_api_version="1", autostart=True)
self.assertEqual(self.servers.is_alive(h_id), True)
def test_id_specified(self):
id = 'xyzzy'
h_id = self.servers.create('mongod', {}, autostart=False, server_id=id)
self.assertEqual(id, h_id)
def test_majority_read_concern(self):
Server.enable_majority_read_concern = True
server_id = self.servers.create('mongod', {})
try:
opts = self.servers.db_command(server_id, 'getCmdLineOpts')
majority_rc_enabled = (opts
.get('parsed', {})
.get('replication', {})
.get('enableMajorityReadConcern'))
if SERVER_VERSION >= (3, 2) and SERVER_VERSION < (5, 0):
self.assertTrue(majority_rc_enabled)
else:
self.assertFalse(majority_rc_enabled)
finally:
Server.enable_majority_read_concern = False
|
class ServersTestCase(unittest.TestCase):
def setUp(self):
pass
def remove_path(self, path):
pass
def tearDown(self):
pass
def test_singleton(self):
pass
def test_bool(self):
pass
def test_operations(self):
pass
def test_cleanup(self):
pass
def test_new_server(self):
pass
def test_new_server_with_auth(self):
pass
def test_hdel(self):
pass
def test_hcommand(self):
pass
def test_hinfo(self):
pass
def test_host_to_server_id(self):
pass
def test_hostname(self):
pass
def test_is_alive(self):
pass
def test_db_command(self):
pass
def test_require_api_version(self):
pass
def test_id_specified(self):
pass
def test_majority_read_concern(self):
pass
| 20 | 0 | 7 | 0 | 7 | 0 | 1 | 0.01 | 1 | 8 | 4 | 0 | 19 | 2 | 19 | 91 | 155 | 20 | 134 | 52 | 114 | 2 | 129 | 52 | 109 | 3 | 2 | 2 | 23 |
218 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/mongo_orchestration/sharded_clusters.py
|
mongo_orchestration.sharded_clusters.ShardedClusters
|
class ShardedClusters(Singleton, Container):
    """Dict-like registry of ShardedCluster objects, keyed by cluster id."""
    _name = 'shards'
    _obj_type = ShardedCluster
    releases = {}
    pids_file = tempfile.mktemp(prefix="mongo-")

    def set_settings(self, releases=None, default_release=None):
        """Set storage paths for this container and for replica sets."""
        super(ShardedClusters, self).set_settings(releases, default_release)
        ReplicaSets().set_settings(releases, default_release)

    def __getitem__(self, key):
        return self.info(key)

    def cleanup(self):
        """Remove every cluster together with its data."""
        for cluster_id in self:
            self.remove(cluster_id)

    def create(self, params):
        """Create a new ShardedCluster described by ``params``.

        Returns the cluster id under which it was registered.
        Raises ShardedClusterError if that id is already in use.
        """
        cluster_id = params.get('id', str(uuid4()))
        if cluster_id in self:
            raise ShardedClusterError(
                "Sharded cluster with id %s already exists." % cluster_id)
        params['id'] = cluster_id
        cluster = ShardedCluster(params)
        self[cluster.id] = cluster
        return cluster.id

    def remove(self, cluster_id):
        """Shut the cluster down and delete its data."""
        self._storage.pop(cluster_id).cleanup()

    def info(self, cluster_id):
        """Return an info dict describing the cluster."""
        return self._storage[cluster_id].info()

    def configsvrs(self, cluster_id):
        """Return the cluster's config servers."""
        return self._storage[cluster_id].configsvrs

    def routers(self, cluster_id):
        """Return the cluster's mongos routers."""
        return self._storage[cluster_id].routers

    def router_add(self, cluster_id, params):
        """Add a mongos router to the cluster."""
        cluster = self._storage[cluster_id]
        added = cluster.router_add(params)
        self._storage[cluster_id] = cluster
        return added

    def router_del(self, cluster_id, router_id):
        """Remove a mongos router from the cluster."""
        cluster = self._storage[cluster_id]
        removed = cluster.router_remove(router_id)
        self._storage[cluster_id] = cluster
        return removed

    def members(self, cluster_id):
        """Return the cluster's shard members."""
        return self._storage[cluster_id].members

    def member_info(self, cluster_id, member_id):
        """Return info about a single shard member."""
        return self._storage[cluster_id].member_info(member_id)

    def command(self, cluster_id, command, *args):
        """Invoke an arbitrary ShardedCluster method by name."""
        cluster = self._storage[cluster_id]
        try:
            return getattr(cluster, command)(*args)
        except AttributeError:
            raise ValueError("Cannot issue the command %r to ShardedCluster %s"
                             % (command, cluster_id))

    def member_del(self, cluster_id, member_id):
        """Remove a shard member from the cluster."""
        cluster = self._storage[cluster_id]
        removed = cluster.member_remove(member_id)
        self._storage[cluster_id] = cluster
        return removed

    def member_add(self, cluster_id, params):
        """Add a new shard member to the cluster's configuration."""
        cluster = self._storage[cluster_id]
        added = cluster.member_add(params.get('id', None),
                                   params.get('shardParams', {}))
        self._storage[cluster_id] = cluster
        return added
|
class ShardedClusters(Singleton, Container):
''' ShardedClusters is a dict-like collection for ShardedCluster objects'''
def set_settings(self, releases=None, default_release=None):
'''set path to storage'''
pass
def __getitem__(self, key):
pass
def cleanup(self):
'''remove all servers with their data'''
pass
def create(self, params):
'''create new ShardedCluster
Args:
params - dictionary with specific params for instance
Return cluster_id
where cluster_id - id which can use to take the cluster from servers collection
'''
pass
def remove(self, cluster_id):
'''remove cluster and data stuff
Args:
cluster_id - cluster identity
'''
pass
def info(self, cluster_id):
'''return dictionary object with info about cluster
Args:
cluster_id - cluster identity
'''
pass
def configsvrs(self, cluster_id):
'''return list of config servers'''
pass
def routers(self, cluster_id):
'''return list of routers'''
pass
def router_add(self, cluster_id, params):
'''add new router'''
pass
def router_del(self, cluster_id, router_id):
'''remove router from the ShardedCluster'''
pass
def members(self, cluster_id):
'''return list of members'''
pass
def member_info(self, cluster_id, member_id):
'''return info about member'''
pass
def command(self, cluster_id, command, *args):
'''Call a ShardedCluster method.'''
pass
def member_del(self, cluster_id, member_id):
'''remove member from cluster cluster'''
pass
def member_add(self, cluster_id, params):
'''add new member into configuration'''
pass
| 16 | 15 | 6 | 0 | 4 | 2 | 1 | 0.41 | 2 | 7 | 3 | 0 | 15 | 0 | 15 | 31 | 104 | 15 | 63 | 34 | 47 | 26 | 61 | 34 | 45 | 2 | 2 | 1 | 18 |
219 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/mongo_orchestration/singleton.py
|
mongo_orchestration.singleton.Singleton
|
class Singleton(object):
    """Base class giving each subclass at most one shared instance."""
    _instances = {}

    def __new__(class_, *args, **kwargs):
        # Memoize the instance per concrete class on first construction.
        instance = class_._instances.get(class_)
        if instance is None:
            instance = super(Singleton, class_).__new__(class_, *args, **kwargs)
            class_._instances[class_] = instance
        return instance
|
class Singleton(object):
def __new__(class_, *args, **kwargs):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 2 | 0 | 1 | 1 | 0 | 4 | 1 | 0 | 1 | 1 | 7 | 1 | 6 | 3 | 4 | 0 | 6 | 3 | 4 | 2 | 1 | 1 | 2 |
220 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/mongo_orchestration/common.py
|
mongo_orchestration.common.BaseModel
|
class BaseModel(object):
    """Common auth/SSL plumbing for Server, ReplicaSet, and ShardedCluster."""

    # Roles granted to every user created by Mongo Orchestration.
    _user_role_documents = [
        {'role': 'userAdminAnyDatabase', 'db': 'admin'},
        {'role': 'clusterAdmin', 'db': 'admin'},
        {'role': 'dbAdminAnyDatabase', 'db': 'admin'},
        {'role': 'readWriteAnyDatabase', 'db': 'admin'},
        {'role': 'restore', 'db': 'admin'},
        {'role': 'backup', 'db': 'admin'}
    ]
    socket_timeout = DEFAULT_SOCKET_TIMEOUT

    @property
    def key_file(self):
        """Path to a file holding ``auth_key`` (owner-read-only), or None."""
        if not self.auth_key:
            return None
        path = os.path.join(orchestration_mkdtemp(), 'key')
        with open(path, 'w') as fd:
            fd.write(self.auth_key)
        # Restrict permissions to owner-read only.
        os.chmod(path, stat.S_IRUSR)
        return path

    def _strip_auth(self, proc_params):
        """Return a copy of ``proc_params`` without auth-enabling options."""
        stripped = proc_params.copy()
        stripped.pop("auth", None)
        stripped.pop("clusterAuthMode", None)
        return stripped

    def mongodb_auth_uri(self, hosts):
        """Build a mongodb:// URI for ``hosts`` carrying our credentials."""
        uri = 'mongodb://'
        if self.login:
            uri += self.login
            if self.password:
                uri += ':' + self.password
            uri += '@'
        uri += hosts + '/'
        if self.login:
            uri += '?authSource=' + self.auth_source
            if self.x509_extra_user:
                uri += '&authMechanism=MONGODB-X509'
        return uri

    def _get_server_version(self, client):
        """Return the connected server's version as a tuple of ints."""
        return tuple(client.admin.command('buildinfo')['versionArray'])

    def _user_roles(self, client):
        """Return roles in the shape the connected server understands."""
        if self._get_server_version(client) < (2, 6):
            # MongoDB 2.4 expects a plain list of role names.
            return [doc['role'] for doc in self._user_role_documents]
        return self._user_role_documents

    def _add_users(self, db, mongo_version):
        """Create the requested user, plus an X.509 user when needed."""
        roles = self._user_roles(db.client)
        if self.x509_extra_user:
            db.command('createUser', DEFAULT_SUBJECT, roles=roles,
                       writeConcern=db.write_concern.document)
            # Future MongoClients must present the client certificate.
            self.kwargs['tlsCertificateKeyFile'] = DEFAULT_CLIENT_CERT
        # Add secondary user given from request.
        create_user(db, mongo_version, self.login, self.password, roles)
|
class BaseModel(object):
'''Base object for Server, ReplicaSet, and ShardedCluster.'''
@property
def key_file(self):
'''Get the path to the key file containig our auth key, or None.'''
pass
def _strip_auth(self, proc_params):
'''Remove options from parameters that cause auth to be enabled.'''
pass
def mongodb_auth_uri(self, hosts):
'''Get a connection string with all info necessary to authenticate.'''
pass
def _get_server_version(self, client):
pass
def _user_roles(self, client):
pass
def _add_users(self, db, mongo_version):
'''Add given user, and extra x509 user if necessary.'''
pass
| 8 | 5 | 8 | 0 | 7 | 1 | 2 | 0.16 | 1 | 1 | 0 | 3 | 6 | 0 | 6 | 6 | 66 | 8 | 50 | 16 | 42 | 8 | 41 | 14 | 34 | 5 | 1 | 2 | 13 |
221 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/mongo_orchestration/daemon.py
|
mongo_orchestration.daemon.Daemon
|
class Daemon(object):
    """
    A generic daemon class.

    Usage: subclass the Daemon class and override the run() method

    source: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/
    """
    def __init__(self, pidfile,
                 stdin=DEVNULL, stdout=DEVNULL, stderr=DEVNULL,
                 timeout=0):
        # pidfile: path where the daemonized process records its pid.
        # stdin/stdout/stderr: streams the daemon's std fds are redirected to.
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.pidfile = pidfile
        self.timeout = timeout  # sleep before exit from parent

    def daemonize(self):
        """Detach using the platform-appropriate strategy; return child pid."""
        if os.name == 'nt':
            return self.daemonize_win32()
        else:
            return self.daemonize_posix()

    def daemonize_win32(self):
        """Windows: re-spawn ourselves detached and record the child's pid."""
        logger.info('daemonize_win32: %r' % (sys.argv, ))
        # CreateProcess flag: child gets no console and outlives the parent.
        DETACHED_PROCESS = 0x00000008
        # '--no-fork' stops the child from trying to daemonize again.
        pid = subprocess.Popen(sys.argv + ["--no-fork"],
                               creationflags=DETACHED_PROCESS, shell=True,
                               stderr=sys.stderr, stdout=sys.stdout).pid
        try:
            with open(self.pidfile, 'w+') as fd:
                fd.write("%s\n" % pid)
        except:
            logger.exception('write pidfile %r' % self.pidfile)
            raise
        return pid

    def daemonize_posix(self):
        """
        do the UNIX double-fork magic, see Stevens' "Advanced
        Programming in the UNIX Environment" for details (ISBN 0201563177)
        http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
        """
        logger.info('daemonize_posix')
        try:
            pid = os.fork()
            if pid > 0:
                # Parent: return the child's pid to the caller.
                logger.debug('forked first child, pid = %d' % (pid,))
                return pid
            logger.debug('in child after first fork, pid = %d' % (pid, ))
        except OSError as error:
            logger.exception('fork #1')
            sys.stderr.write("fork #1 failed: %d (%s)\n" % (error.errno, error.strerror))
            sys.exit(1)
        # decouple from parent environment
        os.chdir("/")
        os.setsid()
        os.umask(0)
        # do second fork
        try:
            pid = os.fork()
            if pid > 0:
                # exit from second parent
                logger.debug('forked second child, pid = %d, exiting' % (pid,))
                sys.exit(0)
        except OSError as error:
            logger.exception('fork #2')
            sys.stderr.write("fork #2 failed: %d (%s)\n" % (error.errno, error.strerror))
            sys.exit(1)
        # redirect standard file descriptors
        logger.info('daemonized, pid = %d' % (pid, ))
        sys.stdin.flush()
        sys.stdout.flush()
        sys.stderr.flush()
        os.dup2(self.stdin.fileno(), sys.stdin.fileno())
        os.dup2(self.stdout.fileno(), sys.stdout.fileno())
        os.dup2(self.stderr.fileno(), sys.stderr.fileno())
        # write pidfile
        atexit.register(self.delpid)
        pid = str(os.getpid())
        with open(self.pidfile, 'w+') as fd:
            fd.write("%s\n" % pid)

    def delpid(self):
        """remove pidfile"""
        os.remove(self.pidfile)

    def start(self):
        """
        Start the daemon
        """
        # Check for a pidfile to see if the daemon already runs
        logger.info('Starting daemon')
        try:
            with open(self.pidfile, 'r') as fd:
                pid = int(fd.read().strip())
        except IOError:
            pid = None
        if pid:
            message = "pidfile %s already exist. Daemon already running?\n"
            sys.stderr.write(message % self.pidfile)
            sys.exit(1)
        # Start the daemon
        pid = self.daemonize()
        if pid:
            # Parent process: hand the child's pid back.
            return pid
        # Child process: run the daemon's main loop.
        self.run()

    def stop(self):
        """
        Stop the daemon
        """
        # Get the pid from the pidfile
        logger.debug("reading %s" % (self.pidfile,))
        try:
            with open(self.pidfile, 'r') as fd:
                pid = int(fd.read().strip())
        except IOError:
            logger.exception("reading %s" % (self.pidfile, ))
            pid = None
        if not pid:
            message = "pidfile %s does not exist. Daemon not running?\n"
            sys.stderr.write(message % self.pidfile)
            return  # not an error in a restart
        if os.name == "nt":
            subprocess.call(["taskkill", "/f", "/t", "/pid", str(pid)])
            if os.path.exists(self.pidfile):
                os.remove(self.pidfile)
        else:
            # Try killing the daemon process
            try:
                os.kill(pid, SIGTERM)
                # Wait until the process actually exits.
                while is_unix_process_running(pid):
                    time.sleep(0.25)
            except OSError as err:
                if err.errno == errno.ESRCH:
                    # Process already gone; just clean up the stale pidfile.
                    if os.path.exists(self.pidfile):
                        os.remove(self.pidfile)
                else:
                    raise

    def restart(self):
        """
        Restart the daemon
        """
        self.stop()
        self.start()

    def run(self):
        """
        You should override this method when you subclass Daemon. It will be called after the process has been
        daemonized by start() or restart().
        """
|
class Daemon(object):
'''
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
source: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/
'''
def __init__(self, pidfile,
stdin=DEVNULL, stdout=DEVNULL, stderr=DEVNULL,
timeout=0):
pass
def daemonize(self):
pass
def daemonize_win32(self):
pass
def daemonize_posix(self):
'''
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
'''
pass
def delpid(self):
'''remove pidfile'''
pass
def start(self):
'''
Start the daemon
'''
pass
def stop(self):
'''
Stop the daemon
'''
pass
def restart(self):
'''
Restart the daemon
'''
pass
def run(self):
'''
You should override this method when you subclass Daemon. It will be called after the process has been
daemonized by start() or restart().
'''
pass
| 10 | 7 | 17 | 1 | 12 | 4 | 3 | 0.35 | 1 | 4 | 0 | 1 | 9 | 5 | 9 | 9 | 165 | 22 | 110 | 30 | 98 | 39 | 103 | 22 | 93 | 9 | 1 | 4 | 26 |
222 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/mongo_orchestration/errors.py
|
mongo_orchestration.errors.OperationFailure
|
class OperationFailure(MongoOrchestrationError):
    """Raised when an operation fails."""

    def __init__(self, error, code=None):
        # Keep the server error code (if any) alongside the message.
        self.code = code
        super(OperationFailure, self).__init__(error)
|
class OperationFailure(MongoOrchestrationError):
'''Raised when an operation fails.'''
def __init__(self, error, code=None):
pass
| 2 | 1 | 3 | 0 | 3 | 2 | 1 | 0.75 | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 11 | 6 | 1 | 4 | 3 | 2 | 3 | 4 | 3 | 2 | 1 | 4 | 0 | 1 |
223 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/mongo_orchestration/errors.py
|
mongo_orchestration.errors.RequestError
|
# NOTE(review): presumably translated into an HTTP 4xx by the web layer — confirm.
class RequestError(MongoOrchestrationError):
    """Raised when a bad request is made to the web interface."""
|
class RequestError(MongoOrchestrationError):
'''Raised when a bad request is made to the web interface.'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 2 | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 4 | 0 | 0 |
224 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/mongo_orchestration/errors.py
|
mongo_orchestration.errors.TimeoutError
|
# NOTE(review): this name shadows the builtin TimeoutError (Python 3.3+);
# callers must catch the package's class, not the builtin.
class TimeoutError(OperationFailure):
    """Raised when an operation times out."""
|
class TimeoutError(OperationFailure):
'''Raised when an operation times out.'''
pass
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 2 | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 5 | 0 | 0 |
225 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/mongo_orchestration/launch.py
|
mongo_orchestration.launch.ReplicaSet
|
class ReplicaSet(MCTestObject):
    """Client-side handle for a replica set managed through the MO HTTP API."""

    _resource = 'replica_sets'

    def __init__(self, id=None, uri=None, primary=None, secondary=None,
                 single=False, **kwargs):
        self.id = id
        self.uri = uri
        self.single = single
        self.primary = primary
        self.secondary = secondary
        self.members = []
        # Extra process parameters forwarded to each member.
        self._proc_params = kwargs

    def proc_params(self):
        # Hook kept for per-set parameter tweaks; currently a pass-through.
        return super(ReplicaSet, self).proc_params()

    def get_config(self):
        """Three-member configuration by default; one member when ``single``."""
        member_count = 1 if self.single else 3
        return {'members': [{'procParams': self.proc_params()}
                            for _ in range(member_count)]}

    def _init_from_response(self, response):
        """Populate id/uri/members (and primary/secondary) from a reply."""
        self.id = response['id']
        self.uri = response.get('mongodb_auth_uri', response['mongodb_uri'])
        for doc in response['members']:
            member = Server(doc['server_id'], doc['host'])
            self.members.append(member)
            state = doc['state']
            if state == 1:
                self.primary = member
            elif state == 2:
                self.secondary = member
        return self

    def start(self):
        # Replica sets are always created fresh, never restarted.
        return self._init_from_response(self._make_post_request())

    def restart_primary(self):
        """Stop the current primary, wait for failover, then bring it back."""
        self.primary.stop(destroy=False)
        time.sleep(5)
        self.primary.start()
        time.sleep(1)
        # Refresh our view so self.primary points at the new primary.
        self._init_from_response(self._make_get_request())
        print('New primary: %s' % self.primary.uri)
|
class ReplicaSet(MCTestObject):
def __init__(self, id=None, uri=None, primary=None, secondary=None,
single=False, **kwargs):
pass
def proc_params(self):
pass
def get_config(self):
pass
def _init_from_response(self, response):
pass
def start(self):
pass
def restart_primary(self):
pass
| 7 | 0 | 7 | 0 | 7 | 1 | 2 | 0.09 | 1 | 2 | 1 | 1 | 6 | 7 | 6 | 12 | 53 | 7 | 43 | 20 | 35 | 4 | 37 | 19 | 30 | 4 | 2 | 2 | 10 |
226 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/mongo_orchestration/launch.py
|
mongo_orchestration.launch.ReplicaSetSingle
|
class ReplicaSetSingle(ReplicaSet):
    """Replica set variant that always configures exactly one member."""

    def get_config(self):
        # Single data-bearing member only.
        member = {'procParams': self.proc_params()}
        return {'members': [member]}
|
class ReplicaSetSingle(ReplicaSet):
def get_config(self):
pass
| 2 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 13 | 8 | 1 | 7 | 2 | 5 | 0 | 3 | 2 | 1 | 1 | 3 | 0 | 1 |
227 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/tests/test_servers.py
|
tests.test_servers.ServerSSLTestCase
|
class ServerSSLTestCase(SSLTestCase):
    """Integration tests for single-server TLS/auth configurations."""

    def setUp(self):
        # Resolve the mongod binary (optionally from $MONGOBIN).
        self.mongod_name = os.path.join(
            os.environ.get('MONGOBIN', ''), 'mongod')

    def tearDown(self):
        # A test may fail before self.server is created; guard with hasattr.
        if hasattr(self, 'server'):
            self.server.stop()
            self.server.cleanup()

    def test_ssl_auth(self):
        """X.509-only auth: both the extra and the requested user can log in."""
        if SERVER_VERSION < (2, 4):
            raise SkipTest("Need to be able to set 'authenticationMechanisms' "
                           "parameter to test.")
        proc_params = {
            'setParameter': {
                'authenticationMechanisms': 'MONGODB-X509'
            }
        }
        ssl_params = {
            'tlsCertificateKeyFile': certificate('server.pem'),
            'tlsCAFile': certificate('ca.pem'),
            'tlsMode': 'requireTLS',
            'tlsAllowInvalidCertificates': True
        }
        # Should not raise an Exception.
        self.server = Server(
            self.mongod_name, proc_params, ssl_params,
            login=TEST_SUBJECT, auth_source='$external')
        self.server.start()
        # Should create an extra user. Doesn't raise.
        client = pymongo.MongoClient(
            self.server.hostname, tlsCertificateKeyFile=DEFAULT_CLIENT_CERT,
            tlsAllowInvalidCertificates=True)
        client['$external'].authenticate(
            DEFAULT_SUBJECT, mechanism='MONGODB-X509')
        # Should also create the user we requested. Doesn't raise.
        client = pymongo.MongoClient(
            self.server.hostname, tlsCertificateKeyFile=certificate('client.pem'),
            tlsAllowInvalidCertificates=True)
        client['$external'].authenticate(
            TEST_SUBJECT, mechanism='MONGODB-X509')

    def test_scram_with_ssl(self):
        """SCRAM auth over TLS creates exactly one (non-X.509) user."""
        ssl_params = {
            'tlsCertificateKeyFile': certificate('server.pem'),
            'tlsCAFile': certificate('ca.pem'),
            'tlsMode': 'requireTLS',
            'tlsAllowInvalidCertificates': True
        }
        # Should not raise an Exception.
        self.server = Server(
            self.mongod_name, {}, ssl_params, login='luke', password='ekul')
        self.server.start()
        # Should create the user we requested. No raise on authenticate.
        client = pymongo.MongoClient(
            self.server.hostname, tlsCertificateKeyFile=certificate('client.pem'),
            tlsAllowInvalidCertificates=True)
        client.admin.authenticate('luke', 'ekul')
        # This should be the only user.
        self.assertEqual(len(client.admin.command('usersInfo')['users']), 1)
        self.assertFalse(client['$external'].command('usersInfo')['users'])

    def test_ssl(self):
        """requireTLS server rejects plain connections, accepts cert ones."""
        ssl_params = {
            'tlsCertificateKeyFile': certificate('server.pem'),
            'tlsCAFile': certificate('ca.pem'),
            'tlsMode': 'requireTLS',
            'tlsAllowInvalidCertificates': True
        }
        # Should not raise an Exception.
        self.server = Server(self.mongod_name, {}, ssl_params)
        self.server.start()
        # Server should require SSL.
        with self.assertRaises(pymongo.errors.ConnectionFailure):
            connected(pymongo.MongoClient(self.server.hostname))
        # Doesn't raise with certificate provided.
        connected(pymongo.MongoClient(
            self.server.hostname, tlsCertificateKeyFile=certificate('client.pem'),
            tlsAllowInvalidCertificates=True))

    def test_mongodb_auth_uri(self):
        """info() exposes a mongodb_auth_uri with X.509 auth parameters."""
        if SERVER_VERSION < (2, 4):
            raise SkipTest("Need to be able to set 'authenticationMechanisms' "
                           "parameter to test.")
        proc_params = {
            'setParameter': {
                'authenticationMechanisms': 'MONGODB-X509'
            }
        }
        ssl_params = {
            'tlsCertificateKeyFile': certificate('server.pem'),
            'tlsCAFile': certificate('ca.pem'),
            'tlsMode': 'requireTLS',
            'tlsAllowInvalidCertificates': True
        }
        self.server = Server(
            self.mongod_name, proc_params, ssl_params,
            login=TEST_SUBJECT, auth_source='$external')
        self.server.start()
        self.assertIn('mongodb_auth_uri', self.server.info())
        auth_uri = self.server.info()['mongodb_auth_uri']
        self.assertIn(self.server.hostname, auth_uri)
        self.assertIn(TEST_SUBJECT, auth_uri)
        self.assertIn('authSource=$external', auth_uri)
        self.assertIn('authMechanism=MONGODB-X509', auth_uri)
|
class ServerSSLTestCase(SSLTestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_ssl_auth(self):
pass
def test_scram_with_ssl(self):
pass
def test_ssl_auth(self):
pass
def test_mongodb_auth_uri(self):
pass
| 7 | 0 | 17 | 1 | 15 | 2 | 2 | 0.1 | 1 | 2 | 1 | 0 | 6 | 2 | 6 | 79 | 110 | 9 | 92 | 18 | 85 | 9 | 46 | 18 | 39 | 2 | 3 | 1 | 9 |
228 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/mongo_orchestration/launch.py
|
mongo_orchestration.launch.ShardedCluster
|
class ShardedCluster(MCTestObject):
    """Client-side handle for a sharded cluster managed through the MO API."""

    _resource = 'sharded_clusters'
    _shard_type = ReplicaSet

    def __init__(self, **kwargs):
        self.id = None
        self.uri = None
        self.shards = []
        # Extra process parameters forwarded to the routers.
        self._proc_params = kwargs

    def get_config(self):
        """Two mongos routers in front of a single replica-set shard."""
        shard = {'id': 'demo-set-0',
                 'shardParams': self._shard_type().get_config()}
        return {'routers': [self.proc_params(), self.proc_params()],
                'shards': [shard]}

    def start(self):
        # Sharded clusters are always created fresh, never restarted.
        response = self._make_post_request()
        for shard_doc in response['shards']:
            resp = requests.get(_mo_url('replica_sets', shard_doc['_id']))
            self.shards.append(
                self._shard_type()._init_from_response(resp.json()))
        self.id = response['id']
        self.uri = response.get('mongodb_auth_uri', response['mongodb_uri'])
        return self
|
class ShardedCluster(MCTestObject):
def __init__(self, **kwargs):
pass
def get_config(self):
pass
def start(self):
pass
| 4 | 0 | 9 | 0 | 7 | 1 | 1 | 0.16 | 1 | 0 | 0 | 1 | 3 | 4 | 3 | 9 | 33 | 4 | 25 | 14 | 21 | 4 | 19 | 14 | 15 | 2 | 2 | 1 | 4 |
229 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/mongo_orchestration/process.py
|
mongo_orchestration.process.PortPool
|
class PortPool(Singleton):
    """Singleton pool of TCP ports handed out to mongo processes.

    Ports move between two sets: ``__ports`` (believed free) and
    ``__closed`` (handed out / in use).
    """
    __ports = set()
    __closed = set()
    __id = None

    def __init__(self, min_port=1025, max_port=2000, port_sequence=None):
        """
        Args:
            min_port - min port number (ignoring if 'port_sequence' is not None)
            max_port - max port number (ignoring if 'port_sequence' is not None)
            port_sequence - iterate sequence which contains numbers of ports
        """
        if not self.__id:  # singleton checker: initialize only once
            self.__id = id(self)
            self.__init_range(min_port, max_port, port_sequence)

    def __init_range(self, min_port=1025, max_port=2000, port_sequence=None):
        # Seed the free set from an explicit sequence or an inclusive range.
        if port_sequence:
            self.__ports = set(port_sequence)
        else:
            self.__ports = set(range(min_port, max_port + 1))
        self.__closed = set()
        self.refresh()

    def __check_port(self, port):
        """Return True if ``port`` can currently be bound (i.e. is free)."""
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.bind((DEFAULT_BIND, port))
            return True
        except socket.error:
            return False
        finally:
            s.close()

    def release_port(self, port):
        """Return ``port`` to the pool of available ports."""
        if port in self.__closed:
            self.__closed.remove(port)
        self.__ports.add(port)

    def port(self, check=False):
        """Pop and return the next free port.

        Args:
            check - verify the port is really bindable before returning it
        Raises IndexError when no free port can be found.
        """
        if not self.__ports:  # refresh ports if sequence is empty
            self.refresh()
        try:
            port = self.__ports.pop()
            if check:
                while not self.__check_port(port):
                    self.release_port(port)
                    port = self.__ports.pop()
        except (IndexError, KeyError):
            raise IndexError("Could not find a free port,\nclosed ports: {closed}".format(closed=self.__closed))
        self.__closed.add(port)
        return port

    def refresh(self, only_closed=False):
        """Re-test port availability.

        Args:
            only_closed - check status only for closed ports
        """
        if only_closed:
            # BUG FIX: materialize the filter. In Python 3 a lazy filter()
            # object is exhausted by the first set operation, so union()
            # below would see it empty and the freed ports would be dropped
            # from both sets, leaking them from the pool.
            opened = set(filter(self.__check_port, self.__closed))
            self.__closed = self.__closed.difference(opened)
            self.__ports = self.__ports.union(opened)
        else:
            ports = self.__closed.union(self.__ports)
            self.__ports = set(filter(self.__check_port, ports))
            self.__closed = ports.difference(self.__ports)

    def change_range(self, min_port=1025, max_port=2000, port_sequence=None):
        """Reinitialize the pool with a new port range/sequence."""
        self.__init_range(min_port, max_port, port_sequence)
|
class PortPool(Singleton):
def __init__(self, min_port=1025, max_port=2000, port_sequence=None):
'''
Args:
min_port - min port number (ignoring if 'port_sequence' is not None)
max_port - max port number (ignoring if 'port_sequence' is not None)
port_sequence - iterate sequence which contains numbers of ports
'''
pass
def __init_range(self, min_port=1025, max_port=2000, port_sequence=None):
pass
def __check_port(self, port):
'''check port status
return True if port is free, False else
'''
pass
def release_port(self, port):
'''release port'''
pass
def port(self, check=False):
'''return next opened port
Args:
check - check is port realy free
'''
pass
def refresh(self, only_closed=False):
'''refresh ports status
Args:
only_closed - check status only for closed ports
'''
pass
def change_range(self, min_port=1025, max_port=2000, port_sequence=None):
'''change Pool port range'''
pass
| 8 | 6 | 10 | 0 | 7 | 3 | 2 | 0.4 | 1 | 6 | 0 | 0 | 7 | 0 | 7 | 8 | 80 | 9 | 52 | 15 | 44 | 21 | 49 | 15 | 41 | 5 | 2 | 3 | 16 |
230 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/mongo_orchestration/replica_sets.py
|
mongo_orchestration.replica_sets.ReplicaSet
|
class ReplicaSet(BaseModel):
"""class represents ReplicaSet"""
_servers = Servers() # singleton to manage servers instances
# replica set's default parameters
default_params = {'arbiterOnly': False, 'hidden': False, 'slaveDelay': 0}
    def __init__(self, rs_params):
        """Create a replica set according to the members' config.

        Args:
            rs_params - replica set configuration document
        Raises ReplicaSetError if the set cannot be initiated, configured,
        or never elects a primary.
        """
        # member_id -> 'host:port' for every member we start.
        self.server_map = {}
        self.auth_key = rs_params.get('auth_key', None)
        self.login = rs_params.get('login', '')
        self.auth_source = rs_params.get('authSource', 'admin')
        self.password = rs_params.get('password', '')
        self.admin_added = False
        self.repl_id = rs_params.get('id', None) or str(uuid4())
        self._version = rs_params.get('version')
        self._require_api_version = rs_params.get('requireApiVersion', '')
        if self._require_api_version:
            raise RuntimeError("requireApiVersion is not supported for replica sets, see SERVER-97010")
        self.sslParams = rs_params.get('sslParams', {})
        self.kwargs = {}
        # Auth setup needs the members started without auth first, then
        # restarted with the auth flags once the user exists.
        self.restart_required = self.login or self.auth_key
        self.x509_extra_user = False
        if self.sslParams:
            self.kwargs.update(DEFAULT_SSL_OPTIONS)
        members = rs_params.get('members', [])
        self._members = members
        # Enable ipv6 on all members if any have it enabled.
        self.enable_ipv6 = ipv6_enabled_repl(rs_params)
        # Start all members concurrently.
        with ThreadPoolExecutor(max_workers=10) as executor:
            futures = [executor.submit(self.member_create, member, i)
                       for i, member in enumerate(members)]
            config_members = [f.result() for f in futures]
        config = {"_id": self.repl_id, "members": config_members}
        if 'rsSettings' in rs_params:
            config['settings'] = rs_params['rsSettings']
        # Explicitly set write concern to number of data-bearing members.
        # If we add a user later, we need to guarantee that every node
        # has the user before we authenticate ('majority' is insufficient).
        self._write_concern = len(
            [m for m in members
             if not m.get('rsParams', {}).get('arbiterOnly')]
        )
        logger.debug("replica config: {config}".format(**locals()))
        if not self.repl_init(config):
            self.cleanup()
            raise ReplicaSetError("Could not create replica set.")
        if not self.waiting_config_state():
            raise ReplicaSetError(
                "Could not actualize replica set configuration.")
        if self.login:
            # If the only authentication mechanism enabled is MONGODB-X509,
            # we'll need to add our own user using SSL certificates we already
            # have. Otherwise, the user of MO would have to copy their own
            # certificates to wherever MO happens to be running so that MO
            # might authenticate.
            for member in members:
                proc_params = member.get('procParams', {})
                set_params = proc_params.get('setParameter', {})
                auth_mechs = set_params.get('authenticationMechanisms', '')
                auth_mechs = auth_mechs.split(',')
                if len(auth_mechs) == 1 and auth_mechs[0] == 'MONGODB-X509':
                    self.x509_extra_user = True
                    break
            if config["members"]:
                server_id = self._servers.host_to_server_id(
                    self.member_id_to_host(0))
                version = self._servers.version(server_id)
            else:
                version = (2, 4, 0)
            self._add_users(self.connection()[self.auth_source], version)
        if self.restart_required:
            self.restart_with_auth()
        if not self.waiting_member_state() and self.waiting_config_state():
            raise ReplicaSetError(
                "Could not actualize replica set configuration.")
        # Give the set up to ~10 seconds (100 * 0.1s) to elect a primary.
        for i in range(100):
            if self.connection().primary:
                break
            time.sleep(0.1)
        else:
            raise ReplicaSetError("No primary was ever elected.")
    def restart_with_auth(self, cluster_auth_mode=None):
        """Restart every member with auth/SSL options applied.

        Args:
            cluster_auth_mode - optional clusterAuthMode value to re-add to
                each member's configuration on restart
        """
        # Clear the flags first so the restart below is not re-triggered.
        for server in self.server_instances():
            server.restart_required = False
        self.restart_required = False
        for idx, member in enumerate(self._members):
            server_id = self._servers.host_to_server_id(
                self.member_id_to_host(idx))
            server = self._servers._storage[server_id]
            # If this is an arbiter, we can't authenticate as the user,
            # so don't set the login/password.
            if not member.get('rsParams', {}).get('arbiterOnly'):
                server.x509_extra_user = self.x509_extra_user
                server.auth_source = self.auth_source
                server.ssl_params = self.sslParams
                server.login = self.login
                server.password = self.password
            server.auth_key = self.auth_key

        def add_auth(config):
            # Inject the key file and cluster auth mode into each config.
            if self.auth_key:
                config['keyFile'] = self.key_file
            # Add clusterAuthMode back in.
            if cluster_auth_mode:
                config['clusterAuthMode'] = cluster_auth_mode
            return config

        # Restart all the servers with auth flags and ssl.
        self.restart(config_callback=add_auth)
def __len__(self):
return len(self.server_map)
def cleanup(self):
"""remove all members without reconfig"""
with ThreadPoolExecutor(max_workers=10) as executor:
futures = [executor.submit(self.member_del, item, reconfig=False)
for item in self.server_map]
for f in futures:
f.result()
self.server_map.clear()
def member_id_to_host(self, member_id):
"""return hostname by member id"""
return self.server_map[member_id]
def host2id(self, hostname):
"""return member id by hostname"""
for key, value in self.server_map.items():
if value == hostname:
return key
def update_server_map(self, config):
"""update server_map ({member_id:hostname})"""
self.server_map = dict([(member['_id'], member['host']) for member in config['members']])
    def repl_init(self, config):
        """create replica set by config
        return True if replica set created successfuly, else False"""
        self.update_server_map(config)
        # init_server - server which can init replica set: a data-bearing,
        # electable member (not an arbiter, not priority 0).
        init_server = [member['host'] for member in config['members']
                       if not (member.get('arbiterOnly', False)
                               or member.get('priority', 1) == 0)][0]
        servers = [member['host'] for member in config['members']]
        if not self.wait_while_reachable(servers):
            logger.error("all servers must be reachable")
            self.cleanup()
            return False
        try:
            result = self.connection(init_server).admin.command("replSetInitiate", config)
            logger.debug("replica init result: {result}".format(**locals()))
        except pymongo.errors.PyMongoError:
            raise
        if int(result.get('ok', 0)) == 1:
            # Wait while members come up
            return self.waiting_member_state()
        else:
            self.cleanup()
            return False
    def reset(self):
        """Ensure all members are running and available.

        Returns the replica set info document (see :meth:`info`).
        """
        # Need to use self.server_map, in case no Servers are left running.
        for member_id in self.server_map:
            host = self.member_id_to_host(member_id)
            server_id = self._servers.host_to_server_id(host)
            # Reset each member.
            self._servers.command(server_id, 'reset')
        # Wait for all members to have a state of 1, 2, or 7.
        # Note that this also waits for a primary to become available.
        self.waiting_member_state()
        # Wait for Server states to match the config from the primary.
        self.waiting_config_state()
        return self.info()
    def repl_update(self, config):
        """Reconfig Replicaset with new config

        Returns True once the new config is applied and the set is healthy.
        """
        cfg = config.copy()
        # replSetReconfig requires the version number to be bumped.
        cfg['version'] += 1
        try:
            result = self.run_command("replSetReconfig", cfg)
            if int(result.get('ok', 0)) != 1:
                return False
        except pymongo.errors.AutoReconnect:
            # Expected: a reconfig can close connections / trigger an election.
            self.update_server_map(cfg)  # use new server_map
        self.waiting_member_state()
        self.waiting_config_state()
        return self.connection() and True
    def info(self):
        """return information about replica set

        Includes a plain ``mongodb_uri`` and, when auth is configured,
        a ``mongodb_auth_uri`` carrying credentials.
        """
        hosts = ','.join(x['host'] for x in self.members())
        mongodb_uri = 'mongodb://' + hosts + '/?replicaSet=' + self.repl_id
        result = {"id": self.repl_id,
                  "auth_key": self.auth_key,
                  "members": self.members(),
                  "mongodb_uri": mongodb_uri,
                  "orchestration": 'replica_sets'}
        if self.login:
            # Add replicaSet URI parameter.
            uri = ('%s&replicaSet=%s'
                   % (self.mongodb_auth_uri(hosts), self.repl_id))
            result['mongodb_auth_uri'] = uri
        return result
    def repl_member_add(self, params):
        """create new mongod instances and add it to the replica set.
        Args:
            params - mongod params
        Returns the new member's id; raises ReplicaSetError if the
        reconfig with the new member fails.
        """
        repl_config = self.config
        # New _id = highest existing _id + 1.
        member_id = max([member['_id'] for member in repl_config['members']]) + 1
        member_config = self.member_create(params, member_id)
        repl_config['members'].append(member_config)
        if not self.repl_update(repl_config):
            # Roll back the server we just started.
            self.member_del(member_id, reconfig=True)
            raise ReplicaSetError("Could not add member to ReplicaSet.")
        return member_id
def run_command(self, command, arg=None, is_eval=False, member_id=None):
"""run command on replica set
if member_id is specified command will be execute on this server
if member_id is not specified command will be execute on the primary
Args:
command - command string
arg - command argument
is_eval - if True execute command as eval
member_id - member id
return command's result
"""
logger.debug("run_command({command}, {arg}, {is_eval}, {member_id})".format(**locals()))
mode = is_eval and 'eval' or 'command'
hostname = None
if isinstance(member_id, int):
hostname = self.member_id_to_host(member_id)
result = getattr(self.connection(hostname=hostname).admin, mode)(command, arg)
logger.debug("command result: {result}".format(result=result))
return result
    @property
    def config(self):
        """return replica set config, use rs.conf() command"""
        try:
            admin = self.connection().admin
            config = admin.command('replSetGetConfig')['config']
        except pymongo.errors.OperationFailure:
            # replSetGetConfig was introduced in 2.7.5.
            # Fall back to reading the raw config document directly.
            config = self.connection().local.system.replset.find_one()
        return config
    def member_create(self, params, member_id):
        """start new mongod instances as part of replica set
        Args:
            params - member params
            member_id - member index
        return member config
        """
        # rsParams (e.g. arbiterOnly, priority, hidden) become the member's
        # entry in the replSet config document.
        member_config = params.get('rsParams', {})
        server_id = params.pop('server_id', None)
        version = params.pop('version', self._version)
        proc_params = {'replSet': self.repl_id}
        proc_params.update(params.get('procParams', {}))
        if self.enable_ipv6:
            enable_ipv6_single(proc_params)
        # Make sure that auth isn't set the first time we start the servers.
        proc_params = self._strip_auth(proc_params)
        # Don't pass in auth_key the first time we start the servers.
        server_id = self._servers.create(
            name='mongod',
            procParams=proc_params,
            sslParams=self.sslParams,
            version=version,
            server_id=server_id
        )
        member_config.update({"_id": member_id,
                              "host": self._servers.hostname(server_id)})
        return member_config
    def member_del(self, member_id, reconfig=True):
        """remove member from replica set
        Args:
            member_id - member index
            reconfig - is need reconfig replica
        return True if operation success otherwise False
        """
        server_id = self._servers.host_to_server_id(
            self.member_id_to_host(member_id))
        if reconfig and member_id in [member['_id'] for member in self.members()]:
            config = self.config
            # NOTE(review): pop() removes by list *position*, not by the
            # member's _id; the two only coincide while ids are contiguous
            # from 0 -- confirm against how members are ever removed/re-added.
            config['members'].pop(member_id)
            self.repl_update(config)
        self._servers.remove(server_id)
        return True
    def member_update(self, member_id, params):
        """update member's values with reconfig replica
        Args:
            member_id - member index
            params - updates member params
        return True if operation success otherwise False
        """
        config = self.config
        # NOTE(review): indexes the members list by position; assumes
        # member _id == list index -- confirm (same assumption as member_del).
        config['members'][member_id].update(params.get("rsParams", {}))
        return self.repl_update(config)
    def member_info(self, member_id):
        """return information about member"""
        server_id = self._servers.host_to_server_id(
            self.member_id_to_host(member_id))
        server_info = self._servers.info(server_id)
        result = {'_id': member_id, 'server_id': server_id,
                  'mongodb_uri': server_info['mongodb_uri'],
                  'procInfo': server_info['procInfo'],
                  'statuses': server_info['statuses']}
        if self.login:
            result['mongodb_auth_uri'] = self.mongodb_auth_uri(
                self._servers.hostname(server_id))
        result['rsInfo'] = {}
        if server_info['procInfo']['alive']:
            # Can't call serverStatus on arbiter when running with auth enabled.
            # (SERVER-5479)
            if self.login or self.auth_key:
                arbiter_ids = [member['_id'] for member in self.arbiters()]
                if member_id in arbiter_ids:
                    # Synthesize the minimal rsInfo an arbiter would report.
                    result['rsInfo'] = {
                        'arbiterOnly': True, 'secondary': False, 'primary': False}
                    return result
            repl = self.run_command('serverStatus', arg=None, is_eval=False, member_id=member_id)['repl']
            logger.debug("member {member_id} repl info: {repl}".format(**locals()))
            # Copy through only the replica-set-relevant fields that exist.
            for key in ('votes', 'tags', 'arbiterOnly', 'buildIndexes', 'hidden', 'priority', 'slaveDelay', 'secondaryDelaySecs', 'secondary'):
                if key in repl:
                    result['rsInfo'][key] = repl[key]
            result['rsInfo']['primary'] = repl.get('ismaster', False)
        return result
    def member_command(self, member_id, command):
        """apply command (start/stop/restart) to member instance of replica set
        Args:
            member_id - member index
            command - string command (start/stop/restart)
        return True if operation success otherwise False
        """
        # Translate member id -> host -> server id, then delegate to Servers.
        server_id = self._servers.host_to_server_id(
            self.member_id_to_host(member_id))
        return self._servers.command(server_id, command)
def members(self):
"""return list of members information"""
result = list()
for member in self.run_command(command="replSetGetStatus", is_eval=False)['members']:
result.append({
"_id": member['_id'],
"host": member["name"],
"server_id": self._servers.host_to_server_id(member["name"]),
"state": member['state']
})
return result
def primary(self):
"""return primary hostname of replica set"""
host, port = self.connection().primary
return "{host}:{port}".format(**locals())
    def get_members_in_state(self, state):
        """return all members of replica set in specific state

        ``state`` is a numeric member-state code as reported by
        replSetGetStatus (cf. SECONDARY_STATE / ARBITER_STATE constants).
        """
        members = self.run_command(command='replSetGetStatus', is_eval=False)['members']
        return [member['name'] for member in members if member['state'] == state]
    def connection(self, hostname=None, read_preference=pymongo.ReadPreference.PRIMARY, timeout=60):
        """Return MongoClient object, if hostname is given it is a directly connected client
        Args:
            hostname - connection uri
            read_preference - default PRIMARY
            timeout - specify how long, in seconds, a command can take before server times out.
        """
        logger.debug("connection({hostname}, {read_preference}, {timeout})".format(**locals()))
        t_start = time.time()
        # No explicit host -> seed with every known member.
        servers = hostname or ",".join(self.server_map.values())
        logger.debug("Creating connection to: {servers}".format(**locals()))
        kwargs = self.kwargs.copy()
        # Credentials are only usable once the members have been restarted
        # with auth enabled (restart_required is cleared at that point).
        if self.login and not self.restart_required:
            kwargs["authSource"] = self.auth_source
            if self.x509_extra_user:
                kwargs["username"] = DEFAULT_SUBJECT
                kwargs["authMechanism"] = "MONGODB-X509"
            else:
                kwargs["username"] = self.login
                kwargs["password"] = self.password
        if self._require_api_version:
            kwargs["server_api"] = ServerApi(self._require_api_version)
        if hostname is None:
            # Replica-set-aware client (topology discovery, read preference).
            c = pymongo.MongoClient(
                servers, replicaSet=self.repl_id,
                read_preference=read_preference,
                socketTimeoutMS=self.socket_timeout,
                w=self._write_concern, fsync=True, **kwargs)
        else:
            # Direct connection to a single member.
            c = pymongo.MongoClient(
                servers, socketTimeoutMS=self.socket_timeout,
                directConnection=True,
                w=self._write_concern, fsync=True, **kwargs)
        # Retry until the client actually connects, or the timeout elapses.
        while True:
            try:
                connected(c)
                return c
            except pymongo.errors.PyMongoError:
                logger.exception("Error attempting to connect to: {servers}".format(**locals()))
                if time.time() - t_start > timeout:
                    raise pymongo.errors.AutoReconnect("Couldn't connect while timeout {timeout} second".format(**locals()))
                time.sleep(1)
def secondaries(self):
"""return list of secondaries members"""
return [
{
"_id": self.host2id(member),
"host": member,
"server_id": self._servers.host_to_server_id(member)
}
for member in self.get_members_in_state(SECONDARY_STATE)
]
    def arbiters(self):
        """return list of arbiters"""
        # Same shape as secondaries(), filtered to the ARBITER state.
        return [
            {
                "_id": self.host2id(member),
                "host": member,
                "server_id": self._servers.host_to_server_id(member)
            }
            for member in self.get_members_in_state(ARBITER_STATE)
        ]
    def hidden(self):
        """return list of hidden members"""
        # Hidden-ness only shows in each member's rsInfo, so query them all.
        members = [self.member_info(item["_id"]) for item in self.members()]
        result = []
        for member in members:
            if member['rsInfo'].get('hidden'):
                server_id = member['server_id']
                result.append({
                    '_id': member['_id'],
                    'host': self._servers.hostname(server_id),
                    'server_id': server_id})
        return result
    def passives(self):
        """return list of passive servers"""
        # ismaster reports passive (priority-0, non-hidden) members separately.
        servers = self.run_command('ismaster').get('passives', [])
        return [member for member in self.members() if member['host'] in servers]
    def servers(self):
        """return list of servers (not hidden nodes)"""
        # ismaster's 'hosts' excludes hidden members by design.
        servers = self.run_command('ismaster').get('hosts', [])
        return [member for member in self.members() if member['host'] in servers]
def wait_while_reachable(self, servers, timeout=60):
"""wait while all servers be reachable
Args:
servers - list of servers
"""
t_start = time.time()
while True:
try:
for server in servers:
# TODO: use state code to check if server is reachable
server_info = self.connection(
hostname=server, timeout=5).admin.command('ismaster')
logger.debug("server_info: {server_info}".format(server_info=server_info))
if int(server_info['ok']) != 1:
raise pymongo.errors.OperationFailure("{server} is not reachable".format(**locals))
return True
except (KeyError, AttributeError, pymongo.errors.AutoReconnect, pymongo.errors.OperationFailure):
if time.time() - t_start > timeout:
return False
time.sleep(0.1)
    def waiting_member_state(self, timeout=300):
        """Wait for all RS members to be in an acceptable state.

        Returns True once healthy, False if ``timeout`` seconds elapse first.
        """
        t_start = time.time()
        while not self.check_member_state():
            if time.time() - t_start > timeout:
                return False
            time.sleep(0.1)
        return True
    def waiting_config_state(self, timeout=300):
        """waiting while real state equal config state
        Args:
            timeout - specify how long, in seconds, a command can take before server times out.
        return True if operation success otherwise False
        """
        # Same polling shape as waiting_member_state, but compares the
        # observed member info against the replSet config document.
        t_start = time.time()
        while not self.check_config_state():
            if time.time() - t_start > timeout:
                return False
            time.sleep(0.1)
        return True
    def check_member_state(self):
        """Verify that all RS members have an acceptable state."""
        # Numeric replSetGetStatus states considered unhealthy here
        # (anything else -- e.g. PRIMARY/SECONDARY/ARBITER -- is acceptable).
        bad_states = (0, 3, 4, 5, 6, 9)
        try:
            rs_status = self.run_command('replSetGetStatus')
            bad_members = [member for member in rs_status['members']
                           if member['state'] in bad_states]
            if bad_members:
                return False
        except pymongo.errors.AutoReconnect:
            # catch 'No replica set primary available' Exception
            return False
        logger.debug("all members in correct state")
        return True
    def check_config_state(self):
        """Return True if real state equal config state otherwise False."""
        config = self.config
        self.update_server_map(config)
        for member in config['members']:
            # Expected view: defaults overlaid with this member's config entry.
            cfg_member_info = self.default_params.copy()
            cfg_member_info.update(member)
            # Remove attributes we can't check.
            for attr in ('priority', 'votes', 'tags', 'buildIndexes'):
                cfg_member_info.pop(attr, None)
            cfg_member_info['host'] = cfg_member_info['host'].lower()
            # Observed view: defaults overlaid with live member_info/rsInfo.
            real_member_info = self.default_params.copy()
            info = self.member_info(member["_id"])
            real_member_info["_id"] = info['_id']
            member_hostname = self._servers.hostname(info['server_id'])
            real_member_info["host"] = member_hostname.lower()
            real_member_info.update(info['rsInfo'])
            # Rename slaveDelay->secondaryDelaySecs to match SERVER-52349.
            if 'secondaryDelaySecs' in cfg_member_info:
                cfg_member_info.pop('slaveDelay', None)
                real_member_info['secondaryDelaySecs'] = real_member_info.pop('slaveDelay', None)
            logger.debug("real_member_info({member_id}): {info}".format(member_id=member['_id'], info=info))
            # Every expected key must match the observed value exactly.
            for key in cfg_member_info:
                if cfg_member_info[key] != real_member_info.get(key, None):
                    logger.debug("{key}: {value1} ! = {value2}".format(key=key, value1=cfg_member_info[key], value2=real_member_info.get(key, None)))
                    return False
        return True
def server_instances(self):
servers = []
for host in self.server_map.values():
server_id = self._servers.host_to_server_id(host)
servers.append(self._servers._storage[server_id])
return servers
    def restart(self, timeout=300, config_callback=None):
        """Restart each member of the replica set.

        Args:
            timeout - seconds each Server.restart may take
            config_callback - optional fn(config) -> config applied to each
                member's process config before it restarts
        """
        # Restart all members in parallel, then wait for the set to settle.
        with ThreadPoolExecutor(max_workers=10) as executor:
            futures = [executor.submit(s.restart, timeout, config_callback)
                       for s in self.server_instances()]
            # Surface the first restart failure, if any.
            for f in futures:
                f.result()
        self.waiting_member_state()
|
class ReplicaSet(BaseModel):
'''class represents ReplicaSet'''
def __init__(self, rs_params):
'''create replica set according members config
Args:
rs_params - replica set configuration
'''
pass
def restart_with_auth(self, cluster_auth_mode=None):
pass
def add_auth(config):
pass
def __len__(self):
pass
def cleanup(self):
'''remove all members without reconfig'''
pass
def member_id_to_host(self, member_id):
'''return hostname by member id'''
pass
def host2id(self, hostname):
'''return member id by hostname'''
pass
def update_server_map(self, config):
'''update server_map ({member_id:hostname})'''
pass
def repl_init(self, config):
'''create replica set by config
return True if replica set created successfuly, else False'''
pass
def reset(self):
'''Ensure all members are running and available.'''
pass
def repl_update(self, config):
'''Reconfig Replicaset with new config'''
pass
def info(self):
'''return information about replica set'''
pass
def repl_member_add(self, params):
'''create new mongod instances and add it to the replica set.
Args:
params - mongod params
return True if operation success otherwise False
'''
pass
def run_command(self, command, arg=None, is_eval=False, member_id=None):
'''run command on replica set
if member_id is specified command will be execute on this server
if member_id is not specified command will be execute on the primary
Args:
command - command string
arg - command argument
is_eval - if True execute command as eval
member_id - member id
return command's result
'''
pass
@property
def config(self):
'''return replica set config, use rs.conf() command'''
pass
def member_create(self, params, member_id):
'''start new mongod instances as part of replica set
Args:
params - member params
member_id - member index
return member config
'''
pass
def member_del(self, member_id, reconfig=True):
'''remove member from replica set
Args:
member_id - member index
reconfig - is need reconfig replica
return True if operation success otherwise False
'''
pass
def member_update(self, member_id, params):
'''update member's values with reconfig replica
Args:
member_id - member index
params - updates member params
return True if operation success otherwise False
'''
pass
def member_info(self, member_id):
'''return information about member'''
pass
def member_command(self, member_id, command):
'''apply command (start/stop/restart) to member instance of replica set
Args:
member_id - member index
command - string command (start/stop/restart)
return True if operation success otherwise False
'''
pass
def members(self):
'''return list of members information'''
pass
def primary(self):
'''return primary hostname of replica set'''
pass
def get_members_in_state(self, state):
'''return all members of replica set in specific state'''
pass
def connection(self, hostname=None, read_preference=pymongo.ReadPreference.PRIMARY, timeout=60):
'''Return MongoClient object, if hostname is given it is a directly connected client
Args:
hostname - connection uri
read_preference - default PRIMARY
timeout - specify how long, in seconds, a command can take before server times out.
'''
pass
def secondaries(self):
'''return list of secondaries members'''
pass
def arbiters(self):
'''return list of arbiters'''
pass
def hidden(self):
'''return list of hidden members'''
pass
def passives(self):
'''return list of passive servers'''
pass
def servers(self):
'''return list of servers (not hidden nodes)'''
pass
def wait_while_reachable(self, servers, timeout=60):
'''wait while all servers be reachable
Args:
servers - list of servers
'''
pass
def waiting_member_state(self, timeout=300):
'''Wait for all RS members to be in an acceptable state.'''
pass
def waiting_config_state(self, timeout=300):
'''waiting while real state equal config state
Args:
timeout - specify how long, in seconds, a command can take before server times out.
return True if operation success otherwise False
'''
pass
def check_member_state(self):
'''Verify that all RS members have an acceptable state.'''
pass
def check_config_state(self):
'''Return True if real state equal config state otherwise False.'''
pass
def server_instances(self):
pass
def restart_with_auth(self, cluster_auth_mode=None):
'''Restart each member of the replica set.'''
pass
| 38 | 33 | 15 | 1 | 12 | 3 | 3 | 0.28 | 1 | 10 | 1 | 0 | 35 | 17 | 35 | 41 | 585 | 58 | 414 | 144 | 376 | 115 | 346 | 139 | 309 | 14 | 2 | 4 | 103 |
231 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/mongo_orchestration/replica_sets.py
|
mongo_orchestration.replica_sets.ReplicaSets
|
class ReplicaSets(Singleton, Container):
    """ ReplicaSets is a dict-like collection for replica set"""
    _name = 'rs'
    # Container uses _obj_type to validate stored values.
    _obj_type = ReplicaSet
    releases = {}
    # NOTE(review): tempfile.mktemp is race-prone and deprecated; presumably
    # acceptable for a test-orchestration pid file -- confirm before hardening.
    pids_file = tempfile.mktemp(prefix="mongo-")

    def set_settings(self, releases=None, default_release=None):
        """set path to storage"""
        super(ReplicaSets, self).set_settings(releases, default_release)
        # Keep the Servers singleton in sync so members use the same binaries.
        Servers().set_settings(releases, default_release)

    def cleanup(self):
        """remove all servers with their data"""
        Servers().cleanup()
        self._storage and self._storage.clear()

    def create(self, rs_params):
        """create new replica set
        Args:
           rs_params - replica set configuration
        Return repl_id which can use to take the replica set
        """
        repl_id = rs_params.get('id', None)
        if repl_id is not None and repl_id in self:
            raise ReplicaSetError(
                "replica set with id={id} already exists".format(id=repl_id))
        repl = ReplicaSet(rs_params)
        self[repl.repl_id] = repl
        return repl.repl_id

    def info(self, repl_id):
        """return information about replica set
        Args:
            repl_id - replica set identity
        """
        return self[repl_id].info()

    def primary(self, repl_id):
        """find and return primary hostname
        Args:
            repl_id - replica set identity
        """
        repl = self[repl_id]
        primary = repl.primary()
        return repl.member_info(repl.host2id(primary))

    def remove(self, repl_id):
        """remove replica set with kill members
        Args:
            repl_id - replica set identity
        return True if operation success otherwise False
        """
        repl = self._storage.pop(repl_id)
        repl.cleanup()
        del(repl)

    def members(self, repl_id):
        """return list [{"_id": member_id, "host": hostname}] of replica set members
        Args:
            repl_id - replica set identity
        """
        return self[repl_id].members()

    def secondaries(self, repl_id):
        """return list of secondaries members"""
        return self[repl_id].secondaries()

    def arbiters(self, repl_id):
        """return list of arbiters"""
        return self[repl_id].arbiters()

    def hidden(self, repl_id):
        """return list of hidden members"""
        return self[repl_id].hidden()

    def passives(self, repl_id):
        """return list of passive nodes"""
        return self[repl_id].passives()

    def servers(self, repl_id):
        """return list of servers"""
        return self[repl_id].servers()

    def member_info(self, repl_id, member_id):
        """return information about member
        Args:
            repl_id - replica set identity
            member_id - member index
        """
        return self[repl_id].member_info(member_id)

    def command(self, rs_id, command, *args):
        """Call a ReplicaSet method."""
        rs = self._storage[rs_id]
        try:
            return getattr(rs, command)(*args)
        except AttributeError:
            raise ValueError("Cannot issue the command %r to ReplicaSet %s"
                             % (command, rs_id))

    def member_del(self, repl_id, member_id):
        """remove member from replica set (reconfig replica)
        Args:
            repl_id - replica set identity
            member_id - member index
        """
        repl = self[repl_id]
        result = repl.member_del(member_id)
        # Re-assign to persist the mutated ReplicaSet back into storage
        # (presumably via Container.__setitem__ -- confirm).
        self[repl_id] = repl
        return result

    def member_add(self, repl_id, params):
        """create instance and add it to existing replcia
        Args:
            repl_id - replica set identity
            params - member params
        return True if operation success otherwise False
        """
        repl = self[repl_id]
        member_id = repl.repl_member_add(params)
        # Persist the mutated ReplicaSet back into storage.
        self[repl_id] = repl
        return member_id

    def member_command(self, repl_id, member_id, command):
        """apply command(start, stop, restart) to the member of replica set
        Args:
            repl_id - replica set identity
            member_id - member index
            command - command: start, stop, restart
        return True if operation success otherwise False
        """
        repl = self[repl_id]
        result = repl.member_command(member_id, command)
        # Persist the mutated ReplicaSet back into storage.
        self[repl_id] = repl
        return result

    def member_update(self, repl_id, member_id, params):
        """apply new params to replica set member
        Args:
            repl_id - replica set identity
            member_id - member index
            params - new member's params
        return True if operation success otherwise False
        """
        repl = self[repl_id]
        result = repl.member_update(member_id, params)
        # Persist the mutated ReplicaSet back into storage.
        self[repl_id] = repl
        return result
|
class ReplicaSets(Singleton, Container):
''' ReplicaSets is a dict-like collection for replica set'''
def set_settings(self, releases=None, default_release=None):
'''set path to storage'''
pass
def cleanup(self):
'''remove all servers with their data'''
pass
def create(self, rs_params):
'''create new replica set
Args:
rs_params - replica set configuration
Return repl_id which can use to take the replica set
'''
pass
def info(self, repl_id):
'''return information about replica set
Args:
repl_id - replica set identity
'''
pass
def primary(self, repl_id):
'''find and return primary hostname
Args:
repl_id - replica set identity
'''
pass
def remove(self, repl_id):
'''remove replica set with kill members
Args:
repl_id - replica set identity
return True if operation success otherwise False
'''
pass
def members(self, repl_id):
'''return list [{"_id": member_id, "host": hostname}] of replica set members
Args:
repl_id - replica set identity
'''
pass
def secondaries(self, repl_id):
'''return list of secondaries members'''
pass
def arbiters(self, repl_id):
'''return list of arbiters'''
pass
def hidden(self, repl_id):
'''return list of hidden members'''
pass
def passives(self, repl_id):
'''return list of passive nodes'''
pass
def servers(self, repl_id):
'''return list of servers'''
pass
def member_info(self, repl_id, member_id):
'''return information about member
Args:
repl_id - replica set identity
member_id - member index
'''
pass
def command(self, rs_id, command, *args):
'''Call a ReplicaSet method.'''
pass
def member_del(self, repl_id, member_id):
'''remove member from replica set (reconfig replica)
Args:
repl_id - replica set identity
member_id - member index
'''
pass
def member_add(self, repl_id, params):
'''create instance and add it to existing replcia
Args:
repl_id - replica set identity
params - member params
return True if operation success otherwise False
'''
pass
def member_command(self, repl_id, member_id, command):
'''apply command(start, stop, restart) to the member of replica set
Args:
repl_id - replica set identity
member_id - member index
command - command: start, stop, restart
return True if operation success otherwise False
'''
pass
def member_update(self, repl_id, member_id, params):
'''apply new params to replica set member
Args:
repl_id - replica set identity
member_id - member index
params - new member's params
return True if operation success otherwise False
'''
pass
| 19 | 19 | 7 | 0 | 4 | 3 | 1 | 0.87 | 2 | 6 | 3 | 0 | 18 | 0 | 18 | 34 | 152 | 21 | 70 | 37 | 51 | 61 | 68 | 37 | 49 | 2 | 2 | 1 | 20 |
232 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/mongo_orchestration/server.py
|
mongo_orchestration.server.MyDaemon
|
class MyDaemon(Daemon):
    """Daemon wrapper that boots the Mongo Orchestration HTTP server."""

    def __init__(self, *args, **kwd):
        super(MyDaemon, self).__init__(*args, **kwd)

    def run(self):
        log = logging.getLogger(__name__)
        from bottle import run
        # Configure releases/environment and global socket timeout before
        # the HTTP server starts handling requests.
        setup(getattr(self.args, 'releases', {}), self.args.env)
        BaseModel.socket_timeout = self.args.socket_timeout
        # Only 'start' and 'restart' launch the HTTP server.
        if self.args.command not in ('start', 'restart'):
            return
        print("Starting Mongo Orchestration on port %d..." % self.args.port)
        try:
            log.debug('Starting HTTP server on host: %s; port: %d',
                      self.args.bind, self.args.port)
            run(get_app(), host=self.args.bind, port=self.args.port,
                debug=False, reloader=False, quiet=not self.args.no_fork,
                server=self.args.server)
        except Exception:
            traceback.print_exc(file=sys.stdout)
            log.exception('Could not start a new server.')
            raise

    def set_args(self, args):
        # Stash parsed CLI args for run() to consume.
        self.args = args
|
class MyDaemon(Daemon):
'''class uses to run server as daemon'''
def __init__(self, *args, **kwd):
pass
def run(self):
pass
def set_args(self, args):
pass
| 4 | 1 | 7 | 0 | 7 | 0 | 2 | 0.05 | 1 | 3 | 1 | 0 | 3 | 1 | 3 | 12 | 27 | 4 | 22 | 7 | 17 | 1 | 19 | 7 | 14 | 3 | 2 | 2 | 5 |
233 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/mongo_orchestration/servers.py
|
mongo_orchestration.servers.Server
|
class Server(BaseModel):
"""Class Server represents behaviour of mongo instances """
# Try to enable majority read concern?
enable_majority_read_concern = False
# default params for all mongo instances
mongod_default = {"oplogSize": 100, "logappend": True, "verbose": "v"}
# regular expression matching MongoDB versions
version_patt = re.compile(
r'(?:db version v?|MongoS version v?|mongos db version v?)'
r'(?P<version>(\d+\.)+\d+)',
re.IGNORECASE)
def __init_db(self, dbpath):
if not dbpath:
dbpath = orchestration_mkdtemp(prefix="mongod-")
if not os.path.exists(dbpath):
os.makedirs(dbpath)
return dbpath
def __init_logpath(self, log_path):
logger.debug('Creating log file for %s: %s', self.name, log_path)
log_dir = os.path.dirname(log_path)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
    def __init_config_params(self, config):
        """Conditionally enable options in the Server's config file.

        Mutates ``config`` in place based on the detected server version
        and whether this process is a mongos.
        """
        if self.version >= (2, 4):
            params = config.get('setParameter', {})
            # Set enableTestCommands by default but allow enableTestCommands:0.
            params.setdefault('enableTestCommands', 1)
            # Increase transaction lock timeout to reduce the chance that tests
            # fail with LockTimeout: "Unable to acquire lock {...} within 5ms".
            if self.version >= (4, 0) and not self.is_mongos:
                params.setdefault('maxTransactionLockRequestTimeoutMillis', 25)
            # Reduce periodicNoopIntervalSecs for faster driver change stream testing.
            if self.version >= (3, 6) and not self.is_mongos:
                # SERVER-31132 added periodicNoopIntervalSecs in 3.6.0.
                params.setdefault('periodicNoopIntervalSecs', 1)
                params.setdefault('writePeriodicNoops', 1)
            config['setParameter'] = params
        # no 'journal' after 6.1 onwards...
        # https://www.mongodb.com/docs/manual/reference/program/mongod/#options
        if self.version >= (6, 1):
            if config.get('journal'):
                config.pop('journal')
            if config.get('nojournal'):
                config.pop('nojournal')
        compressors = config.get('networkMessageCompressors')
        if compressors is None:
            # Enable the widest compressor set the server version supports.
            if self.version >= (4, 1, 7):
                # SERVER-38168 added zstd support in 4.1.7.
                config['networkMessageCompressors'] = 'zstd,zlib,snappy,noop'
            elif self.version >= (3, 5, 9):
                # SERVER-27310 added zlib support in 3.5.9.
                config['networkMessageCompressors'] = 'zlib,snappy,noop'
            elif self.version >= (3, 4):
                config['networkMessageCompressors'] = 'snappy,noop'
    def __init_mongod(self, params, add_auth=False):
        """Build a mongod config from defaults + ``params``.

        Returns (config_file_path, config_dict).
        """
        cfg = self.mongod_default.copy()
        cfg.update(params)
        # create db folder
        cfg['dbpath'] = self.__init_db(cfg.get('dbpath', None))
        if add_auth:
            cfg['auth'] = True
            if self.auth_key:
                cfg['keyFile'] = self.key_file
        # create logpath: goes in dbpath by default under process name + ".log"
        logpath = cfg.setdefault(
            'logpath', os.path.join(cfg['dbpath'], 'mongod.log'))
        self.__init_logpath(logpath)
        # find open port
        if 'port' not in cfg:
            cfg['port'] = process.PortPool().port(check=True)
        self.__init_config_params(cfg)
        # Read concern majority requires MongoDB >= 3.2, WiredTiger storage
        # engine, and protocol version 1.
        # Starting in MongoDB 5.0, enableMajorityReadConcern and --enableMajorityReadConcern
        # cannot be changed and are always set to true due to storage engine improvements.
        # The flag is removed altogether in MongoDB 8.1.
        # https://docs.mongodb.com/manual/reference/read-concern/
        if ('enableMajorityReadConcern' not in cfg
                and self.enable_majority_read_concern
                and self.version >= (3, 2)
                and self.version < (5, 0)):
            if (cfg.get('storageEngine', 'wiredTiger') == 'wiredTiger'
                    and cfg.get('protocolVersion', 1) == 1):
                cfg['enableMajorityReadConcern'] = True
            else:
                logger.info('Not adding enableMajorityReadConcern because '
                            'storageEngine=%r and protocolVersion=%r is '
                            'incompatible' % (cfg.get('storageEngine'),
                                              cfg.get('protocolVersion')))
        return process.write_config(cfg), cfg
    def __init_mongos(self, params):
        """Build a mongos config from ``params``.

        Returns (config_file_path, config_dict).
        """
        cfg = params.copy()
        # mongos has no dbpath; log goes in its own temp dir by default.
        log_path = cfg.setdefault(
            'logpath',
            os.path.join(orchestration_mkdtemp(prefix='mongos-'), 'mongos.log'))
        self.__init_logpath(log_path)
        # use keyFile
        if self.auth_key:
            cfg['keyFile'] = self.key_file
        if 'port' not in cfg:
            cfg['port'] = process.PortPool().port(check=True)
        self.__init_config_params(cfg)
        return process.write_config(cfg), cfg
    def __init__(self, name, procParams, sslParams={}, auth_key=None,
                 login='', password='', auth_source='admin', require_api_version=None):
        """Args:
            name - name of process (mongod or mongos)
            procParams - dictionary with params for mongo process
            auth_key - authorization key
            login - username for the admin collection
            password - password
            auth_source - the auth source database
            require_api_version - whether to require a stable api version
        """
        logger.debug("Server.__init__({name}, {procParams}, {sslParams}, {auth_key}, {login}, {password})".format(**locals()))
        self.name = name  # name of process
        self.login = login
        self.auth_source = auth_source
        self.password = password
        self.auth_key = auth_key
        self.pid = None  # process pid
        self.proc = None  # Popen object
        self.host = None  # hostname without port
        self.hostname = None  # string like host:port
        self.is_mongos = False
        self.kwargs = {}
        self.ssl_params = sslParams
        # Auth can only take effect after a restart with auth flags set;
        # until then credentials must not be used for connections.
        self.restart_required = self.login or self.auth_key
        self.require_api_version = require_api_version
        self.__version = None  # cached (major, minor, ...) tuple
        if self.ssl_params:
            self.kwargs.update(DEFAULT_SSL_OPTIONS)
        # Dispatch on the binary's basename: mongod vs mongos.
        proc_name = os.path.split(name)[1].lower()
        procParams.update(sslParams)
        if proc_name.startswith('mongod'):
            self.config_path, self.cfg = self.__init_mongod(procParams)
        elif proc_name.startswith('mongos'):
            self.is_mongos = True
            self.config_path, self.cfg = self.__init_mongos(procParams)
        else:
            self.config_path, self.cfg = None, {}
        self.port = self.cfg.get('port', None)  # connection port
    @property
    def connection(self):
        """return authenticated connection"""
        kwargs = self.kwargs.copy()
        # Credentials are only valid once the server has been restarted
        # with auth enabled (restart_required cleared).
        if self.login and not self.restart_required:
            kwargs["authSource"] = self.auth_source
            if self.x509_extra_user:
                kwargs["username"] = DEFAULT_SUBJECT
                kwargs["authMechanism"] = "MONGODB-X509"
            else:
                kwargs["username"] = self.login
                kwargs["password"] = self.password
        if self.require_api_version:
            kwargs["server_api"] = ServerApi(self.require_api_version)
        c = pymongo.MongoClient(
            self.hostname, fsync=True, directConnection=True,
            socketTimeoutMS=self.socket_timeout, **kwargs)
        # Block until the client is actually connected.
        connected(c)
        if self.require_api_version:
            c.admin.command("setParameter", 1, requireApiVersion=int(self.require_api_version))
        return c
@property
def version(self):
    """Get the version of MongoDB that this Server runs as a tuple.

    Lazily computed by running ``<binary> --version`` and parsing its
    output with ``self.version_patt``; the result is cached.

    Raises:
        ServersError: if the version cannot be parsed from the output.
    """
    if not self.__version:
        command = (self.name, '--version')
        logger.debug(command)
        stdout, _ = subprocess.Popen(
            command, stdout=subprocess.PIPE).communicate()
        # str() of the raw bytes is sufficient for the regex search below.
        version_output = str(stdout)
        match = re.search(self.version_patt, version_output)
        if match is None:
            raise ServersError(
                'Could not determine version of %s from string: %s'
                % (self.name, version_output))
        version_string = match.group('version')
        self.__version = tuple(map(int, version_string.split('.')))
    return self.__version
def freeze(self, timeout=60):
    """Run `replSetFreeze` on this server.

    Args:
        timeout - seconds for which the member should refuse election

    May raise `pymongo.errors.OperationFailure` if this server is not a
    replica set member.
    """
    return self.run_command('replSetFreeze', timeout)
def stepdown(self, timeout=60):
    """Run `replSetStepDown` on this server.

    Args:
        timeout - seconds for which the member should not seek re-election

    May raise `pymongo.errors.OperationFailure` if this server is not a
    replica set member.
    """
    try:
        self.run_command('replSetStepDown', timeout)
    except pymongo.errors.AutoReconnect:
        # The server drops all connections on stepdown, so a reconnect
        # error here is the expected success path.
        pass
def run_command(self, command, arg=None, is_eval=False):
    """Execute a database command against this server.

    Args:
        command - command string
        arg - command argument; may be a ``(name, kwargs)`` tuple
        is_eval - if True execute the command through ``eval``

    Returns the command's result.
    """
    mode = 'eval' if is_eval else 'command'
    if isinstance(arg, tuple):
        cmd_arg, extra = arg
    else:
        cmd_arg, extra = arg, {}
    runner = getattr(self.connection.admin, mode)
    return runner(command, cmd_arg, **extra)
@property
def is_alive(self):
    """True if the underlying mongod/mongos process is still running."""
    return process.proc_alive(self.proc)
def info(self):
    """Return info about this server as a dict.

    Always includes static process information; live server/status
    sections are filled in only when the server is reachable.
    """
    proc_info = {"name": self.name,
                 "params": self.cfg,
                 "alive": self.is_alive,
                 "optfile": self.config_path}
    if self.is_alive:
        proc_info['pid'] = self.proc.pid
    logger.debug("proc_info: {proc_info}".format(**locals()))
    mongodb_uri = ''
    server_info = {}
    status_info = {}
    if self.hostname and self.cfg.get('port', None):
        try:
            c = self.connection
            server_info = c.server_info()
            logger.debug("server_info: {server_info}".format(**locals()))
            mongodb_uri = 'mongodb://' + self.hostname
            status_info = {"primary": c.is_primary, "mongos": c.is_mongos}
            logger.debug("status_info: {status_info}".format(**locals()))
        except (pymongo.errors.AutoReconnect, pymongo.errors.OperationFailure, pymongo.errors.ConnectionFailure):
            # Server is unreachable or refused the command: report empty
            # live sections instead of failing the whole info() call.
            server_info = {}
            status_info = {}
    result = {"mongodb_uri": mongodb_uri, "statuses": status_info,
              "serverInfo": server_info, "procInfo": proc_info,
              "orchestration": 'servers'}
    if self.login:
        result['mongodb_auth_uri'] = self.mongodb_auth_uri(self.hostname)
    logger.debug("return {result}".format(result=result))
    return result
@property
def _is_locked(self):
lock_file = os.path.join(self.cfg['dbpath'], 'mongod.lock')
# If neither journal nor nojournal is specified, assume nojournal=True
journaling_enabled = (self.cfg.get('journal') or
not self.cfg.get('nojournal', True))
try:
with open(lock_file, 'r') as fd:
return (not journaling_enabled and len(fd.read())) > 0
except IOError as e:
# Permission denied -- mongod holds the lock on the file.
if platform.system() == 'Windows' and e.errno == errno.EACCES:
return True
return False
def start(self, timeout=300):
    """Start the server process.

    Repairs the dbpath first if a stale lock file is present, launches
    the process, then waits until it answers `isMaster`.  When auth is
    configured (``restart_required``), users are created and the server
    is restarted exactly once with auth enabled.

    Args:
        timeout - seconds to wait for the process to come up

    Returns True on success; raises TimeoutError on failure.
    """
    if self.is_alive:
        return True
    try:
        dbpath = self.cfg.get('dbpath')
        if dbpath and self._is_locked:
            # repair if needed
            logger.info("Performing repair on locked dbpath %s", dbpath)
            process.repair_mongo(self.name, self.cfg['dbpath'])
        self.proc, self.hostname = process.mprocess(
            self.name, self.config_path, self.cfg.get('port', None),
            timeout)
        self.pid = self.proc.pid
        logger.debug("pid={pid}, hostname={hostname}".format(pid=self.pid, hostname=self.hostname))
        self.host = self.hostname.split(':')[0]
        self.port = int(self.hostname.split(':')[1])
        # Wait for Server to respond to isMaster.
        # Only try 6 times, each ConnectionFailure is 30 seconds.
        max_attempts = 6
        for i in range(max_attempts):
            try:
                self.run_command('isMaster')
                break
            except pymongo.errors.ConnectionFailure:
                logger.exception('isMaster command failed:')
        else:
            # Loop exhausted without a successful isMaster.
            raise TimeoutError(
                "Server did not respond to 'isMaster' after %d attempts."
                % max_attempts)
    except (OSError, TimeoutError):
        logpath = self.cfg.get('logpath')
        if logpath and not os.path.exists(logpath):
            # The server never got far enough to create its log file.
            logger.exception(
                'Could not start Server')
            reraise(TimeoutError,
                    'Could not start Server. '
                    'Please check the mongo-orchestration log in ' +
                    LOG_FILE + ' for more details.')
        if logpath:
            # Copy the server logs into the mongo-orchestration logs.
            logger.error(
                "Could not start Server. Please find server log below.\n"
                "=====================================================")
            with open(logpath) as lp:
                logger.error(lp.read())
        else:
            logger.exception(
                'Could not start Server, and no logpath was provided!')
        reraise(TimeoutError,
                'Could not start Server. '
                'Please check server log located in ' +
                self.cfg.get('logpath', '<no logpath given>') +
                ' or the mongo-orchestration log in ' +
                LOG_FILE + ' for more details.')
    if self.restart_required:
        if self.login:
            # Add users to the appropriate database.
            self._add_users()
        self.stop()
        # Restart with keyfile and auth.
        if self.is_mongos:
            self.config_path, self.cfg = self.__init_mongos(self.cfg)
        else:
            # Add auth options to this Server's config file.
            self.config_path, self.cfg = self.__init_mongod(
                self.cfg, add_auth=True)
        self.restart_required = False
        self.start()
    return True
def shutdown(self):
    """Send shutdown command and wait for the process to exit.

    The shutdown command is attempted twice because an election can
    abort the first attempt.  Returns the process exit status, or None
    if the process had already exited.

    Raises:
        ServersError: if the process survives both attempts.
    """
    # Return early if this server has already exited.
    if not process.proc_alive(self.proc):
        return
    logger.info("Attempting to connect to %s", self.hostname)
    client = self.connection
    # If we have set requireApiVersion, disable it.
    if self.require_api_version:
        client.admin.command("setParameter", 1, requireApiVersion=0)
    # Attempt the shutdown command twice, the first attempt might fail due
    # to an election.
    attempts = 2
    for i in range(attempts):
        logger.info("Attempting to send shutdown command to %s",
                    self.hostname)
        try:
            # SERVER-46951: Disable quiesce mode which defaults to 15 seconds.
            client.admin.command("shutdown", force=True, timeoutSecs=0)
        except ConnectionFailure:
            # A shutdown succeeds by closing the connection but a
            # connection error does not necessarily mean that the shutdown
            # has succeeded.
            pass
        # Wait for the server to exit otherwise rerun the shutdown command.
        try:
            return process.wait_mprocess(self.proc, 5)
        except TimeoutError as exc:
            logger.info("Timed out waiting on process: %s", exc)
            continue
    raise ServersError("Server %s failed to shutdown after %s attempts" %
                       (self.hostname, attempts))
def stop(self):
    """Stop the server.

    Attempts a clean shutdown first and falls back to killing the
    process if the shutdown command fails.  Returns True if the process
    exited cleanly (exit status 0).
    """
    try:
        return self.shutdown() == 0
    except (PyMongoError, ServersError) as exc:
        logger.info("Killing %s with signal, shutdown command failed: %r",
                    self.name, exc)
        return process.kill_mprocess(self.proc)
def restart(self, timeout=300, config_callback=None):
    """restart server: stop() and start()

    Args:
        timeout - seconds to wait for the server to start again
        config_callback - optional callable that receives a copy of the
            current config dict and returns the config to restart with;
            the returned config is written to disk before starting

    Return status of start command.
    """
    self.stop()
    if config_callback:
        self.cfg = config_callback(self.cfg.copy())
        self.config_path = process.write_config(self.cfg)
    return self.start(timeout)
def reset(self):
    """Ensure Server has started and responds to isMaster.

    Returns the server's info() dict.
    """
    self.start()
    return self.info()
def _add_users(self):
    """Create the initial users on this server.

    Determines whether an extra X.509 user is required (when
    MONGODB-X509 is the only enabled auth mechanism) and delegates user
    creation to BaseModel._add_users against the auth source database.
    """
    try:
        # Determine the enabled authentication mechanisms.
        set_params = self.cfg.get('setParameter', {})
        auth_mechs = set_params.get(
            'authenticationMechanisms', '').split(',')
        # We need to add an additional user if MONGODB-X509 is the only
        # auth mechanism.
        self.x509_extra_user = (
            len(auth_mechs) == 1 and auth_mechs[0] == 'MONGODB-X509')
        super(Server, self)._add_users(self.connection[self.auth_source],
                                       self.version)
    except pymongo.errors.OperationFailure as e:
        # The user may have been added successfully even though the
        # command reported OperationFailure; log and carry on.
        logger.error("Error: {0}".format(e))
def cleanup(self):
    """Remove this server's data files and generated config file."""
    process.cleanup_mprocess(self.config_path, self.cfg)
|
class Server(BaseModel):
'''Class Server represents behaviour of mongo instances '''
def __init_db(self, dbpath):
pass
def __init_logpath(self, log_path):
pass
def __init_config_params(self, config):
'''Conditionally enable options in the Server's config file.'''
pass
def __init_mongod(self, params, add_auth=False):
pass
def __init_mongos(self, params):
pass
def __init__(self, name, procParams, sslParams={}, auth_key=None,
login='', password='', auth_source='admin', require_api_version=None):
'''Args:
name - name of process (mongod or mongos)
procParams - dictionary with params for mongo process
auth_key - authorization key
login - username for the admin collection
password - password
auth_source - the auth source database
require_api_version - whether to require a stable api version
'''
pass
@property
def connection(self):
'''return authenticated connection'''
pass
@property
def version(self):
'''Get the version of MongoDB that this Server runs as a tuple.'''
pass
def freeze(self, timeout=60):
'''Run `replSetFreeze` on this server.
May raise `pymongo.errors.OperationFailure` if this server is not a
replica set member.
'''
pass
def stepdown(self, timeout=60):
'''Run `replSetStepDown` on this server.
May raise `pymongo.errors.OperationFailure` if this server is not a
replica set member.
'''
pass
def run_command(self, command, arg=None, is_eval=False):
'''run command on the server
Args:
command - command string
arg - command argument
is_eval - if True execute command as eval
return command's result
'''
pass
@property
def is_alive(self):
pass
def info(self):
'''return info about server as dict object'''
pass
@property
def _is_locked(self):
pass
def start(self, timeout=300):
'''start server
return True of False'''
pass
def shutdown(self):
'''Send shutdown command and wait for the process to exit.'''
pass
def stop(self):
'''stop server'''
pass
def restart(self, timeout=300, config_callback=None):
'''restart server: stop() and start()
return status of start command
'''
pass
def reset(self):
'''Ensure Server has started and responds to isMaster.'''
pass
def _add_users(self):
pass
def cleanup(self):
'''remove server data'''
pass
| 26 | 15 | 20 | 2 | 15 | 4 | 4 | 0.28 | 1 | 11 | 2 | 0 | 21 | 19 | 21 | 27 | 458 | 56 | 320 | 89 | 293 | 88 | 260 | 78 | 238 | 11 | 2 | 3 | 76 |
234 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/mongo_orchestration/servers.py
|
mongo_orchestration.servers.Servers
|
class Servers(Singleton, Container):
    """Process-wide, dict-like registry of Server objects."""

    _name = 'servers'
    _obj_type = Server
    releases = {}
    pids_file = tempfile.mktemp(prefix="mongo-")

    def __getitem__(self, key):
        return self.info(key)

    def cleanup(self):
        """Stop and delete every registered server along with its data."""
        for server_id in self:
            self.remove(server_id)

    def create(self, name, procParams, sslParams={},
               auth_key=None, login=None, password=None,
               auth_source='admin', timeout=300, autostart=True,
               server_id=None, version=None, require_api_version=None):
        """Create a new server and register it.

        Args:
            name - process name or path
            procParams - dictionary with specific params for instance
            sslParams - dictionary with TLS-related options
            auth_key - authorization key
            login - username for the admin collection
            password - password
            auth_source - authentication source database
            timeout - specify how long, in seconds, a command can take before times out.
            autostart - (default: True), autostart instance
            server_id - the server_id to use, defaults to a new uuid
            version - the version of the server to use
            require_api_version - the stable api version to require

        Returns the server_id under which the server was registered.

        Raises:
            ServersError: if server_id is already in use.
        """
        name = os.path.split(name)[1]
        if server_id is None:
            server_id = str(uuid4())
        if server_id in self:
            raise ServersError("Server with id %s already exists." % server_id)
        binary_dir = self.bin_path(version)
        new_server = Server(os.path.join(binary_dir, name), procParams,
                            sslParams, auth_key, login, password,
                            auth_source,
                            require_api_version=require_api_version)
        if autostart:
            new_server.start(timeout)
        self[server_id] = new_server
        return server_id

    def restart(self, server_id, timeout=300, config_callback=None):
        self._storage[server_id].restart(timeout, config_callback)

    def remove(self, server_id):
        """Stop the identified server and delete its data.

        Args:
            server_id - server identity
        """
        doomed = self._storage.pop(server_id)
        doomed.stop()
        doomed.cleanup()

    def db_command(self, server_id, command, arg=None, is_eval=False):
        target = self._storage[server_id]
        outcome = target.run_command(command, arg, is_eval)
        self._storage[server_id] = target
        return outcome

    def command(self, server_id, command, *args):
        """Invoke a named method on a registered server.

        Args:
            server_id - server identity
            command - method name to invoke on the server
        """
        target = self._storage[server_id]
        try:
            # f(*()) == f(), so a single call covers both arg styles.
            outcome = getattr(target, command)(*args)
        except AttributeError:
            raise ValueError("Cannot issue the command %r to server %s"
                             % (command, server_id))
        self._storage[server_id] = target
        return outcome

    def info(self, server_id):
        """Return a dictionary describing the identified server.

        Args:
            server_id - server identity
        """
        description = self._storage[server_id].info()
        description['id'] = server_id
        return description

    def version(self, server_id):
        """Return the binary version tuple of the identified server.

        Args:
            server_id - server identity
        """
        return self._storage[server_id].version

    def hostname(self, server_id):
        return self._storage[server_id].hostname

    def host_to_server_id(self, hostname):
        # Reverse lookup: hostname -> server_id (None if not found).
        for server_id, registered in self._storage.items():
            if registered.hostname == hostname:
                return server_id

    def is_alive(self, server_id):
        return self._storage[server_id].is_alive
|
class Servers(Singleton, Container):
''' Servers is a dict-like collection for Server objects'''
def __getitem__(self, key):
pass
def cleanup(self):
'''remove all servers with their data'''
pass
def create(self, name, procParams, sslParams={},
auth_key=None, login=None, password=None,
auth_source='admin', timeout=300, autostart=True,
server_id=None, version=None, require_api_version=None):
'''create new server
Args:
name - process name or path
procParams - dictionary with specific params for instance
auth_key - authorization key
login - username for the admin collection
password - password
timeout - specify how long, in seconds, a command can take before times out.
autostart - (default: True), autostart instance
server_id - the server_id to use, defaults to a new uuid
version - the version of the server to use use
require_api_version - the stable api version to require
Return server_id
where server_id - id which can use to take the server from servers collection
'''
pass
def restart(self, server_id, timeout=300, config_callback=None):
pass
def remove(self, server_id):
'''remove server and data stuff
Args:
server_id - server identity
'''
pass
def db_command(self, server_id, command, arg=None, is_eval=False):
pass
def command(self, server_id, command, *args):
'''run command
Args:
server_id - server identity
command - command which apply to server
'''
pass
def info(self, server_id):
'''return dicionary object with info about server
Args:
server_id - server identity
'''
pass
def version(self, server_id):
'''return the binary version of the given server
Args:
server_id - server identity
'''
pass
def hostname(self, server_id):
pass
def host_to_server_id(self, hostname):
pass
def is_alive(self, server_id):
pass
| 13 | 7 | 8 | 0 | 5 | 3 | 2 | 0.54 | 2 | 5 | 2 | 0 | 12 | 0 | 12 | 28 | 110 | 13 | 63 | 30 | 47 | 34 | 57 | 27 | 44 | 4 | 2 | 2 | 20 |
235 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/mongo_orchestration/launch.py
|
mongo_orchestration.launch.Server
|
class Server(MCTestObject):
    """Handle for a single mongod managed through the orchestration API."""

    _resource = 'servers'

    def __init__(self, id=None, uri=None, **kwargs):
        self.id = id
        self.uri = uri
        # Extra keyword arguments become mongod process parameters.
        self._proc_params = kwargs

    def get_config(self):
        """Return the POST body used to create this server."""
        return {
            'name': 'mongod',
            'procParams': self.proc_params()}

    def start(self):
        """Create the server (first call) or restart it; return self."""
        if self.id is None:
            try:
                response = self._make_post_request()
            except requests.ConnectionError as exc:
                # Orchestration service unreachable: include the
                # underlying error so the failure can be diagnosed.
                print('Please start mongo-orchestration! (%s)' % exc)
                sys.exit(1)
            self.id = response['id']
            # Prefer the authenticated URI when auth is configured.
            self.uri = response.get('mongodb_auth_uri',
                                    response['mongodb_uri'])
        else:
            requests.post(
                _mo_url('servers', self.id), timeout=None,
                json={'action': 'start'}
            )
        return self

    def stop(self, destroy=True):
        """Stop the server; with destroy=True also delete it entirely."""
        if destroy:
            super(Server, self).stop()
        else:
            requests.post(_mo_url('servers', self.id), timeout=None,
                          json={'action': 'stop'})
|
class Server(MCTestObject):
def __init__(self, id=None, uri=None, **kwargs):
pass
def get_config(self):
pass
def start(self):
pass
def stop(self, destroy=True):
pass
| 5 | 0 | 8 | 0 | 8 | 0 | 2 | 0 | 1 | 2 | 0 | 0 | 4 | 3 | 4 | 10 | 37 | 5 | 32 | 11 | 27 | 0 | 23 | 10 | 18 | 3 | 2 | 2 | 7 |
236 |
10gen/mongo-orchestration
|
10gen_mongo-orchestration/mongo_orchestration/sharded_clusters.py
|
mongo_orchestration.sharded_clusters.ShardedCluster
|
class ShardedCluster(BaseModel):
"""class represents Sharding configuration"""
def __init__(self, params):
"""init configuration acording params"""
self.id = params.get('id', None) or str(uuid4())
self.admin_added = False
self.login = params.get('login', '')
self.password = params.get('password', '')
self.auth_key = params.get('auth_key', None)
self.auth_source = params.get('authSource', 'admin')
self._version = params.get('version')
self._require_api_version = params.get('requireApiVersion', '')
self._configsvrs = []
self._routers = []
self._shards = {}
self.tags = {}
self.sslParams = params.get('sslParams', {})
self.kwargs = {}
self.restart_required = self.login or self.auth_key
self.x509_extra_user = False
if self.sslParams:
self.kwargs.update(DEFAULT_SSL_OPTIONS)
self.enable_ipv6 = common.ipv6_enabled_sharded(params)
# Determine what to do with config servers via mongos version.
mongos_name = os.path.join(Servers().bin_path(self._version), 'mongos')
mongos = Server(name=mongos_name, procParams={})
self.mongos_version = mongos.version
mongos.cleanup()
configsvr_configs = params.get('configsvrs', [{}])
self.uses_rs_configdb = (self.mongos_version >= (3, 1, 2) and
len(configsvr_configs) == 1)
self.configdb_singleton = (
ReplicaSets() if self.uses_rs_configdb else Servers())
if self.uses_rs_configdb:
self.__init_configrs(configsvr_configs[0])
elif self.mongos_version >= (3, 3, 2):
raise ShardedClusterError(
'mongos >= 3.3.2 requires the config database to be backed by '
'a replica set.')
elif self.mongos_version >= (3, 1, 2) and len(configsvr_configs) != 3:
raise ShardedClusterError(
"mongos >= 3.1.2 needs a config replica set or 3 old-style "
"config servers.")
else:
self.__init_configsvrs(configsvr_configs)
with ThreadPoolExecutor(max_workers=10) as executor:
futures = [executor.submit(self.router_add, r)
for r in params.get('routers', [{}])]
for f in futures:
f.result()
def add_shard(cfg):
shard_params = cfg.get('shardParams', {})
shard_tags = shard_params.pop('tags', None)
info = self.member_add(cfg.get('id', None), shard_params)
if shard_tags:
self.tags[info['id']] = shard_tags
futures = [executor.submit(add_shard, cfg)
for cfg in params.get('shards', [])]
for f in futures:
f.result()
# SERVER-37631 changed 3.6 sharded cluster setup so that it's required
# to run refreshLogicalSessionCacheNow on the config server followed by
# each mongos. Only then will each 3.6 mongos correctly report
# logicalSessionTimeoutMinutes in its isMaster responses.
if self.mongos_version[:2] == (3, 6):
router_clients = self.router_connections()
is_master = router_clients[0].admin.command('isMaster')
if 'logicalSessionTimeoutMinutes' not in is_master:
self.config_connection().admin.command(
'refreshLogicalSessionCacheNow')
for client in router_clients:
client.admin.command('refreshLogicalSessionCacheNow')
if self.tags:
for sh_id in self.tags:
logger.debug('Add tags %r to %s' % (self.tags[sh_id], sh_id))
db = self.connection().get_database(
'config',
write_concern=write_concern.WriteConcern(fsync=True))
db.shards.update_one(
{'_id': sh_id},
{'$addToSet': {'tags': {'$each': self.tags[sh_id]}}})
shard_configs = [s.get('shardParams', {}).get('procParams', {})
for s in params.get('shards', [])]
if self.login:
# Do we need to add an extra x509 user?
def only_x509(config):
set_params = config.get('setParameter', {})
auth_mechs = set_params.get('authenticationMechanisms', '')
auth_mechs = auth_mechs.split(',')
if len(auth_mechs) == 1 and auth_mechs[0] == 'MONGODB-X509':
return True
return False
any_only_x509 = lambda l: any(map(only_x509, l))
rs_shard_configs = [
m.get('procParams', {})
for s in params.get('shards', [])
for m in s.get('shardParams', {}).get('members', [])
]
router_configs = params.get('routers', [])
self.x509_extra_user = (any_only_x509(configsvr_configs) or
any_only_x509(shard_configs) or
any_only_x509(rs_shard_configs) or
any_only_x509(router_configs))
self._add_users(
self.connection().get_database(
self.auth_source, write_concern=write_concern.WriteConcern(
fsync=True)), self.mongos_version)
# Create the user on all the shards.
roles = self._user_roles(self.connection())
for shard_id, config in zip(self._shards, shard_configs):
shard = self._shards[shard_id]
instance_id = shard['_id']
if shard.get('isServer'):
client = Servers()._storage[instance_id].connection
elif shard.get('isReplicaSet'):
client = ReplicaSets()._storage[instance_id].connection()
db = client[self.auth_source]
if self.x509_extra_user:
db.command('createUser', DEFAULT_SUBJECT, roles=roles)
create_user(db, self.mongos_version, self.login, self.password,
roles)
if self.restart_required:
# Do we need to add clusterAuthMode back?
cluster_auth_mode = None
for cfg in shard_configs:
cam = cfg.get('clusterAuthMode')
if cam:
cluster_auth_mode = cam
break
def restart_with_auth(server_or_rs):
server_or_rs.x509_extra_user = self.x509_extra_user
server_or_rs.auth_source = self.auth_source
server_or_rs.ssl_params = self.sslParams
server_or_rs.login = self.login
server_or_rs.password = self.password
server_or_rs.auth_key = self.auth_key
def add_auth(cfg):
if self.auth_key:
cfg['keyFile'] = self.key_file
# Add clusterAuthMode back in.
if cluster_auth_mode:
cfg['clusterAuthMode'] = cluster_auth_mode
return cfg
if isinstance(server_or_rs, ReplicaSet):
server_or_rs.restart_with_auth(cluster_auth_mode=cluster_auth_mode)
else:
server_or_rs.restart(config_callback=add_auth)
server_or_rs.restart_required = False
with ThreadPoolExecutor(max_workers=10) as executor:
servers = []
for config_id in self._configsvrs:
servers.append(self.configdb_singleton._storage[config_id])
for server_id in self._routers:
server = Servers()._storage[server_id]
servers.append(server)
for shard_id in self._shards:
shard = self._shards[shard_id]
instance_id = shard['_id']
klass = ReplicaSets if shard.get('isReplicaSet') else Servers
server_or_rs = klass()._storage[instance_id]
servers.append(server_or_rs)
futures = [executor.submit(restart_with_auth, s) for s in servers]
for f in futures:
f.result()
self.restart_required = False
if self._require_api_version:
for router in self.routers:
client = self.create_connection(router['hostname'])
client[self.auth_source].command("setParameter", 1, requireApiVersion=int(self._require_api_version))
def __init_configrs(self, rs_cfg):
"""Create and start a config replica set."""
# Use 'rs_id' to set the id for consistency, but need to rename
# to 'id' to use with ReplicaSets.create()
rs_cfg['id'] = rs_cfg.pop('rs_id', None)
for member in rs_cfg.setdefault('members', [{}]):
member['procParams'] = self._strip_auth(
member.get('procParams', {}))
member['procParams']['configsvr'] = True
if self.enable_ipv6:
common.enable_ipv6_single(member['procParams'])
rs_cfg['sslParams'] = self.sslParams
self._configsvrs.append(ReplicaSets().create(rs_cfg))
def __init_configsvrs(self, params):
"""create and start config servers"""
self._configsvrs = []
for cfg in params:
# Remove flags that turn on auth.
cfg = self._strip_auth(cfg)
server_id = cfg.pop('server_id', None)
version = cfg.pop('version', self._version)
cfg.update({'configsvr': True})
if self.enable_ipv6:
common.enable_ipv6_single(cfg)
self._configsvrs.append(Servers().create(
'mongod', cfg, sslParams=self.sslParams, autostart=True,
version=version, server_id=server_id))
def __len__(self):
return len(self._shards)
@property
def configsvrs(self):
"""return list of config servers"""
if self.uses_rs_configdb:
rs_id = self._configsvrs[0]
mongodb_uri = ReplicaSets().info(rs_id)['mongodb_uri']
return [{'id': rs_id, 'mongodb_uri': mongodb_uri}]
return [{'id': h_id, 'hostname': Servers().hostname(h_id)}
for h_id in self._configsvrs]
@property
def routers(self):
"""return list of routers"""
return [{'id': h_id, 'hostname': Servers().hostname(h_id)} for h_id in self._routers]
@property
def members(self):
"""return list of members"""
# return [{'id': shard, 'hostname': Servers().hostname(info['_id'])} for shard, info in self._shards.items()]
return [self.member_info(item) for item in self._shards]
@property
def router(self):
"""return first available router"""
for server in self._routers:
info = Servers().info(server)
if info['procInfo'].get('alive', False):
return {'id': server, 'hostname': Servers().hostname(server)}
def router_add(self, params):
"""add new router (mongos) into existing configuration"""
# featureFlagLoadBalancer was added in 5.0.7 (SERVER-60679) and
# removed in 6.1.0 (SERVER-64205).
if (5, 0, 7) <= self.mongos_version[:3] <= (6, 1, -1):
set_params = params.get('setParameter', {})
if 'loadBalancerPort' in set_params:
set_params.setdefault('featureFlagLoadBalancer', True)
if self.uses_rs_configdb:
# Replica set configdb.
rs_id = self._configsvrs[0]
config_members = ReplicaSets().members(rs_id)
configdb = '%s/%s' % (
rs_id, ','.join(m['host'] for m in config_members))
else:
configdb = ','.join(Servers().hostname(item)
for item in self._configsvrs)
server_id = params.pop('server_id', None)
version = params.pop('version', self._version)
params.update({'configdb': configdb})
if self.enable_ipv6:
common.enable_ipv6_single(params)
# Remove flags that turn auth on.
params = self._strip_auth(params)
server_id = Servers().create(
'mongos', params, sslParams=self.sslParams, autostart=True,
version=version, server_id=server_id)
self._routers.append(server_id)
return {'id': server_id, 'hostname': Servers().hostname(server_id)}
def create_connection(self, host):
kwargs = self.kwargs.copy()
if self.login and not self.restart_required:
kwargs["authSource"] = self.auth_source
kwargs["username"] = self.login
kwargs["password"] = self.password
if self._require_api_version:
kwargs["server_api"] = ServerApi(self._require_api_version)
c = MongoClient(
host, w='majority', fsync=True,
socketTimeoutMS=self.socket_timeout, **kwargs)
connected(c)
return c
def connection(self):
return self.create_connection(self.router['hostname'])
def config_connection(self):
"""Return a MongoClient connected to the replica set config db."""
return self.create_connection(self.configsvrs[0]['mongodb_uri'])
def router_connections(self):
"""Return a list of MongoClients, one for each mongos."""
clients = []
for server in self._routers:
if Servers().is_alive(server):
client = self.create_connection(Servers().hostname(server))
clients.append(client)
return clients
def router_command(self, command, arg=None, is_eval=False):
"""run command on the router server
Args:
command - command string
arg - command argument
is_eval - if True execute command as eval
return command's result
"""
mode = is_eval and 'eval' or 'command'
if isinstance(arg, tuple):
name, d = arg
else:
name, d = arg, {}
result = getattr(self.connection().admin, mode)(command, name, **d)
return result
def router_remove(self, router_id):
"""remove """
result = Servers().remove(router_id)
del self._routers[ self._routers.index(router_id) ]
return { "ok": 1, "routers": self._routers }
def _add(self, shard_uri, name):
"""execute addShard command"""
return self.router_command("addShard", (shard_uri, {"name": name}), is_eval=False)
def member_add(self, member_id=None, params=None):
"""add new member into existing configuration"""
member_id = member_id or str(uuid4())
if self.enable_ipv6:
common.enable_ipv6_repl(params)
if 'members' in params:
# is replica set
for member in params['members']:
if not member.get('rsParams', {}).get('arbiterOnly', False):
member.setdefault('procParams', {})['shardsvr'] = True
rs_params = params.copy()
# Turn 'rs_id' -> 'id', to be consistent with 'server_id' below.
rs_params['id'] = rs_params.pop('rs_id', None)
rs_params.update({'sslParams': self.sslParams})
rs_params['version'] = params.pop('version', self._version)
rs_params['members'] = [
self._strip_auth(params) for params in rs_params['members']]
rs_id = ReplicaSets().create(rs_params)
members = ReplicaSets().members(rs_id)
cfgs = rs_id + r"/" + ','.join([item['host'] for item in members])
result = self._add(cfgs, member_id)
if result.get('ok', 0) == 1:
self._shards[result['shardAdded']] = {'isReplicaSet': True, '_id': rs_id}
# return self._shards[result['shardAdded']].copy()
return self.member_info(member_id)
else:
# is single server
params.setdefault('procParams', {})['shardsvr'] = True
params.update({'autostart': True, 'sslParams': self.sslParams})
params = params.copy()
params['procParams'] = self._strip_auth(
params.get('procParams', {}))
params.setdefault('version', self._version)
logger.debug("servers create params: {params}".format(**locals()))
server_id = Servers().create('mongod', **params)
result = self._add(Servers().hostname(server_id), member_id)
if result.get('ok', 0) == 1:
self._shards[result['shardAdded']] = {'isServer': True, '_id': server_id}
return self.member_info(member_id)
def member_info(self, member_id):
"""return info about member"""
info = self._shards[member_id].copy()
info['id'] = member_id
info['tags'] = self.tags.get(member_id, list())
return info
def _remove(self, shard_name):
"""remove member from configuration"""
result = self.router_command("removeShard", shard_name, is_eval=False)
if result['ok'] == 1 and result['state'] == 'completed':
shard = self._shards.pop(shard_name)
if shard.get('isServer', False):
Servers().remove(shard['_id'])
if shard.get('isReplicaSet', False):
ReplicaSets().remove(shard['_id'])
return result
def member_remove(self, member_id):
"""remove member from configuration"""
return self._remove(member_id)
def reset(self):
"""Ensure all shards, configs, and routers are running and available."""
# Ensure all shards by calling "reset" on each.
for shard_id in self._shards:
if self._shards[shard_id].get('isReplicaSet'):
singleton = ReplicaSets()
elif self._shards[shard_id].get('isServer'):
singleton = Servers()
singleton.command(self._shards[shard_id]['_id'], 'reset')
# Ensure all config servers by calling "reset" on each.
for config_id in self._configsvrs:
self.configdb_singleton.command(config_id, 'reset')
# Ensure all routers by calling "reset" on each.
for router_id in self._routers:
Servers().command(router_id, 'reset')
return self.info()
def info(self):
"""return info about configuration"""
uri = ','.join(x['hostname'] for x in self.routers)
mongodb_uri = 'mongodb://' + uri
result = {'id': self.id,
'shards': self.members,
'configsvrs': self.configsvrs,
'routers': self.routers,
'mongodb_uri': mongodb_uri,
'orchestration': 'sharded_clusters'}
if self.login:
result['mongodb_auth_uri'] = self.mongodb_auth_uri(uri)
return result
def cleanup(self):
"""cleanup configuration: stop and remove all servers"""
with ThreadPoolExecutor(max_workers=10) as executor:
futures = []
for _id, shard in self._shards.items():
if shard.get('isServer', False):
futures.append(executor.submit(
Servers().remove, shard['_id']))
if shard.get('isReplicaSet', False):
futures.append(executor.submit(
ReplicaSets().remove, shard['_id']))
for mongos in self._routers:
futures.append(executor.submit(Servers().remove, mongos))
for config_id in self._configsvrs:
futures.append(executor.submit(
self.configdb_singleton.remove, config_id))
for f in futures:
f.result()
self._configsvrs = []
self._routers = []
self._shards = {}
|
class ShardedCluster(BaseModel):
'''class represents Sharding configuration'''
def __init__(self, params):
'''init configuration acording params'''
pass
def add_shard(cfg):
pass
def only_x509(config):
pass
def restart_with_auth(server_or_rs):
pass
def add_auth(cfg):
pass
def __init_configrs(self, rs_cfg):
'''Create and start a config replica set.'''
pass
def __init_configsvrs(self, params):
'''create and start config servers'''
pass
def __len__(self):
pass
@property
def configsvrs(self):
'''return list of config servers'''
pass
@property
def routers(self):
'''return list of routers'''
pass
@property
def members(self):
'''return list of members'''
pass
@property
def routers(self):
'''return first available router'''
pass
def router_add(self, params):
'''add new router (mongos) into existing configuration'''
pass
def create_connection(self, host):
pass
def connection(self):
pass
def config_connection(self):
'''Return a MongoClient connected to the replica set config db.'''
pass
def router_connections(self):
'''Return a list of MongoClients, one for each mongos.'''
pass
def router_command(self, command, arg=None, is_eval=False):
'''run command on the router server
Args:
command - command string
arg - command argument
is_eval - if True execute command as eval
return command's result
'''
pass
def router_remove(self, router_id):
'''remove '''
pass
def _add(self, shard_uri, name):
'''execute addShard command'''
pass
def member_add(self, member_id=None, params=None):
'''add new member into existing configuration'''
pass
def member_info(self, member_id):
'''return info about member'''
pass
def _remove(self, shard_name):
'''remove member from configuration'''
pass
def member_remove(self, member_id):
'''remove member from configuration'''
pass
def reset(self):
'''Ensure all shards, configs, and routers are running and available.'''
pass
def info(self):
'''return info about configuration'''
pass
def cleanup(self):
'''cleanup configuration: stop and remove all servers'''
pass
| 32 | 21 | 18 | 1 | 14 | 2 | 4 | 0.14 | 1 | 11 | 5 | 0 | 23 | 20 | 23 | 29 | 469 | 60 | 358 | 131 | 326 | 51 | 300 | 125 | 272 | 28 | 2 | 3 | 96 |
237 |
1and1/infrascope
|
1and1_infrascope/src/infrascope/cli.py
|
infrascope.cli.InfrascopeCLI
|
class InfrascopeCLI(App):
""" The main `infrascope` command line application.
"""
log = logging.getLogger(__name__)
def __init__(self):
"""Set up main command."""
project = pkg_info()
super(InfrascopeCLI, self).__init__(
description=project["description"],
version='0.1', # TODO: need to get version at runtime
command_manager=CommandManager('infrascope.cli'),
)
def initialize_app(self, argv):
"""Called after main argument parsing, but before command processing."""
self.log.debug('initialize_app: %r', argv)
def prepare_to_run_command(self, cmd):
"""Called after command identification, and before executing it."""
self.log.debug('prepare_to_run_command %s', cmd.__class__.__name__)
def clean_up(self, cmd, result, err):
"""Called after command execution; `err` is an unhandled exception, or `None`."""
self.log.debug('clean_up %s', cmd.__class__.__name__)
if err:
self.log.debug('got an error: %s', err)
|
class InfrascopeCLI(App):
''' The main `infrascope` command line application.
'''
def __init__(self):
'''Set up main command.'''
pass
def initialize_app(self, argv):
'''Called after main argument parsing, but before command processing.'''
pass
def prepare_to_run_command(self, cmd):
'''Called after command identification, and before executing it.'''
pass
def clean_up(self, cmd, result, err):
'''Called after command execution; `err` is an unhandled exception, or `None`.'''
pass
| 5 | 5 | 5 | 0 | 4 | 1 | 1 | 0.41 | 1 | 1 | 0 | 0 | 4 | 0 | 4 | 4 | 28 | 5 | 17 | 7 | 12 | 7 | 13 | 7 | 8 | 2 | 1 | 1 | 5 |
238 |
1and1/infrascope
|
1and1_infrascope/src/infrascope/configuration.py
|
infrascope.configuration.Configuration
|
class Configuration(object):
""" Reads and manages the configuation.
"""
# Singleton instance
instance = None
@classmethod
def create(cls, config_file=None):
""" Return the default configuration.
"""
if cls.instance is None:
cls.instance = cls(config_file)
# Load config file, possibly overwriting the defaults
cls.instance.load_ini()
if config_file and config_file != cls.instance.config_file:
raise RuntimeError("Configuration initialized a second time with a different file!")
return cls.instance
def __init__(self, config_file=None):
""" Initialize configuration.
"""
self.config_file = config_file
# Set defaults
#self.default("apt", "repositories", "primary", list)
#self.default("apt", "repositories", "secondary", list)
self._validate()
def _validate(self):
""" Validate a loaded configuration.
"""
#if isinstance(self.foobar, basestring):
# try:
# self.foobar = int(self.foobar, 10)
# except (ValueError, TypeError), exc:
# raise ValueError("Bad foobar %r: %s" % (self.foobar, exc))
def load_ini(self):
""" Load the given .INI file.
"""
if not self.config_file:
return
# Load INI file
ini_file = ConfigParser.SafeConfigParser()
if not ini_file.read(self.config_file):
raise ConfigParser.ParsingError("Global configuration file %r not found!" % (
self.config_file,
))
"""
# Make sure there's our global settings section
if not ini_file.has_section(self.SECTION):
raise ConfigParser.ParsingError("%r needs to have a [%s] section!" % (
self.config_file, self.SECTION,
))
# Get the given values
for key, val in ini_file.items(self.SECTION):
# Ensure that all names are known (to prevent uncaught typos)
if key not in self.KEYS:
raise ConfigParser.ParsingError("%r has an unknown key %s in the [%s] section!" % (
self.config_file, key, self.SECTION,
))
# Do some shell-like path expansion
val = os.path.expanduser(os.path.expandvars(val))
# Set as attribute for easy access
setattr(self, key, val)
"""
self._validate()
|
class Configuration(object):
''' Reads and manages the configuation.
'''
@classmethod
def create(cls, config_file=None):
''' Return the default configuration.
'''
pass
def __init__(self, config_file=None):
''' Initialize configuration.
'''
pass
def _validate(self):
''' Validate a loaded configuration.
'''
pass
def load_ini(self):
''' Load the given .INI file.
'''
pass
| 6 | 5 | 15 | 3 | 5 | 8 | 2 | 1.7 | 1 | 3 | 0 | 0 | 3 | 1 | 4 | 4 | 81 | 19 | 23 | 9 | 17 | 39 | 20 | 8 | 15 | 3 | 1 | 1 | 8 |
239 |
1and1/infrascope
|
1and1_infrascope/src/infrascope/repositories.py
|
infrascope.repositories.Versions
|
class Versions(Lister):
"""Version reporting of artifacts in repositories."""
log = logging.getLogger(__name__)
def take_action(self, args):
"""Execute 'repo versions' command."""
self.log.debug("repo versions: %r", args)
return (("Package", "reponame"), (("foo", "1.1"),))
|
class Versions(Lister):
'''Version reporting of artifacts in repositories.'''
def take_action(self, args):
'''Execute 'repo versions' command.'''
pass
| 2 | 2 | 4 | 0 | 3 | 1 | 1 | 0.4 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 9 | 2 | 5 | 3 | 3 | 2 | 5 | 3 | 3 | 1 | 1 | 0 | 1 |
240 |
20c/django-handleref
|
src/django_handleref/manager.py
|
django_handleref.manager.HandleRefQuerySet
|
class HandleRefQuerySet(models.QuerySet):
"""
Custom queryset to provide handleref querying
"""
def last_change(self):
"""
queries the database for the most recent time an object was either created or
updated
returns datetime or None if db is empty
"""
try:
cdt = self.latest("created")
udt = self.latest("updated")
# print cdt, udt
return max(cdt.created, udt.updated)
except ObjectDoesNotExist:
return None
def since(self, timestamp=None, version=None, deleted=False):
"""
Queries the database for objects updated since timestamp or version
Arguments:
timestamp <DateTime=None|int=None> if specified return all objects modified since
that specified time. If integer is submitted it is treated like a unix timestamp
version <int=None> if specified return all objects with a version greater
then the one specified
deleted <bool=False> if true include soft-deleted objects in the result
Either timestamp or version needs to be provided
"""
qset = self
if timestamp is not None:
if isinstance(timestamp, numbers.Real):
timestamp = datetime.datetime.fromtimestamp(timestamp)
qset = qset.filter(
models.Q(created__gt=timestamp) | models.Q(updated__gt=timestamp)
)
if version is not None:
qset = qset.filter(version__gt=version)
if not deleted:
qset = qset.undeleted()
return qset
def undeleted(self):
"""
Only return objects that are not soft-deleted
"""
return self.exclude(status="deleted")
|
class HandleRefQuerySet(models.QuerySet):
'''
Custom queryset to provide handleref querying
'''
def last_change(self):
'''
queries the database for the most recent time an object was either created or
updated
returns datetime or None if db is empty
'''
pass
def since(self, timestamp=None, version=None, deleted=False):
'''
Queries the database for objects updated since timestamp or version
Arguments:
timestamp <DateTime=None|int=None> if specified return all objects modified since
that specified time. If integer is submitted it is treated like a unix timestamp
version <int=None> if specified return all objects with a version greater
then the one specified
deleted <bool=False> if true include soft-deleted objects in the result
Either timestamp or version needs to be provided
'''
pass
def undeleted(self):
'''
Only return objects that are not soft-deleted
'''
pass
| 4 | 4 | 18 | 5 | 7 | 6 | 3 | 0.96 | 1 | 2 | 0 | 0 | 3 | 0 | 3 | 3 | 63 | 18 | 23 | 7 | 19 | 22 | 21 | 7 | 17 | 5 | 1 | 2 | 8 |
241 |
20c/django-handleref
|
src/django_handleref/manager.py
|
django_handleref.manager.HandleRefManager
|
class HandleRefManager(models.Manager):
"""
Custom manager to provide handleref querying
"""
@property
def tag(self):
return self.prop("tag")
def prop(self, key):
"""
Convenience function for retrieving properties off the
HandleRef class instance on the model
"""
return getattr(self.model._handleref, key)
def get_queryset(self):
return HandleRefQuerySet(self.model, using=self._db)
def last_change(self, **kwargs):
return self.get_queryset().last_change(**kwargs)
def since(self, **kwargs):
return self.get_queryset().since(**kwargs)
def undeleted(self):
return self.get_queryset().undeleted()
|
class HandleRefManager(models.Manager):
'''
Custom manager to provide handleref querying
'''
@property
def tag(self):
pass
def prop(self, key):
'''
Convenience function for retrieving properties off the
HandleRef class instance on the model
'''
pass
def get_queryset(self):
pass
def last_change(self, **kwargs):
pass
def since(self, **kwargs):
pass
def undeleted(self):
pass
| 8 | 2 | 3 | 0 | 2 | 1 | 1 | 0.5 | 1 | 1 | 1 | 0 | 6 | 1 | 6 | 6 | 29 | 8 | 14 | 9 | 6 | 7 | 13 | 7 | 6 | 1 | 1 | 0 | 6 |
242 |
20c/django-handleref
|
src/django_handleref/admin.py
|
django_handleref.admin.VersionAdmin
|
class VersionAdmin(admin.ModelAdmin):
"""
ModelAdmin mixin that will enable handleref version
history for any model it is attached to
"""
# admin view templates, grappelli versions exist
# at "handleref/grappelli/"
object_history_template = "handleref/object_history.html"
version_details_template = "handleref/version_details.html"
version_revert_template = "handleref/version_revert.html"
version_rollback_template = "handleref/version_rollback.html"
# which version abstraction to use for operations
# set to reversion as default
version_cls = ReversionVersion
reverter_cls = ReversionReverter
# display these fields in the object history listing
# fields starting with the `version_` prefix will
# automatically redirect to the Version objects property
#
# so `version_id` will go to Version.id for example
version_list_fields = [
("version_id", _("Version ID")),
("version", _("Version")),
("version_date", _("Date")),
("version_user", _("User")),
("status", _("Object Status")),
("version_changes", _("Changes")),
]
def get_urls(self):
urls = super().get_urls()
opts = self.model._meta
info = (
opts.app_label,
opts.model_name,
)
my_urls = [
re_path(
r"^([^/]+)/history/revert/process/$",
self.admin_site.admin_view(self.version_revert_process),
name="%s_%s_version_revert_process" % info,
),
re_path(
r"^([^/]+)/history/revert/$",
self.admin_site.admin_view(self.version_revert_view),
name="%s_%s_version_revert" % info,
),
re_path(
r"^([^/]+)/history/(\d+)/rollback/process/$",
self.admin_site.admin_view(self.version_rollback_process),
name="%s_%s_version_rollback_process" % info,
),
re_path(
r"^([^/]+)/history/(\d+)/rollback/$",
self.admin_site.admin_view(self.version_rollback_view),
name="%s_%s_version_rollback" % info,
),
re_path(
r"^([^/]+)/history/(\d+)/$",
self.admin_site.admin_view(self.version_details_view),
name="%s_%s_version" % info,
),
]
return my_urls + urls
def history_query_set(self, object_id):
"""
Arguments:
- object_id(int)
Returns:
- reversion.models.Version queryset
"""
instance = self.model.objects.get(pk=object_id)
# TODO: abstract this away from reversion
# we are only supporting django-reversion it this point
# so it's ok for now
history_qset = reversion.models.Version.objects.get_for_object(instance)
history_qset = history_qset.order_by("-revision_id")
return history_qset
def history_entry(self, version, previous):
"""
Compile object history list entry dict
Argument(s):
- version(handleref.Version.Version): newer version
- previous(handleref.version.Version): older version
if no older version exists will be an empty Version instance
Returns:
- dict: {"id","fields","comment"}
"""
fields = []
entry = {"id": version.id, "fields": fields, "comment": version.comment}
for field, label in self.version_list_fields:
if field == "version_changes":
fields.append((field, version.changes(previous)))
elif field == "version_changed_fields":
fields.append((field, version.changed_fields(previous)))
elif field.find("version_") == 0:
fields.append((field, getattr(version, field.split("_")[1])))
else:
fields.append((field, version.data.get(field, "")))
return entry
def history(self, history_qset):
"""
Compile and return history listing data from history queryset
Argument(s):
- history_qset (queryset): queryset of versions
Returns:
- list: list containing `history_entry` dicts
"""
history = []
versions = [v for v in history_qset]
versions.reverse()
# If there are no previous versions, return an empty history
if not versions:
return history
previous = self.version_cls(versions[0]).previous
for _version in versions:
version = self.version_cls(_version)
history.insert(0, self.history_entry(version, previous))
previous = version
return history
def history_view(self, request, object_id):
"""
object history view
"""
# require superuser
if not request.user.is_superuser:
return redirect("admin:login")
action = request.POST.get("action")
# if action is set to revert, it means one or more versions
# have been selected to preview for revert so redirect to
# reversion version view
if action == "revert":
return self.version_revert_view(request, object_id)
history_qset = self.history_query_set(object_id)
listing = HistoryListing(self, request, history_qset)
history = self.history(listing.result_list)
context = dict(
self.admin_site.each_context(request),
object_id=object_id,
model=self.model,
action_form=HistoryActionsForm(),
history=history,
history_qset=history_qset,
listing=listing,
version_list_fields=self.version_list_fields,
field_count=len(self.version_list_fields),
title=_("Version History"),
)
return super().history_view(request, object_id, context)
def version_details_view(self, request, object_id, version_id, extra_context=None):
"""
Show version details
"""
# require superuser
if not request.user.is_superuser:
return redirect("admin:login")
version = self.version_cls(reversion.models.Version.objects.get(id=version_id))
previous = version.previous
context = dict(
self.admin_site.each_context(request),
object_id=object_id,
version_id=version_id,
instance=self.model.objects.get(id=object_id),
opts=self.model._meta,
version=version,
previous=previous,
changes=version.changes(previous),
)
context.update(extra_context or {})
return TemplateResponse(request, self.version_details_template, context)
def version_revert_view(self, request, object_id, extra_context=None):
"""
Show version revert preview / confiformation view
"""
# require superuser
if not request.user.is_superuser:
return redirect("admin:login")
version_ids = request.GET.getlist(
"version_id", request.POST.getlist("version_id", [])
)
if not isinstance(version_ids, list):
version_ids = [version_ids]
versions = [
self.version_cls(reversion.models.Version.objects.get(id=version_id))
for version_id in version_ids
]
changes = self.version_cls.changes_summary(versions)
context = dict(
self.admin_site.each_context(request),
object_id=object_id,
instance=self.model.objects.get(id=object_id),
opts=self.model._meta,
versions=versions,
count=len(versions),
changes=changes,
)
context.update(extra_context or {})
return TemplateResponse(request, self.version_revert_template, context)
def version_revert_process(self, request, object_id, extra_context=None):
"""
Process revert version(s)
"""
# require superuser
if not request.user.is_superuser:
return redirect("admin:login")
# compile field versions from request args
# by looking for any arg that has the `field_`
# prefix - treat their values as version pks
field_versions = {}
for key, value in request.POST.items():
m = re.match("field_(.+)", key)
if not m:
continue
if not int(value):
continue
field_versions[m.group(1)] = self.version_cls(int(value))
errors = {}
try:
# revert
reverter = self.reverter_cls()
instance = self.model.objects.get(pk=object_id)
reverter.revert_fields(instance, field_versions, user=request.user)
except ValidationError as exc:
# validation errors are collected
errors = exc.message_dict
except Exception as exc:
# any other errors are logged
errors = {"non_field_errors": ["Internal Error (check server logs)"]}
logger.error(traceback.format_exc(exc))
# if there were errors we want to show the revert preview again
# and include error information
if errors:
return self.version_revert_view(
request, object_id, extra_context={"errors": errors}
)
opts = self.model._meta
# on success return to the object history view
return redirect(
"{}:{}_{}_history".format(
self.admin_site.name, opts.app_label, opts.model_name
),
instance.id,
)
def version_rollback_view(self, request, object_id, version_id, extra_context=None):
"""
Version rollback preview / confirmation view
"""
# require superuser
if not request.user.is_superuser:
return redirect("admin:login")
version = self.version_cls(int(version_id))
context = dict(
self.admin_site.each_context(request),
object_id=object_id,
instance=self.model.objects.get(id=object_id),
opts=self.model._meta,
version=version,
)
context.update(extra_context or {})
return TemplateResponse(request, self.version_rollback_template, context)
def version_rollback_process(
self, request, object_id, version_id, extra_context=None
):
"""
Version rollback process
"""
# require super user
if not request.user.is_superuser:
return redirect("admin:login")
version = self.version_cls(int(version_id))
errors = {}
try:
# rollback
reverter = self.reverter_cls()
instance = self.model.objects.get(pk=object_id)
reverter.rollback(instance, version, user=request.user)
except ValidationError as exc:
# collect validation errors
errors = exc.message_dict
except Exception as exc:
# log any other errors
errors = {"non_field_errors": ["Internal Error (check server logs)"]}
logger.error(traceback.format_exc(exc))
# if there were errors show the rollback preview / confirmation
# view again with error information
if errors:
return self.version_rollback_view(
request, object_id, version_id, extra_context={"errors": errors}
)
opts = self.model._meta
# on success return to object history
return redirect(
"{}:{}_{}_history".format(
self.admin_site.name, opts.app_label, opts.model_name
),
instance.id,
)
|
class VersionAdmin(admin.ModelAdmin):
'''
ModelAdmin mixin that will enable handleref version
history for any model it is attached to
'''
def get_urls(self):
pass
def history_query_set(self, object_id):
'''
Arguments:
- object_id(int)
Returns:
- reversion.models.Version queryset
'''
pass
def history_entry(self, version, previous):
'''
Compile object history list entry dict
Argument(s):
- version(handleref.Version.Version): newer version
- previous(handleref.version.Version): older version
if no older version exists will be an empty Version instance
Returns:
- dict: {"id","fields","comment"}
'''
pass
def history_query_set(self, object_id):
'''
Compile and return history listing data from history queryset
Argument(s):
- history_qset (queryset): queryset of versions
Returns:
- list: list containing `history_entry` dicts
'''
pass
def history_view(self, request, object_id):
'''
object history view
'''
pass
def version_details_view(self, request, object_id, version_id, extra_context=None):
'''
Show version details
'''
pass
def version_revert_view(self, request, object_id, extra_context=None):
'''
Show version revert preview / confiformation view
'''
pass
def version_revert_process(self, request, object_id, extra_context=None):
'''
Process revert version(s)
'''
pass
def version_rollback_view(self, request, object_id, version_id, extra_context=None):
'''
Version rollback preview / confirmation view
'''
pass
def version_rollback_process(
self, request, object_id, version_id, extra_context=None
):
'''
Version rollback process
'''
pass
| 11 | 10 | 34 | 8 | 20 | 7 | 3 | 0.38 | 1 | 7 | 2 | 1 | 10 | 1 | 10 | 10 | 388 | 92 | 215 | 63 | 202 | 81 | 121 | 58 | 110 | 8 | 1 | 2 | 33 |
243 |
20c/django-handleref
|
src/django_handleref/admin.py
|
django_handleref.admin.HistoryListing
|
class HistoryListing(ChangeList):
"""
History listing view derived from how django admin does it's
ChangeList. This is mostly so we can support pagination
"""
def __init__(self, model_admin, request, qset):
try:
self.page_num = int(request.GET.get("p", 1))
except ValueError:
self.page_num = 1
self.list_per_page = 100
self.paginator = model_admin.get_paginator(request, qset, self.list_per_page)
result_count = self.paginator.count
self.show_all = False
self.can_show_all = False
self.result_count = result_count
self.full_result_count = qset.count()
self.multi_page = result_count > self.list_per_page
self.result_list = self.paginator.page(self.page_num).object_list
self.params = dict(request.GET.items())
if "p" in self.params:
del self.params["p"]
if "e" in self.params:
del self.params["e"]
|
class HistoryListing(ChangeList):
'''
History listing view derived from how django admin does it's
ChangeList. This is mostly so we can support pagination
'''
def __init__(self, model_admin, request, qset):
pass
| 2 | 1 | 23 | 4 | 19 | 0 | 4 | 0.2 | 1 | 3 | 0 | 0 | 1 | 10 | 1 | 1 | 30 | 6 | 20 | 13 | 18 | 4 | 20 | 13 | 18 | 4 | 1 | 1 | 4 |
244 |
20c/django-handleref
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/20c_django-handleref/tests/models.py
|
tests.models.Sub.HandleRef
|
class HandleRef:
tag = "sub"
|
class HandleRef:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 0 | 0 | 0 |
245 |
20c/django-handleref
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/20c_django-handleref/src/django_handleref/models.py
|
django_handleref.models.HandleRefModel.Meta
|
class Meta:
abstract = True
get_latest_by = "updated"
ordering = (
"-updated",
"-created",
)
|
class Meta:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0 | 7 | 4 | 6 | 0 | 4 | 4 | 3 | 0 | 0 | 0 | 0 |
246 |
20c/django-handleref
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/20c_django-handleref/src/django_handleref/rest/serializers.py
|
django_handleref.rest.serializers.HandleRefSerializer.Meta
|
class Meta:
model = HandleRefModel
fields = ["created", "updated", "status"]
|
class Meta:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 3 | 3 | 2 | 0 | 3 | 3 | 2 | 0 | 0 | 0 | 0 |
247 |
20c/django-handleref
|
src/django_handleref/models.py
|
django_handleref.models.CreatedDateTimeField
|
class CreatedDateTimeField(models.DateTimeField):
"""DateTimeField that's set to now() on create"""
def __init__(self, verbose_name=None, name=None, **kwargs):
if not verbose_name:
verbose_name = _("Created")
# force timestamp options
kwargs["auto_now"] = False
kwargs["auto_now_add"] = True
super(models.DateTimeField, self).__init__(verbose_name, name, **kwargs)
|
class CreatedDateTimeField(models.DateTimeField):
'''DateTimeField that's set to now() on create'''
def __init__(self, verbose_name=None, name=None, **kwargs):
pass
| 2 | 1 | 8 | 1 | 6 | 1 | 2 | 0.29 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 1 | 11 | 2 | 7 | 2 | 5 | 2 | 7 | 2 | 5 | 2 | 1 | 1 | 2 |
248 |
20c/django-handleref
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/20c_django-handleref/tests/reversion_models.py
|
tests.reversion_models.VersionedOrg.HandleRef
|
class HandleRef:
tag = "org"
delete_cascade = ["sub_entities"]
|
class HandleRef:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 3 | 3 | 2 | 0 | 3 | 3 | 2 | 0 | 0 | 0 | 0 |
249 |
20c/django-handleref
|
src/django_handleref/admin.py
|
django_handleref.admin.HistoryActionsForm
|
class HistoryActionsForm(forms.Form):
action = forms.ChoiceField(choices=(("revert", _("Revert")),))
|
class HistoryActionsForm(forms.Form):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 1 | 0 | 0 |
250 |
20c/django-handleref
|
src/django_handleref/models.py
|
django_handleref.models.HandleRefMeta
|
class HandleRefMeta(models.base.ModelBase):
def __new__(cls, name, bases, attrs):
super_new = super().__new__
# only init subclass
parents = [b for b in bases if isinstance(b, HandleRefMeta)]
if not parents:
return super_new(cls, name, bases, attrs)
new = super_new(cls, name, bases, attrs)
opts = attrs.pop("HandleRef", None)
if not opts:
opts = getattr(new, "HandleRef", None)
setattr(new, "_handleref", HandleRefOptions(new, opts))
return new
|
class HandleRefMeta(models.base.ModelBase):
def __new__(cls, name, bases, attrs):
pass
| 2 | 0 | 15 | 3 | 11 | 1 | 3 | 0.08 | 1 | 2 | 1 | 1 | 1 | 0 | 1 | 1 | 16 | 3 | 12 | 6 | 10 | 1 | 12 | 6 | 10 | 3 | 1 | 1 | 3 |
251 |
20c/django-handleref
|
src/django_handleref/version.py
|
django_handleref.version.ReversionVersion
|
class ReversionVersion(Version):
"""
Version abtraction for django-reversion
"""
def __init__(self, version):
"""
Argument(s):
- Version(int|reversion.models.Version): can be either
a reversion version instance or the id of one
"""
if isinstance(version, int):
version = reversion.models.Version.objects.get(id=version)
super().__init__(version)
@property
def date(self):
"""
Returns:
- datetime: date of revision
"""
return self.version.revision.date_created
@property
def user(self):
"""
Returns:
- User: user that authored revision
- None: if no such user exists
"""
return self.version.revision.user
@property
def comment(self):
"""
Returns:
- str: comment stored with revision
- None: if no such comment exists
"""
return self.version.revision.comment
@property
def id(self):
"""
Returns:
- int:version instance id
"""
return self.version.id
@property
def data(self):
"""
Returns:
- dict: object data
"""
return self.version.field_dict
@property
def model(self):
"""
Returns:
- model: django model for the object
snapshotted by this version
"""
return self.version._model
@property
def data_sorted(self):
"""
Returns:
- list: list of (field, value) tuples for
object data
"""
data = []
for field, value in self.data.items():
data.append((field, value))
return sorted(data, key=lambda i: i[0])
@property
def previous(self):
"""
Returns:
- Version: previous version - if no previous version exists
the Version instance will be empty
"""
if hasattr(self, "_previous"):
return self._previous
versions = reversion.models.Version.objects.get_for_object(
self.version.object
).order_by("-id")
for version in versions:
if version.id < self.version.id:
self._previous = self.__class__(version)
return self._previous
return None
@property
def next(self):
"""
Returns:
- Version: next version - if no next version exists
the Version instance will be empty
"""
if hasattr(self, "_next"):
return self._next
qset = reversion.models.Version.objects.filter(
content_type_id=self.version.content_type_id,
object_id=self.version.object_id,
id__gt=self.version.id,
)
qset = qset.order_by("id")
self._next = self.__class__(qset.first())
return self._next
|
class ReversionVersion(Version):
'''
Version abtraction for django-reversion
'''
def __init__(self, version):
'''
Argument(s):
- Version(int|reversion.models.Version): can be either
a reversion version instance or the id of one
'''
pass
@property
def date(self):
'''
Returns:
- datetime: date of revision
'''
pass
@property
def user(self):
'''
Returns:
- User: user that authored revision
- None: if no such user exists
'''
pass
@property
def comment(self):
'''
Returns:
- str: comment stored with revision
- None: if no such comment exists
'''
pass
@property
def id(self):
'''
Returns:
- int:version instance id
'''
pass
@property
def data(self):
'''
Returns:
- dict: object data
'''
pass
@property
def model(self):
'''
Returns:
- model: django model for the object
snapshotted by this version
'''
pass
@property
def data_sorted(self):
'''
Returns:
- list: list of (field, value) tuples for
object data
'''
pass
@property
def previous(self):
'''
Returns:
- Version: previous version - if no previous version exists
the Version instance will be empty
'''
pass
@property
def next(self):
'''
Returns:
- Version: next version - if no next version exists
the Version instance will be empty
'''
pass
| 20 | 11 | 12 | 3 | 4 | 5 | 2 | 0.94 | 1 | 2 | 0 | 0 | 10 | 2 | 10 | 23 | 141 | 38 | 53 | 27 | 33 | 50 | 38 | 18 | 27 | 4 | 1 | 2 | 16 |
252 |
20c/django-handleref
|
src/django_handleref/models.py
|
django_handleref.models.HandleRefOptions
|
class HandleRefOptions:
delete_cascade = []
def __init__(self, cls, opts):
if opts:
for key, value in opts.__dict__.items():
if key.startswith("__"):
continue
setattr(self, key, value)
if not getattr(self, "tag", None):
self.tag = cls.__name__.lower()
|
class HandleRefOptions:
def __init__(self, cls, opts):
pass
| 2 | 0 | 9 | 1 | 8 | 0 | 5 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 12 | 2 | 10 | 5 | 8 | 0 | 10 | 5 | 8 | 5 | 0 | 3 | 5 |
253 |
20c/django-handleref
|
src/django_handleref/models.py
|
django_handleref.models.UpdatedDateTimeField
|
class UpdatedDateTimeField(models.DateTimeField):
"""DateTimeField that's set to now() every update"""
def __init__(self, verbose_name=None, name=None, **kwargs):
if not verbose_name:
verbose_name = _("Updated")
# force timestamp options
kwargs["auto_now"] = True
kwargs["auto_now_add"] = False
super(models.DateTimeField, self).__init__(verbose_name, name, **kwargs)
|
class UpdatedDateTimeField(models.DateTimeField):
'''DateTimeField that's set to now() every update'''
def __init__(self, verbose_name=None, name=None, **kwargs):
pass
| 2 | 1 | 8 | 1 | 6 | 1 | 2 | 0.29 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 1 | 11 | 2 | 7 | 2 | 5 | 2 | 7 | 2 | 5 | 2 | 1 | 1 | 2 |
254 |
20c/django-handleref
|
src/django_handleref/version.py
|
django_handleref.version.Diff
|
class Diff:
"""
Describes changes between two versions
"""
# when generating diff ignore these fields
diff_ignore_fields = [
"version",
"created",
"updated",
]
def __init__(self, version_a, version_b):
"""
Argument(s):
- version_a(Version): older version
- version_b(Version): newer version
"""
self.version_a = version_a
self.version_b = version_b
@property
def changes(self):
"""
Compile and return a dict describing changes between
the two versions tracked in this diff
Returns:
- dict: dict mapping field names to a dict describing
changed made to the field
{
field_name: {
"old": old_value,
"changed": changed_value,
},
...
}
"""
if not self.version_a or not self.version_b:
return None
if not self.version_a.version or not self.version_b.version:
return None
data_a = self.version_a.data
data_b = self.version_b.data
diff = {}
for field, value_b in data_b.items():
if field in self.diff_ignore_fields:
continue
value_a = data_a.get(field)
if value_a == value_b:
continue
if isinstance(value_a, str) or isinstance(value_a, int):
diff[field] = {"old": value_a, "changed": value_b}
else:
diff[field] = {
"old": self.format_value(value_a),
"changed": self.format_value(value_b),
}
return diff
def format_value(self, value):
return f"{value}"
|
class Diff:
'''
Describes changes between two versions
'''
def __init__(self, version_a, version_b):
'''
Argument(s):
- version_a(Version): older version
- version_b(Version): newer version
'''
pass
@property
def changes(self):
'''
Compile and return a dict describing changes between
the two versions tracked in this diff
Returns:
- dict: dict mapping field names to a dict describing
changed made to the field
{
field_name: {
"old": old_value,
"changed": changed_value,
},
...
}
'''
pass
def format_value(self, value):
pass
| 5 | 3 | 21 | 6 | 9 | 6 | 3 | 0.68 | 0 | 2 | 0 | 0 | 3 | 2 | 3 | 3 | 80 | 23 | 34 | 13 | 29 | 23 | 25 | 12 | 21 | 7 | 0 | 2 | 9 |
255 |
20c/django-handleref
|
src/django_handleref/version.py
|
django_handleref.version.ReversionReverter
|
class ReversionReverter(Reverter):
"""
Reverter abstraction for django-reversion
"""
def revert_fields(self, instance, field_versions, user=None):
"""
Revert a set of fields
Argument(s):
- instance(model instance): instance of django model
to be reverted
- field_versions(dict): dict mapping field names to
version pk
Keyword Argument(s):
- user(User): user that authored the revision
Raises:
- ValidationError: if any of the fields fail validation
"""
with reversion.create_revision():
if user:
reversion.set_user(user)
version_ids = [
"{}".format(version.data["version"])
for version in field_versions.values()
]
version_ids = list(set(version_ids))
reversion.set_comment(
"reverted some fields via versions: {}".format(", ".join(version_ids))
)
super().revert_fields(instance, field_versions)
def rollback(self, instance, version, user=None):
"""
Rollback to a specific version
Argument(s):
- instance(model instance): instance of django model
to be reverted
- version(Version): version to roll back to
Keyword Argument(s):
- user(User): user that authored the revision
Raises:
- ValidationError: if any of the fields fail validation
"""
with reversion.create_revision():
if user:
reversion.set_user(user)
reversion.set_comment(
"rollback to version {}".format(version.data["version"])
)
super().rollback(instance, version)
|
class ReversionReverter(Reverter):
'''
Reverter abstraction for django-reversion
'''
def revert_fields(self, instance, field_versions, user=None):
'''
Revert a set of fields
Argument(s):
- instance(model instance): instance of django model
to be reverted
- field_versions(dict): dict mapping field names to
version pk
Keyword Argument(s):
- user(User): user that authored the revision
Raises:
- ValidationError: if any of the fields fail validation
'''
pass
def rollback(self, instance, version, user=None):
'''
Rollback to a specific version
Argument(s):
- instance(model instance): instance of django model
to be reverted
- version(Version): version to roll back to
Keyword Argument(s):
- user(User): user that authored the revision
Raises:
- ValidationError: if any of the fields fail validation
'''
pass
| 3 | 3 | 30 | 8 | 11 | 12 | 2 | 1.18 | 1 | 3 | 0 | 0 | 2 | 0 | 2 | 6 | 67 | 19 | 22 | 4 | 19 | 26 | 15 | 4 | 12 | 2 | 1 | 2 | 4 |
256 |
20c/django-handleref
|
src/django_handleref/version.py
|
django_handleref.version.Reverter
|
class Reverter:
"""
Allows to revert / rollback changes
"""
def revert_fields(self, instance, field_versions, **kwargs):
"""
Revert a set of fields
Argument(s):
- instance(model instance): instance of django model
to be reverted
- field_versions(dict): dict mapping field names to
version object
Raises:
- ValidationError: if any of the fields fail validation
"""
for field, version in field_versions.items():
setattr(instance, field, version.data[field])
if field == "status":
self.validate_status_change(instance, version.data[field])
instance.full_clean()
instance.save()
def rollback(self, instance, version, **kwargs):
"""
Rollback to a specific version
Argument(s):
- instance(model instance): instance of django model
to be reverted
- version(Version): version to roll back to
Raises:
- ValidationError: if any of the fields fail validation
"""
for field, value in version.data.items():
if field in ["created", "updated", "version"]:
continue
if field == "status":
self.validate_status_change(instance, value)
setattr(instance, field, value)
instance.full_clean()
instance.save()
def validate_status_change(self, instance, status):
"""
Validate a status value change - this will make sure
an object cannot be undeleted if a parent relationship
is still flagged as deleted
Argument(s):
- instance(model instance): instance of django model
to be reverted
- status(str)
"""
for field in instance.__class__._meta.get_fields():
if not field.is_relation or not field.many_to_one:
continue
try:
relation = getattr(instance, field.name)
except Exception:
continue
self.validate_parent_status(instance, relation, status)
def validate_parent_status(self, instance, parent, status):
if not hasattr(parent, "HandleRef"):
return
if parent.status == "deleted" and status != "deleted":
raise ValidationError(
{
"non_field_errors": "Parent object {} is currently flagged as deleted."
"This object may not be undeleted while the parent "
"is still deleted.".format(parent)
}
)
|
class Reverter:
'''
Allows to revert / rollback changes
'''
def revert_fields(self, instance, field_versions, **kwargs):
'''
Revert a set of fields
Argument(s):
- instance(model instance): instance of django model
to be reverted
- field_versions(dict): dict mapping field names to
version object
Raises:
- ValidationError: if any of the fields fail validation
'''
pass
def rollback(self, instance, version, **kwargs):
'''
Rollback to a specific version
Argument(s):
- instance(model instance): instance of django model
to be reverted
- version(Version): version to roll back to
Raises:
- ValidationError: if any of the fields fail validation
'''
pass
def validate_status_change(self, instance, status):
'''
Validate a status value change - this will make sure
an object cannot be undeleted if a parent relationship
is still flagged as deleted
Argument(s):
- instance(model instance): instance of django model
to be reverted
- status(str)
'''
pass
def validate_parent_status(self, instance, parent, status):
pass
| 5 | 4 | 20 | 4 | 9 | 7 | 4 | 0.84 | 0 | 1 | 0 | 1 | 4 | 0 | 4 | 4 | 90 | 22 | 37 | 9 | 32 | 31 | 31 | 9 | 26 | 4 | 0 | 2 | 14 |
257 |
20c/django-handleref
|
tests/models.py
|
tests.models.Org
|
class Org(HandleRefModel):
name = models.CharField(max_length=255, unique=True)
website = models.URLField(blank=True)
notes = models.TextField(blank=True)
class HandleRef:
tag = "org"
delete_cascade = ["sub_entities"]
def __unicode__(self):
return self.name
|
class Org(HandleRefModel):
class HandleRef:
def __unicode__(self):
pass
| 3 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 6 | 11 | 2 | 9 | 8 | 6 | 0 | 9 | 8 | 6 | 1 | 3 | 0 | 1 |
258 |
20c/django-handleref
|
tests/models.py
|
tests.models.Sub
|
class Sub(HandleRefModel):
name = models.CharField(max_length=255, unique=True)
org = models.ForeignKey(Org, on_delete=models.CASCADE, related_name="sub_entities")
class HandleRef:
tag = "sub"
def __unicode__(self):
return self.name
|
class Sub(HandleRefModel):
class HandleRef:
def __unicode__(self):
pass
| 3 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 6 | 9 | 2 | 7 | 6 | 4 | 0 | 7 | 6 | 4 | 1 | 3 | 0 | 1 |
259 |
20c/django-handleref
|
tests/reversion_models.py
|
tests.reversion_models.OrgAdmin
|
class OrgAdmin(VersionAdmin, ModelAdmin):
pass
|
class OrgAdmin(VersionAdmin, ModelAdmin):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 2 | 0 | 0 |
260 |
20c/django-handleref
|
tests/test_datetime_fields.py
|
tests.test_datetime_fields.FieldTestCase
|
class FieldTestCase(TestCase):
def setUp(self):
self.org = Org.objects.create(**data_org)
self.created = datetime.now()
self.one_sec = timedelta(seconds=1)
pass
# org = Org.objects.create(**data_org)
def test_obj_creation(self):
assert self.one_sec > self.created - self.org.created
assert self.one_sec > self.created - self.org.updated
def test_updated(self):
self.org.name = "Updated"
self.org.save()
now = datetime.now()
assert self.one_sec > self.created - self.org.created
assert self.one_sec > now - self.org.updated
|
class FieldTestCase(TestCase):
def setUp(self):
pass
def test_obj_creation(self):
pass
def test_updated(self):
pass
| 4 | 0 | 5 | 0 | 5 | 0 | 1 | 0.07 | 1 | 3 | 1 | 0 | 3 | 3 | 3 | 3 | 20 | 4 | 15 | 8 | 11 | 1 | 15 | 8 | 11 | 1 | 1 | 0 | 3 |
261 |
20c/django-handleref
|
tests/test_manager.py
|
tests.test_manager.ManagerTests
|
class ManagerTests(TestCase):
"""
Test handle-ref manager functionality
"""
@classmethod
def setUpTestData(cls):
cls.initTime = time.time()
cls.orgs = []
i = 0
while i < 10:
cls.orgs.append(Org.objects.create(name="Org %d" % i))
i = i + 1
cls.orgs[8].delete()
cls.orgs[9].delete(hard=True)
def test_last_change(self):
org = self.orgs[8]
self.assertEqual(Org.handleref.last_change(), org.updated)
def test_since(self):
org = self.orgs[0]
t = time.time()
# we wait a second, so we have a valid timestamp to query
time.sleep(1)
# update the org
org.name = "Updated name 0"
org.save()
# the org we just updated should be only org in the query set
qset = Org.handleref.since(timestamp=t)
self.assertEqual(qset.count(), 1)
self.assertEqual(qset.first().id, org.id)
# we also want to check that org #8 is in the qset when
# the deleted parameter is passed as true
qset = Org.handleref.since(timestamp=self.initTime, deleted=True)
self.assertIn(self.orgs[8].id, [o.id for o in qset])
# and that it's missing if we don't
qset = Org.handleref.since(timestamp=self.initTime, deleted=True)
self.assertIn(self.orgs[8].id, [o.id for o in qset])
def test_undeleted(self):
qset = Org.handleref.undeleted()
self.assertNotIn(self.orgs[8].id, [o.id for o in qset])
|
class ManagerTests(TestCase):
'''
Test handle-ref manager functionality
'''
@classmethod
def setUpTestData(cls):
pass
def test_last_change(self):
pass
def test_since(self):
pass
def test_undeleted(self):
pass
| 6 | 1 | 10 | 2 | 7 | 2 | 1 | 0.3 | 1 | 1 | 1 | 0 | 3 | 1 | 4 | 4 | 49 | 10 | 30 | 13 | 24 | 9 | 29 | 11 | 24 | 2 | 1 | 1 | 5 |
262 |
20c/django-handleref
|
tests/test_models.py
|
tests.test_models.ModelTests
|
class ModelTests(TestCase):
def test_model_init(self):
org = Org()
self.assertEqual("org", org.ref_tag)
self.assertEqual("org", Org.handleref.tag)
with self.assertRaises(ValueError):
org.handle
widget = Widget()
# no tag specified on model, should default to lower-case
# class name
self.assertEqual("widget", widget.ref_tag)
self.assertEqual("widget", Widget.handleref.tag)
self.assertEqual("passthrough", widget._handleref.custom_option)
self.assertEqual("passthrough", Widget.handleref.prop("custom_option"))
def test_soft_delete(self):
org = Org.objects.create(name="TEST SOFT DELETE", status="ok")
sub1 = Sub.objects.create(name="TEST SUB 1", status="ok", org=org)
sub2 = Sub.objects.create(name="TEST SUB 2", status="deleted", org=org)
time.sleep(1)
u = sub2.updated
self.assertEqual(org.status, "ok")
org.delete()
org.refresh_from_db()
self.assertEqual(org.status, "deleted")
sub1.refresh_from_db()
sub2.refresh_from_db()
self.assertEqual(sub1.status, "deleted")
self.assertEqual(sub2.updated, u)
def test_hard_delete(self):
org = Org.objects.create(name="TEST HARD DELETE", status="ok")
self.assertEqual(org.status, "ok")
org.delete(hard=True)
with self.assertRaises(Org.DoesNotExist):
org.refresh_from_db()
|
class ModelTests(TestCase):
def test_model_init(self):
pass
def test_soft_delete(self):
pass
def test_hard_delete(self):
pass
| 4 | 0 | 14 | 3 | 10 | 1 | 1 | 0.06 | 1 | 4 | 3 | 0 | 3 | 0 | 3 | 3 | 45 | 11 | 32 | 11 | 28 | 2 | 32 | 11 | 28 | 1 | 1 | 1 | 3 |
263 |
20c/django-handleref
|
src/django_handleref/models.py
|
django_handleref.models.HandleRefModel
|
class HandleRefModel(models.Model, metaclass=HandleRefMeta):
"""
Provides timestamps for creation and change times,
versioning (using django-reversion) as well as
the ability to soft-delete
"""
id = models.AutoField(primary_key=True)
status = models.CharField(_("Status"), max_length=255, blank=True)
created = CreatedDateTimeField()
updated = UpdatedDateTimeField()
version = models.IntegerField(default=0)
handleref = HandleRefManager()
objects = models.Manager()
class Meta:
abstract = True
get_latest_by = "updated"
ordering = (
"-updated",
"-created",
)
@property
def ref_tag(self):
if not self._handleref.tag:
raise ValueError("tag not set")
return self._handleref.tag
@property
def handle(self):
if not self.id:
raise ValueError("id not set")
return self._handleref.tag + str(self.id)
def __unicode__(self):
if not hasattr(self, "name"):
name = self.__class__.__name__
else:
name = self.name
return name + "-" + self.handle
def delete(self, hard=False):
"""
Override the vanilla delete functionality to soft-delete
instead. Soft-delete is accomplished by setting the
status field to "deleted"
Arguments:
hard <bool=False> if true, do a hard delete instead, effectively
removing the object from the database
"""
if hard:
return models.Model.delete(self)
self.status = "deleted"
self.save()
for key in self._handleref.delete_cascade:
q = getattr(self, key).all()
if not hard:
# if we are soft deleting only trigger delete on
# objects that are not already deleted, as to avoid
# unnecessary re-saves and overriding of updated dates
q = q.exclude(status="deleted")
for child in q:
child.delete(hard=hard)
|
class HandleRefModel(models.Model, metaclass=HandleRefMeta):
'''
Provides timestamps for creation and change times,
versioning (using django-reversion) as well as
the ability to soft-delete
'''
class Meta:
@property
def ref_tag(self):
pass
@property
def handle(self):
pass
def __unicode__(self):
pass
def delete(self, hard=False):
'''
Override the vanilla delete functionality to soft-delete
instead. Soft-delete is accomplished by setting the
status field to "deleted"
Arguments:
hard <bool=False> if true, do a hard delete instead, effectively
removing the object from the database
'''
pass
| 8 | 2 | 10 | 1 | 6 | 3 | 3 | 0.38 | 2 | 2 | 0 | 4 | 4 | 0 | 4 | 5 | 70 | 12 | 42 | 21 | 34 | 16 | 36 | 19 | 30 | 5 | 2 | 2 | 11 |
264 |
20c/django-handleref
|
tests/reversion_models.py
|
tests.reversion_models.VersionedOrg
|
class VersionedOrg(HandleRefModel):
name = models.CharField(max_length=255, unique=True)
website = models.URLField(blank=True)
notes = models.TextField(blank=True)
class HandleRef:
tag = "org"
delete_cascade = ["sub_entities"]
def __unicode__(self):
return self.name
|
class VersionedOrg(HandleRefModel):
class HandleRef:
def __unicode__(self):
pass
| 3 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 6 | 11 | 2 | 9 | 8 | 6 | 0 | 9 | 8 | 6 | 1 | 3 | 0 | 1 |
265 |
20c/django-handleref
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/20c_django-handleref/tests/models.py
|
tests.models.Widget.HandleRef
|
class HandleRef:
custom_option = "passthrough"
|
class HandleRef:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 0 | 0 | 0 |
266 |
20c/facsimile
|
20c_facsimile/tests/venv0.py
|
venv0.Venv0
|
class Venv0(util.ProjBase, VirtualEnv):
name = 'venv0'
# helpers for tests
TEST_CURVER='0.4.0'
def deploy(self):
self.status_msg("deploying...")
|
class Venv0(util.ProjBase, VirtualEnv):
def deploy(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0.2 | 2 | 0 | 0 | 0 | 1 | 0 | 1 | 79 | 8 | 2 | 5 | 4 | 3 | 1 | 5 | 4 | 3 | 1 | 4 | 0 | 1 |
267 |
20c/facsimile
|
20c_facsimile/facsimile/base.py
|
facsimile.base.FileCompare
|
class FileCompare(object):
def __init__(self, base):
self.base = base
self.files = set()
def add_file(self, filename):
self.files.add(filename)
def check(self, targets, user, host):
cf = self.base.defined["deploy"]
args = ['ssh', '%s@%s' % (
user, host), "sh -c 'for file in %s ; do if [ -f $file ] ; then echo $file ; fi ; done'" % (' '.join(self.files))]
rv, out, err = self.base.call_get_output(args)
if rv or err:
raise RuntimeError(
"could not check remote files for host %s. rv %d err %s" % (host, rv, err))
has = set()
for filename in out.split('\n'):
if filename:
has.add(filename)
ignore_set = set()
if 'libcheck_ignorelist' in cf:
ign = cf['libcheck_ignorelist']
for target in targets:
if target in ign:
for fn in ign[target]:
ignore_set.add(fn)
missing = self.files - has - ignore_set
if missing:
for missed in missing:
print('libcheck FAILURE for presence of %s on %s' % (missed, host))
return False
else:
print('libcheck ok for %s' % host)
return True
|
class FileCompare(object):
def __init__(self, base):
pass
def add_file(self, filename):
pass
def check(self, targets, user, host):
pass
| 4 | 0 | 13 | 2 | 11 | 0 | 4 | 0 | 1 | 2 | 0 | 0 | 3 | 2 | 3 | 3 | 42 | 9 | 33 | 17 | 29 | 0 | 30 | 17 | 26 | 10 | 1 | 4 | 12 |
268 |
20c/facsimile
|
20c_facsimile/facsimile/base.py
|
facsimile.base.VirtualEnv
|
class VirtualEnv(Facsimile):
"""
virtualenv from repo
expects requirements.txt in basedir
"""
name = 'virtualenv'
def build(self):
self.status_msg("building...")
self.make_virtualenv()
def install(self):
self.rmdir(self.deploy_dir)
self.mkdir(self.deploy_dir)
self.write_version()
self.copy_virtualenv()
return True
|
class VirtualEnv(Facsimile):
'''
virtualenv from repo
expects requirements.txt in basedir
'''
def build(self):
pass
def install(self):
pass
| 3 | 1 | 5 | 1 | 5 | 0 | 1 | 0.36 | 1 | 0 | 0 | 1 | 2 | 0 | 2 | 77 | 19 | 4 | 11 | 4 | 8 | 4 | 11 | 4 | 8 | 1 | 3 | 0 | 2 |
269 |
20c/facsimile
|
20c_facsimile/facsimile/definition.py
|
facsimile.definition.Definition
|
class Definition(collections.MutableMapping):
""" Package and instance definition only, no state info """
def __init__(self, fax):
self.data_ext = None
self._fax = fax
self.__modules = []
self.__def = {}
def _load(self):
fax = self._fax
for each in self.def_file_list():
if not self.data_ext:
self.data_ext = os.path.splitext(each[1])[1].lstrip('.')
self.add_definition(*each)
# TODO - remove from legacy
if not self.__modules:
self.__modules = self._fax.load_datafile("modules", default=[])
for each in self._fax.find_datafile("install", os.path.join(fax.src_dir, "config")):
self.add_definition(*each)
def add_definition(self, codec, def_file):
self._fax.debug_msg("adding definition %s" % def_file)
data = open(def_file).read()
homedir = os.getenv('HOME')
user = os.getenv('USER')
data = data.replace('%HOMEDIR%', homedir)
# tmp til proper tmpl render
data = data.replace('{{environ.HOME}}', homedir)
data = data.replace('{{environ.USER}}', user)
util.recursive_update(self.__def, codec().loads(data), merge_lists=False)
#print "LOADED: [%s]" % def_file, self.__def
def def_list(self):
fax = self._fax
return ['facsimile', self._fax.name] + fax.release_environment.split('.')
def def_file_list(self):
define_dir = self._fax.define_dir
# make sure we're not loading before source checkout
# this should go away once bootstrapping is in
if not os.path.exists(define_dir):
raise IOError("define dir '%s' does not exist" % (define_dir,))
rv = []
for each in self.def_list():
files = self._fax.find_datafile(each, define_dir)
rv.extend(files)
if not rv:
raise IOError("unable to open definition file '%s' in %s" % (each, define_dir))
return rv
@property
def data(self):
if not self.__def:
self._load()
return self.__def
def modules(self):
if not self.__modules:
self.__modules = self._fax.load_datafile("modules", default=[])
return self.__modules
def __getitem__(self, key):
if not self.__def:
self._load()
return self.__def[key]
def __setitem__(self, key, value):
raise TypeError("Definition is immutable")
def __delitem__(self, key):
raise TypeError("Definition is immutable")
def __iter__(self):
if not self.__def:
self._load()
return iter(self.__def)
def __len__(self):
if not self.__def:
self._load()
return len(self.__def)
|
class Definition(collections.MutableMapping):
''' Package and instance definition only, no state info '''
def __init__(self, fax):
pass
def _load(self):
pass
def add_definition(self, codec, def_file):
pass
def def_list(self):
pass
def def_file_list(self):
pass
@property
def data(self):
pass
def modules(self):
pass
def __getitem__(self, key):
pass
def __setitem__(self, key, value):
pass
def __delitem__(self, key):
pass
def __iter__(self):
pass
def __len__(self):
pass
| 14 | 1 | 6 | 1 | 5 | 0 | 2 | 0.09 | 1 | 1 | 0 | 0 | 12 | 4 | 12 | 12 | 93 | 23 | 64 | 28 | 50 | 6 | 63 | 27 | 50 | 5 | 1 | 2 | 24 |
270 |
20c/facsimile
|
20c_facsimile/facsimile/base.py
|
facsimile.base.Facsimile
|
class Facsimile(Base):
"""
Generic Facimile definition
"""
name = 'facsimile'
stages = ["checkout", "build", "install", "deploy"]
def checkout(self):
if not hasattr(self, 'repo'):
raise Exception("repo not defined")
if isinstance(self.repo, str):
self.debug_msg("checking out %s" % (self.repo,))
self.git_checkout(self.repo)
return True
# ensure current is loaded
self.defined._load()
# check for checked out def file, switch define dir and load
define_dir = os.path.join(self.src_dir, 'facsimile')
if not self.find_datafile("facsimile", define_dir):
return True
self.define_dir = define_dir
# load from new path
self.defined._load()
return True
def build(self):
pass
def install(self):
install = self.defined.get('install', None)
if not install:
return
self.rmdir(self.deploy_dir)
self.mkdir(self.deploy_dir)
# pprint.pprint(self.state.tree())
for grp in install['groups']:
if grp.get('type', None) == 'tmpl':
self._install._package_group(grp, self.release_dir)
else:
self._install._package_group(grp, self.deploy_dir)
self.write_version()
return True
def deploy(self):
self.status_msg("deploying...")
# if not self.targets:
self.deploy_to(self.targets)
|
class Facsimile(Base):
'''
Generic Facimile definition
'''
def checkout(self):
pass
def build(self):
pass
def install(self):
pass
def deploy(self):
pass
| 5 | 1 | 11 | 2 | 8 | 1 | 3 | 0.23 | 1 | 2 | 0 | 2 | 4 | 1 | 4 | 75 | 57 | 14 | 35 | 11 | 30 | 8 | 34 | 11 | 29 | 4 | 2 | 2 | 10 |
271 |
20c/facsimile
|
20c_facsimile/facsimile/state.py
|
facsimile.state.State
|
class State(object):
"""
Instance state info
definition overlay in key 'definition'
"""
def __init__(self, fax):
self._fax = fax
self.state_dir = fax.state_dir
self.defined = fax.defined
self.instances_file = os.path.join(self.state_dir, "instances.json")
self.state_file = os.path.join(self.state_dir, 'state.' + self._fax.write_codec.extension)
self._load()
def _load(self):
self._fax.debug_msg('looking for state dir %s' % self.state_dir)
if not os.path.exists(self.state_dir):
os.mkdir(self.state_dir)
self.__state = self._fax.load_datafile("state", self.state_dir, default={})
self.__state.setdefault('passwd', {})
instances = self.__state.get('instances', {})
instances.setdefault("inmap", dict())
instances.setdefault("uiidmap", dict())
envconf = self.defined.get("config", {})
envsvc = self.defined.get("service", {})
envmods = self.defined.get("modules", {})
# mangle modules as needed
modmap = {}
# print 'START OF DEFINED'
# pprint.pprint(self.defined.modules())
# print 'END OF DEFINED'
defined_modules = self.defined.modules()
fresh_modules = []
for each in defined_modules:
if "daemon" in each:
k = each["daemon"]
each["username"] = each["daemon"]
else:
k = each["name"]
each["username"] = each["name"]
# need to append d to name if not set
if "service" in each:
if "name" not in each["service"]:
each["service"]["name"] = k + "d"
# password gen step
each["password"] = self.get_passwd(each["username"])
# environment service overlay, needs to happen before we clone the
# services & gen uiid and n_instances (below)
if k in envsvc:
if "service" in each:
svc = each["service"]
for section, child in envsvc[k].items():
if section in svc:
util.recursive_update(svc[section], child)
else:
svc[section] = child
else:
each["service"] = envsvc[k]
# environment config overlay
if k in envconf:
if "freeform" in each:
eachff = each["freeform"]
for section, child in envconf[k].items():
if section in eachff:
util.recursive_update(eachff[section], child)
else:
eachff[section] = child
else:
each["freeform"] = envconf[k]
modmap[k] = copy.deepcopy(each)
# want to make more than one instance of a module, which will come
# with instance_name and uiid uniqueness
if "service" in each and "logical_name" in each:
logical_name = each["logical_name"]
n_instances = 1
try:
n_instances = envmods[each["name"]][
"service"]["n_instances"]
print('got %d for %s instance count' % (n_instances, each["name"]))
except KeyError:
try:
print('envmods only had %s for %s...' % (envmods[each["name"]], each['name']))
except KeyError:
print('no envmods for %s' % each['name'])
for instance_number in range(n_instances):
# here is where we want to set up the multiple instances
# out of extra.
instance_name = each["name"] + ('d.%d' % instance_number)
if instance_number != 0:
# ok, here we are adding another effective module to the modules defined; this will create more configs in the end.
# copy the each dictionary, and insert it by the instance_name into the
# defined_modules. we leave the first one the same as it would have been for simplicity of leaving non-sharded/HA systems alone.
# this may end up being overly complex in the end; need
# to revisit later.
print('CLONING instance!')
each = copy.deepcopy(each)
fresh_modules.append(each)
if n_instances > 1:
each["config_name"] = instance_name
if instance_name not in instances["inmap"]:
uiid = max(
[int(ui) for ui in instances["uiidmap"].keys()] + [0]) + 1
assert uiid < 256
each["service"]["uiid"] = str(uiid)
each["service"]["logical_name"] = logical_name
each["service"]["instance_name"] = instance_name
entry = {
"uiid": uiid,
"logical_name": logical_name,
"instance_name": instance_name
}
if instance_name not in instances["inmap"]:
instances["inmap"][instance_name] = entry
if str(uiid) not in instances["uiidmap"]:
instances["uiidmap"][str(uiid)] = entry
else:
each["service"]["uiid"] = str(instances["inmap"][instance_name]['uiid'])
each["service"]["logical_name"] = logical_name
each["service"]["instance_name"] = instance_name
self.__state['instances'] = instances
for mod in fresh_modules:
defined_modules.append(mod)
# merge modules overlay from instance
if "modules" in self.defined:
modmap = util.merge(modmap, self.defined["modules"])
self.__tree = {
"facs": self._fax,
# instance deprecated
"instance": self.defined.data,
# definition deprecated
"definition": self.defined.data,
"env": self.defined.data,
"module": modmap
}
# auto save for now
self.write()
def passwd(self):
return self.__state.get('passwd')
def get_passwd(self, name):
return self.__state['passwd'].setdefault(name, str(uuid.uuid4()))
def instances(self):
return self.__state.get('instances')
def tree(self):
return self.__tree
@property
def state(self):
return self.__state
def write(self):
""" write all needed state info to filesystem """
dumped = self._fax.codec.dump(self.__state, open(self.state_file, 'w'))
|
class State(object):
'''
Instance state info
definition overlay in key 'definition'
'''
def __init__(self, fax):
pass
def _load(self):
pass
def passwd(self):
pass
def get_passwd(self, name):
pass
def instances(self):
pass
def tree(self):
pass
@property
def state(self):
pass
def write(self):
''' write all needed state info to filesystem '''
pass
| 10 | 2 | 21 | 4 | 15 | 3 | 4 | 0.22 | 1 | 5 | 0 | 0 | 8 | 7 | 8 | 8 | 187 | 39 | 121 | 37 | 111 | 27 | 102 | 36 | 93 | 25 | 1 | 5 | 32 |
272 |
20c/facsimile
|
20c_facsimile/tests/proj0.py
|
proj0.Proj0
|
class Proj0(util.ProjBase):
name = 'proj0'
# helpers for tests
TEST_CURVER='0.6.0'
def build(self):
self.status_msg("building...")
def deploy(self):
self.status_msg("deploying...")
|
class Proj0(util.ProjBase):
def build(self):
pass
def deploy(self):
pass
| 3 | 0 | 2 | 0 | 2 | 0 | 1 | 0.14 | 1 | 0 | 0 | 0 | 2 | 0 | 2 | 78 | 11 | 3 | 7 | 5 | 4 | 1 | 7 | 5 | 4 | 1 | 4 | 0 | 2 |
273 |
20c/facsimile
|
20c_facsimile/tests/test_base.py
|
test_base.Empty
|
class Empty(Base):
name='empty'
release_environment='instance'
pass
|
class Empty(Base):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 71 | 4 | 0 | 4 | 3 | 3 | 0 | 4 | 3 | 3 | 0 | 2 | 0 | 0 |
274 |
20c/facsimile
|
20c_facsimile/facsimile/install.py
|
facsimile.install.Install
|
class Install(object):
def __init__(self, fax):
self._fax = fax
def _package_group(self, grp, dst_dir):
"""
"""
processed=[]
# perform every call in case paths have changed
# TODO replace with config render
# pre codec.load will fix any special char issues
tr_list = [
('$SRC_DIR$', self.os_path_transform(self._fax.src_dir)),
('$BUILD_DIR$', self.os_path_transform(self._fax.build_dirname)),
('$DEPLOY_DIR$', self.os_path_transform(self._fax.deploy_dir)),
('$CONFIGURATION_NAME$', self._fax.configuration_name)
]
additional = []
typ = grp.get('type', 'copy')
if typ == 'tmpl':
Tmpl = twentyc.tmpl.get_engine(self._fax.tmpl_engine)
# FIXME - tmpl bug - tmpl = Tmpl(out_dir=self.release_dir, env=self.state.tree())
for tmpl_dir in self._fax.tmpl_path(grp.get('dir')):
tmpl = Tmpl(tmpl_dir=tmpl_dir, out_dir=dst_dir, env=self._fax.state.tree())
# FIXME tmpl bug, render_walk doesn't return list
#processed.extend(tmpl.render_walk(skip=grp.get('skip', None)))
rendered = tmpl.render_walk(skip=grp.get('skip', None))
self._fax.debug_msg("tmpl rendered %s" % str(rendered))
return
if typ != 'copy':
raise ValueError("invalid typ %s" % (typ))
if 'optfiles' in grp:
additional = self.transform(tr_list, [self.os_path_transform(f) for f in grp['optfiles']])
additional = [src for src in additional if os.path.exists(src)]
if 'files' in grp:
files = self.transform(tr_list, [self.os_path_transform(f) for f in grp['files']])
if 'pattern' in grp:
mangle = re.compile(self.transform(tr_list, self.os_path_transform(grp['pattern'])))
for src in files + additional:
dst = self.resolve_dst(dst_dir, mangle.sub(grp['replace'], src))
if not os.path.exists(os.path.dirname(dst)):
os.makedirs(os.path.dirname(dst))
self._fax.cp(src, dst)
else:
for src in files + additional:
dst = self.resolve_dst(dst_dir, src)
if not os.path.exists(os.path.dirname(dst)):
os.makedirs(os.path.dirname(dst))
self._fax.cp(src, dst)
if 'dir' in grp:
src = self.transform(tr_list, grp['dir'])
if 'pattern' in grp:
self._fax.debug_msg("setting mangle to " + grp['pattern'])
mangle = re.compile(self.transform(tr_list, grp['pattern']))
else:
mangle = None
for root, dirs, files in os.walk(src):
self._fax.debug_msg("got root dir %s" % (root))
for name in dirs:
dir = os.path.join(root, name)
if mangle:
dir = mangle.sub(grp['replace'], dir)
dir = os.path.join(dst_dir, dir)
self._fax.mkdir(dir)
for name in files:
srcfile = os.path.join(root, name)
if mangle:
dstfile = os.path.join(
dst_dir, mangle.sub(grp['replace'], srcfile))
else:
dstfile = os.path.join(dst_dir, srcfile)
self._fax.debug_msg("dstfile is " + dstfile)
self._fax.cp(srcfile, dstfile)
def os_path_transform(self, s, sep=os.path.sep):
""" transforms any os path into unix style """
if sep == '/':
return s
else:
return s.replace(sep, '/')
def transform(self, tr_list, files):
"""
replaces $tokens$ with values
will be replaced with config rendering
"""
singular = False
if not isinstance(files, list) and not isinstance(files, tuple):
singular = True
files = [files]
for _find, _replace in tr_list:
files = [opt.replace(_find, _replace) for opt in files]
if singular:
return files[0]
else:
return files
def resolve_dir(self, dir):
"""
gets the actual dir for source files
relative to FIXME unless absolute path (starts with '/')
// templates relative to define_dir unless abs or ./ ../
templates would be relative to cwd if ./ ../
"""
pass
def resolve_dst(self, dst_dir, src):
"""
finds the destination based on source
if source is an absolute path, and there's no pattern, it copies the file to base dst_dir
"""
if os.path.isabs(src):
return os.path.join(dst_dir, os.path.basename(src))
return os.path.join(dst_dir, src)
|
class Install(object):
def __init__(self, fax):
pass
def _package_group(self, grp, dst_dir):
'''
'''
pass
def os_path_transform(self, s, sep=os.path.sep):
''' transforms any os path into unix style '''
pass
def transform(self, tr_list, files):
'''
replaces $tokens$ with values
will be replaced with config rendering
'''
pass
def resolve_dir(self, dir):
'''
gets the actual dir for source files
relative to FIXME unless absolute path (starts with '/')
// templates relative to define_dir unless abs or ./ ../
templates would be relative to cwd if ./ ../
'''
pass
def resolve_dst(self, dst_dir, src):
'''
finds the destination based on source
if source is an absolute path, and there's no pattern, it copies the file to base dst_dir
'''
pass
| 7 | 5 | 21 | 3 | 14 | 4 | 5 | 0.27 | 1 | 4 | 0 | 0 | 6 | 1 | 6 | 6 | 134 | 25 | 86 | 25 | 79 | 23 | 75 | 25 | 68 | 18 | 1 | 4 | 28 |
275 |
20c/facsimile
|
20c_facsimile/tests/util.py
|
util.ProjBase
|
class ProjBase(Facsimile):
# required for subclasses
required_attrs=('name', 'TEST_CURVER')
def __init__(self, **kwargs):
for attr in self.required_attrs:
if not hasattr(self, attr):
raise Exception("required attr %s not set on object" % attr)
if 'debug' not in kwargs:
kwargs['debug'] = True
if 'clean' not in kwargs:
kwargs['clean'] = True
if 'state_dir' not in kwargs:
kwargs['state_dir'] = os.path.join(top_state_dir, self.name)
self.repo = os.path.join(this_dir, 'data', self.name, 'repo')
self.top_dir = os.path.join(top_parent_dir, 'tmp', self.name)
super(ProjBase, self).__init__(**kwargs)
|
class ProjBase(Facsimile):
def __init__(self, **kwargs):
pass
| 2 | 0 | 16 | 3 | 13 | 0 | 6 | 0.07 | 1 | 2 | 0 | 2 | 1 | 2 | 1 | 76 | 21 | 5 | 15 | 6 | 13 | 1 | 15 | 6 | 13 | 6 | 3 | 2 | 6 |
276 |
20c/grainy
|
src/grainy/core.py
|
src.grainy.core.Applicator
|
class Applicator:
"""
Handles application of permissions to a dataset contained
in a dict
Any data that is not permissioned to be read will be removed
during application of permissions.
"""
def __init__(self, pset: PermissionSet) -> None:
self.pset = pset
self.handlers = {}
def handler(
self,
path: str,
key: Callable | None = None,
explicit: bool = False,
**kwargs: Any,
) -> None:
if not isinstance(path, Namespace):
path = Namespace(path, strip=False)
handler = {"namespace": path, "key": key, "explicit": explicit}
handler.update(**kwargs)
self.handlers[str(path)] = handler
def find_handler(self, path):
handler = None
if path and self.handlers:
namespace = Namespace(path, strip=False)
for _handler in list(self.handlers.values()):
if namespace.match(_handler.get("namespace").keys, partial=False):
handler = _handler
break
return handler
def apply(self, data: dict, path: list[str] | None = None) -> dict:
"""
Apply permissions in this set to the provided data, effectively
removing all keys from it are not permissioned to be viewed
**Arguments**
- data (`dict`)
**Returns**
`dict`: cleaned data
"""
if path is None:
path = []
if not isinstance(data, dict):
return data
def _enumerate(value: dict) -> Iterator:
if isinstance(value, list):
yield from enumerate(value)
elif isinstance(value, dict):
yield from list(value.items())
def _set(container: dict, key: str, value: dict | bool | str) -> None:
if isinstance(container, list):
container.append(value)
else:
container[key] = value
def _apply(
ramap: dict,
value: dict,
status: bool = False,
wc: bool = False,
path: list[str] = [],
) -> dict[str, bool]:
if not isinstance(value, dict) and not isinstance(value, list):
if status:
return value
else:
return None
status = ramap.get("__", status)
handler = None
key_handler = None
if path and self.handlers:
namespace = Namespace(path)
for _handler in list(self.handlers.values()):
if namespace.match(_handler.get("namespace").keys, partial=False):
handler = _handler
key_handler = handler.get("key")
break
if isinstance(value, list):
if not key_handler:
key_handler = list_key_handler
rv = []
else:
rv = {}
for k, v in _enumerate(value):
if key_handler:
k = key_handler(v, k)
k = str(k)
if isinstance(v, dict) or isinstance(v, list):
if k in ramap:
r = _apply(ramap[k], v, status=status, path=path + [k])
if r:
_set(rv, k, r)
elif "*" in ramap:
r = _apply(
ramap["*"], v, status=status, wc=True, path=path + [k]
)
if r:
_set(rv, k, r)
elif status:
_set(rv, k, v)
else:
if k in ramap:
if ramap[k].get("__", True):
_set(rv, k, v)
elif "*" in ramap and ramap["*"].get("__", True):
_set(rv, k, v)
elif status:
_set(rv, k, v)
return rv
# loop through all the handlers that specify the `explicit` arguments
# and temprorarily add deny rules for those to the targeted permissionset
tmpns = {}
for ns, handler in list(self.handlers.items()):
if handler.get("explicit"):
p = self.pset.get_permissions(ns)
if p & const.PERM_READ:
exists = False
for _ns in self.pset.namespaces:
if Namespace(_ns).match(Namespace(ns).keys, partial=False):
exists = True
break
if exists:
continue
tmpns[ns] = p
self.pset[ns] = const.PERM_DENY
# apply permissions
rv = _apply(self.pset.read_access_map, data, path=path)
# remove temporarily added deny rules
for ns, p in list(tmpns.items()):
if p is None:
del self.pset[ns]
else:
self.pset[ns] = p
return rv
|
class Applicator:
'''
Handles application of permissions to a dataset contained
in a dict
Any data that is not permissioned to be read will be removed
during application of permissions.
'''
def __init__(self, pset: PermissionSet) -> None:
pass
def handler(
self,
path: str,
key: Callable | None = None,
explicit: bool = False,
**kwargs: Any,
) -> None:
pass
def find_handler(self, path):
pass
def apply(self, data: dict, path: list[str] | None = None) -> dict:
'''
Apply permissions in this set to the provided data, effectively
removing all keys from it are not permissioned to be viewed
**Arguments**
- data (`dict`)
**Returns**
`dict`: cleaned data
'''
pass
def _enumerate(value: dict) -> Iterator:
pass
def _set(container: dict, key: str, value: dict | bool | str) -> None:
pass
def _apply(
ramap: dict,
value: dict,
status: bool = False,
wc: bool = False,
path: list[str] = [],
) -> dict[str, bool]:
pass
| 8 | 2 | 31 | 4 | 25 | 2 | 6 | 0.16 | 0 | 8 | 2 | 1 | 4 | 2 | 4 | 4 | 158 | 25 | 115 | 39 | 95 | 18 | 91 | 27 | 83 | 20 | 0 | 5 | 43 |
277 |
20c/grainy
|
tests/test_util.py
|
tests.test_util.TestUtils
|
class TestUtils(unittest.TestCase):
def test_int_flags(self):
self.assertEqual(core.int_flags("c"), const.PERM_CREATE)
self.assertEqual(core.int_flags("cr"), const.PERM_CREATE | const.PERM_READ)
self.assertEqual(
core.int_flags("cru"),
const.PERM_CREATE | const.PERM_READ | const.PERM_UPDATE,
)
self.assertEqual(
core.int_flags("crud"),
const.PERM_CREATE | const.PERM_READ | const.PERM_UPDATE | const.PERM_DELETE,
)
|
class TestUtils(unittest.TestCase):
def test_int_flags(self):
pass
| 2 | 0 | 11 | 0 | 11 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 73 | 12 | 0 | 12 | 2 | 10 | 0 | 6 | 2 | 4 | 1 | 2 | 0 | 1 |
278 |
20c/grainy
|
tests/test_class_permissionset.py
|
tests.test_class_permissionset.TestPermissionSet
|
class TestPermissionSet(unittest.TestCase):
def test_init(self):
pset = core.PermissionSet([p1, p2])
self.assertEqual(pset.permissions["a"], p1)
self.assertEqual(pset.permissions["a.b.c"], p2)
pset = core.PermissionSet(pdict)
self.assertEqual(pset.permissions["a"], p1)
self.assertEqual(pset.permissions["a.b.c"], p2)
def test_update_index(self):
pset = core.PermissionSet(pdict)
expected = {
"a": {
"__": 1,
"__implicit": False,
"c": {"__implicit": False, "__": 14},
"b": {
"__": 1,
"__implicit": True,
"c": {"__": 15, "__implicit": False},
"*": {
"__implicit": True,
"__": 1,
"d": {"__implicit": False, "__": 0},
},
},
},
"b": {"__implicit": True, "__": None, "c": {"__implicit": False, "__": 1}},
"k": {
"__": 1,
"__implicit": False,
"x": {"__": 1, "__implicit": True, "y": {"__": 0, "__implicit": False}},
},
"l": {
"__": 1,
"__implicit": False,
"*": {"__": 1, "__implicit": True, "y": {"__": 0, "__implicit": False}},
},
}
self.maxDiff = None
self.assertEqual(pset.index, expected)
def test_contains(self):
pset = core.PermissionSet(pdict)
self.assertIn("a", pset)
self.assertIn("a.b.c", pset)
self.assertNotIn("x", pset)
def test_update(self):
pset = core.PermissionSet(pdict)
pset.update({"x": const.PERM_READ, "z": core.Permission("z", const.PERM_READ)})
self.assertIn("a", pset)
self.assertIn("a.b.c", pset)
self.assertIn("x", pset)
self.assertIn("z", pset)
self.assertEqual(pset.check("x", const.PERM_READ), True)
self.assertEqual(pset.check("z", const.PERM_READ), True)
pset.update({"x": const.PERM_WRITE}, override=False)
self.assertEqual(pset.check("x", const.PERM_READ), True)
self.assertEqual(pset.check("x", const.PERM_WRITE), False)
def test_setitem_delitem(self):
pset = core.PermissionSet()
pset["a"] = const.PERM_READ
pset["a.b"] = const.PERM_RW
pset["b"] = const.PERM_READ
self.assertEqual(pset.permissions["a"].check(const.PERM_READ), True)
self.assertEqual(pset.permissions["a.b"].check(const.PERM_WRITE), True)
self.assertEqual(pset.permissions["b"].check(const.PERM_READ), True)
pset["a.b"] = const.PERM_READ
self.assertEqual(pset.permissions["a.b"].check(const.PERM_WRITE), False)
del pset["b"]
self.assertNotIn("b", pset)
def test_check(self):
pset = core.PermissionSet(pdict2)
self.assertEqual(pset.check("a.b", const.PERM_READ), True)
self.assertEqual(pset.check("a.b.c", const.PERM_WRITE), True)
self.assertEqual(pset.check("a.b.d", const.PERM_READ), True)
self.assertEqual(pset.check("a.b.c.d", const.PERM_READ), False)
self.assertEqual(pset.check("e.f", const.PERM_READ), True)
self.assertEqual(pset.check("e", const.PERM_READ), False)
self.assertEqual(pset.check("e.j.g", const.PERM_WRITE), True)
self.assertEqual(pset.check("e.k.g.a", const.PERM_WRITE), False)
self.assertEqual(pset.check("e.h.g", const.PERM_READ), False)
self.assertEqual(pset.check("e.h.g.a", const.PERM_WRITE), False)
self.assertEqual(pset.check("e.m.g.a", const.PERM_WRITE), False)
self.assertEqual(pset.check("e.m.g.b", const.PERM_RW), True)
self.assertEqual(pset.check("f", const.PERM_WRITE), False)
self.assertEqual(pset.check("f.g", const.PERM_READ), True)
pset = core.PermissionSet(pdict6)
self.assertEqual(pset.check("a.b.c", const.PERM_WRITE), True)
self.assertEqual(pset.check("a.b.c.d", const.PERM_WRITE), True)
pset = core.PermissionSet(pdict7)
self.assertEqual(pset.check("a.b.c", const.PERM_WRITE), True)
self.assertEqual(pset.check("a.b.c.d", const.PERM_WRITE), True)
pset = core.PermissionSet(pdict8)
self.assertEqual(pset.check("a.b.20525.d", const.PERM_CREATE), True)
self.assertEqual(pset.check("a.b.20525.d.*", const.PERM_CREATE), True)
self.assertEqual(pset.check("a.b.20525.d.1234.f.g", const.PERM_READ), True)
pset = core.PermissionSet(pdict9)
self.assertEqual(
pset.check("a.b.10356.d.20.e.private", const.PERM_READ, explicit=True),
False,
)
pset = core.PermissionSet(pdict10)
self.assertEqual(
pset.check("a.b.10356.d.20.e.private", const.PERM_READ, explicit=True),
False,
)
pset = core.PermissionSet(pdict11)
assert pset.check("a.b.10356.x.2966", const.PERM_CREATE)
assert pset.check("a.b.10356.x.2966.i.private", const.PERM_READ, explicit=True)
pset = core.PermissionSet(pdict12)
assert pset.check("a.5", const.PERM_CREATE)
pset = core.PermissionSet(pdict13)
pset.debug = True
assert pset.check("a.5", const.PERM_CREATE)
def test_check_any(self):
pset = core.PermissionSet(pdict5)
assert pset.check("a.b.?", const.PERM_READ) is True
assert pset.check("a.b.?", const.PERM_WRITE) is True
assert pset.check("a.?.?", const.PERM_READ) is True
assert pset.check("a.?.?", const.PERM_WRITE) is True
assert pset.check("a.?.c", const.PERM_READ) is True
assert pset.check("a.?.c", const.PERM_WRITE) is False
assert pset.check("a.?.d", const.PERM_WRITE) is True
assert pset.check("x.b.?", const.PERM_READ) is True
assert pset.check("x.b.?", const.PERM_WRITE) is True
assert pset.check("x.?.?", const.PERM_READ) is True
assert pset.check("x.?.?", const.PERM_WRITE) is True
assert pset.check("x.?.z", const.PERM_READ) is True
assert pset.check("x.?.z", const.PERM_WRITE) is False
assert pset.check("x.?.x", const.PERM_WRITE) is True
assert pset.check("?.?.?", const.PERM_READ) is True
assert pset.check("?.?.?", const.PERM_WRITE) is True
assert pset.check("a.?", const.PERM_READ) is False
assert pset.check("?.s", const.PERM_READ) is True
def test_check_explicit(self):
pset = core.PermissionSet(pdict)
self.assertEqual(pset.check("a.b", const.PERM_READ, explicit=True), False)
self.assertEqual(pset.check("a", const.PERM_READ, explicit=True), True)
self.assertEqual(pset.check("a", const.PERM_WRITE, explicit=True), False)
self.assertEqual(pset.check("a.b.c", const.PERM_WRITE, explicit=True), True)
self.assertEqual(pset.check("a.b.c", const.PERM_READ, explicit=True), True)
def test_apply(self):
pset = core.PermissionSet(pdict2)
data = {
"a": {"b": {"c": {"A": True}, "d": {"A": True}, "e": {"A": False}}},
"f": {"g": True},
}
expected = {"a": {"b": {"c": {"A": True}, "d": {"A": True}}}, "f": {"g": True}}
rv = pset.apply(data)
self.assertEqual(rv, expected)
def test_apply_explicit(self):
pset = core.PermissionSet(pdict)
data = {
"a": {
"b": {
"c": True,
"d": False,
"e": True,
"f": {"something": "else"},
"g": {"nested": {"something": "else"}, "test": True},
}
},
"k": {"a": {"nested": {"something": "else"}, "test": True}},
}
expected = {
"a": {"b": {"c": True, "e": True, "g": {"test": True}}},
"k": {"a": {"test": True}},
}
applicator = core.Applicator(pset)
applicator.handler("a.b.d", explicit=True)
applicator.handler("a.b.f", explicit=True)
applicator.handler("k.a.nested", explicit=True)
applicator.handler("a.b.*.nested", explicit=True)
rv = pset.apply(data, applicator=applicator)
self.assertEqual(rv, expected)
expected = {
"a": {
"b": {
"c": True,
"d": False,
"e": True,
"f": {"something": "else"},
"g": {"nested": {"something": "else"}, "test": True},
}
},
"k": {"a": {"nested": {"something": "else"}, "test": True}},
}
pset["a.b.d"] = const.PERM_READ
pset["a.b.f"] = const.PERM_READ
pset["k.a.nested"] = const.PERM_READ
pset["a.b.g.nested"] = const.PERM_READ
rv = pset.apply(data, applicator=applicator)
self.assertEqual(rv, expected)
def test_apply_nested_lists(self):
pset = core.PermissionSet(pdict4)
data = {
"a": [{"id": "b"}, {"id": "c"}],
"x": [{"custom": "y"}, {"custom": "z"}],
"nested": [
{
"data": [
{
"level": "public",
"some": "data",
"explicit": {"sekret": "data"},
},
{"level": "private", "sekret": "data"},
]
}
],
}
expected = {
"a": [{"id": "b"}],
"nested": [{"data": [{"level": "public", "some": "data"}]}],
"x": [{"custom": "y"}],
}
applicator = core.Applicator(pset)
applicator.handler("x", key=lambda row, idx: row["custom"])
applicator.handler("nested.*.data", key=lambda row, idx: row["level"])
applicator.handler("nested.*.data.public.explicit", explicit=True)
rv = pset.apply(data, applicator=applicator)
self.assertEqual(rv, expected)
|
class TestPermissionSet(unittest.TestCase):
def test_init(self):
pass
def test_update_index(self):
pass
def test_contains(self):
pass
def test_update_index(self):
pass
def test_setitem_delitem(self):
pass
def test_check(self):
pass
def test_check_any(self):
pass
def test_check_explicit(self):
pass
def test_apply(self):
pass
def test_apply_explicit(self):
pass
def test_apply_nested_lists(self):
pass
| 12 | 0 | 23 | 3 | 20 | 0 | 1 | 0 | 1 | 3 | 3 | 0 | 11 | 1 | 11 | 83 | 266 | 45 | 221 | 36 | 209 | 0 | 141 | 36 | 129 | 1 | 2 | 0 | 11 |
279 |
20c/grainy
|
tests/test_class_permission.py
|
tests.test_class_permission.TestPermission
|
class TestPermission(unittest.TestCase):
def test_init(self):
perm = core.Permission(test_ns, const.PERM_RW)
self.assertEqual(perm.value, const.PERM_RW)
self.assertEqual(perm.namespace, test_ns)
perm = core.Permission("a.b.c", const.PERM_RW)
self.assertEqual(isinstance(perm.namespace, core.Namespace), True)
def test_has_value(self):
perm = core.Permission(test_ns, const.PERM_RW)
self.assertEqual(perm.has_value(), True)
perm = core.Permission(test_ns, const.PERM_DENY)
self.assertEqual(perm.has_value(), True)
perm = core.Permission(test_ns, None)
self.assertEqual(perm.has_value(), False)
def test_check(self):
perm = core.Permission(test_ns, const.PERM_RW)
self.assertEqual(perm.check(const.PERM_READ), True)
self.assertEqual(perm.check(const.PERM_WRITE), True)
perm = core.Permission(test_ns, const.PERM_READ)
self.assertEqual(perm.check(const.PERM_READ), True)
self.assertEqual(perm.check(const.PERM_WRITE), False)
perm = core.Permission(test_ns, const.PERM_DENY)
self.assertEqual(perm.check(const.PERM_READ), False)
self.assertEqual(perm.check(const.PERM_WRITE), False)
|
class TestPermission(unittest.TestCase):
def test_init(self):
pass
def test_has_value(self):
pass
def test_check(self):
pass
| 4 | 0 | 9 | 2 | 8 | 0 | 1 | 0 | 1 | 2 | 2 | 0 | 3 | 0 | 3 | 75 | 31 | 7 | 24 | 7 | 20 | 0 | 24 | 7 | 20 | 1 | 2 | 0 | 3 |
280 |
20c/grainy
|
tests/test_class_namespace.py
|
tests.test_class_namespace.TestNamespace
|
class TestNamespace(unittest.TestCase):
def test_init(self):
ns = core.Namespace("a.b.c")
self.assertEqual(ns.value, "a.b.c")
ns = core.Namespace("a.b.*")
self.assertEqual(ns.value, "a.b")
ns = core.Namespace(["a", "b", "c"])
self.assertEqual(ns.value, "a.b.c")
ns = core.Namespace(["a", "b", 1])
self.assertEqual(ns.value, "a.b.1")
def test_append(self):
a = core.Namespace("a.b")
b = core.Namespace("c.d")
c = core.Namespace("x.y")
self.assertEqual((a + b).keys, ["a", "b", "c", "d"])
c += b
self.assertEqual(c.keys, ["x", "y", "c", "d"])
def test_iter(self):
ns = core.Namespace("a.b.c")
self.assertEqual([k for k in ns], ["a", "b", "c"])
def test_container(self):
ns = core.Namespace("a.b.c")
container, tail = ns.container()
self.assertEqual(container, {"a": {"b": {"c": {}}}})
self.assertEqual(tail, {})
container, tail = ns.container({"d": 123})
self.assertEqual(container, {"a": {"b": {"c": {"d": 123}}}})
self.assertEqual(tail, {"d": 123})
def test_match(self):
ns = core.Namespace("a.b.c")
self.assertEqual(ns.match(["a", "b"]), True)
self.assertEqual(ns.match(["a"]), True)
self.assertEqual(ns.match(["a", "*"]), True)
self.assertEqual(ns.match(["a", "*", "c"]), True)
self.assertEqual(ns.match(["a", "b", "c"]), True)
self.assertEqual(ns.match(["a", "*", "c", "d"]), False)
self.assertEqual(ns.match(["a", "b", "c", "d"]), False)
self.assertEqual(ns.match(["b"]), False)
self.assertEqual(ns.match(["a", "c"]), False)
|
class TestNamespace(unittest.TestCase):
def test_init(self):
pass
def test_append(self):
pass
def test_iter(self):
pass
def test_container(self):
pass
def test_match(self):
pass
| 6 | 0 | 9 | 1 | 8 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 5 | 0 | 5 | 77 | 50 | 11 | 39 | 14 | 33 | 0 | 39 | 14 | 33 | 1 | 2 | 0 | 5 |
281 |
20c/grainy
|
src/grainy/core.py
|
src.grainy.core.PermissionSet
|
class PermissionSet:
"""
Holds a set of Namespaces and permissions to run permission checks
on
Can also be applied to a data dict to remove keys that are not
accessible according the permissions in the set
# Instanced Attributes
- permissions (`dict`): permissions in this set
- index (`dict`): permission index
- read_access_map (`dict`)
"""
def __init__(self, rules: dict[str, int] | list[Permission] | None = None) -> None:
"""
**Keyword Arguments**
- rules (`list<Permission>`|`dict<str,int>`): list of `Permission` objects
or `dict` of `namspace(str)`:`permission(int)` pairs
"""
if rules is None:
rules = []
self.permissions = {}
self.index = {}
self.read_access_map = {}
if type(rules) == list:
for permission in rules:
self.__add__(permission)
elif type(rules) == dict:
for ns, p in list(rules.items()):
self.__add__(Permission(ns, p))
@property
def namespaces(self) -> list[str]:
"""
`list` of all namespaces registered in this permission set
"""
return list(self.permissions.keys())
def __iter__(self):
yield from list(self.permissions.values())
def __contains__(self, item: str) -> bool:
return item in self.permissions
def __radd__(self, other):
self.__add__(other)
def __add__(self, other: Permission) -> None:
if isinstance(other, Permission):
self.permissions[str(other.namespace)] = other
self.update_index()
def __setitem__(
self, key: str, other: Permission | int, reindex: bool = True
) -> None:
if isinstance(other, Permission):
self.permissions[key] = other
elif isinstance(other, int):
self.permissions[key] = Permission(key, other)
else:
raise TypeError(
"Value needs to be a Permission instance or a permission flag"
)
if reindex:
self.update_index()
def __delitem__(self, namespace: str) -> None:
if namespace in self.permissions:
del self.permissions[namespace]
else:
raise KeyError("No permission registered under namespace '%s'" % namespace)
self.update_index()
def update(self, permissions: dict, override: bool = True) -> None:
"""
Update the permissionset with a dict of namespace<str>:permission<Permission|int|long>
pairs
??? note "Examples"
```py
pset.update(
{
"a" : const.PERM_READ,
"b" : Permission("b", const.PERM_RW)
}
)
```
**Arguments**
- permissions (`dict`): dict mapping namespaces (`str`) to permission (`Permission` or `int`)
- override (`bool`=True): if True will override existing namespaces if they exist
"""
for k, v in list(permissions.items()):
if not override and k in self.permissions:
continue
self.__setitem__(k, v, reindex=False)
self.update_index()
def update_index(self) -> dict:
"""
Regenerates the permission index for this set
Called everytime a rule is added / removed / modified in
the set
"""
# update index
idx = {}
for _, p in sorted(list(self.permissions.items()), key=lambda x: str(x[0])):
branch = idx
parent_p = None
for k in p.namespace.keys:
if k not in branch:
branch[k] = {"__": parent_p}
branch[k].update(__implicit=True)
branch = branch[k]
parent_p = branch["__"]
branch["__"] = p.value
branch["__implicit"] = False
self.index = idx
# update read access map
ramap = {}
def update_ramap(branch_idx: dict) -> dict[str, bool]:
r = {"__": False}
for k, v in list(branch_idx.items()):
if k != "__" and k != "__implicit":
r[k] = update_ramap(v)
if (
branch_idx["__"] is not None
and (branch_idx["__"] & const.PERM_READ) != 0
):
r["__"] = True
return r
for k, v in list(idx.items()):
ramap[k] = update_ramap(v)
self.read_access_map = ramap
return self.index
def _check(
self,
keys: list[str],
branch: dict,
flags: int | None = None,
i: int = 0,
explicit: bool = False,
length: int = 0,
) -> tuple[int, int, bool]:
implicit = branch.get("__implicit")
if not length:
length = len(keys)
# debug = getattr(self, "debug", False)
try:
key = keys[i]
except IndexError:
return flags, i, implicit
# permission flag for exact key match
key_flag = None
# permissions for exact key are implied (or not)
key_implicit = True
# position of match in the namespace path for
# exact key match
key_pos = 0
# permission flag for wildcard match
wc_flag = None
# permissions for wildcard match are implied (or not)
wc_implicit = True
# position of match in the namespace path for
# wildcard match
wc_pos = 0
if key in branch:
# proceed to next branch by matching key name
if explicit and branch[key].get("__implicit") and i + 1 >= length:
# explicit match required, but next branch is
# implied so we exit
key_flag, key_pos = None, 0
else:
key_flag, key_pos, key_implicit = self._check(
keys,
branch[key],
flags=branch[key].get("__", flags),
i=i + 1,
explicit=explicit,
length=length,
)
if "*" in branch:
# proceed down wildcard branch
if explicit and branch["*"].get("__implicit") and i + 1 >= length:
# explicit match required, but the next branch is
# implied so we exit
wc_flag, wc_pos = None, 0
else:
wc_flag, wc_pos, wc_implicit = self._check(
keys,
branch["*"],
flags=branch["*"].get("__", flags),
i=i + 1,
explicit=explicit,
length=length,
)
# if debug:
# print("")
# print("KEYS (inner)", keys[:i], "pos", i, "flags", flags, "length", l, "expl", explicit, "impl", implicit)
# print("key", key, "flag", key_flag, "implicit", key_implicit, "pos", key_pos, "wc flag", wc_flag, "wc implicit", wc_implicit, "wc pos", wc_pos)
# explicit namespace match required but not found
if explicit and key_pos == 0 and wc_pos == 0:
return None, i, implicit
# RETURN wildcard path permission PASS-1
# wildcard path produced a permission flag
if wc_flag is not None and (not explicit or not wc_implicit):
# RETURN wildcard path permission PASS-1-CHECK-1
#
# we check if wildcard path length is deeper
# than exact match path length.
if key_pos < wc_pos:
# 1. wildcard permission is not implied or both wildcard
# and exact permission are implied
#
# 2. current branch permission is implied or an explicit
# path is required
if (not wc_implicit or key_implicit) and (implicit or explicit):
return wc_flag, wc_pos, wc_implicit
# RETURN wildcard path permission PASS-1-CHECK-2
#
# 1. exact key path has NOT produced a permission
#
# 2. current branch permission is implied or an explicit
# path is required
if key_flag is None and (implicit or explicit):
return wc_flag, wc_pos, wc_implicit
# RETURN exact path permission PASS-1
# exact key path produced a permission flag
if key_flag is not None and (not explicit or not key_implicit):
# RETURN exact key path permission PASS-1-CHECK-1
#
# if the exact path permission is not implied or the
# current permission is also implied
if not key_implicit or implicit:
return key_flag, key_pos, key_implicit
# RETURN exact key path permission PASS-1-CHECK-2
#
# if there are no flags on the current branch (first match)
if flags is None:
return key_flag, key_pos, key_implicit
# RETURN wildcard path permission PASS-2
# wildcard produced a permission flag, lets check against
# current branch
if wc_flag is not None and (not explicit or not wc_implicit):
# RETURN wildcard path permission PASS-2-CHECK-1
#
# if the wildcard path permission is not implied or the
# current permission is also implied
if not wc_implicit or implicit:
return wc_flag, wc_pos, wc_implicit
# RETURN wildcard path permission PASS-1-CHECK-2
#
# if there are no flags on the current branch (first match)
if flags is None:
return wc_flag, wc_pos, wc_implicit
# following neither wildard nor exact match produced
# a permission flag, return current branch permissions
return flags, i, implicit
def get_permissions(
self, namespace: Namespace | str, explicit: bool = False
) -> int:
"""
Returns the permissions level for the specified namespace
**Arguments**
- namespace (`str`): permissioning namespace
**Keyword Arguments**
- explicit (`bool=False`): require explicitly set permissions to the provided namespace
**Returns**
`int`: permission mask
"""
if not isinstance(namespace, Namespace):
namespace = Namespace(namespace)
keys = namespace.keys
p, pos, implicit = self._check(keys, self.index, explicit=explicit)
if not p or (explicit and implicit) or (explicit and pos != len(keys)):
p = 0
return p
def expandable(self, namespace: Namespace | str) -> bool:
"""
Returns whether or not the submitted namespace is expandable.
An expandable namespace is any namespace that contains "?"
keys.
**Arguments**
- namespace (`str`): permissioning namespace
**Returns**
- `bool`
"""
if not isinstance(namespace, Namespace):
namespace = Namespace(namespace)
return "?" in namespace.keys
def expand(
self,
namespace: Namespace | str,
explicit: bool = False,
index: dict | None = None,
path: list[str] | None = None,
length: int = 0,
exact: bool = False,
) -> list[Namespace]:
"""
Expands "?" parts of a namespace into a list of namespaces
**Arguments**
- namespace (`str`): permissioning namespace
**Returns**
- `list`: list of namespaces
"""
if not isinstance(namespace, Namespace):
namespace = Namespace(namespace)
keys = namespace.keys
if not index:
index = self.index
if not path:
path = []
if not length:
length = len(keys)
token = keys[0]
result = []
for k in list(index.keys()):
if k[0] == "_":
continue
if token == k or token == "?" or k == "*":
if k == "*" and token != "?":
_path = path + [token]
else:
_path = path + [k]
if (len(_path) == length or not exact) and (
index[k]["__"] or not explicit
):
_namespace = Namespace(_path)
if _namespace.value:
result.append(_namespace)
result += [
ns
for ns in self.expand(
keys[1:],
index=index[k],
path=_path,
length=length,
explicit=explicit,
exact=exact,
)
]
return list(set(result))
def check(self, namespace: str, level: int, explicit: bool = False) -> bool:
"""
Checks if the permset has permission to the specified namespace
at the specified level
**Arguments**
- namespace (`str`): permissioning namespace
- level (`int`): permission flag, `PERM_READ` for example
**Keyword Arguments**
- explicit (`bool=False`): require explicitly set permissions to the provided namespace
**Returns**
`bool`: `True` if permissioned `False` if not
"""
if self.expandable(namespace):
for _namespace in self.expand(namespace):
if self.get_permissions(_namespace, explicit=explicit) & level != 0:
return True
return False
return (self.get_permissions(namespace, explicit=explicit) & level) != 0
def apply(
self,
data: dict,
path: Any | None = None,
applicator: Applicator | None = None,
) -> dict:
"""
Apply permissions in this set to the provided data, effectively
removing all keys from it are not permissioned to be viewed
**Arguments**
- data (`dict`)
**Keyword Arguments**
- applicator (`Applicator=None`): allows you to specify the
applicator instance to use. If none is specified an instance
of `Applicator` will be used.
**Returns**
`dict`: cleaned data
"""
if applicator:
applicator.pset = self
else:
applicator = Applicator(self)
return applicator.apply(data, path=path)
|
class PermissionSet:
'''
Holds a set of Namespaces and permissions to run permission checks
on
Can also be applied to a data dict to remove keys that are not
accessible according the permissions in the set
# Instanced Attributes
- permissions (`dict`): permissions in this set
- index (`dict`): permission index
- read_access_map (`dict`)
'''
def __init__(self, rules: dict[str, int] | list[Permission] | None = None) -> None:
'''
**Keyword Arguments**
- rules (`list<Permission>`|`dict<str,int>`): list of `Permission` objects
or `dict` of `namspace(str)`:`permission(int)` pairs
'''
pass
@property
def namespaces(self) -> list[str]:
'''
`list` of all namespaces registered in this permission set
'''
pass
def __iter__(self):
pass
def __contains__(self, item: str) -> bool:
pass
def __radd__(self, other):
pass
def __add__(self, other: Permission) -> None:
pass
def __setitem__(
self, key: str, other: Permission | int, reindex: bool = True
) -> None:
pass
def __delitem__(self, namespace: str) -> None:
pass
def update(self, permissions: dict, override: bool = True) -> None:
'''
Update the permissionset with a dict of namespace<str>:permission<Permission|int|long>
pairs
??? note "Examples"
```py
pset.update(
{
"a" : const.PERM_READ,
"b" : Permission("b", const.PERM_RW)
}
)
```
**Arguments**
- permissions (`dict`): dict mapping namespaces (`str`) to permission (`Permission` or `int`)
- override (`bool`=True): if True will override existing namespaces if they exist
'''
pass
def update_index(self) -> dict:
'''
Regenerates the permission index for this set
Called everytime a rule is added / removed / modified in
the set
'''
pass
def update_ramap(branch_idx: dict) -> dict[str, bool]:
pass
def _check(
self,
keys: list[str],
branch: dict,
flags: int | None = None,
i: int = 0,
explicit: bool = False,
length: int = 0,
) -> tuple[int, int, bool]:
pass
def get_permissions(
self, namespace: Namespace | str, explicit: bool = False
) -> int:
'''
Returns the permissions level for the specified namespace
**Arguments**
- namespace (`str`): permissioning namespace
**Keyword Arguments**
- explicit (`bool=False`): require explicitly set permissions to the provided namespace
**Returns**
`int`: permission mask
'''
pass
def expandable(self, namespace: Namespace | str) -> bool:
'''
Returns whether or not the submitted namespace is expandable.
An expandable namespace is any namespace that contains "?"
keys.
**Arguments**
- namespace (`str`): permissioning namespace
**Returns**
- `bool`
'''
pass
def expandable(self, namespace: Namespace | str) -> bool:
'''
Expands "?" parts of a namespace into a list of namespaces
**Arguments**
- namespace (`str`): permissioning namespace
**Returns**
- `list`: list of namespaces
'''
pass
def check(self, namespace: str, level: int, explicit: bool = False) -> bool:
'''
Checks if the permset has permission to the specified namespace
at the specified level
**Arguments**
- namespace (`str`): permissioning namespace
- level (`int`): permission flag, `PERM_READ` for example
**Keyword Arguments**
- explicit (`bool=False`): require explicitly set permissions to the provided namespace
**Returns**
`bool`: `True` if permissioned `False` if not
'''
pass
def apply(
self,
data: dict,
path: Any | None = None,
applicator: Applicator | None = None,
) -> dict:
'''
Apply permissions in this set to the provided data, effectively
removing all keys from it are not permissioned to be viewed
**Arguments**
- data (`dict`)
**Keyword Arguments**
- applicator (`Applicator=None`): allows you to specify the
applicator instance to use. If none is specified an instance
of `Applicator` will be used.
**Returns**
`dict`: cleaned data
'''
pass
| 19 | 10 | 28 | 6 | 14 | 8 | 4 | 0.66 | 0 | 15 | 3 | 0 | 16 | 3 | 16 | 16 | 498 | 127 | 224 | 76 | 180 | 147 | 159 | 50 | 141 | 18 | 0 | 4 | 70 |
282 |
20c/grainy
|
src/grainy/core.py
|
src.grainy.core.Permission
|
class Permission:
"""
Permission object defined by a namespace and a permission bitmask
# Instanced Attributes
- namespace (`Namespace`)
- value (`int`): permission mask
"""
def __init__(self, namespace: Namespace | str, value: int | None) -> None:
"""
**Arguments**
- namespace (`str`|`Namespace`)
- value (`int`): permission mask
"""
if isinstance(namespace, str):
namespace = Namespace(namespace)
self.namespace = namespace
self.value = value
def __eq__(self, other: Permission) -> bool:
r = other.namespace == self.namespace and other.value == self.value
return r
def has_value(self) -> bool:
"""
Check that value has been set
**Returns**
`bool`: `True` if value has been set, `False` if not
"""
return self.value is not None
def check(self, level: int) -> bool:
"""
Check if permission mask contains the specified
permission level
**Arguments**
- level (`int`): permission flag
**Returns**
`bool`: `True` if flag is contained in mask, `False` if not
"""
if not self.has_value():
return False
return (self.value & level) != 0
|
class Permission:
'''
Permission object defined by a namespace and a permission bitmask
# Instanced Attributes
- namespace (`Namespace`)
- value (`int`): permission mask
'''
def __init__(self, namespace: Namespace | str, value: int | None) -> None:
'''
**Arguments**
- namespace (`str`|`Namespace`)
- value (`int`): permission mask
'''
pass
def __eq__(self, other: Permission) -> bool:
pass
def has_value(self) -> bool:
'''
Check that value has been set
**Returns**
`bool`: `True` if value has been set, `False` if not
'''
pass
def check(self, level: int) -> bool:
'''
Check if permission mask contains the specified
permission level
**Arguments**
- level (`int`): permission flag
**Returns**
`bool`: `True` if flag is contained in mask, `False` if not
'''
pass
| 5 | 4 | 10 | 2 | 4 | 5 | 2 | 1.6 | 0 | 4 | 1 | 0 | 4 | 2 | 4 | 4 | 53 | 14 | 15 | 8 | 10 | 24 | 15 | 8 | 10 | 2 | 0 | 1 | 6 |
283 |
20c/grainy
|
src/grainy/core.py
|
src.grainy.core.NamespaceKeyApplicator
|
class NamespaceKeyApplicator(Applicator):
"""
Applicator that looks for permission namespaces from
a specified field in the dict it is scanning
"""
# field name that holds permission namespace
namespace_key = "_grainy"
# remove the permission namespace field from the
# data during application
remove_namespace_key = True
denied = object()
def apply(self, data: list | dict | Any, **kwargs) -> Any:
if isinstance(data, list):
return self.apply_list(data)
elif isinstance(data, dict):
namespace = data.get(self.namespace_key)
explicit = False
fn = False
handler = self.find_handler(namespace)
if handler:
explicit = handler.get("explicit", False)
fn = handler.get("fn", None)
if fn:
fn(namespace, data)
if namespace and not self.pset.check(namespace, 0x01, explicit=explicit):
return self.denied
elif namespace and self.remove_namespace_key:
del data[self.namespace_key]
return self.apply_dict(data)
return data
def apply_list(self, data: list, **kwargs) -> list:
_data = []
for row in data:
_row = self.apply(row)
if _row != self.denied:
_data.append(_row)
return _data
def apply_dict(self, data: dict, **kwargs) -> dict:
_data = {}
for key, item in data.items():
_item = self.apply(item)
if _item != self.denied:
_data[key] = _item
return _data
|
class NamespaceKeyApplicator(Applicator):
'''
Applicator that looks for permission namespaces from
a specified field in the dict it is scanning
'''
def apply(self, data: list | dict | Any, **kwargs) -> Any:
pass
def apply_list(self, data: list, **kwargs) -> list:
pass
def apply_dict(self, data: dict, **kwargs) -> dict:
pass
| 4 | 1 | 13 | 2 | 11 | 0 | 4 | 0.19 | 1 | 3 | 0 | 0 | 3 | 0 | 3 | 7 | 56 | 12 | 37 | 17 | 33 | 7 | 35 | 17 | 31 | 7 | 1 | 2 | 13 |
284 |
20c/munge
|
src/munge/click.py
|
munge.click.Context
|
class Context:
# TODO replace with full logger config
log_format = "%(message)s"
log_format_debug = "%(name)s %(message)s %(filename)s:%(lineno)d"
log_format_file = "%(asctime)s %(levelname)s %(message)s"
@classmethod
def search_path(cls):
return [
"$%s_HOME" % cls.app_name.upper(),
os.path.join(".", "." + cls.app_name),
os.path.expanduser(os.path.join("~", "." + cls.app_name)),
click.get_app_dir(cls.app_name),
]
@classmethod
def option_list(cls):
return [
dict(
name="--debug",
help="enable extra debug output",
is_flag=True,
default=None,
),
dict(
name="--home",
help="specify the home directory, by default will check in order: "
+ ", ".join(cls.search_path()),
default=None,
),
dict(
name="--verbose",
help="enable more verbose output",
is_flag=True,
default=None,
),
dict(name="--quiet", help="no output at all", is_flag=True, default=None),
]
@classmethod
def options(cls, f):
f = click.option(
"--debug", help="enable extra debug output", is_flag=True, default=None
)(f)
f = click.option(
"--home",
help="specify the home directory, by default will check in order: "
+ ", ".join(cls.search_path()),
default=None,
)(f)
f = click.option(
"--verbose", help="enable more verbose output", is_flag=True, default=None
)(f)
f = click.option(
"--quiet", help="no output at all", is_flag=True, default=None
)(f)
return f
@classmethod
def pop_options(cls, kwargs):
keys = ("debug", "home", "verbose", "quiet")
return {k: kwargs.pop(k, None) for k in keys}
@classmethod
def get_options(cls, kwargs):
"""
deprecated
"""
return cls.pop_options(kwargs)
@classmethod
def pass_context(cls):
return click.make_pass_decorator(cls, ensure=True)
@property
def log(self):
if not getattr(self, "_logger", None):
self._logger = logging.getLogger(self.app_name)
return self._logger
def __init__(self, **kwargs):
self.debug = False
self.quiet = False
self.verbose = False
self.home = None
self.config = None
self.update_options(kwargs)
def update_options(self, kwargs):
opt = self.__class__.get_options(kwargs)
if opt.get("debug", None) is not None:
self.debug = opt["debug"]
if opt.get("verbose", None) is not None:
self.verbose = opt["verbose"]
if opt.get("quiet", None) is not None:
self.quiet = opt["quiet"]
# TODO - probably should warn or error if passing multiple home values in?
if opt.get("home", None) is not None:
# if home is redefined, nuke config and load a new one
self.home = opt["home"]
self.config = self.config_class(read=self.home)
# if no config and home wasn't defined, check search path
elif not self.config:
search_path = []
for path in self.__class__.search_path():
if path.startswith("$"):
if path[1:] not in os.environ:
continue
search_path.append(os.environ[path[1:]])
else:
search_path.append(path)
self.config = self.config_class(try_read=search_path)
if self.config.meta:
self.home = self.config.meta["config_dir"]
self.init()
def init(self):
"""
call after updating options
"""
# remove root logger, so we can reinit
# TODO only remove our own
# TODO move to _init, let overrides use init()
logging.getLogger().handlers = []
if self.debug:
logging.basicConfig(level=logging.DEBUG, format=self.log_format_debug)
elif self.verbose:
logging.basicConfig(level=logging.INFO, format=self.log_format)
elif not self.quiet:
logging.basicConfig(level=logging.ERROR, format=self.log_format)
else:
logging.getLogger().addHandler(logging.NullHandler())
|
class Context:
@classmethod
def search_path(cls):
pass
@classmethod
def option_list(cls):
pass
@classmethod
def options(cls, f):
pass
@classmethod
def pop_options(cls, kwargs):
pass
@classmethod
def get_options(cls, kwargs):
'''
deprecated
'''
pass
@classmethod
def pass_context(cls):
pass
@property
def log(self):
pass
def __init__(self, **kwargs):
pass
def update_options(self, kwargs):
pass
def init(self):
'''
call after updating options
'''
pass
| 18 | 2 | 12 | 1 | 10 | 1 | 2 | 0.12 | 0 | 2 | 0 | 2 | 4 | 6 | 10 | 10 | 142 | 20 | 109 | 31 | 91 | 13 | 61 | 24 | 50 | 10 | 0 | 4 | 23 |
285 |
20c/munge
|
tests/test_codecs.py
|
test_codecs.Codec
|
class Codec:
def __init__(self, cls):
self.cls = cls
# if not os.path.exists(
def find_file(self, name):
prefix = os.path.join(this_dir, name)
for ext in self.cls.extensions:
fq_name = f"{prefix}.{ext}"
print("checking", fq_name)
if os.path.exists(fq_name):
return fq_name
def open_file(self, name, *args, **kwargs):
return open(self.find_file(name), *args, **kwargs)
|
class Codec:
def __init__(self, cls):
pass
def find_file(self, name):
pass
def open_file(self, name, *args, **kwargs):
pass
| 4 | 0 | 4 | 0 | 4 | 0 | 2 | 0.08 | 0 | 0 | 0 | 0 | 3 | 1 | 3 | 3 | 15 | 2 | 12 | 8 | 8 | 1 | 12 | 8 | 8 | 3 | 0 | 2 | 5 |
286 |
20c/munge
|
src/munge/codec/json.py
|
munge.codec.json.Json
|
class Json(CodecBase):
supports_dict = True
supports_list = True
extensions = ["json"]
__kwargs = {}
def set_type(self, name, typ):
if name == "dict":
self.__kwargs["object_pairs_hook"] = typ
def load(self, fobj, **kwargs):
return json.load(fobj, **self.__kwargs)
def loads(self, input_string, **kwargs):
return json.loads(input_string, **self.__kwargs)
def dump(self, data, fobj, **kwargs):
return json.dump(data, fobj, **kwargs)
def dumps(self, data):
return json.dumps(data)
|
class Json(CodecBase):
def set_type(self, name, typ):
pass
def load(self, fobj, **kwargs):
pass
def loads(self, input_string, **kwargs):
pass
def dump(self, data, fobj, **kwargs):
pass
def dumps(self, data):
pass
| 6 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 5 | 0 | 5 | 26 | 22 | 6 | 16 | 10 | 10 | 0 | 16 | 10 | 10 | 2 | 4 | 1 | 6 |
287 |
20c/munge
|
src/munge/codec/mysql.py
|
munge.codec.mysql.MysqlEndpoint
|
class MysqlEndpoint:
def __init__(self, cxn, database, table):
self.database = database
self.table = table
self.cxn = cxn
|
class MysqlEndpoint:
def __init__(self, cxn, database, table):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 3 | 1 | 1 | 5 | 0 | 5 | 5 | 3 | 0 | 5 | 5 | 3 | 1 | 0 | 0 | 1 |
288 |
20c/munge
|
src/munge/config.py
|
munge.config.Config
|
class Config(MutableMapping):
"""
class for storing and manipulating data for config files
"""
# internal base for defaults
_base_defaults = {
"config": {},
# directory to look for config in
"config_dir": None,
# name of config file
"config_name": "config",
"codec": None,
"autowrite": False,
"validate": False,
}
def __init__(self, **kwargs):
"""
accepts kwargs to set defaults
data=dict to set initial data
read=dir to open a dir
try_read=dir to try to open a dir (and not throw if it doesn't read)
"""
# use derived class defaults if available
if hasattr(self, "defaults"):
self._defaults = self._base_defaults.copy()
self._defaults.update(self.defaults)
else:
self._defaults = self._base_defaults.copy()
# override anything passed to kwargs
for k, v in list(kwargs.items()):
if k in self._defaults:
self._defaults[k] = v
self.data = kwargs.get("data", self.default())
self._meta_config_dir = ""
if "read" in kwargs:
self.read(kwargs["read"])
if "try_read" in kwargs:
self.try_read(kwargs["try_read"])
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
def __delitem__(self, key):
del self.data[key]
def __iter__(self):
return iter(self.data)
def __len__(self):
return len(self.data)
def copy(self):
rv = self.__class__(data=self.data.copy())
# copy meta
rv._meta_config_dir = self._meta_config_dir
return rv
def get_nested(self, *args):
"""
get a nested value, returns None if path does not exist
"""
data = self.data
for key in args:
if key not in data:
return None
data = data[key]
return data
def default(self):
return copy.deepcopy(self._defaults["config"])
def clear(self):
self.data = self.default()
self._meta_config_dir = None
@property
def meta(self):
if not self._meta_config_dir:
return {}
return {
"config_dir": self._meta_config_dir,
}
def read(self, config_dir=None, config_name=None, clear=False):
"""
read config from config_dir
if config_dir is None, clear to default config
clear will clear to default before reading new file
"""
# TODO should probably allow config_dir to be a list as well
# get name of config directory
if not config_dir:
config_dir = self._defaults.get("config_dir", None)
if not config_dir:
raise KeyError("config_dir not set")
# get name of config file
if not config_name:
config_name = self._defaults.get("config_name", None)
if not config_name:
raise KeyError("config_name not set")
conf_path = os.path.expanduser(config_dir)
if not os.path.exists(conf_path):
raise OSError(f"config dir not found at {conf_path}")
config = munge.load_datafile(config_name, conf_path, default=None)
if not config:
raise OSError(f"config file not found in {conf_path}")
if clear:
self.clear()
munge.util.recursive_update(self.data, config)
self._meta_config_dir = conf_path
return self
def try_read(self, config_dir=None, **kwargs):
"""
try reading without throwing an error
config_dir may be a list of directories to try in order, if so it
will return after the first successful read
other args will be passed direction to read()
"""
if isinstance(config_dir, str):
config_dir = (config_dir,)
for cdir in config_dir:
try:
self.read(cdir, **kwargs)
return cdir
except OSError:
pass
def write(self, config_dir=None, config_name=None, codec=None):
"""
writes config to config_dir using config_name
"""
# get name of config directory
if not config_dir:
config_dir = self._meta_config_dir
if not config_dir:
raise OSError("config_dir not set")
# get name of config file
if not config_name:
config_name = self._defaults.get("config_name", None)
if not config_name:
raise KeyError("config_name not set")
if codec:
codec = munge.get_codec(codec)()
else:
codec = munge.get_codec(self._defaults["codec"])()
config_dir = os.path.expanduser(config_dir)
if not os.path.exists(config_dir):
os.mkdir(config_dir)
codec.dumpu(self.data, os.path.join(config_dir, "config." + codec.extension))
|
class Config(MutableMapping):
'''
class for storing and manipulating data for config files
'''
def __init__(self, **kwargs):
'''
accepts kwargs to set defaults
data=dict to set initial data
read=dir to open a dir
try_read=dir to try to open a dir (and not throw if it doesn't read)
'''
pass
def __getitem__(self, key):
pass
def __setitem__(self, key, value):
pass
def __delitem__(self, key):
pass
def __iter__(self):
pass
def __len__(self):
pass
def copy(self):
pass
def get_nested(self, *args):
'''
get a nested value, returns None if path does not exist
'''
pass
def default(self):
pass
def clear(self):
pass
@property
def meta(self):
pass
def read(self, config_dir=None, config_name=None, clear=False):
'''
read config from config_dir
if config_dir is None, clear to default config
clear will clear to default before reading new file
'''
pass
def try_read(self, config_dir=None, **kwargs):
'''
try reading without throwing an error
config_dir may be a list of directories to try in order, if so it
will return after the first successful read
other args will be passed direction to read()
'''
pass
def write(self, config_dir=None, config_name=None, codec=None):
'''
writes config to config_dir using config_name
'''
pass
| 16 | 6 | 10 | 1 | 7 | 2 | 3 | 0.36 | 1 | 4 | 0 | 3 | 14 | 3 | 14 | 55 | 173 | 33 | 103 | 27 | 87 | 37 | 91 | 26 | 76 | 8 | 7 | 2 | 38 |
289 |
20c/munge
|
src/munge/config.py
|
munge.config.MungeConfig
|
class MungeConfig(Config):
defaults = {"config": {}, "config_dir": "~/.munge", "codec": "yaml"}
|
class MungeConfig(Config):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 55 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 8 | 0 | 0 |
290 |
20c/munge
|
src/munge/config.py
|
munge.config.MungeURL
|
class MungeURL(namedtuple("MungeURL", "cls url")):
pass
|
class MungeURL(namedtuple("MungeURL", "cls url")):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 1 | 0 | 0 |
291 |
20c/munge
|
tests/test_click.py
|
test_click.Config
|
class Config(munge.Config):
pass
|
class Config(munge.Config):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 55 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 8 | 0 | 0 |
292 |
20c/munge
|
tests/test_click.py
|
test_click.Context
|
class Context(munge.click.Context):
app_name = "munge_test"
config_class = munge.Config
|
class Context(munge.click.Context):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 3 | 0 | 3 | 3 | 2 | 0 | 3 | 3 | 2 | 0 | 1 | 0 | 0 |
293 |
20c/munge
|
src/munge/cli.py
|
munge.cli.Context
|
class Context(munge.click.Context):
app_name = "munge"
|
class Context(munge.click.Context):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 1 | 0 | 0 |
294 |
20c/munge
|
tests/test_codecs.py
|
test_codecs.Datadict0
|
class Datadict0:
name = "dict0"
filename = "data/" + name
expected = {"munge": {"str0": "str0", "list0": ["item0", "item1"], "int0": 42}}
|
class Datadict0:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 1 | 4 | 4 | 3 | 0 | 4 | 4 | 3 | 0 | 0 | 0 | 0 |
295 |
20c/munge
|
tests/test_codecs.py
|
test_codecs.Datalist0
|
class Datalist0:
name = "list0"
filename = "data/" + name
expected = [{"int0": 42, "str0": "str0"}, {"int0": 1337, "str0": "fish"}]
|
class Datalist0:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 1 | 4 | 4 | 3 | 0 | 4 | 4 | 3 | 0 | 0 | 0 | 0 |
296 |
20c/munge
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/20c_munge/src/munge/codec/toml_toml.py
|
munge.codec.toml_toml.Toml
|
class Toml(CodecBase):
supports_dict = True
extensions = ["toml"]
__kwargs = {}
def set_type(self, name, typ):
pass
def load(self, fobj, **kwargs):
return toml.load(fobj, **self.__kwargs)
def loads(self, input_string, **kwargs):
return toml.loads(input_string, **self.__kwargs)
def dump(self, data, fobj, **kwargs):
return toml.dump(data, fobj, **kwargs)
def dumps(self, data):
return toml.dumps(data)
|
class Toml(CodecBase):
def set_type(self, name, typ):
pass
def load(self, fobj, **kwargs):
pass
def loads(self, input_string, **kwargs):
pass
def dump(self, data, fobj, **kwargs):
pass
def dumps(self, data):
pass
| 6 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 5 | 0 | 5 | 26 | 19 | 5 | 14 | 9 | 8 | 0 | 14 | 9 | 8 | 1 | 4 | 0 | 5 |
297 |
20c/munge
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/20c_munge/src/munge/codec/toml_tomlkit.py
|
munge.codec.toml_tomlkit.TomlKit
|
class TomlKit(CodecBase):
supports_dict = True
supports_roundtrip = True
extensions = ["toml"]
__kwargs = {}
def set_type(self, name, typ):
pass
def load(self, fobj, **kwargs):
return self.loads(fobj.read(), **self.__kwargs)
def loads(self, input_string, **kwargs):
return tomlkit.loads(input_string, **self.__kwargs)
def dump(self, data, fobj, **kwargs):
return fobj.write(self.dumps(data, **kwargs))
def dumps(self, data, **kwargs):
return tomlkit.dumps(data)
|
class TomlKit(CodecBase):
def set_type(self, name, typ):
pass
def load(self, fobj, **kwargs):
pass
def loads(self, input_string, **kwargs):
pass
def dump(self, data, fobj, **kwargs):
pass
def dumps(self, data, **kwargs):
pass
| 6 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 5 | 0 | 5 | 26 | 20 | 5 | 15 | 10 | 9 | 0 | 15 | 10 | 9 | 1 | 4 | 0 | 5 |
298 |
20c/munge
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/20c_munge/src/munge/codec/yaml.py
|
munge.codec.yaml.Yaml
|
class Yaml(CodecBase):
supports_dict = True
supports_list = True
extensions = ["yaml", "yml"]
def set_type(self, name, typ):
pass
def load(self, *args, **kwargs):
return yaml.safe_load(*args, **kwargs)
def loads(self, *args, **kwargs):
return self.load(*args, **kwargs)
def dump(self, data, fobj):
return fobj.write(
yaml.safe_dump(data, default_flow_style=False, sort_keys=False)
)
def dumps(self, data):
return yaml.safe_dump(data, default_flow_style=False, sort_keys=False)
|
class Yaml(CodecBase):
def set_type(self, name, typ):
pass
def load(self, *args, **kwargs):
pass
def loads(self, *args, **kwargs):
pass
def dump(self, data, fobj):
pass
def dumps(self, data):
pass
| 6 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 5 | 0 | 5 | 26 | 21 | 5 | 16 | 9 | 10 | 0 | 14 | 9 | 8 | 1 | 4 | 0 | 5 |
299 |
20c/munge
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/20c_munge/tests/test_config.py
|
test_config.DefaultConfig.Defaults
|
class Defaults:
config = default_config
|
class Defaults:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 0 | 0 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.